File: maas-1.9.5+bzr4599.orig/.coveragerc

[run]
data_file = coverage.data

[report]
exclude_lines =
    # Have to re-enable the standard pragma
    pragma: no cover
    # Don't complain if tests don't hit defensive assertion code:
    raise NotImplementedError

File: maas-1.9.5+bzr4599.orig/.ctags

--python-kinds=-iv
--exclude=*.js
--extra=+f
--links=yes

File: maas-1.9.5+bzr4599.orig/.gitignore

*.egg
*.egg-info
*.log
/.db.lock
/.hypothesis
/.idea
/.idea/scopes
/.idea/workspace.xml
/.installed.cfg
/.noseids
/acceptance/*.build
/acceptance/*.changes
/acceptance/*.deb
/acceptance/build
/acceptance/source
/bin
/build
/coverage
/coverage.data
/coverage.xml
/db
/develop-eggs
/dist
/docs/_autosummary
/docs/_build
/docs/api.rst
/eggs
/etc/maas/clusterd.conf
/etc/maas/regiond.conf
/include
/lib
/local
/logs/*
/man/.doctrees
/media/demo/*
/media/development
/parts
/run/*
/run/etc/*
/run/etc/maas/*
/services/*/introspect
/services/*/supervise
/src/maasserver/static/js/enums.js
/TAGS
/tags
dropin.cache
/src/**/*.pyc
/etc/**/*.pyc

Directory: maas-1.9.5+bzr4599.orig/.idea/

Symlink: maas-1.9.5+bzr4599.orig/CHANGELOG -> docs/changelog.rst

File: maas-1.9.5+bzr4599.orig/HACKING.txt

.. -*- mode: rst -*-

************
Hacking MAAS
************


Coding style
============

MAAS follows the `Launchpad Python Style Guide`_, except where it gets
Launchpad specific, and where it talks about `method naming`_. MAAS instead
adopts `PEP-8`_ naming in all cases, so method names should usually use the
``lowercase_with_underscores`` form.

.. _Launchpad Python Style Guide: https://dev.launchpad.net/PythonStyleGuide
.. _method naming: https://dev.launchpad.net/PythonStyleGuide#Naming
.. _PEP-8: http://www.python.org/dev/peps/pep-0008/


Prerequisites
=============

You can grab MAAS's code manually from Launchpad, but Bazaar_ makes it easy
to fetch the latest version of the code. First of all, install Bazaar::

    $ sudo apt-get install bzr

.. _Bazaar: http://bazaar.canonical.com/

Then go into the directory where you want the code to reside and run::

    $ bzr branch lp:maas maas && cd maas

MAAS depends on Postgres 9.1, Apache 2, daemontools, pyinotify, and many
other packages. To install everything that's needed for running and
developing MAAS, run::

    $ make install-dependencies

Careful: this will ``apt-get install`` many packages on your system, via
``sudo``. It may prompt you for your password.

This will install ``bind9``. As a result you will have an extra daemon
running. If you are a developer and don't intend to run BIND locally, you
can disable the daemon by inserting ``exit 1`` at the top of
``/etc/default/bind9``. The package still needs to be installed for tests
though.

You may also need to install ``python-django-piston``, but installing it
seems to cause import errors for ``oauth`` when running the test suite.

All other development dependencies are pulled automatically from `PyPI`_
when ``buildout`` runs. (``buildout`` will be automatically configured to
create a cache, in order to improve build times. See
``utilities/configure-buildout``.)

.. _PyPI: http://pypi.python.org/
Optional
^^^^^^^^

The PyCharm_ IDE is a useful tool when developing MAAS. The MAAS team does
not endorse any particular IDE, but ``.idea`` `project files are included
with MAAS`_, so PyCharm_ is an easy choice.

.. _PyCharm: https://www.jetbrains.com/pycharm/
.. _project files are included with MAAS: https://intellij-support.jetbrains.com/entries/23393067-How-to-manage-projects-under-Version-Control-Systems


Running tests
=============

To run the whole suite::

    $ make test

To run tests at a lower level of granularity::

    $ ./bin/test.region src/maasserver/tests/test_api.py
    $ ./bin/test.region src/maasserver/tests/test_api.py:AnonymousEnlistmentAPITest

The test runner is `nose`_, so you can pass in options like
``--with-coverage`` and ``--nocapture`` (short option: ``-s``). The latter
is essential when using ``pdb`` so that stdout is not adulterated.

.. _nose: http://readthedocs.org/docs/nose/en/latest/

.. Note::

   When running ``make test`` through ssh from a machine with locales that
   are not set up on the machine that runs the tests, some tests will fail
   with a ``MismatchError`` and an "unsupported locale setting" message.
   Running ``locale-gen`` for the missing locales or changing your locales
   on your workstation to ones present on the server will solve the issue.

Running JavaScript tests
^^^^^^^^^^^^^^^^^^^^^^^^

The JavaScript tests are run using Selenium_. Firefox is the default
browser, but any browser supported by Selenium can be used to run the
tests. Note that you might need to download the appropriate driver and
make it available in the path. You can then choose which browsers to use
by setting the environment variable ``MAAS_TEST_BROWSERS`` to a
comma-separated list of the names of the browsers to use. For instance, to
run the tests with Firefox and Chrome::

    $ export MAAS_TEST_BROWSERS="Firefox, Chrome"

.. _Selenium: http://seleniumhq.org/


Development MAAS server setup
=============================

Access to the database is configured in ``src/maas/development.py``.

The ``Makefile`` or the test suite sets up a development database cluster
inside your branch. It lives in the ``db`` directory, which gets created
on demand. You'll want to shut it down before deleting a branch; see
below.

First, set up the project. This fetches all the required dependencies and
sets up some useful commands in ``bin/``::

    $ make

Create the database cluster and initialise the development database::

    $ make syncdb

Optionally, populate your database with the sample data::

    $ make sampledata

By default, the snippet ``maas_proxy`` includes a definition for an http
proxy running on port 8000 on the same host as the MAAS server. This means
you can *either* install ``squid-deb-proxy``::

    $ sudo apt-get install squid-deb-proxy

*or* you can edit ``contrib/snippets_v2/generic`` to remove the proxy
definition.

Set the iSCSI config to include the MAAS configs::

    $ sudo tee -a /etc/tgt/targets.conf < contrib/tgt.conf

The http_proxy variable is only needed if you're downloading through a
proxy; "sudo" wouldn't pass it on to the script without the assignment. Or
if you don't have it set but do want to download through a proxy, pass
your proxy's URL: "http_proxy=http://proxy.example.com/"

Run the development webserver and watch all the logs go by::

    $ make run

Point your browser to http://localhost:5240/MAAS/

If you've populated your instance with the sample data, you can login as a
simple user using the test account (username: 'test', password: 'test') or
the admin account (username: 'admin', password: 'test').

At this point you may also want to `download PXE boot resources`_.

.. _`download PXE boot resources`: `Downloading PXE boot resources`_
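Incidentally, if the web UI doesn't come up, a plain HTTP request against
the development webserver can help separate a server problem from a
browser problem. This is a generic check, nothing MAAS-specific::

    $ curl -I http://localhost:5240/MAAS/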
To shut down the database cluster and clean up all other generated files
in your branch::

    $ make distclean

Downloading PXE boot resources
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To use PXE booting, each cluster controller needs to download several
files relating to PXE booting. This process is automated, but it does not
start by default.

First create a superuser and start all MAAS services::

    $ bin/maas-region-admin createadmin
    $ make run

Substitute your own email. The command will prompt for a choice of
password.

Next, get the superuser's API key on the `account preferences`_ page in
the web UI, and use it to log into MAAS at the command-line::

    $ bin/maas login dev http://localhost:5240/MAAS/

.. _`account preferences`: http://localhost:5240/MAAS/account/prefs/

Start downloading PXE boot resources::

    $ bin/maas dev node-groups import-boot-images

This sends jobs to each cluster controller, asking each to download the
boot resources they require. This may download dozens or hundreds of
megabytes, so it may take a while. To save bandwidth, set an HTTP proxy
beforehand::

    $ bin/maas dev maas set-config name=http_proxy value=http://...

Running the built-in TFTP server
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You will need to run the built-in TFTP server on the real TFTP port (69)
if you want to boot some real hardware. By default, it's set to start up
on port 5244 for testing purposes. Make these changes:

* Use ``bin/maas-provision`` to change the tftp-port setting to 69.

* Install the ``authbind`` package::

      $ sudo apt-get install authbind

* Create a file ``/etc/authbind/byport/69`` that is *executable* by the
  user running MAAS::

      $ sudo touch /etc/authbind/byport/69
      $ sudo chmod a+x /etc/authbind/byport/69

Now when starting up the MAAS development webserver, "make run" and "make
start" will detect authbind's presence and use it automatically.

Running the BIND daemon for real
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There's a BIND daemon that is started up as part of the development
service, but it runs on port 5246 by default. If you want to make it run
as a real DNS server on the box, then edit ``services/dns/run`` and change
the port declaration there so it says::

    port=53

Then, as for TFTP above, create an authbind authorisation::

    $ sudo touch /etc/authbind/byport/53
    $ sudo chmod a+x /etc/authbind/byport/53

and run as normal.

Running the cluster worker
^^^^^^^^^^^^^^^^^^^^^^^^^^

The cluster also needs authbind, as it needs to bind a socket on UDP port
68 for DHCP probing::

    $ sudo touch /etc/authbind/byport/68
    $ sudo chmod a+x /etc/authbind/byport/68

If you omit this, nothing else will break, but you will get an error in
the cluster log because it can't bind to the port.

Configuring DHCP
^^^^^^^^^^^^^^^^

MAAS requires a properly configured DHCP server so it can boot machines
using PXE. MAAS can work with its own instance of the ISC DHCP server, if
you install the maas-dhcp package::

    $ sudo apt-get install maas-dhcp

If you choose to run your own ISC DHCP server, there is a bit more
configuration to do. First, run this tool to generate a configuration that
will work with MAAS::

    $ maas-provision generate-dhcp-config [options]

Run ``maas-provision generate-dhcp-config -h`` to see the options. You
will need to provide various IP details such as the range of IP addresses
to assign to clients.
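As a rough illustration only, an invocation might look like the sketch
below. The option names here are assumptions, not taken from the tool
itself, so treat ``-h`` as the authoritative list for your version::

    $ maas-provision generate-dhcp-config \
          --subnet 192.168.100.0 --subnet-mask 255.255.255.0 \
          --ip-range-low 192.168.100.50 --ip-range-high 192.168.100.200 \
          > dhcpd-maas.conf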
You can use the generated output to configure your system's ISC DHCP
server, by inserting the configuration in the ``/var/lib/maas/dhcpd.conf``
file. Also, edit ``/etc/default/isc-dhcp-server`` to set the
``INTERFACES`` variable to just the network interfaces that should be
serviced by this DHCP server.

Now restart dhcpd::

    $ sudo service isc-dhcp-server restart

None of this work is needed if you let MAAS run its own DHCP server by
installing ``maas-dhcp``.


Development services
====================

The development environment uses *daemontools* to manage the various
services that are required. These are all defined in subdirectories in
``services/``.

There are familiar service-like commands::

    $ make start
    $ make status
    $ make restart
    $ make stop

The latter is a dependency of ``distclean``, so just running ``make
distclean`` when you've finished with your branch is enough to stop
everything.

Individual services can be manipulated too::

    $ make services/clusterd/@start

The ``@`` pattern works for any of the services.

There's an additional special action, ``run``::

    $ make run

This starts all services up and tails their log files. When you're done,
kill ``tail`` (e.g. Ctrl-c), and all the services will be stopped.

However, when used with individual services::

    $ make services/regiond/@run

it does something even cooler. First it shuts down the service, then it
restarts it in the foreground so you can see the logs in the console. More
importantly, it allows you to use ``pdb``, for example.

A note of caution: some of the services have slightly different behaviour
when run in the foreground:

* regiond (the *webapp* service) will be run with its auto-reloading
  enabled.

There's a convenience target for hacking regiond that starts everything
up, but with regiond in the foreground::

    $ make run+regiond

Apparently Django needs a lot of debugging ;)

Introspecting regiond and clusterd
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

By default, the ``regiond``, ``regiond2``, and ``clusterd`` services (when
run from the tree) start an introspection service. You can connect to
these from the terminal to get a REPL-like environment *inside* the
running daemons.

There's a convenient script to help with this:

.. code-block:: console

    $ utilities/introspect --help
    usage: introspect [-h] service

    Connect to a regiond's or clusterd's introspection service.

    positional arguments:
      service     The name of a MAAS service to introspect. Choose from:
                  clusterd, regiond, regiond2

    optional arguments:
      -h, --help  show this help message and exit

.. code-block:: console

    $ utilities/introspect regiond
    .------------------------------------------------------
    |
    | Welcome to MAAS's Introspection Shell.
    |
    | This is the REGION.
    |
    | >>>
    |
    | ...

Bear in mind that commands are evaluated **in the reactor thread**. If you
execute a blocking call, Twisted's reactor will *freeze* until that call
returns. You won't even be able to interact via the introspection service
because that relies upon the reactor!


Adding new dependencies
=======================

Since MAAS is distributed mainly as an Ubuntu package, all runtime
dependencies should be packaged, and we should develop with the packaged
version if possible. All dependencies, from a package or not, need to be
added to ``setup.py`` and ``buildout.cfg``, and the version specified in
``versions.cfg`` (``allow-picked-versions`` is disabled, hence
``buildout`` must be given precise version information).
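As a concrete sketch of what that means in practice (the package name and
version below are purely illustrative, not a real MAAS dependency), a new
runtime dependency touches all three files::

    # setup.py: add the distribution name to install_requires
    "python-foo",

    # buildout.cfg: add it to the eggs of each part that imports it
    python-foo

    # versions.cfg: pin the exact version, since picked versions are off
    python-foo = 1.2.3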
If it is a development-only dependency (i.e. only needed for the test
suite, or for developers' convenience), simply running ``buildout`` like
this will make the necessary updates to ``versions.cfg``::

    $ ./bin/buildout -v buildout:allow-picked-versions=true


Adding new source files
=======================

When creating a new source file, a Python module or test for example,
always start with the appropriate template from the ``templates``
directory.


Database information
====================

MAAS uses South_ to manage changes to the database schema.

.. _South: http://south.aeracode.org

Be sure to have a look at `South's documentation`_ before you make any
change.

.. _South's documentation: http://south.aeracode.org/docs/

Changing the schema
^^^^^^^^^^^^^^^^^^^

Once you've made a model change (i.e. a change to a file in
``src/<application>/models/*.py``) you have to run South's
`schemamigration`_ command to create a migration file that will be stored
in ``src/<application>/migrations/``.

Note that if you want to add a new model class you'll need to import it in
``src/<application>/models/__init__.py``.

.. _schemamigration: http://south.aeracode.org/docs/commands.html#schemamigration

Once you've changed the code, ensure the database is running and contains
the starting schema::

    $ make services/database/@start
    $ make syncdb

then generate the migration script with::

    $ ./bin/maas-region-admin schemamigration maasserver --auto description_of_the_change

This will generate a migration module named
``src/maasserver/migrations/<number>_description_of_the_change.py``. Don't
forget to add that file to the project with::

    $ bzr add src/maasserver/migrations/<number>_description_of_the_change.py

To apply that migration, run::

    $ make syncdb

.. Note::

   In order to create or run a migration, you'll need to have the database
   server running. To do that, either run ``make start``, which will start
   all of the MAAS components, or ``make services/database/@start``, which
   will start only the database server.

Performing data migration
^^^^^^^^^^^^^^^^^^^^^^^^^

If you need to perform a data migration, the process is very much the
same: you will need to run South's `datamigration`_ command. For instance,
if you want to perform changes to the ``maasserver`` application, run::

    $ ./bin/maas-region-admin datamigration maasserver description_of_the_change

.. _datamigration: http://south.aeracode.org/docs/commands.html#datamigration

This will generate a migration module named
``src/maasserver/migrations/<number>_description_of_the_change.py``. You
will need to edit that file and fill the ``forwards`` and ``backwards``
methods where data should be actually migrated. Again, don't forget to add
that file to the project::

    $ bzr add src/maasserver/migrations/<number>_description_of_the_change.py

Once the methods have been written, apply that migration with::

    $ make syncdb

Examining the database manually
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you need to get an interactive ``psql`` prompt, you can use `dbshell`_::

    $ bin/maas-region-admin dbshell

.. _dbshell: https://docs.djangoproject.com/en/dev/ref/django-admin/#dbshell

If you need to do the same thing with a version of MAAS you have installed
from the package, you can use::

    $ sudo maas-region-admin dbshell --installed

You can use the ``\dt`` command to list the tables in the MAAS database.
You can also execute arbitrary SQL. For example::

    maasdb=# select system_id, hostname from maasserver_node;
                     system_id                 |      hostname
    -------------------------------------------+--------------------
     node-709703ec-c304-11e4-804c-00163e32e5b5 | gross-debt.local
     node-7069401a-c304-11e4-a64e-00163e32e5b5 | round-attack.local
    (2 rows)
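The same ``dbshell`` session can also show which of the migrations
described above have been applied. This assumes South's standard
bookkeeping table, ``south_migrationhistory``, which is where South
records applied migrations::

    maasdb=# select app_name, migration from south_migrationhistory
    maasdb-# order by applied desc limit 3;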
Documentation
=============

Use `reST`_ with the `convention for headings as used in the Python
documentation`_.

.. _reST: http://sphinx.pocoo.org/rest.html
.. _convention for headings as used in the Python documentation: http://sphinx.pocoo.org/rest.html#sections

Updating copyright notices
^^^^^^^^^^^^^^^^^^^^^^^^^^

Use the `Bazaar Copyright Updater`_::

    bzr branch lp:bzr-update-copyright ~/.bazaar/plugins/update_copyright
    make copyright

Then commit any changes.

.. _Bazaar Copyright Updater: https://launchpad.net/bzr-update-copyright


File: maas-1.9.5+bzr4599.orig/INSTALL.txt

.. -*- mode: rst -*-

Installing MAAS
===============

There are two main ways to install MAAS:

* :ref:`From a package repository. <pkg-install>`
* :ref:`As a fresh install from Ubuntu Server install media. <disc-install>`

MAAS Packages and Repositories
------------------------------

MAAS Packages
^^^^^^^^^^^^^

Installing MAAS from packages is straightforward. There are actually
several packages that go into making up a working MAAS install, but for
convenience, many of these have been gathered into a virtual package
called 'maas' which will install the necessary components for a 'seed
cloud', that is a single server that will directly control a group of
nodes. The main packages are:

* ``maas`` - seed cloud setup, which includes both the region controller
  and the cluster controller below.
* ``maas-region-controller`` - includes the web UI, API and database.
* ``maas-cluster-controller`` - controls a group ("cluster") of nodes
  including DHCP management.
* ``maas-dhcp``/``maas-dns`` - required when managing dhcp/dns.
* ``maas-proxy`` - required to provide a MAAS proxy.

If you need to separate these services or want to deploy an additional
cluster controller, you should install the corresponding packages
individually (see :ref:`the description of a typical setup ` for more
background on how a typical hardware setup might be arranged).

There are two suggested additional packages, 'maas-dhcp' and 'maas-dns'.
These set up MAAS-controlled DHCP and DNS services which greatly simplify
deployment if you are running a typical setup where the MAAS controller
can run the network (Note: These **must** be installed if you later set
the options in the web interface to have MAAS manage DHCP/DNS). If you
need to integrate your MAAS setup under an existing DHCP setup, see
:ref:`manual-dhcp`

MAAS Package Repositories
^^^^^^^^^^^^^^^^^^^^^^^^^

While MAAS is available in the Ubuntu Archives for each release of Ubuntu,
the version might not be the latest. However, if you would like to install
a newer version of MAAS (the latest stable release), this is available in
the following PPA:

* `ppa:maas/stable`_

.. Note::

   The MAAS team also releases the latest development release of MAAS. The
   development release is available in `ppa:maas/next`_. However, this is
   meant to be used for testing and at your own risk.

Adding the MAAS package repository is simple. At the command line, type::

    $ sudo add-apt-repository ppa:maas/stable

You will be asked to confirm whether you would like to add this repository
and its key. Upon confirmation, type the following at the command line::

    $ sudo apt-get update
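To confirm which version apt will now install, you can inspect the
candidate version. This is a generic apt check, not a MAAS-specific tool::

    $ apt-cache policy maas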
.. _ppa:maas/stable: https://launchpad.net/~maas/+archive/ubuntu/stable
.. _ppa:maas/next: https://launchpad.net/~maas/+archive/ubuntu/next

.. _pkg-install:

Installing MAAS from the command line
-------------------------------------

Installing a Single Node MAAS
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

At the command line, type::

    $ sudo apt-get install maas

This will install both the MAAS Region Controller and the MAAS Cluster
Controller, and will select sane defaults for the communication between
the Cluster Controller and the Region Controller. After installation, you
can access the Web Interface. Then, there are just a few more setup steps:
:ref:`post_install`

Reconfiguring a MAAS Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You will see a list of packages and a confirmation message to proceed. The
exact list will obviously depend on what you already have installed on
your server, but expect to add about 200MB of files.

The configuration for the MAAS controller will automatically run and pop
up this config screen:

.. image:: media/install_cluster-config.*

Here you will need to enter the hostname for where the region controller
can be contacted. In many scenarios, you may be running the region
controller (i.e. the web and API interface) from a different network
address, for example where a server has several network interfaces.

Adding Cluster Controllers
^^^^^^^^^^^^^^^^^^^^^^^^^^

If you would like to add additional MAAS Cluster Controllers to your MAAS
setup, you can do so by installing the Cluster Controller on a different
Ubuntu machine. At the command line, type::

    $ sudo apt-get install maas-cluster-controller

Once installed, you will need to input the shared secret, in order to
allow the Cluster Controller to authenticate with the Region Controller.
You can obtain the shared secret from the Region Controller. The shared
secret can be copied from the Region Controller in::

    /var/lib/maas/secret

Once you have copied the secret, you can install it in the Cluster
Controller. At the command line, type::

    $ sudo maas-provision install-shared-secret

Once installed, you will need to reconfigure the Cluster Controller to
correctly point to the API hostname or IP address of the Region Controller
that's on the same network as the Cluster Controller::

    $ sudo dpkg-reconfigure maas-cluster-controller

The configuration for the MAAS Cluster Controller will automatically run
and pop up this config screen:

.. image:: media/install_cluster-config.*

Once entered, the MAAS Cluster Controller configuration will be applied.

.. _disc-install:

Installing MAAS from Ubuntu Server boot media
---------------------------------------------

If you are installing MAAS as part of a fresh install, it is easiest to
choose the "Multiple Server install with MAAS" option from the installer
and have pretty much everything set up for you. Boot from the Ubuntu
Server media and you will be greeted with the usual language selection
screen:

.. image:: media/install_01.*

On the next screen, you will see there is an entry in the menu called
"Multiple server install with MAAS". Use the cursor keys to select this
and then press Enter.

.. image:: media/install_02.*

The installer then runs through the usual language and keyboard options.
Make your selections using Tab/Cursor keys/Enter to proceed through the
install. The installer will then load various drivers, which may take a
moment or two.

.. image:: media/install_03.*

The next screen asks for the hostname for this server. Choose something
appropriate for your network.

.. image:: media/install_04.*
Finally we get to the MAAS part! Here there are just two options. We want
to "Create a new MAAS on this server" so go ahead and choose that one.

.. image:: media/install_05.*

The install now continues as usual. Next you will be prompted to enter a
username. This will be the admin user for the actual server that MAAS will
be running on (not the same as the MAAS admin user!)

.. image:: media/install_06.*

As usual you will have the chance to encrypt your home directory. Continue
to make selections based on whatever settings suit your usage.

.. image:: media/install_07.*

After making selections and partitioning storage, the system software will
start to be installed. This part should only take a few minutes.

.. image:: media/install_09.*

Various packages will now be configured, including the package manager and
update manager. It is important to set these up appropriately so you will
receive timely updates of the MAAS server software, as well as other
essential services that may run on this server.

.. image:: media/install_10.*

The configuration for MAAS will ask you to configure the host address of
the server. This should be the IP address you will use to connect to the
server (you may have additional interfaces, e.g. to run node subnets).

.. image:: media/install_cluster-config.*

The next screen will confirm the web address that will be used for the web
interface.

.. image:: media/install_controller-config.*

After configuring any other packages, the installer will finally come to
an end. At this point you should eject the boot media.

.. image:: media/install_14.*

After restarting, you should be able to log in to the new server with the
information you supplied during the install. The MAAS software will run
automatically.

.. image:: media/install_15.*

**NOTE:** The maas-dhcp and maas-dns packages should be installed by
default, but on older releases of MAAS they won't be. If you want to have
MAAS run DHCP and DNS services, you should install these packages. Check
whether they are installed with::

    $ dpkg -l maas-dhcp maas-dns

If they are missing, then::

    $ sudo apt-get install maas-dhcp maas-dns

And then proceed to the post-install setup below.

.. _post_install:

Post-Install tasks
==================

Your MAAS is now installed, but there are a few more things to be done. If
you now use a web browser to connect to the region controller, you should
see that MAAS is running, but there will also be some errors on the
screen:

.. image:: media/install_web-init.*

The on-screen messages will tell you that there are no boot images
present, and that you can't login because there is no admin user.

Create a superuser account
--------------------------

Once MAAS is installed, you'll need to create an administrator account::

    $ sudo maas-region-admin createadmin --username=root --email=MYEMAIL@EXAMPLE.COM

Substitute your own email address for MYEMAIL@EXAMPLE.COM. You may also
use a different username for your administrator account, but "root" is a
common convention and easy to remember. The command will prompt for a
password to assign to the new user.

You can run this command again for any further administrator accounts you
may wish to create, but you need at least one.

Log in on the server
--------------------

Looking at the region controller's main web page again, you should now see
a login screen. Log in using the user name and password which you have
just created.

.. image:: media/install-login.*
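The same credentials also work from the command line, if you prefer it.
The profile name 'admin' below is arbitrary, and the client will prompt
for the API key shown on your account preferences page::

    $ maas login admin http://localhost:5240/MAAS/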
Import the boot images
----------------------

Since version 1.7, MAAS stores the boot images in the region controller's
database, from where the cluster controllers will synchronise with the
region and pull images to the cluster's local disk. This process is
automatic and MAAS will check for and download new Ubuntu images every
hour.

However, on a new installation you'll need to start the import process
manually once you have set up your MAAS region controller. There are two
ways to start the import: through the web user interface, or through the
remote API.

To do it in the web user interface, go to the Images tab, check the boxes
to say which images you want to import, and click the "Import images"
button at the bottom of the Ubuntu section.

.. image:: media/import-images.*

A message will appear to let you know that the import has started, and
after a while, the warnings about the lack of boot images will disappear.
It may take a long time, depending on the speed of your Internet
connection, for the import process to complete, as the images are several
hundred megabytes. The import process will only download images that have
changed since the last import. You can check the progress of the import by
hovering over the spinner next to each image.

The other way to start the import is through the
:ref:`region-controller API `, which you can invoke most conveniently
through the :ref:`command-line interface `.

To do this, connect to the MAAS API using the "maas" command-line client.
See :ref:`Logging in ` for how to get set up with this tool. Then, run the
command::

    $ maas my-maas-session boot-resources import

(Substitute a different profile name for 'my-maas-session' if you have
named yours something else.)

This will initiate the download, just as if you had clicked "Import
images" in the web user interface.

By default, the import is configured to download the most recent LTS
release only for the amd64 architecture. Although this should suit most
needs, you can change the selections on the Images tab, or over the API.
Read :doc:`customise boot sources ` to see examples on how to do that.

Speeding up repeated image imports by using a local mirror
----------------------------------------------------------

See :doc:`sstreams-mirror` for information on how to set up a mirror and
configure MAAS to use it.

Configure DHCP
--------------

If you want MAAS to control DHCP, you can either:

#. Follow the instructions at :doc:`cluster-configuration` to use the web
   UI to set up your cluster controller.

#. Use the command line interface `maas` by first :ref:`logging in to the
   API ` and then :ref:`following this procedure `.

If you are manually configuring a DHCP server, you should take a look at
:ref:`manual-dhcp`

Configure switches on the network
---------------------------------

Some switches use Spanning-Tree Protocol (STP) to negotiate a loop-free
path through a root bridge. While scanning, it can make each port wait up
to 50 seconds before data is allowed to be sent on the port. This delay in
turn can cause problems with some applications/protocols such as PXE, DHCP
and DNS, of which MAAS makes extensive use.

To alleviate this problem, you should enable `Portfast`_ for Cisco
switches or its equivalent on other vendor equipment, which enables the
ports to come up almost immediately.

.. _Portfast: https://www.symantec.com/business/support/index?page=content&id=HOWTO6019

Traffic between the region controller and cluster controllers
--------------------------------------------------------------

* Each cluster controller must be able to:

  * Initiate TCP connections (for HTTP) to each region controller on port
    80 or port 5240, the choice of which depends on the setting of the
    MAAS URL.

  * Initiate TCP connections (for RPC) to each region controller between
    port 5250 and 5259 inclusive. This permits up to 10 ``maas-regiond``
    processes on each region controller host. At present this is not
    configurable.
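As a quick cross-check of those requirements, an equivalent firewall
allowance on a region controller host might look like this. iptables is
used purely as an illustration; adapt it to whatever firewall you run, and
to your choice of port 80 versus 5240::

    $ sudo iptables -A INPUT -p tcp --dport 5240 -j ACCEPT
    $ sudo iptables -A INPUT -p tcp --dport 5250:5259 -j ACCEPT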
Once everything is set up and running, you are ready to :doc:`start
enlisting nodes `


File: maas-1.9.5+bzr4599.orig/LICENSE

MAAS is Copyright 2012-2015 Canonical Ltd.

Canonical Ltd ("Canonical") distributes the MAAS source code under the GNU
Affero General Public License, version 3 ("AGPLv3"). The full text of this
licence is given below.

Third-party copyright in this distribution is noted where applicable.

All rights not expressly granted are reserved.

=========================================================================

GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007 (http://www.gnu.org/licenses/agpl.html)

Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to
copy and distribute verbatim copies of this license document, but changing
it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed to
take away your freedom to share and change the works. By contrast, our
General Public Licenses are intended to guarantee your freedom to share
and change all versions of a program--to make sure it remains free
software for all its users.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the
freedom to distribute copies of free software (and charge for them if you
wish), that you receive source code or can get it if you want it, that you
can change the software or use pieces of it in new free programs, and that
you know you can do these things.

Developers that use our General Public Licenses protect your rights with
two steps: (1) assert copyright on the software, and (2) offer you this
License which gives you legal permission to copy, distribute and/or modify
the software.

A secondary benefit of defending all users' freedom is that improvements
made in alternate versions of the program, if they receive widespread use,
become available for other developers to incorporate. Many developers of
free software are heartened and encouraged by the resulting cooperation.
However, in the case of software used on network servers, this result may
fail to come about. The GNU General Public License permits making a
modified version and letting the public access it on a server without ever
releasing its source code to the public.

The GNU Affero General Public License is designed specifically to ensure
that, in such cases, the modified source code becomes available to the
community. It requires the operator of a network server to provide the
source code of the modified version running there to the users of that
server.
Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source.
For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <http://www.gnu.org/licenses/>.

=========================================================================
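In practice, the MAAS sources use a condensed two-line form of this notice rather than the full template above; the header that appears at the top of files in this tree (setup.py and the acceptance scripts, for example) reads::

    # Copyright 2012-2015 Canonical Ltd.  This software is licensed under the
    # GNU Affero General Public License version 3 (see the file LICENSE).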
maas-1.9.5+bzr4599.orig/MANIFEST.in0000644000000000000000000000036713056115004014417 0ustar 00000000000000
graft src/*/static
graft src/*/templates
graft src/*/fixtures
graft src/*/specs
graft src/provisioningserver/*
graft src/metadataserver/commissioning
prune src/*/testing
prune src/*/tests
prune src/maastesting
prune src/provisioningserver/*/tests
maas-1.9.5+bzr4599.orig/Makefile0000644000000000000000000004455013056115004014323 0ustar 00000000000000
python := python2.7

# pkg_resources makes some incredible noise about version numbers. They
# are not indications of bugs in MAAS so we silence them everywhere.
export PYTHONWARNINGS = \
  ignore:You have iterated over the result:RuntimeWarning:pkg_resources:

# Network activity can be suppressed by setting offline=true (or any
# non-empty string) at the command-line.
ifeq ($(offline),)
buildout := bin/buildout
virtualenv := virtualenv
else
buildout := bin/buildout buildout:offline=true
virtualenv := virtualenv --never-download
endif

# If offline has been selected, attempt to further block HTTP/HTTPS
# activity by setting bogus proxies in the environment.
ifneq ($(offline),)
export http_proxy := broken
export https_proxy := broken
endif

# Python enum modules.
py_enums := $(wildcard src/*/enum.py)

# JavaScript enum module (not modules).
js_enums := src/maasserver/static/js/enums.js

# MAAS SASS stylesheets. The first input file (maas-styles.scss) imports
# the others, so is treated specially in the target definitions.
scss_inputs := \
    src/maasserver/static/scss/maas-styles.scss \
    $(wildcard src/maasserver/static/scss/*/*.scss) \
    $(wildcard src/maasserver/static/scss/*/*/*.scss)
scss_output := src/maasserver/static/css/maas-styles.css

# Prefix commands with this when they need access to the database.
# Remember to add a dependency on bin/database from the targets in
# which those commands appear.
dbrun := bin/database --preserve run --

# For things that care, postgresfixture for example, we always want to
# use the "maas" databases.
export PGDATABASE := maas

# For anything we start, we want to hint as to its root directory.
export MAAS_ROOT := $(CURDIR)/run
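# Usage sketch (not in the original Makefile): an offline build is
# requested from the command line, e.g.
#
#   make build offline=true
#
# which passes buildout:offline=true to buildout and points http_proxy
# and https_proxy at the bogus host "broken" so that any stray network
# access fails fast.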
build: \
    bin/buildout bin/database bin/maas bin/maas-probe-dhcp \
    bin/maas-provision bin/maas-region-admin bin/twistd.cluster \
    bin/twistd.region bin/test.cli bin/test.cluster bin/test.config \
    bin/test.e2e bin/test.js bin/test.region bin/test.testing \
    bin/py bin/ipy $(js_enums)

all: build doc

release_codename = $(shell lsb_release -c -s)

# Install all packages required for MAAS development & operation on
# the system. This may prompt for a password.
install-dependencies:
	sudo DEBIAN_FRONTEND=noninteractive apt-get -y \
	    --no-install-recommends install $(shell sort -u \
	        $(addprefix required-packages/,base build dev doc $(release_codename)))
	sudo DEBIAN_FRONTEND=noninteractive apt-get -y \
	    purge $(shell sort -u required-packages/forbidden)

.gitignore: .bzrignore
	sed 's:^[.]/:/:' $^ > $@
	echo '/src/**/*.pyc' >> $@
	echo '/etc/**/*.pyc' >> $@

bin/python:
	$(virtualenv) --python=$(python) --system-site-packages $(CURDIR)

configure-buildout:
	utilities/configure-buildout

bin/buildout: bin/python bootstrap/zc.buildout-1.5.2.tar.gz
	@utilities/configure-buildout --quiet
	bin/python -m pip --quiet install --ignore-installed \
	    --no-dependencies bootstrap/zc.buildout-1.5.2.tar.gz
	$(RM) README.txt  # zc.buildout installs an annoying README.txt.
	@touch --no-create $@  # Ensure it's newer than its dependencies.

bin/database: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install database
	@touch --no-create $@

bin/maas-region-admin bin/twistd.region: \
    bin/buildout buildout.cfg versions.cfg setup.py $(js_enums)
	$(buildout) install region
	@touch --no-create $@

bin/test.region: \
    bin/buildout buildout.cfg versions.cfg setup.py $(js_enums)
	$(buildout) install region-test
	@touch --no-create $@

bin/maas: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install cli
	@touch --no-create $@

bin/test.cli: bin/buildout buildout.cfg versions.cfg setup.py bin/maas
	$(buildout) install cli-test
	@touch --no-create $@

bin/test.js: bin/karma bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install js-test
	@touch --no-create $@

bin/test.e2e: bin/protractor bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install e2e-test
	@touch --no-create $@

bin/test.testing: \
    bin/buildout bin/sass buildout.cfg versions.cfg setup.py
	$(buildout) install testing-test
	@touch --no-create $@

bin/maas-probe-dhcp bin/maas-provision bin/twistd.cluster: \
    bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install cluster
	@touch --no-create $@

bin/test.cluster: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install cluster-test
	@touch --no-create $@

bin/test.config: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install config-test
	@touch --no-create $@

bin/flake8: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install flake8
	@touch --no-create $@

bin/rst-lint: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install rst-lint
	@touch --no-create $@

bin/sphinx bin/sphinx-build: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install sphinx
	@touch --no-create $@

bin/py bin/ipy: bin/buildout buildout.cfg versions.cfg setup.py
	$(buildout) install repl
	@touch --no-create bin/py bin/ipy

define karma-deps
  karma@0.12.32
  karma-chrome-launcher@0.1.12
  karma-firefox-launcher@0.1.6
  karma-jasmine@0.3.5
  karma-opera-launcher@0.1.0
  karma-phantomjs-launcher@0.1.4
  karma-failed-reporter@0.0.3
endef

bin/karma: deps = $(strip $(karma-deps))
bin/karma: prefix = include/nodejs
bin/karma:
	@mkdir -p $(@D) $(prefix)
	npm install --cache-min 600 --prefix $(prefix) $(deps)
	@ln -srf $(prefix)/node_modules/karma/bin/karma $@

bin/protractor: prefix = include/nodejs
bin/protractor:
	@mkdir -p $(@D) $(prefix)
	npm install --cache-min 600 --prefix $(prefix) protractor@2.0.0
	@ln -srf $(prefix)/node_modules/protractor/bin/protractor $@

bin/sass: prefix = include/nodejs
bin/sass:
	@mkdir -p $(@D) $(prefix)
	npm install --cache-min 600 --prefix $(prefix) node-sass@3.1.0
	@ln -srf $(prefix)/node_modules/node-sass/bin/node-sass $@

test: test-scripts-all = $(wildcard bin/test.*)
# Don't run bin/test.e2e for now; it breaks.
test: test-scripts = $(filter-out bin/test.e2e,$(test-scripts-all))
test: build
	@$(RM) coverage.data
	@echo $(test-scripts) | xargs --verbose -n1 env

test+coverage: export NOSE_WITH_COVERAGE = 1
test+coverage: test

coverage-report: coverage/index.html
	sensible-browser $< > /dev/null 2>&1 &

coverage.xml: coverage.data
	python-coverage xml --include 'src/*' -o $@

coverage/index.html: coverage.data
	@$(RM) -r $(@D)
	python-coverage html --include 'src/*' -d $(@D)

coverage.data:
	@$(error Use `$(MAKE) test` to generate coverage data, or invoke a \
	    test script using the `--with-coverage` flag)

lint: lint-py lint-js lint-doc lint-rst

pocketlint = $(call available,pocketlint,python-pocket-lint)

# XXX jtv 2014-02-25: Clean up this lint, then make it part of "make lint".
lint-css: sources = src/maasserver/static/css
lint-css:
	@find $(sources) -type f \
	    -print0 | xargs -r0 $(pocketlint) --max-length=120

# Python lint checks are time-intensive, so we run them in parallel. It may
# make matters worse if the files need to be read from disk, though, so
# this may need more tuning.
# The -n50 -P4 setting roughly doubled speed on a high-end system with SSD and
# all the files in cache.
lint-py: sources = $(wildcard *.py contrib/*.py) src templates twisted utilities etc
lint-py: bin/flake8
	@find $(sources) -name '*.py' ! -path '*/migrations/*' -print0 \
	    | xargs -r0 -n50 -P4 bin/flake8 --ignore=E123,E402,E731 \
	        --config=/dev/null
	@utilities/check-maaslog-exception

lint-doc:
	@utilities/doc-lint

# rst-lint 0.11.1 shouldn't be used on our documentation because it
# doesn't understand Sphinx's extensions, and doesn't grok linking
# between documents, hence complaints about broken links. However,
# Sphinx itself warns about lint when building the docs.
lint-rst: sources = README HACKING.txt schema/README.rst
lint-rst: bin/rst-lint
	@find $(sources) -type f \
	    -printf 'Linting %p...\n' \
	    -exec bin/rst-lint --encoding=utf8 {} \;

# JavaScript lint is checked in parallel for speed. The -n20 -P4 setting
# worked well on a multicore SSD machine with the files cached, roughly
# doubling the speed, but it may need tuning for slower systems or cold caches.
lint-js: sources = src/maasserver/static/js
lint-js:
	@find $(sources) -type f ! -path '*/angular/3rdparty/*' \
	    '(' -name '*.html' -o -name '*.js' ')' -print0 \
	    | xargs -r0 -n20 -P4 $(pocketlint)

# Apply automated formatting to all Python files.
format: sources = $(wildcard *.py contrib/*.py) src templates twisted utilities etc
format:
	@find $(sources) -name '*.py' -print0 | xargs -r0 utilities/format-imports
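# Typical coverage flow (a usage sketch; not in the original Makefile):
#
#   make test+coverage     # runs the test scripts with NOSE_WITH_COVERAGE=1,
#                          # accumulating results in coverage.data
#   make coverage-report   # renders coverage/index.html and opens a browser
#
# Alternatively, a single bin/test.* script can be invoked by hand with
# the --with-coverage flag, as the coverage.data error message above
# suggests.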
# Update copyright dates from version history. Try to avoid updating
# 3rd-party code by checking for "Canonical" or "MAAS" on the same line
# as the copyright header.
copyright:
	@bzr ls --versioned --recursive --kind=file --null | \
	    xargs -r0 egrep -iI 'copyright.*(canonical|maas)' -lZ | \
	    xargs -r0 bzr update-copyright --quiet --force-range

check: clean test

docs/api.rst: bin/maas-region-admin src/maasserver/api/doc_handler.py syncdb
	bin/maas-region-admin generate_api_doc > $@

sampledata: bin/maas-region-admin bin/database syncdb
	$(dbrun) bin/maas-region-admin loaddata src/maasserver/fixtures/dev_fixture.yaml

doc: bin/sphinx docs/api.rst
	bin/sphinx

docs/_build/html/index.html: doc

doc-browse: docs/_build/html/index.html
	sensible-browser $< > /dev/null 2>&1 &

doc-with-versions: bin/sphinx docs/api.rst
	$(MAKE) -C docs/_build SPHINXOPTS="-A add_version_switcher=true" html

man: $(patsubst docs/man/%.rst,man/%,$(wildcard docs/man/*.rst))

man/%: docs/man/%.rst | bin/sphinx-build
	bin/sphinx-build -b man docs man $^

enums: $(js_enums)

$(js_enums): bin/py src/maasserver/utils/jsenums.py $(py_enums)
	bin/py -m maasserver.utils.jsenums $(py_enums) > $@

styles: bin/sass clean-styles $(scss_output)

$(scss_output): $(scss_inputs)
	bin/sass --include-path=src/maasserver/static/scss --output-style compressed $< -o $(dir $@)

clean-styles:
	$(RM) $(scss_output)

clean: stop clean-run
	$(MAKE) -C acceptance $@
	find . -type f -name '*.py[co]' -print0 | xargs -r0 $(RM)
	find . -type f -name '*~' -print0 | xargs -r0 $(RM)
	find . -type f -name dropin.cache -print0 | xargs -r0 $(RM)
	$(RM) -r media/demo/* media/development
	$(RM) $(js_enums)
	$(RM) *.log
	$(RM) docs/api.rst
	$(RM) -r docs/_autosummary docs/_build
	$(RM) -r man/.doctrees
	$(RM) coverage.data coverage.xml
	$(RM) -r coverage
	$(RM) -r .hypothesis
	$(RM) -r bin include lib local
	$(RM) -r eggs develop-eggs
	$(RM) -r build dist logs/* parts
	$(RM) tags TAGS .installed.cfg
	$(RM) -r *.egg *.egg-info src/*.egg-info
	$(RM) -r services/*/supervise

# Be selective about what to remove from run and run-e2e.
define clean-run-template
  find $(1) -depth ! -type d \
      ! -path $(1)/etc/maas/templates \
      ! -path $(1)/etc/maas/drivers.yaml \
      -print0 | xargs -r0 $(RM)
  find $(1) -depth -type d \
      -print0 | xargs -r0 rmdir --ignore-fail-on-non-empty
endef

clean-run:
	$(call clean-run-template,run)
	$(call clean-run-template,run-e2e)

clean+db: clean
	$(RM) -r db
	$(RM) .db.lock

distclean: clean
	$(warning 'distclean' is deprecated; use 'clean')

harness: bin/maas-region-admin bin/database
	$(dbrun) bin/maas-region-admin shell --settings=maas.demo

dbharness: bin/database
	bin/database --preserve shell

syncdb: bin/maas-region-admin bin/database
	$(dbrun) bin/maas-region-admin syncdb --noinput
	$(dbrun) bin/maas-region-admin migrate maasserver --noinput
	$(dbrun) bin/maas-region-admin migrate metadataserver --noinput

# (Re)write the baseline schema.
schema/baseline.sql: bin/database
	$(dbrun) pg_dump -h $(CURDIR)/db -d maas --no-owner --no-privileges -f $@

# Synchronise the database, and update the baseline schema.
baseline-schema: syncdb schema/baseline.sql

define phony_targets
  build check clean clean+db clean-run clean-styles configure-buildout
  copyright coverage-report dbharness distclean doc doc-browse enums format
  harness install-dependencies lint lint-css lint-doc lint-js lint-py
  lint-rst man sampledata styles syncdb test test+coverage
endef

#
# Development services.
#

service_names_region := database dns regiond regiond2 reloader
service_names_cluster := clusterd reloader
service_names_all := $(service_names_region) $(service_names_cluster)
# The following template is intended to be used with `call`, and it
# accepts a single argument: a target name. The target name must
# correspond to a service action (see "Pseudo-magic targets" below).
# A region- and cluster-specific variant of the target will be
# created, in addition to the target itself. These can be used to
# apply the service action to the region services, the cluster
# services, or all services, at the same time.
define service_template
$(1)-region: $(patsubst %,services/%/@$(1),$(service_names_region))
$(1)-cluster: $(patsubst %,services/%/@$(1),$(service_names_cluster))
$(1): $(1)-region $(1)-cluster
phony_services_targets += $(1)-region $(1)-cluster $(1)
endef

# Expand out aggregate service targets using `service_template`.
$(eval $(call service_template,pause))
$(eval $(call service_template,restart))
$(eval $(call service_template,start))
$(eval $(call service_template,status))
$(eval $(call service_template,stop))
$(eval $(call service_template,supervise))
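# For illustration (an expansion sketch; not in the original Makefile),
# $(eval $(call service_template,start)) defines approximately:
#
#   start-region: services/database/@start services/dns/@start \
#       services/regiond/@start services/regiond2/@start \
#       services/reloader/@start
#   start-cluster: services/clusterd/@start services/reloader/@start
#   start: start-region start-cluster
#
# and records all three target names in phony_services_targets.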
# The `run` targets do not fit into the mould of the others.
run-region:
	@services/run $(service_names_region)
run-cluster:
	@services/run $(service_names_cluster)
run:
	@services/run $(service_names_all)

phony_services_targets += run-region run-cluster run

# This one's for the rapper, yo. Don't run the load-balancing regiond2.
run+regiond:
	@services/run $(filter-out regiond2,$(service_names_region)) +regiond

phony_services_targets += run+regiond

# Convenient variables and functions for service control.
setlock = $(call available,setlock,daemontools)
supervise = $(call available,supervise,daemontools)
svc = $(call available,svc,daemontools)
svok = $(call available,svok,daemontools)
svstat = $(call available,svstat,daemontools)

service_lock = $(setlock) -n /run/lock/maas.dev.$(firstword $(1))

# Pseudo-magic targets for controlling individual services.

services/%/@run: services/%/@stop services/%/@deps
	@$(call service_lock, $*) services/$*/run

services/%/@start: services/%/@supervise
	@$(svc) -u $(@D)

services/%/@pause: services/%/@supervise
	@$(svc) -d $(@D)

services/%/@status:
	@$(svstat) $(@D)

services/%/@restart: services/%/@supervise
	@$(svc) -du $(@D)

services/%/@stop:
	@if $(svok) $(@D); then $(svc) -dx $(@D); fi
	@while $(svok) $(@D); do sleep 0.1; done

services/%/@supervise: services/%/@deps
	@mkdir -p logs/$*
	@touch $(@D)/down
	@if ! $(svok) $(@D); then \
	    logdir=$(CURDIR)/logs/$* \
	        $(call service_lock, $*) $(supervise) $(@D) & fi
	@while ! $(svok) $(@D); do sleep 0.1; done

# Dependencies for individual services.
services/dns/@deps: bin/py
services/database/@deps: bin/database
services/clusterd/@deps: bin/twistd.cluster
services/reloader/@deps:
services/regiond/@deps: bin/maas-region-admin
services/regiond2/@deps: bin/maas-region-admin

#
# Package building
#

# This ought to be as simple as using bzr builddeb --export-upstream but it
# has a bug and always considers apt-source tarballs before the specified
# branch. Instead, export to a local tarball which is always found. Make sure
# the packages listed in `required-packages/build` are installed before using
# this.

# Old names.
PACKAGING := $(abspath ../packaging-1.9)
PACKAGING_BRANCH := lp:~maas-maintainers/maas/packaging-1.9

packaging-tree = $(PACKAGING)
packaging-branch = $(PACKAGING_BRANCH)

packaging-build-area := $(abspath ../build-area)
packaging-version = $(shell \
    dpkg-parsechangelog -l$(packaging-tree)/debian/changelog \
        | sed -rne 's,^Version: ([^-]+).*,\1,p')

$(packaging-build-area):
	mkdir -p $(packaging-build-area)

-packaging-fetch:
	bzr branch $(packaging-branch) $(packaging-tree)

-packaging-pull:
	bzr pull -d $(packaging-tree)

-packaging-refresh: -packaging-$(shell \
    test -d $(packaging-tree) && echo "pull" || echo "fetch")

-packaging-export-orig: $(packaging-build-area)
	bzr export $(packaging-export-extra) --root=maas-$(packaging-version).orig \
	    $(packaging-build-area)/maas_$(packaging-version).orig.tar.gz

# To build binary packages from uncommitted changes:
#   make package-export-extra=--uncommitted package
package: -packaging-refresh -packaging-export-orig
	bzr bd --merge $(packaging-tree) --result-dir=$(packaging-build-area) -- -uc -us
	@echo Binary packages built, see $(packaging-build-area).

# ... or use the `package-dev` target.
package-dev: packaging-export-extra = --uncommitted
package-dev: package

# To build a source package from uncommitted changes:
#   make package-export-extra=--uncommitted source-package
source-package: -packaging-refresh -packaging-export-orig
	bzr bd --merge $(packaging-tree) --result-dir=$(packaging-build-area) -- -S -uc -us
	@echo Source package built, see $(packaging-build-area).

# ... or use the `source-package-dev` target.
source-package-dev: packaging-export-extra = --uncommitted
source-package-dev: source-package

# To rebuild packages (i.e. from a clean slate):
package-rebuild: package-clean package
package-dev-rebuild: package-clean package-dev
source-package-rebuild: source-package-clean source-package
source-package-dev-rebuild: source-package-clean source-package-dev

# To clean built packages away:
package-clean: patterns := *.deb *.dsc *.build *.changes
package-clean: patterns += *.debian.tar.xz *.orig.tar.gz
package-clean:
	@$(RM) -v $(addprefix $(packaging-build-area)/,$(patterns))

source-package-clean: patterns := *.dsc *.build *.changes
source-package-clean: patterns += *.debian.tar.xz *.orig.tar.gz
source-package-clean:
	@$(RM) -v $(addprefix $(packaging-build-area)/,$(patterns))

define phony_package_targets
  -packaging-export-orig -packaging-fetch -packaging-pull -packaging-refresh
  package package-clean package-dev package-dev-rebuild package-rebuild
  source-package source-package-clean source-package-dev
  source-package-dev-rebuild source-package-rebuild
endef

#
# Phony stuff.
#

define phony
  $(phony_package_targets)
  $(phony_services_targets)
  $(phony_targets)
endef

phony := $(sort $(strip $(phony)))

.PHONY: $(phony)

#
# Functions.
#

# Check if a command is found on PATH. Raise an error if not, citing
# the package to install. Return the command otherwise.
# Usage: $(call available,<command>,<package>)
define available
$(if $(shell which $(1)),$(1),$(error $(1) not found; \
    install it with 'sudo apt-get install $(2)'))
endef
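# For reference (a failure-mode sketch; not in the original Makefile):
# when a required tool is missing, expanding the function aborts the
# build with a hint, e.g. $(call available,svc,daemontools) raises the
# error "svc not found; install it with 'sudo apt-get install
# daemontools'" if svc is not on PATH.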
maas-1.9.5+bzr4599.orig/README0000644000000000000000000000241613056115004013536 0ustar 00000000000000
.. -*- mode: rst -*-

************************
MAAS: Metal as a Service
************************

Metal as a Service -- MAAS -- lets you treat physical servers like virtual machines in the cloud. Rather than having to manage each server individually, MAAS turns your bare metal into an elastic cloud-like resource.

What does that mean in practice? Tell MAAS about the machines you want it to manage and it will boot them, check the hardware's okay, and have them waiting for when you need them. You can then pull nodes up, tear them down and redeploy them at will; just as you can with virtual machines in the cloud.

When you're ready to deploy a service, MAAS gives Juju the nodes it needs to power that service. It's as simple as that: no need to manually provision, check and, afterwards, clean up. As your needs change, you can easily scale services up or down. Need more power for your Hadoop cluster for a few hours? Simply tear down one of your Nova compute nodes and redeploy it to Hadoop. When you're done, it's just as easy to give the node back to Nova.

MAAS is ideal where you want the flexibility of the cloud, and the hassle-free power of Juju charms, but you need to deploy to bare metal.

For more information see the `MAAS guide`_.

.. _MAAS guide: https://maas.ubuntu.com/
maas-1.9.5+bzr4599.orig/acceptance/0000755000000000000000000000000013056115004014741 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/bootstrap/0000755000000000000000000000000013056115004014670 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/buildout.cfg0000644000000000000000000001452313056115004015170 0ustar 00000000000000
[buildout]
parts =
  cli
  cli-test
  cluster
  cluster-test
  config-test
  flake8
  region
  region-test
  repl
  sphinx
  testing-test
extensions = buildout-versions
buildout_versions_file = versions.cfg
versions = versions
extends = versions.cfg
offline = false
newest = false
# Since MAAS's main deployment target is Ubuntu, all runtime
# dependencies should come from python packages. However, it's okay
# for development-time dependencies to come from eggs.
include-site-packages = true
prefer-final = true
allow-picked-versions = false

[common]
extra-paths =
  ${buildout:directory}/etc
  ${buildout:directory}/src
  ${buildout:directory}
  /usr/lib/django16
test-eggs =
  blessings
  coverage
  fixtures
  hypothesis
  mock
  nose
  nose-progressive
  postgresfixture
  python-subunit
  testresources
  testscenarios
  testtools
initialization =
  ${common:warnings}
  ${common:environment}
environment =
  from os import environ
  environ.setdefault("MAAS_ROOT", "${buildout:directory}/run")
warnings =
  from warnings import filterwarnings
  filterwarnings("ignore", category=RuntimeWarning, module="pkg_resources")
inject-test-options =
  # When running tests from a console use the progressive output plugin.
  # When running headless increase the verbosity so we can see the test
  # being run from a log file. An `options` list must be defined ahead
  # of the use of this snippet.
  options += (
      ["--verbosity=0", "--with-progressive"]
      if sys.stdout.isatty() else ["--verbosity=2"]
  )
  sys.argv[1:1] = options
[database]
recipe = z3c.recipe.scripts
eggs = postgresfixture
extra-paths = ${common:extra-paths}
interpreter =
entry-points = database=postgresfixture.main:main
scripts = database

[region]
recipe = zc.recipe.egg
test-eggs =
  ${common:test-eggs}
  django-nose
  selenium
eggs =
  ${region:test-eggs}
  djorm-ext-pgarray
  docutils
  crochet
entry-points =
  maas-region-admin=maasserver:execute_from_command_line
  twistd.region=twisted.scripts.twistd:run
initialization =
  ${common:initialization}
  environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.development")
scripts =
  maas-region-admin
  twistd.region
extra-paths =
  ${common:extra-paths}

[region-test]
recipe = zc.recipe.egg
eggs =
  ${region:eggs}
entry-points =
  test.region=maasserver:execute_from_command_line
initialization =
  ${region:initialization}
  options = [
      "test",
      "--noinput",
      "--with-crochet",
      "--with-select",
      "--select-dir=src/maas",
      "--select-dir=src/maasserver",
      "--select-dir=src/metadataserver",
      "--select-dir=src/apiclient",
      "--cover-package=maas,maasserver,metadataserver",
      # Reduce the logging level to INFO here as
      # DebuggingLoggerMiddleware logs the content of all the
      # requests at DEBUG level: we don't want this in the
      # tests as it's too verbose.
      "--logging-level=INFO",
      "--logging-clear-handlers",
  ]
  ${common:inject-test-options}
scripts = test.region
extra-paths =
  ${region:extra-paths}

[cli]
recipe = zc.recipe.egg
eggs =
entry-points = maas=maascli:main
extra-paths = ${common:extra-paths}
scripts = maas

[cli-test]
recipe = zc.recipe.egg
eggs =
  ${region:eggs}
  ${common:test-eggs}
entry-points =
  test.cli=maastesting.noseplug:main
initialization =
  ${common:warnings}
  options = [
      "--with-select",
      "--select-dir=src/maascli",
      "--cover-package=apiclient,maascli",
  ]
  ${common:inject-test-options}
extra-paths =
  ${cli:extra-paths}
scripts = test.cli

[js-test]
recipe = zc.recipe.egg
eggs = crochet
entry-points = test.js=maastesting.karma:run_karma
extra-paths = ${common:extra-paths}
scripts = test.js
initialization =
  ${common:initialization}

[testing-test]
recipe = zc.recipe.egg
eggs =
  ${common:test-eggs}
entry-points =
  test.testing=maastesting.noseplug:main
initialization =
  ${common:warnings}
  options = [
      "--with-select",
      "--select-dir=src/maastesting",
      "--cover-package=maastesting",
  ]
  ${common:inject-test-options}
extra-paths = ${common:extra-paths}
scripts = test.testing

[cluster]
recipe = zc.recipe.egg
eggs = crochet
entry-points =
  maas-probe-dhcp=provisioningserver.dhcp.probe:main
  maas-provision=provisioningserver.__main__:main
  twistd.cluster=twisted.scripts.twistd:run
extra-paths = ${common:extra-paths}
scripts =
  maas-probe-dhcp
  maas-provision
  twistd.cluster
initialization =
  ${common:initialization}
  environ.setdefault("MAAS_CLUSTER_DEVELOP", "TRUE")

[cluster-test]
recipe = zc.recipe.egg
eggs =
  ${cluster:eggs}
  ${common:test-eggs}
entry-points =
  test.cluster=maastesting.noseplug:main
initialization =
  ${common:initialization}
  options = [
      "--with-select",
      "--select-dir=src/provisioningserver",
      "--cover-package=provisioningserver",
  ]
  ${common:inject-test-options}
extra-paths =
  ${cluster:extra-paths}
scripts = test.cluster
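# Usage sketch (a comment added for illustration; not in the original
# file): each *-test part above is materialised by running, e.g.,
#
#   ./bin/buildout install cluster-test
#
# which generates bin/test.cluster -- the script that the top-level
# Makefile's bin/test.cluster target builds and runs.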
"--cover-package=snippets", ] ${common:inject-test-options} extra-paths = ${common:extra-paths} scripts = test.config [e2e-test] recipe = zc.recipe.egg eggs = ${region:test-eggs} djorm-ext-pgarray docutils crochet entry-points = test.e2e=maastesting.protractor.runner:run_protractor extra-paths = ${common:extra-paths} scripts = test.e2e initialization = ${cluster:initialization} environ.setdefault("MAAS_ROOT", "${buildout:directory}/run-e2e") environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.development") [flake8] recipe = zc.recipe.egg eggs = flake8 entry-points = flake8=flake8.run:main initialization = ${common:warnings} [rst-lint] recipe = zc.recipe.egg eggs = restructuredtext-lint scripts = rst-lint initialization = ${common:warnings} [sphinx] recipe = collective.recipe.sphinxbuilder source = ${buildout:directory}/docs build = ${buildout:directory}/docs/_build extra-paths = ${common:extra-paths} eggs = ${region:eggs} ${cluster:eggs} # Convenient REPLs with all eggs available. [repl] recipe = z3c.recipe.scripts eggs = ${region:eggs} ${cluster:eggs} ${common:test-eggs} extra-paths = ${common:extra-paths} interpreter = py scripts = ipy entry-points = ipy=IPython.terminal.ipapp:launch_new_instance maas-1.9.5+bzr4599.orig/contrib/0000755000000000000000000000000013056115004014313 5ustar 00000000000000maas-1.9.5+bzr4599.orig/docs/0000755000000000000000000000000013056115004013603 5ustar 00000000000000maas-1.9.5+bzr4599.orig/etc/0000755000000000000000000000000013056115004013426 5ustar 00000000000000maas-1.9.5+bzr4599.orig/logs/0000755000000000000000000000000013056115004013617 5ustar 00000000000000maas-1.9.5+bzr4599.orig/man/0000755000000000000000000000000013056115004013426 5ustar 00000000000000maas-1.9.5+bzr4599.orig/media/0000755000000000000000000000000013056115004013732 5ustar 00000000000000maas-1.9.5+bzr4599.orig/required-packages/0000755000000000000000000000000013056115004016247 5ustar 00000000000000maas-1.9.5+bzr4599.orig/run/0000755000000000000000000000000013056115004013457 5ustar 00000000000000maas-1.9.5+bzr4599.orig/run-e2e/0000755000000000000000000000000013056115004014130 5ustar 00000000000000maas-1.9.5+bzr4599.orig/schema/0000755000000000000000000000000013056115004014113 5ustar 00000000000000maas-1.9.5+bzr4599.orig/scripts/0000755000000000000000000000000013056115004014342 5ustar 00000000000000maas-1.9.5+bzr4599.orig/services/0000755000000000000000000000000013056115004014476 5ustar 00000000000000maas-1.9.5+bzr4599.orig/setup.py0000644000000000000000000001070113056115004014364 0ustar 00000000000000#!/usr/bin/env python2.7 # Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Distribute/Setuptools installer for MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None from glob import glob from os.path import ( dirname, join, ) import sys from setuptools import ( find_packages, setup, ) # The source tree's location in the filesystem. SOURCE_DIR = dirname(__file__) # Allow the setup code to import from the source tree. 
maas-1.9.5+bzr4599.orig/contrib/0000755000000000000000000000000013056115004014313 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/docs/0000755000000000000000000000000013056115004013603 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/etc/0000755000000000000000000000000013056115004013426 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/logs/0000755000000000000000000000000013056115004013617 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/man/0000755000000000000000000000000013056115004013426 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/media/0000755000000000000000000000000013056115004013732 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/required-packages/0000755000000000000000000000000013056115004016247 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/run/0000755000000000000000000000000013056115004013457 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/run-e2e/0000755000000000000000000000000013056115004014130 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/schema/0000755000000000000000000000000013056115004014113 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/scripts/0000755000000000000000000000000013056115004014342 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/services/0000755000000000000000000000000013056115004014476 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/setup.py0000644000000000000000000001070113056115004014364 0ustar 00000000000000
#!/usr/bin/env python2.7
# Copyright 2012-2015 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Distribute/Setuptools installer for MAAS."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

from glob import glob
from os.path import (
    dirname,
    join,
)
import sys

from setuptools import (
    find_packages,
    setup,
)

# The source tree's location in the filesystem.
SOURCE_DIR = dirname(__file__)

# Allow the setup code to import from the source tree.
sys.path.append(join(SOURCE_DIR, 'src'))


def read(filename):
    """Return the whitespace-stripped content of `filename`."""
    path = join(SOURCE_DIR, filename)
    with open(path, "rb") as fin:
        return fin.read().strip()


__version__ = "0.1"

setup(
    name="maas",
    version=__version__,
    url="https://launchpad.net/maas",
    license="AGPLv3",
    description="Metal As A Service",
    long_description=read('README'),
    author="MAAS Developers",
    author_email="maas-devel@lists.launchpad.net",
    packages=find_packages(
        where=b'src',
        exclude=[
            b"*.testing",
            b"*.tests",
            b"maastesting",
        ],
    ),
    package_dir={'': b'src'},
    include_package_data=True,
    data_files=[
        ('/etc/maas', ['etc/maas/drivers.yaml']),
        ('/etc/maas/templates/uefi',
            glob('etc/maas/templates/uefi/*.template')),
        ('/etc/maas/templates/dhcp',
            glob('etc/maas/templates/dhcp/*.template')),
        ('/etc/maas/templates/dns',
            glob('etc/maas/templates/dns/*.template')),
        ('/etc/maas/templates/power',
            glob('etc/maas/templates/power/*.template') +
            glob('etc/maas/templates/power/*.conf')),
        ('/etc/maas/templates/pxe',
            glob('etc/maas/templates/pxe/*.template')),
        ('/etc/maas/templates/commissioning-user-data',
            glob('etc/maas/templates/commissioning-user-data/*.template')),
        ('/etc/maas/templates/commissioning-user-data/snippets',
            glob('etc/maas/templates/commissioning-user-data/snippets/*.py') +
            glob('etc/maas/templates/commissioning-user-data/snippets/*.sh')),
        ('/usr/share/maas',
            ['contrib/maas-rsyslog.conf',
             'contrib/maas-http.conf']),
        ('/etc/maas/preseeds',
            ['contrib/preseeds_v2/commissioning',
             'contrib/preseeds_v2/enlist',
             'contrib/preseeds_v2/generic',
             'contrib/preseeds_v2/enlist_userdata',
             'contrib/preseeds_v2/curtin',
             'contrib/preseeds_v2/curtin_userdata',
             'contrib/preseeds_v2/curtin_userdata_centos',
             'contrib/preseeds_v2/curtin_userdata_custom',
             'contrib/preseeds_v2/curtin_userdata_suse',
             'contrib/preseeds_v2/curtin_userdata_windows',
             'contrib/preseeds_v2/preseed_master',
             'contrib/preseeds_v2/'
             'preseed_master_windows_amd64_generic_win2012',
             'contrib/preseeds_v2/'
             'preseed_master_windows_amd64_generic_win2012hv',
             'contrib/preseeds_v2/'
             'preseed_master_windows_amd64_generic_win2012hvr2',
             'contrib/preseeds_v2/'
             'preseed_master_windows_amd64_generic_win2012r2']),
        ('/usr/bin',
            ['scripts/maas-generate-winrm-cert',
             'scripts/uec2roottar']),
    ],
    install_requires=[
        'setuptools',
        'Django',
        'psycopg2',
        'convoy',
        'django-piston',
        'FormEncode',
        'oauth',
        'PyYAML',
        'South',
        'Twisted',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        "Intended Audience :: System Administrators",
        'License :: OSI Approved :: GPL License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    extras_require=dict(
        doc=[
            'collective.recipe.sphinxbuilder',
            'Sphinx',
        ],
        tests=[
            'coverage',
            'django-nose',
            'lxml',
            'fixtures',
            'mock',
            'nose',
            'python-subunit',
            'testresources',
            'testscenarios',
            'testtools',
        ],
    )
)
maas-1.9.5+bzr4599.orig/src/0000755000000000000000000000000013056115004013442 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/templates/0000755000000000000000000000000013056115004014651 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/twisted/0000755000000000000000000000000013056115004014336 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/utilities/0000755000000000000000000000000013056115004014666 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/versions.cfg0000644000000000000000000000173513056115004015212 0ustar 00000000000000
[versions]
<= versions-dev
   versions-doc
   versions-other
   versions-auto
[versions-doc]
docutils <= 0.12
Pygments <= 2.0.1
Sphinx <= 1.2.3
collective.recipe.sphinxbuilder = 0.8.2

[versions-dev]
sqlparse = 0.1.10
ipython = 1.2.1
mock = 1.0.1
postgresfixture = 0.2.1
restructuredtext-lint = 0.11.1
<= versions-flake8

[versions-flake8]
flake8 = 2.1.0
mccabe = 0.2.1
pep8 = 1.4.6
pyflakes = 0.8.1

[versions-other]
blessings = 1.6
bson = 0.3.3
buildout-versions = 1.7
coverage = 3.5.1
crochet = 1.0.0
distribute = 0.6.34
django-nose = 1.2
djorm-ext-pgarray = 0.8
extras = 0.0.3
fixtures = 0.3.14
httplib2 = 0.8
hypothesis = 1.11.4
iso8601 = 0.1.4
junitxml = 0.6
nose = 1.3.1
nose-progressive = 1.5.1
python-mimeparse = 0.1.4
python-subunit = 0.0.18
pytz = 2012c
selenium = 2.45
simplejson = 3.3.1
testresources = 0.2.7
testscenarios = 0.4
testtools = 0.9.35
unittest2 = 0.5.1
z3c.recipe.scripts = 1.0.1
zc.buildout = 1.5.2
zc.recipe.egg = 1.3.2
zope.interface = 4.0.5

[versions-auto]
maas-1.9.5+bzr4599.orig/.idea/codeStyleSettings.xml0000644000000000000000000000061113056115004020027 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/encodings.xml0000644000000000000000000000033413056115004016326 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/inspectionProfiles/0000755000000000000000000000000013056115004017512 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/maas.iml0000644000000000000000000000167413056115004015267 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/misc.xml0000644000000000000000000000125213056115004015310 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/modules.xml0000644000000000000000000000040513056115004016024 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/sqldialects.xml0000644000000000000000000000027313056115004016667 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/inspectionProfiles/Project_Default.xml0000644000000000000000000000170513056115004023311 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/.idea/inspectionProfiles/profiles_settings.xml0000644000000000000000000000035313056115004024000 0ustar 00000000000000
maas-1.9.5+bzr4599.orig/acceptance/Makefile0000644000000000000000000000647113056115004016411 0ustar 00000000000000
#
# Build and test everything in ephemeral containers:
#
#   $ make
#
# Use a different packaging branch:
#
#   $ make packaging=/path/to/branch
#
# Note: /path/to/branch can be anything that bzr recognises, so an lp:
# link, or bzr+ssh, and so on.
#
# Build and test with a different Ubuntu series:
#
#   $ make series=randy
#

include /etc/lsb-release

# Default to the newer of Quantal or the local series.
series := $(lastword $(sort quantal $(DISTRIB_CODENAME)))

# Default to the main packaging branch on Launchpad, but treat Precise
# specially.
ifeq ($(series),precise)
packaging := lp:~maas-maintainers/maas/packaging.precise
else
packaging := lp:~maas-maintainers/maas/packaging
endif

# Assume we're testing this branch.
upstream := $(abspath ..)

# The container on which to base ephemerals.
container := maas-$(series)

## Convenience definitions.

define ephexec
  sudo LC_ALL=C SSH_ASKPASS=$(abspath ubuntupass) setsid \
      lxc-start-ephemeral -b $(upstream) -o $(container) -u ubuntu -- \
          env DEBIAN_FRONTEND=noninteractive SUDO_ASKPASS=$(abspath ubuntupass)
endef

define ephexec-make
  $(ephexec) $(abspath with-make) make -C $(abspath .)
endef

define check-service
  test $(2) = $(wordlist 2,2,$(shell initctl --system status $(1)))  # $(1)
endef
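# For reference (a behaviour sketch; not in the original Makefile):
# `initctl --system status lxc` prints something like "lxc start/running",
# so $(call check-service,lxc,start/running) extracts the second word of
# that output with $(wordlist 2,2,...) and fails the recipe unless it
# equals the expected goal/state pair.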
## Top-level targets.

test: build | services sudo container-updated
	$(ephexec-make) $@-inner

# lxc-start-ephemeral does not return the exit code of any command it
# runs, so we delete any existing packages before building and check
# for their presence afterwards instead.
build: source source/debian | services sudo container
	@$(RM) *.deb
	$(ephexec-make) $@-inner
	@ls -1 *.deb
	@touch $@

container: | services sudo
	test -n "$$(sudo lxc-ls -1 $(container))" || \
	    sudo lxc-create -n $(container) -f /etc/lxc/default.conf \
	        -t ubuntu -- --release $(series)

container-updated: container | services sudo
	$(abspath update-container) $(container)

services:
	$(call check-service,lxc,start/running)
	$(call check-service,lxc-net,start/running)

define phony-outer-targets
  container container-updated services test
endef

## Targets that run within an LXC container.

# XXX: These packages appear to be missing from the dependencies
# declared in the packaging branch.
define missing-packages
  python-distribute python-django
endef

test-inner: upgrade-inner
	sudo -AE apt-get --assume-yes install $(strip $(missing-packages))
	sudo -AE dpkg --unpack --force-depends -- *.deb
	sudo -AE apt-get --fix-broken --assume-yes install

define build-packages
  debhelper devscripts dh-apport
endef

build-inner: | upgrade-inner
	sudo -AE apt-get --assume-yes install $(strip $(missing-packages))
	sudo -AE apt-get --assume-yes install $(strip $(build-packages))
	cd source && debuild -i -us -uc -b

upgrade-inner:
	sudo -AE apt-get --assume-yes update
	sudo -AE apt-get --assume-yes upgrade

define phony-inner-targets
  build-inner test-inner upgrade-inner
endef

## Dependencies.

source:
	bzr export --uncommitted $@ $(upstream)

source/debian: | source
	bzr export $@ $(packaging)/debian

## Miscellaneous.

sudo:
	@sudo -v

clean:
	$(RM) -r source build *.build *.changes *.deb

define phony-misc-targets
  clean sudo
endef

## Phony.

define phony
  $(phony-inner-targets)
  $(phony-misc-targets)
  $(phony-outer-targets)
endef

phony := $(sort $(strip $(phony)))

.PHONY: $(phony)
maas-1.9.5+bzr4599.orig/acceptance/README0000644000000000000000000000122013056115004015616 0ustar 00000000000000
MAAS Packaging Acceptance Testing
---------------------------------

The `build` make target will build binary packages for the current branch using the latest packaging branch from Launchpad, *in a clean ephemeral container*. The `test` target will install these packages in another clean ephemeral LXC container.

Consider the `build-inner` and `test-inner` targets as bootstrap points for further work. This setup may not be suitable for full automated end-to-end testing of MAAS, so be clear about what you need to test before investing work here. On the other hand, it is a good place to quickly test that the packages build, install and configure themselves as expected.
maas-1.9.5+bzr4599.orig/acceptance/ubuntupass0000755000000000000000000000004013056115004017072 0ustar 00000000000000
#!/usr/bin/env bash
echo ubuntu
maas-1.9.5+bzr4599.orig/acceptance/update-container0000755000000000000000000000137713056115004020135 0ustar 00000000000000
#!/usr/bin/env bash
# Copyright 2013-2015 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

# Exit immediately if a command exits with a non-zero status.
set -o errexit
# Treat unset variables as an error when substituting.
set -o nounset

container="$1"

start() {
    echo -n Starting...
    sudo lxc-start -n "${container}" --daemon
    echo " done."
}

attach() {
    sudo LC_ALL=C lxc-attach -n "${container}" -- "$@"
}

stop() {
    echo -n Stopping...
    sudo lxc-stop -n "${container}"
    echo " done."
}

start && trap stop EXIT && {
    sleep 5  # Allow container to get going.
    attach sudo -AE apt-get --assume-yes update
    attach sudo -AE apt-get --assume-yes dist-upgrade
}
maas-1.9.5+bzr4599.orig/acceptance/with-make0000755000000000000000000000115413056115004016556 0ustar 00000000000000
#!/usr/bin/env bash
# Copyright 2012-2015 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

# Exit immediately if a command exits with a non-zero status.
set -o errexit
# Treat unset variables as an error when substituting.
set -o nounset

# Ensure that GNU make is installed.
if ! sudo -AE apt-get install --assume-yes make
then
    # The installation of `make` may have failed because the package
    # lists are out of date, so update them and try again.
    sudo -AE apt-get update
    sudo -AE apt-get install --assume-yes make
fi

exec "$@"
maas-1.9.5+bzr4599.orig/bootstrap/zc.buildout-1.5.2.tar.gz0000644000000000000000000120763113056115004020734 0ustar 00000000000000
[binary data: gzip-compressed tar archive of zc.buildout 1.5.2; not reproducible as text]
bƹ0ÛH/Eõ:áÿ©8ª3ôÔáøøÑˆâ5…æ,¼åÄ+MÙDLŠºdt/ñ’KriA¾éþÊÎÇkäÀpYG’«¢eZLƒJÎPJœ| ÒXDÙíA‰¬ŠÒB !WFÄà ¾Y5køj(ÝìHE5'Ã<†³4§£(8蟖GR¦ÌáOͱÁå¯Ðæò$ˆH\肇 W†1ØRñ¸¯ähûwîíßq¦Ò‚ãa`Òˆ ¬ãáÀjçJe3؉Ê3Д`b”7Tü ñÓgfXÒCF+ªútDï~up†svær¤,«tS5À·,\E”‡çænB/÷ö¿üÊôK`ÈO´Ì'À(’dj‘èÒ™²ç4^Þ/k¡°V±BÔĈ$°ß;G2Z¯µR²Cíìî|3_vx!íÎóþÝ{{÷'£,2Ì©ZŠtηÝÝu·g©ßGýö®ÅÈ8!sÄW>O¤>Èëú!€©¦*@Œ$}ŒS{i¥qŒæ0¼9bú±âP£°-üxNŒp”jH6C¾!ÎA¿ùÑ*ÑGIHØžÝÚÙp¸¤ìªÇÙ™ò@»Âvµ䫃/¿2ŸËDæqqž;¯±äpp¼d‡{_ÞUþ£/6ÂÏIž1â«ØbÉa/%ß<¨cCŒ·õvóeó¯)ÅÐÂëØßJR ù@#=HškL-8<Ø·nq+˜ØÐÂDP&e¦Y•€$"¡’s+ÂÈØ6µå•dÿ(« ¦wž.8h¡…‰ñ{Ð5!SrV$Cš:ØŠ%E©õhËЀûû‡wW·“…Øéæ™a,.žADäùÂûÙ¸dCšŒÕñGY4¡u„î~úAY fúÈcy÷ŽÝIÌÀ/Áñ*;¬œk—À36Ü20’ž#®ŠF$ Ä \ðÎÀñ{Hôÿ µJÆ¡¼ìçT,Ø lQ)Bn´±ú^³}/JF‘6' ¯‘ÚŸpÏ[}>ÀV/ºS°Oy’<< ŽHF%ÎZ˜—æ’b ö¶£°„° èñ*LžÄæIUL¾!ú§è-؈+ó-§EE¶BkÒ¹†Ø Fª·zNÁ„TôÈ0³"BVDÛtÄåÜ)B1F(‘•#VÊV †–ALwˆcãƒFx••r(bJ5A}‚‹“<ØaÉd/Ô¾»P{÷n–&;()eC\9Âñ¶Á)óœcU<+k™ò²ûÈmáé2d|vUmPC gk¬].6Ún𒽟臦@5´\j‘]*QFdªñ\†…³÷é»rKÒ²5+im†ä‰àOV½<:¸I#äX¥”XqƒY?˜YP™Ø±° ±”†IÓ| †ª¾ýÚÆò–¢ßbkT$Ó)-.E…¹p¿XâŠ/9i7„Jä``8c¡ê¹²¥©{áˆG%þ"›,§é 1Åœ8ÎÚ“éÊìÑd©S\ÏÅÅvriˆ,Ô÷Î J@ <ëǽCè¸Me8O&s…|$¥r¡MÁ±(ÙXaß]vëb9èî}y®qgƒ ô>IŒòýÊÌÇ­*¢*ÂñkÈŽ¦I²ðl9K˜‚ªváŒâ蜕2‹Ô¨Q“ðT~1õãªkW¤†ËÏP9(–>‡à:”0lNEqÑw ·ð—XŒ…Ö#eÄ×LÌùÞÆ~N­>¤špùÞ¨>·AÓºÚFø)‚Í\rZýΔ-¸dh©næ[<ÊÛ†! /0S\-l9 ¹ñfº‰¢± lÀ¥0<˜¬ïö­Ó'Š…ÓºÑ‰Å ŒŸ¿ûÂIh×»r¿léWÈsr× ùÇhD‚p¢‘Uÿ\ oeé Xi¢µ øãÂVb"6² ê¸|˹bž>R­-8L.òýå/aAŽqZòl» TÎùQÍXë¨9HgÍNä &µáùzEà§Ó²1³ù$ž-QÕFìoq±~JÖ)ññòL҈͇SKÃö­÷s¿{pg{“þ#‹ië…BÊüˆ æhåªFDAüï*K›|qtÐ¥gF"my³3½Òí'v3ýRÍôàúÔZb)bò–lWøò‹l¼…ƒä¤°`ùZâzðì¤ðÝy6_N S/!“¬„w³è—½1Þ2F†Ñ†ÒBglÓ8!õÜê½u' N°ŽÉº’‚Ar toGËàÚ·ƒ(=XcŽùª¹—½â‚Rоùû‹ “Ý'ŸÓ=öØ»­8†Š\XtÕA‚ÈìrÙ üjÕº¬Ðú ÂmQ„Ôÿƒ;÷ YC¿»Û5fsðel€A;gz™RÚŸ,Ó½›Z&JÍöš 1î¾ê AÏ ¶ø c½[(c ]+$¼Ö¼Hûõ ÂB1¼¬‚rž u“¨•PË&·šVf]}ÆuN‰WXû¦ãRÏr`—XþØ ø8H7•¹]a=í@:+Í9Qb¶Ç Úü±#NŠq=³É"{ÝŸ«vüÞö”ûÕL&láH9dŠO›»¸#ÙÛhû7b Œ9kË™vÉд”Äu1ÇbÉ1û5ö»×§ÅËÊ\ÈAc<Û€ù¿sMm!ßa2ƒHß•þ•ÈI‡‚ÚãV‹}”~Ø5»K]»~&°‹ éù®ÛÂESÁ \**¿Ñ~Ä~Ì%åB *æKŠ[¦LqšÂöÕ.ܹÎ.ü=ˆÁIèã (ú‚Ñ»\ಠuð!f=°bú§Lg/¢ø(G#—~‚_cp7–èKéP.X¯ZëBy +^dCìô{Çݨ¤Ïç¾7òI¼¨1¸À‘I¬Ì¸=>Kg™€MðôƒGã®YXI6½úo·W6ôzBØ"¹Ó‰Õ‡ Ö%PïØæ%Vñ¥°S66¥èš5ŒdB‹Gu\Ó0¥5+Ù]P•3O¤& [Oy‡èÄΟŹá=ÝbqQ,Ách™Ê[ã,kí^ôtÆ1ä4í‚Æ3ÁœzìÇÌX%– ¤ [ÌŽh±i—»èK‚’¸x6:Ò2,<—MЭ·ƒVÆ&—ðx7 ¼ƒêóé‚ôÚ‡.mØË×SnÂi7Fd܀͙Ó;æaW”x.~\äÍW¾‡‰W‹¡xmÕ™6Á»V€ýŠŽ•¡4?åûW+ïÅ :¿Fdk@öÃ½Š¸º¹n­N‚Îð×¾Bâ„Äí­Ro67ÑÌJ5¡•½s­‰AüIšú®ß«Bx.#K(ò½¿ÑÜjNÄé½û÷ïGQvS)ûËiŸÊ×JÌ:Ü⟖€ ,ŠÅØ¥°SòÅ^ºU•–_B³ô 6ˆÊx4°‡ƒÿòþþû ÞlUÅ@†véå4{’·XìOP€6M ¦þ´Ñ(¢ÕâÅ wÐ^= ”µÀ’å%—tæÈ½†s \bDéD´¼_’ñ^ìÊßoØõuh|ÖÌ*^‡18D,¤HVôäVÃÛ`@D¸¹=÷Š Ó‚H½Ø›‡òÎ"d¢¹¯L4nL¹Î/Ñ) ¡«€ògá:¾4ã{nÆ´¶[ {w¿zí~Ô‰¾ôü›ï̧¾>y|òòèÁßœŽš—éìð yÄ`Çcâ»æâÑ'˜î1Žšæÿ ’Ã}?ìímîü±ØiFŒ†Ñg¤*SÂ"pôKÈ·œRX7Æçf»ôLŠ$Øç°á¾ÂEÇàpm˜óõ°WÄQ¹ÒÉdø{ÿÀð†¨gtí¢OÃýQ\œ÷_@¯ï¹!ÊÀ2¹ÄNÔì¾B&/«v 7ß´aX'ªê³Ôx7šö²°1¤c„v—Êá¢Áü’J9˜n4–‘_r”Bk”¥$oJjÊÓá[€Ð˜™@M‡(§×¡F7‰Á©*øˆPÕ3„×Àì"ùLR¾ˆÆ“øL ÷K±ï™|g» ,²›4±®8•øÉxqǪB•W¨w^‹§ ÖšÁ®hÃ?cúÄ¡8¤í”«t_PéG£±L%*w\V~ŽÃ6¶£ wÓY®BÎÒ¹üâµLŽÖaœ|&±[ƒ«¨7Gt¬…¯@œ^lø[ÏD}nZ¹G Ÿ¼íD¥}î¥P"´ÅDÆœ“–9$€®Ùº0›ÔÇgûýfÛ¦œÁ¹Ð­‹ž|Ù>>Þ÷¾†£–Ñ0z¨Ç-ù#©Ý£¯÷ÞtÌK@Ùê÷²ÝVäî3 W^O³¿L‚*yDÏlžÍ[€éšýÒ=öˆnOtç¥Õüg|7Mc§ÂõY=6”Yt’ømo‘L°dær¶(óÛ.™]UɶÙpåÐàPTÅ4DÔÐOØnT‡Ã<ÛlþØxýðÑ‹“çFdxö—ß=}‚Üý᣿?züô™Œ`qòøáÓW/ß—{Í€7†eÍEY­æ; Lÿkñg3ÃFùenæ«ye áÿÛæ&½x4êÓôZ;ݼt ºŽÇÈø'FëÏñ"”÷!hz0ù‹ÅñŽx_?.ÖÇão!k#ؘ-w^mT¾nÇœ+ÞŒŽ;žþÓ‡ÏZm” àWºÜ­]­&IΔ A{„ƒ+Æ"[‚[.y¬IÝÊŸþL\­_wP+‡W ¥¦•ËÒ@2ƒ&Æ&Ø™€Qkh}ÌŸHgMKm¤b¤(€ùjúv”ææCœ·L øŒ21šÜœ$´a²ÚïE~E Y?O0þ8Â}¥1íôŠ#Wš¿à:5b!‘ÞS@.ÝÚ5Õ,2lžæMl¬ß¬lFFqB%BŒDa Eµl³ºF“ ãîŸ^mH׎EH홌º¥·±ÝïO þÁd”VóÇüGLt1ÿmsG¦ýB¿&l{GÌÏê`ðÙ¥ÃEˬŸÙ¤c9‚#Ð%“øêx¯-ë¹êdú=¿nβþ8~›4ßè=I~~Ý„‡ÝB6ß´>ÿÜ=g_e·Tm þío$OÚˆk ó([x³O.ã«â¯m冰Π‰†3c#?ÈÜ JÄÈÊŸ (?÷”ü’âü!õø °=Žyð¥úúæ@SÑ èÅ¢l¤v³Î ‚ÍŒoteP¯[m Ù+þ¡"ºJÝEÖP VŒ~QT¿¾MøbzyÝ$ÞõìÁËïpåB›“ Þ߇žxÍX†ÓQ@Š”™5-ÏÁò'Euðƒ)þ÷þwT!Ioµâ1-ywJKµöIp9¡T¨eZu"3Çcó¿vï2N@Ñ7Yîf‘‘LÓ«‹Ì& h“Ôî†õ›`ÐßzB¥·lR#Á¥RªVIIÖô ½K…÷ŒT‹1Ä8¤–tcJ¬§™ìWjP!WŒ¢­Š!f8õMÏBº¸D¥šØ|`k8ñý¥~«|l€5YÊ"x‰t#ˆ)‹ 
/ª\·Bà{?Rl6Ɉ±¬%~Þy¾Î0·„kSë§Y^F)¶óAä¸XBðŸ ý(%Ù«P(tdb¸Þ%Úa5V•MF iõƒÙÜ&Y'’‘øç85“AŒé;ÌèŒ! Ð Ü«€«Ž!…àÝ!cë ]†wyž5 ³±vÑÕ,žÒzš‘=Ë3sSâIÃå“*yÈQE]]E¹©Ú"§ØõÈax"Ä-Ë ˆL »âæ¦Ð‰â ›×ˆCÈt|r(Ý[2cñA¼ÿè¹VUîŸí(s¿ø£þ.g=,Ô0%œb X‰ÛÅŽ R±:ðÅú° ¶àfy…ØÒ.S¼,˜ŠfýwS#ÞŒ¯*hf’Ûp°#m~è'ä>Î,Xß»ÑpÌ勽J(D1ŽÍƒ~gþ}ƒ…{x &ú½Ç9²xÚ°}ä¬wIòò^.Á²°œ&ìÚådqÄ•¹ÊõÙŸ63Øïíc\`–ñ}Ga•Ç»ë}þ³ ½ú=rÔ4<‡H‚1¥¸AE`øû ·ß[13vË]h©wfÙ[Ug9¶—É4?ÄN½õÁ6 Õ 'cð¯8'UÌÚ( §S9IÇæ{ï¯ùÒ:”õuÁÚxÊð.@Í"9˜‹€É#|&Vï{t|L“AtQ€©à’9Þy€FûîPà>–?úÙ|t ?‚‡VwÝGP>:ÕtÅ"%vØæUf™]ÐÐ\B¿ÙËKj…:ËãBŽj+¨O%j”1a]ñÌŸ¤±––û‰m‰OüTã*¼¸&‰ZfCWQJ΢‚SÉt@ùaߦ3[rA_n^'B\D2’ÚBËø‚À±XC¢H£Ò¹º.¹°¸8Zš0·„ä.3[7R{)„Ì0› ‰‚ëåè¨b›AbXR<;ñÒçÆ†ÖS­§¡}U7”lÁ±ØìJi½°Ú6HŸÚ1à§PÇèˆÐáüŠKÊ+üüÀ h’Î0ŠÛØ’£´3 ·ò°€,ªÁBèÿDiS ~Mýä½¾Yž>Űc2þ´½CÐÿîÜ3_ÅÏ´2íoõ¿ñ£  ^½¢:_c†äÁúç.ýsÏÑü1ÿrW~¹×ÀÈÝ$zþ÷›) ¥áÊæ‰+QË©›Èx¥ø*Ó[¾è3κ3×íÙx*fšI’¶$RêÚL~ÕªéA† Ó› ÿäöÜÙØKŽ¢ÇÏWdG˜üš®ÈAïÎ<<Žv—E¾‹‰Ñ»ƒt¶kâslH˜‘ܽÙwlïÿ;qáÖÿ÷«í¿™qvÖkÐîGÜÿûwoý¿ŸÚþ¿xúêù7^lïZ#ÿ›¿ãöÿÞþììß9¸õÿ|”ÈûmjƒãÂ=§?ç¿ñâ/^>ú¾Oqxýï=~†SÌxCâè!Õ±±ÈF¶¶!à»ö7xh” w˜Æ“Þ8§ŽõgðAmPBc¥»²A‡yW% 4jÏ÷³ÿý·îÉ“oŸ66¹õ,VŸ Ĭl&¯>e÷¯nisú¶rÞêöÙÂbÕ·’zw«ûZdó>bjèEµ@3ÕÕuß!|ÄyVx³s_»“üVèØ~zÐVªùVv¨¼AªË´~ÍN®ÐÈìw«¤ô¡^$d {ïû`¤Çwë_¾$îkŒ†½[ ‚ÕÃê}¨GùjÅc}³—ãô]mvªé½þ‹ý¿³3®Qh† `ÁSœvÍW€ž_sˆ\žcðkN¶Äï~>´Ö[Vïû»št‡¾ÿþÁ““o½xÙKgÁï }m©zðKë¡OXWµ£Ï·mΟ¯iø©÷|™˜O¶{Êž4ÏÌ^Ú$ÿ+oü¯ô„ýoÜ®”>¯2ºp‹ÐN¬jh>Z5œJÛ­:Žçéf½ìõF}rÓº…,²Mú´¤¢Ë´b£ÞåLüNä-*ÜhüçÞá}%ÿCü×Áþá­ýÿ£ü¸ü'¶³¾i`h&óÞ[Ù¿¥ýo#UåÆîÿþÝ{¥û¸w{ÿ?ÎOã5„Ff›cõ¦¡’ ô9°!/€ iˆ…úîMƒJ[ú(Ùüè!|߸uü–ï¿6(Ü,ÿßÛ/ÝÓìÞíýÿ(÷ÿö ÜÞÿÍî¿g¼ÙûX–ÿîÞÞÿ³ÿ·àöþotÿCŽÄÿïÞ¿åÿ·üÿöç“’ÿƒÈÂÿïìÝæÞòÿÛŸOêþK|ÄÖïXuÿ÷öî@°—ÿ÷îÞòÿòó}²ˆ!¥ËØØG×xbHý‘6æ5Ô׿4^,§Ó8¿:Š^.$° :„ªZëÕ/ßeSÀ0=3[PËyÚc‡ÀYÂßúè5,ÍwùQô_é4úv9Yd3þ¨›Lãtrý3þ/tW ³iƒ¾ÄÙ&dÓÄqÎ?6ÕékW£~*-ì/½&¹bâÑQõkÈrøºŒ[Á©>…WwÃöçP݃b–Rd‚ÁL˜B&éª]/ŠNT¿„þ½r ŠfØJõkLÍú¥uÊEÌùæñLjB“÷JÛÝ4¾$Ì%óýTyv–ÇÓ¢#¸ž»Žð¶ºn„Ææƒ5(zᕼ„Ôê9;„RPVîÈí(V¬Å|]ÉÞ—L]÷"ApÅD1›3ö°Ö-*ÔŒ/ÎäÜgÌäËèߘСu {ƒ` ¢Lç¦ )âÅcH·Í”ÿÛP>†!vÃÛ®¼óm–ít(eªCZ®íÍ­TZpÒª6ðkwàü]ÿ6s$°f˜úF­dƳ¡ÅЃdt3.'…ÞÁ± G@«ûRkÑNžjèèdœb;NJ÷Çü}~ø‚üNƲp¶”iÞ1gÉž9µwš7¬8í‡_ŒшŽ0 _`»Ç®£Ó‡†Ôbm¿‡\³g\×'Ì{ ÐñE2^N¸†%l'¾w¤»ìDPaQ3™ËdøzT·ÖPáËËKÇì þKpDÏÉ‘S&–Õ†ý¦ïÒ÷±èl€nžúÚ^Ã\S[<×Q è@§`N& ×8Íi… ³¥c3»Ó\wP½8±iXµØGÖe8?]’ »ÔÁj3ˆf×”Šî¦„ÒYXÂS,SJ“j…åãh¹w˜fQÍp™·¨à^5Q0¼ZúJhÓšõ¯´÷7óâ¼Mêo ž0ä"Éç9 ]ØXDVTuG9{ÒÂRW¬ ÇÉÔH¯qScáhH¸ÎãB÷¶~ K\*ňçKu¦§OŠ ÞEGL@¨O™ÒŸ* d¥«N¼•ê­¼èF…RTù¦7Ä=¸(ª;¸3DµaÖ˜;Èßð9‡IwH÷b¯Óêqƒ\vˆxÃKy’Rãv‚oTº+DåYB8įò,¬èœêÆQIV,áÄÀY:_Nœ3¶»`ëNCm»ƒì@7˜™m+Ëh𦳔ƒ] ÒÖ›Uqñ§ÔèfŒZä«>¤b,xrf#Wo¯Šâ‘2‡±ÚîX/2c)‰%,ÇšÓcz5÷§ª”/õƒa\>èß★ÒÚ_h5è[Q-)³e+'5LWŸ/gÜÂÜ$~ŽpnÙnP]c.\G™–«ÃÌG Ž©ýRh„=wbo‚|ah)#ôc0¸íÉÜêp[-„Ù¶ZQ«pwõ0(nÔ±Âûõ¨)‚oU,B©šV›òd䪫‘ ¾ÍY–Ê "„é²Éì,µ£Õ ¯»ôÝ.žowà &rí*hÃåH'1¨ ,¿ÇôéÅBJYêWÈQ/ò¡GlkÌ3Š]/-°Haf[Pp©Hsõt–1ØÅZ¯¤çNT¯RmZ…Љ—µ2.´r60]DÇéV ‡Áf{óˆ4ÖL†Skœ1QÁG;î\7ªšŽÒ<ø>­‰­,-{¬úïD'îþ1•ÃÛëî›à·á“m/“akÚ © (¯áI¶…QqÀà…!Þƒì]V8™N^µ|‰d¬úR¾dœ´{ 1_hm—Ž5ë/-Ä2HøË#0.Òò±P•5bFG¢´NAë+]e¨0oIìÓ¬X ‘«9Þù:”a'„ËÃ/„‚õÉ…†×ÔÊtÜ™uŠN·ËOåü ¾©Z}^‚ð]¢?HÅ9ïÛ3€öÃ6èS½sr$ê(›K*ò¦U+¬AVLÀàhVõf¨):IÞU(è&KËÐ@à£[Ëü¨ž#ÖsÆT^/í©¯¸œö­–ƒ] ¡õ„\,Æ<%@ˆy2‡úª7†ÑªgÕ5øüD]7ÐkÔÆÃsÀÔ$SÛ'xÕY~U8D_Ã\Ì›Þ'Ep4š!¼¼à¿È/§Åcm‰Oà%Ù•Ú€³ ܦpÙÙÅS"Ü-åI0bÚÐìˆ\xX2Îl—V ¢%EL%ÅꦞFX09¶{0‹?|ðŒ›„Z|©â]3Ûx’Žœ§è{s°0II2ªÃÃìç„`ÓÌì*A{å°•ˆ¹ºá(™O²+ô'O³,8©€ìlîòéYBÛϺö•5ê^£ÒmiíÄž!…ÐǿΕaY9±\­jæÚzÀ°¯Ü2.lŒmEŽÕoX¿´_/g#6uù4“LàLX}Ì^-Ò Ôÿµ„ åzû:¼ˆ]†+×SB5ÌÍ@þ™ B¢ÇÿÁ’°Í˜¾^žEÏðškŸöçõ?­Cƒ˜•éî'¯sePï'RædWÄõÅÉß"0 ^3Ë?Ã_‹Ì0Û¨[¤gÿË©cu^3týCcOåSÒ…­f³~™›Ý-ë~Pà¯k¡¥(‘\„_ÿB;2*©öý|ÝOXN2]D„ì‰tÀ'i=qäÏ$ÁáFú̈œ“#H.°”ËxÒ‰æ›îqio•Yrë=®n©ïfýX[käì9¯Šo“dÎVSŠ÷@y ì,ÄÒD/>ØHÒ^Å5\@Å1^© ̤P¥Ñ3Q##N®Ž"(wk.=ªD”_í$Ó$?ƒhð§¸ØbX9e†$i,± É…Þ rC˜Î“bw¿wg7 !Õé24Ôôg+*“-вùKwáNï8)=T믥VØ”f  DŠÕ.g¨i ;¼f‚ÔÏ:«’nAzcÉDÙ>Ñ•8§X ’c4©ï)×e“)Ê+ °–&x(ÖŠ6s!—°È©=7ìh 6áխНÎFq>rùŽŒjÇ*íÙÁŠ9Üv§'€.”° ŠáX‡¸söxA ¶—Ö†ÑbŸ9T›®ué?‚kŽ…»à1nר-mì“ÎÄÓ…4çr¤õ»[b–¤D¢;}¤ggÀvSV¾.fæ`3 Mà¥#´çÐ.Ý‘˜Í£)*S€J×’T2‰V? 
Òîý^ôŠmN0+ïf¶j܋ن$€Ì†á»ÚÊ]E‰DáÈ®˜.×-¸¸Zu®®á,çF™G‹è Á›L®Ð z%Ó²ÐáÑééÖu.OOÉ d$+í¯¢3æN¤‚ÒÏ3óz]BzÕû¤{m©Œ^$´y§ãcÎ…Y§¶Xº §}{ï ÆyÌ6~< D:a•oíñØÖúq™§ êu<Êk÷ `–àZÊØÌºÔ aÔ¹ÐíïŠÎÍptŸJìÆÎ éŠûÀ àm¤å‡@4Dqg›–î;ò¦(›fQ˜«Çóì³±¦¹ÓSV,º^H ˜& â´9 zYNOh›Zþ¦š–ªÁàŠ nšÎpÒC¬»®æ¡Ü¬FFÀÈ0;8ðÀèÙ]ä¿l1p -Z§°–c8+yŽËð`²Hòº\œKŒ×§§z˜ŒˆhíðGÍ´lÏ@?ýÙ$tcTÂ.Ï̓(âIEÜ)MóDq©&` I$V%Ž,(²YÐÏw:\P%YåB›]-5³‡MŽ’%Fx†K4¨·ç¸=MàjéŒjLéêvÿi¶Äú9Ño‰f {\ËÊv¿yRÎ¥°Õ8ÏR4KIkŒMs\jl¶ý|rÕuZó3ß +‡Gâ=ý¥‡×yäĹ058Šº R-á™'gæÍhî!?Wöð•<\MRYÄ׎2¤DiQ,åËy4òm¶œYÿ¯¬Ó[pndyÐñ£Û…Ò}f­þ!ïBíï¹e–¸F£¬˜o‹°#÷;s…. ^Ú2¸µD‡Ös¤.’YŠ¥´ÊÔú<¢r@k…ޏú¡ S…µ§pðCc5&³¨¯ˆÀ÷fä,ÅX=©H†›Dë]’S¦V×ñy¸sHÊ>qÕ1¨¡Èßà æI†ò F°ÌlÁôP|ëJÞ·•2 ú2¼\áB³z »ÅâJ×XÆ T±<õxÑ`Ô"0Ö˜‘ÿ¡ñÀ+CG…ø§sshàé˜%ÆÔEåãÌz¢4A?£ýt!Ó¶ºì^£I{…‚#?å©aeVG’ØÕ0âdh5F™%lEpPøÅPé(ç ”ÉÇ ŒÊóí^¨SpjëªàJÂóCË43i³ˆ~ð¬‹-™, Žd£ S&[žZ“évc`î•r¬¢è®\ÐàÎéh1G+`ã+¶Ò1BáTI#®UeULSÖñæ ØòÀôj«€d¸éy%¸?±€2ŠœŠ‘Š;3|×Iî5Ä9pÞžây$ñmTT{¬Aõßóä05](@¹ zÜ·öžÈkŠ B· ¶!¦—ÙÝV~)¥‡%h¬~‰É›Â¥‰‡â’,“¿BçÜd2¥$æX`°—ÂôL^.^I­ H‡ ~ºQ t`Þ%]ì™Q–ˆÌj¼ÒÂA¡:ÃØ&`p”†á‚ EÝwä3CTâ¼;_ua_*ŒJ•D\ŽÈ|SC rE³jæF~Ë[fvTÜ Sý&Cq9”b~µÓ)i‘§§Ÿýçþe!¸<ž!T¿˜›fú‡VXxb’Ù…ê`õÃíÕ««—¶¼žQ H}ÖÅϸ^~5Šø ^ò6ç6è*”óŒyÒÛ®Aº`:•Ѷ”²mXÕS£µG‹c8«X»:‡XŠÞ¡¹“ îóÂデÇ+™*‡Èòhú¨‹œs´ztWñåüx¿ Áʂҧ͇äùiWØ 9“È;áL.Jj–ÎçÉ‚”zö‹Ë’*Gå5+]vY"w°>jW±3T…s#kU˜Äfp ¸¿##É£vcô­,!9¨NFQ]ˆws\‘¦_¢î ?^ä_Äü»ÞqîbLÏ/^KÒç€>y›¿{¢iî¹áw­½Žé“§q„ÛçîÝÿž¡"í/í`ßé"àܲ•°Z+³þc=dK#ÈKŒ¤·Š/÷Á"Ì›±tð¦­{°ÖÚæÏ,ýÙŒå*ɼ̢ñrÆ<4F­‹k=÷¬«eé<©;…£d,ËÑò¾ žø$§‘ ºoN\GUë€É|Ñ©<Ľ?1‚_õÛäÝ"ûH[íê÷6 ù¸Ù¬~«¨XMÿ˜ÚOXv"÷ˆGñ-X¨ª­Ú«¬f˪+8¢¸º„Žúó«~x%aùƒ‹´z‰×/ÒºMØ`Y*íÓ Ø0P·–0å¾Ç êZn¶74ÿþê³Â6˜Kh«OfbH‘4˜lCó1B;AÅ=O ñýž™H?i* O¡ •E“Ì”\Dt6a¶æ X*±ž ÏÔgp¢ŸÙ_ûf;@0ÍÊk/1‡D8OA*/"Y÷‡“8·±¬P·<%[klËp¡_Þ(I¨aSVø&Â|éf­ÅŒDÝCõÐù8Ö:`+‹ÎÙ¡„{-ƒP|)…9Çê Ä™* ¹Ì&€µ‘PÙT R:1Öûf-0?ŒòSv²#´X륇›E$`ô\êµ×뵫OÕQl’JŒJ$Jú±¨ë•fƒ¾k#ÌçuS~k¾©<ÀÍ_7KbAó ¼…Åˆfé¬Jé¼÷:èÐ á ¶7X"¿îö+ä&®×ªÒÌI…ÇÑëêz}¿Ìœ\­úI*Jâ ¦èMßšf«ž >h‡×‹ç ªoÔ¾{ ¡×?c®¼NXí¤|#ç<ë e¢Ï`àÚ4élY¯’Ï:NO;%W’Ï0Ì÷Žfù4¾2Dî2p…žÚÀ_CËct=à 3Á+®´ mÙG‹³R%·uWu”7(þy¹”<Üä"™i‹Y<2“²ƒ5T|p©+†¶(¢%g’œ¡ÐîK™/X]¶6«PRQpÞ“¬_?øxÄ'AÃàs¤I5ÊÎë²ÊO¬ Pp ¥ :ÂêG]¥¢Ð í§Ük6+¢ÏãÉe|UøykÖÕȆHí䉵 šôAâêÄ“Jøy`&®tN@Ï©ól¹‡VÛ|]»¶Ž3‹tgYÉO+~GòrHÊÜtT !â “†}›SÐbφ÷;¯Êž>v5Á‰³>wÐw‚ijp“iÆ¿ŽAŽiDdòG‚©Šuž™~Îò„}–ìKAÌ8[‚e{¡Å %a£o\,¬)QA¬€Æ GPm È{0ÏÇa}?ÚÉ#I%._žEOÖ8Îý`ÐÌòÄÐè¼µ~ϯKI¼;œC‚H•ŽamÏa=âÑ…¹Jtt\õÒïÎ,†ÙIJúÈ%®]å'=x£Cë á@%—ZÓ¹dYfº9‚–»W3Ìü>=Uk[=¸@Ïe˜MTÅ ?æUÚØZz–Q"Ñ$Q¸#.ÚÐw.t¬=éµÐ¾(Û·õÔ„Ä ¿@Z.k$Ú2^K9M5ä4U2çC*¼œŒ1Â9>.“iúfÀ–öoÆAo|-ì¼›r¹ì¦‰u´SÜ7FÕm€™”ƒð]Tg㉹åüÌ4‰gd êX'Y¾Å³ÎÞXÏÞ –ôådÑ]dÁG‚Gyòn:‹ W«æÄõÂñö|#-¶ñ…lB5;6P‡’ºW“Kaý¢îŠû=ÓäÇÉ% ©,’99Kéo}üëUudÕÏ+ %µ&š»f#s‹Ív‹ÍvsØløÀGÕâŠä¼[àã§UDE—Xp\<¿´±¶Ä¢Q*…Ù%aå@'eʪ?ì‚_%'“3ŽíSáwY[ %f“rKšÏâ²£Õ 5®zõèr<¾—n[•Ü:^c±(¿"­™¹p‡ƒ‚úʳk.AaöEÁÔÙ¾ Tä:áÕ*Ê Š¾§q’}©œ³ŠÃàBÒBÆÚW9"GW–ôÝkÝZz¢ný)ê_e|·$êVü^:OsRâ…—÷ï€Sø… 5OPv€öÊKò²ØÞïO–m…ús ¶ gPxhŒA5,a± à`;,Ì “Jzh &Þš@¼Ï¤Þ@ðùüªœ¶Y›fÒCÀDÁøàp1˜u‡ò(dRåù+áZÀ%`kÉ.&$ÒÂ)ó•›„c—¨XuS\“)[ŠÆl!†IDqÊTH,Jùg½› \K@·\Ê™?fí °èg¶¯£C )j3>'»ù¬È&l²tºœª•@ÁœU†:HgAè#!Ó€hIÓU'¤¡îCÙß5æÈ¿üå/ѤhѪôe&¾Áo„ÙÝÞGÝȃy*7×Ã.ú çŸü”wÆ‹*V„Š×Ϫ5MoÍvÝL¶ì &RíÌæî^ï^w~uлSB™Âv>ˆø^ ¡· zE=ñ‡#™ÎWG[ÎBwÜloõòó à)m²*È25$:Ï{¨¬èÀ ’ÿ&8Êd¢¥tÇ'kFÞpÒD0†ŠóÝ8Ã<+ —±¬!ë‚Òk,É`2X žÏÒŸ–‚æÀR—méeÏïCñjç@àEòŒfF@U02äªp@\–ü9M.ÎÁ'ç=ž$IgS`8ÒóÓ2¾°OÉÐX™â­‚‰ƒðˆ/Ï“5¹Š0rÄÝ4˜Ãy2™ƒÖ"Vø’@¡RjâVžA@³JtjïehÔ u ÅÆ‘“ëЩ9×@H:eEÞ¿pÙ›·~Éì±ÌÉÐ(V¼|á=“7!z­l¹¶k. 
>ƒ¹xyŠÖNŒ‘²,ǼZ®À„ä bÌâ’DO‚M(ßãúÄ¢lŒMÉrç7Ø—™dÙh ‹H²ü»Øµ{‹9.àljZC’›yý×6› yOÿLÑXfWۙȡDG€‡>ÈÛ¡VPút·N%^Öy‰‘eÇ“ª ÙDÊx¾u_RYA¤Þâ[jV¥‹Äi±H-!lˆë ¤‚€t´‡¼£ãѸ]6€ÖîÒϪÙzÓ²il†Ê,ydœŠbfR #Õ ­Ïìœe—³IFiH¶Çsft·(ú6ñò<ã¦'³Qò—ºkË4ウ6"–¿K¶ó8Úáö;QØ“õÀÃ0#œ%ª ÀwøG£-J a†´£¥`ù;ÔØ² w³>ã‘Á‡Šû©(n äÈ H>_0$b©,3”B–ÿ8¢BœeOb°œ2X¡ËD0ü¥d“¤F^ ¬ãã,£Ö& Þ@·EL¡ùöJPr«!«ü´%®D\{é=oÙZÒxE8ÔF§'U^j ×ÓŠ¨R~2$²}rVoãØÐIvv†$*3z”®—Rj_yž|ÍßÃKÖ6–<‰¡i4ÔQ Jp;:ÿÑ> v`ch¨q)œÆë¡öyV±8_ £Ÿ ÿ„Âs^7uüÌ––†·íãíÚö¥˜˜´ÀÀþËü«×*½½½bHÈÀ£Ó34ñ±ù5É[.X¨—äy–¯_Ïæ7ñ Æwäæbÿ± ¨»…fˆ½æúµ(/^´v~+»ÌcHÔÀ¯ çz„ójž¼•aT¦§öºó_{˜“à­XmL8ðƒ¢dµÏ¬Ü°¬¬Þœæ7eª{ev¦éþ¦„óBÙ§²¼l1+ÅÎùè~KŽ’ç »°\®^ˆúaó€ÇbKƒHìÌò°JðÏ“ñD‹Q¨‘šä82<”1­”On2@2J‘Æ 6ZE–‘H N)çCåÔ­ž•O+Mœ‡³¼Rˆ•jÚ˜…c-æy ‡„¥À¥ló¨ éXÅ7–øtÆ ¦e–#ZŽ}~Œ„ƒŒi0d)/jÇG\eÛ¸µ…’á%>Þ+g>òvS7½LRd×N‡ÎRP/¤º¦ÊÈå;•òÞãÓ©+ˆ¼ó²Æïj+:ˆñ¨T_ÍaOsÅ)Žî$R¼‘EWÖzè2t*Âá_…³RDog^ò¢oj­Z½µ Æû aXÒ´8 ¬³5ÊWëh %Èu `èkA ’]Æ=ÛtàJ×-–ƒ–gªWàë8§¤ð4înàõÎ óöû§W—ðu=iýo„ÊöÂ÷¬ž2¸²™ŽŽ±¡q˜Bèà8Ð(W¾ Ë»±'¡gVÞx‡À [ñL¬½œ)…]ÈnÉü£îðT€x‘&^ØÎ4+2DÉÇeD½NYym˜£d?­$ M*xÖhòÏl@ÊiuÝm’C±”pEöÄi§Í³XBâÙȆr-Ú¢ÑV3g½–´W XÐå$Œe‘Ô1=»—b8p3Ï*• È·GÉöFʺŒ\^ý(EƒÆ~"×bK°¤‚Ä:Ä RË5-8Û”"‚D7DyµD®i\lAxËì—¦ƒ"m?Z3¦ñ|.±Ne®Gv º‘fæȔDzpܺç½.dá/|P¦ Á®óÅÚ—€ëc†á!€ d—NLø ¢…uƒ–yð*á—çIIØ IHÖè`­È¶B ÿà2áÜCfVæS6Rš;–à »¼6$,óà© ÝÈùÅtƒ¤ŒG0xǾذÆZAPc´Î,xÛѳºlÄ᥎ðaø·ªb¡¨¤š$½ ˬQAžL³ g6·™qÒJµ¹¿°»A‡C+5à„€ådÙÈQ¾†R,¡ÃoŠ@µÊD·„”—ŽEbõ'ÍÞ°õ–#Ù;¶–lŸû¨ ¥Ú~=îÆAº;æL—)ŠŠ¡Ñ¹ò4+ëë•í_.yžø:Ãj “âqUªìŠ>UÕ…ÕI¸zÇfƒãæ8ïQIÊŠÁÂép-¨k¬߉[Šy‘åU.¶ÊÓ‘•¿WÑjй¬HÞ œQ$_úxx¼Ê ®Ö5K.C¡ hhh­)ÅŽ$!{9ø2¬Óó~HlÒ3¯;[ˆ¡kýÂ"£[ dÅC̨^rrR({xϹy–S¯×K÷ú2.éëÏì@úðÈ›>1"à5|iì=¹dÎ/}±®­ùJÖ›‹&æG½‚AèR ­"Û6§â")´t©+@\Ë+mh„%¸8ç# ûðÉZ3¶ [q©G;<²°ñ ®Ï¥G£5•±«y½&“2üB+/*Í7¿„;kohcy ¡Ö0r‰~²âÚLïd/z¤ £òž)ä[ȃiGïŒ>9èbJ‡PäŠ\÷¸.áõq:*{^]ƒ•èÆÁ:YÚÅGæ¨IÉ‚5L)cP+¯È«+*cßq–gæØDšï‚^î?G×È ÷@O ƒ»Rqôüу‡ß?ê-Þ-¬×!wñE–¢˜ †XŒš¼Œó™öùà!µ8ÖÇî£UjÀs±Ò×»Nnèæ¯(tžž-~‚ž'¦—:`TÖ÷çUÞì&×x›á+WÔ)/yg¥…Tu ¢Ö^ý×Ò²ú6 ¡%®-<­W¢Iyze¶u<Þðî’‡ï Ø‰9zgœ¿M˜99€4Œ°u áGw/)N¤ba­ÜkÙ€ÃÝ/j·\€¿Z˜dâÝìC†nóÿ½o¬ö¥\ð¢H…Ó߬~º`{QU{[¡-I¾–¸µž”“Åw‹Ñ‡UHJ°5‰UˆN¡>'±ÞŠØJ_*:H‹çfíWuX 7dHa ~LÂC—ŠH±!±¬huDÂ{Ða½óØÊL›Ü¶“«®ÄÛhø':aG½Í“Ïk/ _RŽX1ÖHÄ4–ÅiT‚=‹Ò0b09ƪÀMòÀ^¥ô'ƒ¿°i¼p%ÅØ©¼RœPØVqÚÄê<ÊP1Ä"ª[¬F uÙ€²ÔP•£è¥ÄjáÐ(èæ.½E¢»<¡°f¡ÃÖ¤t!wѱUpl<ÓOK…P(bSÕþÖc ”“=ÚK–©£¥6&Ü û!H&XA—³™ »Æ,6›Ö´ ­àu,ZfE囬è ÏAíJKKŽKnóp<±b¤Íj_ó«²£¿¨e_é3[&!GQs—^aÁøwEHðÚs z-ùû¼S>=ŠîÍÚS‹¸Êô)*Ó|ÏØƒÔKÌ׈-ïF%ËûGŽ< •€tþ°–›ŸÌl±NÙ‘`Q~eOQŠ›bÁœoA×üRñ’­£ý¡l7héóRö¹zfåh꥖s ì] ÛŒíÀÏŸ¿~üàÉÿ~|òäÑ_6¡Î};Á¾j?4¾ÐÖª§-d>Íãïï_|÷÷{ÉÛ“‡¯ž]íÞÿóAüž©ÜNEþ7zãn¢äû…ºz*bµTg–ýhõö÷·>Î^˾GÉ>"À HuG$0»×q•mù㣂®\äºp‡9òGìÊ#;îyŒBë$Kt‹n••Êô´¡®ó!ù”µÛÖsªë²2o¦¿AîSÙ©í™Ö3‘ËG&VŒ›uy1ÐgÌÀ«å—Ë!¶cý›pŸ|Ž¥é}$¡èW;l*‡I~í½®ÿÞ9DûG¿3Y¡¯ŽýÍówã¢1!ÄÄMüR‹l®0qüÕ,\ZœÓÞ‹S,f¨ló)½ÅèbXúHÇb¡cbé‰{—Tª{la›ð¯v¨ô‹%dmüžÏ¥_™gæjñÄ cFž5Ü ”š´˜O0;ð7h¥ Š<ÂßìT§÷YÞàÒÓ2WP[`߸27;ëÆ0h¼~HãÂÜOL̯z?+LpuÝ[®ÅÒÔ»wïv·b]äc†â|øLPL-Då%à>ï}\¦§hˆó«æ×Ã?Bá¿^OX ­ ÚçhX¯ôÍß’R,±&„IôÀuäf[Bµ%^½£H‡¯8Èä$`bø¡$“´pE´$a8\æE4Zæ¤*9÷° o:EæÛ6”“"ÁRä­ щQ‚X¡…KË!ŽéÐ>+#»Ç½È8…$èʃó6¹áC%7Ôö÷™Qž$ˆ†Lܘ„à¢U¤€)å•ß®ö¢E&$ð«#Õ‹*,§n‚0lhÆž•Cö{†¥‹Ö𼦋žj}&F´:þ|ã$ w?Z›pÙ7!ñ²$®™)QÙØB@SWoW5§5é(`/í‘‚QÔä`ȱÝèÈ—ˆáÖ‰5ëS7n4}ãýR8Š_/‡ƒbÔrD&°<ÓÁnáð:\0Å ¤CÕη…'îv Þ!ÒêŒt $£òÖî²B‚gYÅ`ðÞ†àGÙ@õŽ¢/=~|òìÅÉ‹O[AgÙæË µFØ­Oñ˜!± £'!Þ´ä8`Ö“uOÕy Q²ãbš0ºÏü±¾m#jaN¼iê!8q~Z,*àÛ哸ôËۯͿ³,Ú¿ÿ&ú\á„ëÚep>½8¦è—yë^Ú¼¸‚Ôò"<Áª´™—c2IÆà=Og£*T ~¼iz)±¿—ù2©3!ÓwT-äjžÝÒ–õ´¥øT‰ËÍRŽ/bp¤n]8f #LÆq:±•*á(I½aU½jqlvåÉl\¾ê%¿ÜìÛ”ÿ[­øV+¾ÕŠÿ}µbѤVÞ‹E~µúTm©[ß~}ã:özviý6;äeZ¿”?Q &X1bòqŽ6ÀÒØ˜B†„€'ºäLÃ[YüÆõ| Í“räÑïNn¿Õø?¤Ö÷ÍÁû!N> "æ‡"ÚÙ(-€pÆy/úþ PÛ–èx¼4²ß*ÿÒtÙ_Î餵çý j Ç—1”YžÄÓÁ(ŽŽBåÇH)X)Œ,Òib¦s|°·¸.榀G×¢\*??d.!Vœ4pgK).G×±U`MÇ€±‚ßÒÖÝCéÙ⊊£è`鱃‘à+qôŽvᑘKV»,úVsDLYkzU`ýA g{vð)ôFžp¡*£¨a4‹ \S5¨¡ò·â g¯cŽ}!¸ÙÒ[‘)":†~ò,ž=(;l‘/\þˆs³žŒ=ã´¤æJh)•šk¥»™Í4Î #·¦jØä‚j©JIŠ9œ Ñþ]ð”ê12(‘ó*Ÿ¼Æƒ\P)Ü pÚ&‡Î—Ò¨oµã[íøV;¾ÕŽo}Æ¿WŸ±%aNÄÜ"üØ:1ØSû×S!{ëÅq°a3åç!F 
Y;àiwë=ÞRYÐFBÇÃ…TŠ®Hd™ö%ÎIKGĶl2Yb1HD?[æè®ñŽŒ±€üXÐ,'ð|_>`þRp.hm>EUCìDñYœÚ¬>‘{I`†¥€Å5'’ðíõÙ·Šæ­¢¹¡¢ym-êIv)'Yû}éŒSX»w̽p¶á­eê£y‰C÷½¸±…ºÇà%6|S-Q\Îônu0tMù‚"`Ó$kÄù$MòÕÕÀlš<¥–*Dû w”ëv”°Ý iq€9ÑrüQë7•ZÌcÀHW<‚5Ü ‡,€Ÿ‹tAèÿ´Œ[5øîhŠWAΖgôûLgdסún6”–t¤òÒbUñ0S»H¼¾hhM"1›’åE N™·î4/Ð[¨3Ö„Ç ^Hcáìa…Áy˜†E, >Ê£™%—E9Ö@/~Z"` 0”"jí¼ÙQ%nŸÎïÚà¨ÑéúÂ;“Ÿ µÒl¡47Z™! ¹+=Ç" hâBÃ^dÓˆ^)’lZL*v ån¦jçrÖ)!Ww¨~‘¿ÈFˆâã˜áÚš\r_ –Š1Öl^ãÆ-ÌýgiSóÊ‹vŽv:ÑÎà?„ÿ´vhª;í›Áá¥\†8:Ëan”Äú8À´MèÀzXz—Hæì@zÔð-p‰'s³5Ëi’«*¨nTèüje½i,昦Ù(´°@³F‹ŸÜ–ô«Æë¶2‹ws—§RP¯£¡>9·+®¼WáÜ`ª0}Jè)P *¿ŽÓ$ÖÔ‡ebq"€ ÀõmrÕÅ‹nhšoÖ§Y8€hÅyyù!FP9K-ˆuÛ²Æ@9¾yúý³W/=ìÿýÁãW:ÑÃGß>xõø%ÿé8ÈÓï¿ðäadOú®½mÓFç—• ™ãCâ}ôÅ“§Ï¿ðøäõøîäå£Ï|óhC™¸ö°ÀÏqåg3€°ÁÅkÅ.L£ P¼†ËFò„O+¾è¥ö»ƒ:kÝs#Y›‡?ßúÁ¹¹zɨËÅ>M‹²4±A/õÌ·’R\±ãŠ"´úalÃr-üTõ„ÚTh•­T›½fd®‡ò*½øwÌûº<š-G‘¼K†¸9T.ý9Ùz³¡‹å€9+ŠÀê' 3!°Fq¼ÅSÌOËGjËQ³NçÐÓdû[fÓÈWeܯîc’uIª=Þò© œ»ãèäÉ·O·xÒˆ7†´l½\Ùx RÑök„zcÚ¸ÝýÞôQ¯B ¬ JÑ]sêâÉ{L•‹ã¨ö’Ö?*¨r]4šmÿê"3äØðö1oñärös:ßþ…FÆ1²"j³á_ÑmÌ–8°65Êþvç„(ý*ãÂvý…æaù{œ§ˆI¬•ºbV\_Ó2¨üJNîEè]ŽÞ¾Ìœi—¼´åkP¬xÕÌbª=»DTÀ­Ac`ÌB©v7ˆñÙÀµ‹cØÜµK®Ù‡ðÐÇvÍ*È´:OrÕ—ËÞÛÕ §wïî+³ Ó²ûª‡®p[á÷=PÖø¶¨Ãë錞\ãÅE‘Þ{rKhC÷ÇXW0æW Qó•²YR€®NÐQ.\Eª€öèý ®Ÿðz[9T%"ŒÒ4qÝZP›¯c.°””T€¹$ô“EÀ.VW]šÍf³jDVn†ª9q°lÅ;¦Œ„ÞÜ®A膫|ì ¸½Yë?Ú›ã{\`õ¨<§jGFÀ±a} [²bCÚè‚)ZÝr”2º%j —“8ïøQ–Kk*‹r9Zg.bh~ó±®Ñ5,êÒbÔYôù—B·2¯r½òâ7è¡“Uif%ô( ¢‡ý%ÐÏÎ’ðûÐûµošýá_Ö¯"Õ/»@hÂͨ9Þ zþ—]3îOÔí)’˵kxÎjlûuàO¼` ¤‰–ëÐCš(Â!é'—Rÿ½—¶´eCF)X(uysz…Õêã ÕÀÒÍÕ£zv„0¥ àžqY=Ah‹ ›¡!ºWã{¤²îU'»!tŸ¬,G®€Ð\z¿o<½r‡(úž^:=$Lè¨zŸø.­j_¹V|OÂwº?Ó;¬‚K)TH£ar1¿:7i¯è˜ÊÁøà\h(왘’×%ì¡enN{ŒÌ i˜t)‘µèwnèË¢‘EÜyµ ¤Ü¸+·‚“E^N½žçz*×°dq Ëb)²´Oàƒ[( ÙM¹¼Š5Ü{­XÂê` >x®T#.ì+íâÐß?/âz·JJ…/ŒûúS>þŒÇnTÕù·›Ní­¸9[ÊÈbóáÔo(ux!¯†m×AûÜb7ÌŒ ¡ˆŒ/åJÚsc²]±¼¨su¥³°XDR†Í³%½TA\˜Ê)v†±#Œj¯‘³Ñgƒy¬ŽFG÷šk(¼#,^r7Fâ‡èÕnQ6¦L¤ti:,xœ"—õÅYÄÕ¥2¾~=¹!¶‚ƒ žMV'm ™žïÃ8úð6 À)½€êÝŠ]ì\%qš›Þg[ŠiX³©¿üÛƒWi÷²ÿëË=²±ÕÓ´Kx I/\b:™hâš’|-Bã†~~'§s`‡2,ˆpåH•;)Uu„ÚU%k”Úri ÚRáUŠÌ¡é'”ÒquiJµq±¸éhDI;ßëÜqiŠÆê$CĤ²Î,,¡eYú‚cU˜zâ%‚|ú„òV-ÿ(äÎ ‡0$Â5xu•¨JÌÑ-±üR²,!”˨TÇXóþ+–Ìßѵe$œÊKU]mõˆÏ‰ø}®”E,NTÑ:ýï‰PBK½þ Í|ta¸YY€zÒª?–x¼Hòòˆˆo„) ô·FMkƒ¿oÉê¿Yý=˜~ÿDóÄ´vzµ¦ñ0Ï”«j+©ø•M[*\Ú³‚pƒ6Ê“T?ÂÒ’\!ÛAàóv\.èüôôÏǧ§¬ÖÛ´ý‚Þrˆ•b:Û+!ÿ,Á¡Z³yŒm¿ÚçŸë4lOÇIö«á(Ù$ËqH£-r°å@Ê9¨È HQ ïUpn¹Bán¿}ëÈV®&^2ÿ{ZõÊšÓáQli¿òõþú Zz-°Š›±2©miõy+€vÔÖR*¢ÝKE5‡àæ°½1*Ç˰ÖVmýHrÌ£b^Mk× ð—×’9Œ—rtÔvf6cš“ëW|Ÿ˜X"`–qlè líº\ùD 4.öŽS¸þd¶=/üÚä…ïÇH¼½7Ü ­Jÿîãð¿‡ë >RO²+_ð2G±9Š›õð>ýö£ÁA48Œw6{Ëáû¼e¸ ×Ï"LdF~)}¬5n'AÖýŽrÖCHR¥Tå@•*9o|ªläþÍœ-ÿàtÉy]»\Ÿñ]…¥)Å'¬;w¼1_˜óuÅw6x•¥1«ÞtPû¦.Ÿ´M&5YôÀc5ñª­;ƒôž/އ‡ÑðN4¼»Á‹fˆþNëôl6 aüàË?×çûÑùûžë®B'ÀÈyºÎY<¸é³èŸòt"ï~¼ãhß{îÞË 2u Ë‹-m4žÈ–úÍ!²r%ÄhA5©ªéÔÔŠzE$%€%¹þXøLõXŒÌ /Ö"lÎÿnÔ AÖfN& ‡5“üŠ»òºISj¾Áà3 1'ãäÌUÖäàëèÇQÒHîhL(.@Zk5¡£fûÍ–âõº5ù€‘¾5‘´ÁiSx- j'ŒfåÅÛþ«©1§ì)kE¯d ™rš™_›o~ ÷·i˜î >Ôµ°L‰R÷û“¹¥¸Ã*ÔÉz[ž£ÍëË{ ݶæ7Wüð×kuA¥MŠŽRw¸8Qûî·ÿ9÷‹xœDãI|FþÉâO†ÂÅ“+tØÆùðtÅ1'´(Ÿú¿eKs¯ÿç}}ç~$öÂþ$ñrìšß›˜R€/F¤£ö˜?Þ‚âØP})'<¸´$¢äìç~@Ǫ>?±ñmLÞI]{0_7Q}Ù¡”iþ¹‹ÍÊ¢n°;Á >B±ªùæýù ø K+ÎÁ°xyžÏ ã¶Š9ăÎÇ¿/)µkXñ6ký¦ôËw¶6]1 ßѽ é²Uí£$ý…sC’‘«-Wȯ«ÛÔåI¾çèV.×Ax¹êTóÍßÚÝrMºŸÎš†×¤jH*|îØ|À)ß O¹ªcßW3f‡( ÕFˆ])¼0Rã:yD,jíöûwR2 ÜHWå®ì/ßKÞ@ÆðFño!_µ/²EãÝ1Äž±myvŽõׇæõ†>¼2—Æ´SŠòîµr?Ç¢WqŸåo1¯›°ÄfbõÿÕCIÞÃÜWçöC±6áúýôÞ›³™_3ñ¦#ah±âj…–_±ê}3# nêf^.¸v|ÖÁÛ2ÏNheéEO¸ö#ÁÒŒ)Y‡Œ—yÄORaJžéÊÎñ`ˆ¶KåJC»5`š„òr™î ðñÇé:Z)8sw­™ß¡÷\6ãîÔ9ƒ¾“r¸4Æd2xà1®bHÅŽ£Åt|ÿn¶?!JœÒÈ„øÏ[ƒÃvñá¨FôGÌe6o9öØ/. 
Ìò™íû¬Ðþ'HE÷ɶøê€¾:¸)êzð Nçx˜c6?¤¯ßkú¥ór““>¼ißÁŽ¢;ל۶ ³vPÞ(5Ûz|ì¹áXÄN|'½Ð⮆O %#¼£ίNÅi¨2£“6RÐg1 G%hiP_=\lšÃµ VµE<ž˜w&#÷’^‚¥’€µ‹Óü90äJ8¼=±þ¦Òã}Ìdv²™út Í5îD;9Ê;×¥Ó -äõÒBþÞÒ‚?؃m» ÓÌ÷o@NÈëå„|{9§¼Ì!}¹}úH/ÆÚ…k'©Ñ,>¼9·Þl™yµ‹ÝüàZë~3Qdz3¤c·êíUù+9&x0ºÃH­ü'ÉC÷«7Îqâ••ó»÷C½ó2|yÄÝ1$Ó¯Èx™X^–1þXa£]ΈêB" ÑéìYÓ'>Í8÷)+ÅaÆ– ÙÎàý–šZöf?\ÉʪsÐ!$ó#–”ĺ¼ã"Åu¨‡B@œ‡-¯ b!˧ʀ¼àJČۚP´7¡ÂMxp‘|j”å£RZ‚›²#&Mó‹¢üæÓ]µÎ%:RS¾Ç%…ûóæj7ß§EᢸŒËBÉ“áZ^ªŽäFÉ86b)ʵÕ~”˜‡;€¾ÄR Ié] n’FDª*#¨²QP П :¦9£~HÖS5$%YC Ë{„^å`›à-Ë™C¬4ߨ5ä`ÑR§X2*™$alÅæNB-Š‚\ÒCˆÃó(Ô¨N´x7ÂBg‘û;”¦Ë’ ˆÓé0ñŠáž¿¥ÛfVž-1•ŸgBþòè±™²‚P§&vºœR§{2Ó"< *ëßC^™LÉx˜(ŠF o€½`þ-¡þ”Ä P$¥‘˜*–@®²%Ûó©Z¯cIå“Ö«>ެ²u~mÚªíÅK“°]650x/z†§C ËÚ#î²b›ª«Z_Å PÚ©9zi!(gB,ývª¯Â M—*4™ë£0|E.ø„Φ gƒ2Âm¼àâY8FªJæ—¯ÃT¶´¨‹ q˜`SX_ˆÂ;wk_‚‡-¸´‚ñÃÛ‹ÂCbcK—Y¹!¢ÿOã«A‚ñü*V çA÷̌î¸.‹ò±ËVÒ,Üõµ0 µœP̋§½£Ì5ßœ«aîf•ý)÷k3ÕÄd¾ëx•Wv€:Ù²&È zì¶*ÈæEí‘Ïî”)ë 0¤û•î©$AóÍ Õè¸^‰öµ¯ÀîåÜöù<µ6ÙÖÀ’‹ %Y¿µ¹” 3VX᤿éK]醯§L^uɆ^ô}üVÓäTðC oá4á‘S<_S gå»Û«oçÉOË”­ï†Á+©°²)ƒ¤Bβ„¡ãý’Zœð­.ÿ¿eÍxŽå·#&gëíßAîþfƒ7TnȯZ …ÙyÞõE!¤uÑàCú•§~-t¤bÓíä†84êvËÇG»Õ¸»Èvé“M kˆÂ`iØ—3Íà„!ËOÛjsËLR‘HPm/q¼U µ²ÞÛKµ$çqaVÀ!-|œØ™Ë+‰eˆ‚Î2]Áx††½ÐÅ9.¶6Bïã.¶ ø—råò<"Ãö¦%Bø"OÏÎÀÉ닸ñLCu=%ïöÎotçãÝQ:F?øb»ûÿ±îmð8=_ÎfžÎ^*Ê÷ijU÷÷½iÁšÜäàk»:øAì¶Tˆ-ûÓ¾iÏüïït®9Q«û+m“kdÕl¤L@‡¡ÄÜ댌JíÅOZos”â>ì–$œdÁA#œÚÀ~ÆÖñÎæ˜Aº›X¨|´*iTÕa€yÑðÅùTÁâ3«šÌb®}„«†‘8d˜ ›8Õâ’>£0ô¯Ö˜B=;ç9Ûcf5c*ÙQñð-ž-§·Ä*ÜA~h 㟀ðDJÆ\ˆÉÑÁø©:N…¹Iæ$Î'i’_Oeƒ‘o£³±Ž–A½zëÛ™o¦¶ô¤UÐáïcE‘þ ´g’Ì í ¼Á] ´Û«LnÔ®ü±À²qÐ%™°£~²©âE´–×Ú±f\ 1TïHÝr&M²á>É(ˆB•+<géËòh!hâ1hb¤]QdC00ùâÜÃ1_SöëVÿõÕùŠò¾•b/ó¤ËvT¾s¿ª!@”†K0Êù„ŒÇ·†ÿ7$*­…èvâÒM`Ê×oâǑϞ—*O*\"'|ªÆ‚j%ÉÚ"’5ëü!e¸ªRZ´T¸ÜÏC•b¥ÁKªŠÐª:ÖOX–Kmwü°ª¼äÎ’Ý~Å;P>囩5A(P‹ÁÝ +ím\âFn'Ù!E«\jF‡É ™%ãtQöC5DQB¤ õ­0ôÂÐÇTÚŸ \BÄÕŠÂPhál9$9zÊ9œÐæ ’X•ˆ†Ž3§ô&F_á8 8 :.lO¯m"&˜‰,|oñ&z-=Ù_ìàÞ¼ ›§z­Ó;!&ͦdÊQ=Zæjt»çÉdî/ü3$Mƒ¸H‡2™ ½sø Ù»S‰žrN´hc½ ? M_^Ò陹šAÚØ"C«8Ía†æ;~_9ÔÓ¦ÝTY£W×Þ§sŠ2¾¶-ÝaI[oÅÑëš;š ß&‹>Djèxýž<84bóˆ›ÃK‹Ä y¢7Ý‹’À14¬ ƆÑåI>È ¨H2¸Šö÷8 Ë}Ê‚ "øz}U¢CÏËÁkË*('.;ëu溻ØAÞ)—G&+¼‰ðÒý©ÄWÍ=ðø+ÿq´€aL?Äþv9öw- 7+sO ‰ØL³QâJ\Gà`7k£¤˜ @nëÔ4=AGÇ‹|\„§¡×ϲY÷æ‡0Ž'…î‰jIÐ29y›5»Oe³1UÇKÑ‘ChDªº"Àï:(oÈs?LO·‡ÇQÏŽë}ÂÎ7V9ô_v³:¤«Iy)Ôí\EG¸ƒÎ$QK'3qhe`••tàbV'd1̈ëy[mò L\´‹Îâ{6ß?g™1šú¢r+›Áûæõ×ÈN· ïDBo¡Õ$BÏÁ…½\¨^R´bŽº«dŒXQ¨BïùÌöêi‚M`²ê“WY,ZK£©õý]t_u w<½Çn_ kv»ycõ?âFÛeâmÂúLC«KXçIu—zl½r1Ywn š‡¢±'qa(§ÔŠMè+-¼kË:ØM`Ï:}ÄØ^°O/œfÅ‚a'WtsD§4ºiCûñ;iª*ž½hz >ßö \Ì¥›XY¿á»çY¡ÒQœáÌÅMAȬlD¡Aê‹PØpTE~ÅŠ^Ë™çÈr=}ÌMúíÕÚÝFÑhƒR\£ýëzFû¼åàÚoÙ ì×èðÚo9üãT~Í,ѵ&Výø~ɲt‡”Œîà ÔÁA¸ƒƒÚK†;8¬ß‚I±º\I×ì£Wò·üí`?øéAðÓdãÈ|žÎ*MA)7/­(|rPùä°òI¹L~æJ —¿ }†d§üa™F 'ØP©’òÊk ?üU–ëio-Ô}€šôí¢ôÝhû¥ê;ªè‡ñb™'ÐH¿û,=ù6}úåìêîÕ“ÿÙqø_ã—{ïÎJu,6 Zõ#ßßlØ•ÝÿSa ¾îeðƒú¼÷Œ>æŒnnƇï=ãÃ9ãÃmf\r®Çé'¿µôä'Ýù(rY¼ˆ?Žhf^´‰tvçº/úÿŒØÅ³»É;£Q|h‰ð]r&,'²Öâk+ |œ4°÷°zç=4qéãù¹Y|’ÃÞu¥¶Ê Ð}Ü)õq§¶ƒÒ¾þûˆ€É«¯>ñ)iü‰;Ã6=Áel>¿Œm| µ¡ÛßÊó»Çk×Tzñ÷ìdä@ž0*eÊr ¹ÍÑ¿¹|[f0·"î­ˆûï%âVhæžtå}ך÷ëÌ;ÄF>üìCoÝ\Ø×¦{å,u ñH:†¼QÔ¦¡qîAœHë„!1ï_B´&¦Ûº ee­FÇ¿ÐPM굡¢…=Ì$Ê}K|(JÁè*0–CHoY†È–°uþè×´5–¬yþ—û½i¥|]ÛèXQtœ/«$¿º/î„¡ž¡hbrRVŽ öª9ððà{Qè)ºPNà"ãþ ñ¾>»Ñ&Ò­v`ü&…ÝÊ^ü&¤ÛÖŸìŽlºï'Ù0övºý„eZKl•9hcdÖºƒ“!:G´J#ò~UèÔ•¸ÅjËx„ð ÿAheª‚Zv!\v-ìÄi< ĶPµ–¼‹Do…ÛÔô©«bÁ2þúeË6¶àÔ=‰'²ëVJ)ðWæN½MÕâgQà‘Ú¶æP”šRÊqÑ®j©5$’šª* ¡lIo„*ìüÔfãÜW5 Õ…Æ\ƒúçW>Ès ?É_Ö<Š™³Áçà›ÐCíkúý<Éreóæ]oo«§p´Û=ÂKºÝCþ>önPʸÓÛÜè´…|â !¶i¦Õ®02œ_ [ ÉûVþ6fÝë«÷£ÌEkõzû èRé üT…Ó_!NäS ¾•¥ùÄɳGË`ðÅ þǯj{s2¾†šWÇå¸_þŽ_v¼&ˆGnJ˜\}hâcøÇ–WjËg€£½Ýé݆ìltÿW‹Ê1÷ÚçZº@ó~²ð˜CùºN( IŸy’”#XI˜ãøM›Ã!ÔĆñêKdX!I"%oû‹ÍíÐOSËÚgáÍ>ë= YQ…Om0§‘R§±€ÄÚðµâHÎR˜ýÁ økÇn®íã°e'Yö6B؉äv¥ÆÇd›œ~ôÛ•vuøöAµâ§KÚ9Ôu[‡DûE¢]˜¿H=,£Ö v|›í?j‰9Äw²%¥Œ%¡l1\Û½ü2Œ&­? 
…guѯM nUb:^ÔœhΈ%ÝŠS¬<£”[On]N´Z ]ÿ5›Á›d}™ºK#EHû³du¼fñD]s®öJ©1'O¾}Êyf×ceØÑ{Ñþgy"Õ‚Êôm¥½Úá×ÜKÁ`Ð*tîÞ%$”O‚sI€¥(t€.’œ¾¶éR´—i,6)U,u$l‘Hü,âFS£ãC´Ê^Õñù$û“E¯{àÚs‰2 Ú†)WI?d!š*Ögˆ1VŒ€cvž]þú+D¾öû„ÞC:Åv®@eÌÔèí6‰ÂKÂωœ9¡ÖF8ÒøAÑ÷þ;ü ¥ÁC¤J§ÒeÀ£ë_ý,¾MFG:Yý8ú{ïïù-ü­pŽ6q‹Ê»½ä]XÚnžL G^YgþÀò»çYöÏßÍq]›ywŒY‰Õ†¦ šb€v ‰LºóxøªzT:.›a*Ä©"AÖ™‡Vº{˨%µOy ׿£Ò7à³á”‡†›eSÓª¬¡¸aZâq4¿Zœgþ$!×ÅÔò5c\YÔÊX*§-ÆÃó$¸ç:ɬ2¹¢½–<*_ˆ<|§œ«—(0\Nl Ž1Dnª¹Êè«–ê9¾ìï ¾ w¢©^jcvA) _!JTïJ¢º?æåìçt|Ìp°î(™CeÐÙðJ6¾ºJZ<ØÛ÷ãÁdœ¨H,seˆy¦y3V ”ò4ÒþઌÕÀ•¤\ªfO¤9C"ÍÙÅShÊïU~T ÉuØêMr¹H“KBzGöž-8éðPYí2±þIûŽ€ä¿†y«æj³@¬qbf{S¯hm—@ç1¨„ G¼»§3tx/GT–n)äL*Ö´`:ÏëÔÈ…‹„S‹À¡Õ<Øe€Lh_Ú4R‡Q`œ=´{ù8œŒ v6É…~·­Ã×Cô=]ñ2†*gU§¯ùyI 0¤+’…‚Á'!Ù GãÂ^–lÕ‚ÓÌÞo?,Ç2V¾\˹*O;*ùÔc;«ýîçlžô>奯ʼ1n±{§§ApzêžñÔä&âä2¾ª[è“ñоAJ5Ĉ€J¼ÇNO׬›yÚ‘ˆè«î‘×)ç!„Ì:!G¸( Øä•©—€ˆ,w:7tîí̲Uwš_NÔ©~¤˜—Oq úþ±@o:^Xð€7¿ïÚ#Çèxõ1·Þ)ÈAþ»ˆk31_°—¾~0£„Œ÷ÈÀ TJ¿tìIx ï¤kÏ*.Ò%l´uRÁ6€F³àCZ¦åQG[³š°E­½¢®uPm•¦°Ì¹Ñ\øQDa1GÄÛc¢‰ËXdãÅe‰“ ´Fr«TÌ)C#»0(/Ji%>ÙbQV¬DÝä«.“, ª°0W‚5²í½¤ãÈGO¾Ò®r,g‡l¿°vÖÀhßg¿lv¢Ðû|¹ NkÁÈ-ÏK†Ð*Îa˜gEa'j8ð#è0•R ÝÓç3sjòÏaS§ÙÍSköÚnpÃãÄ×Úëª*ã ©”Ý=CÉE8s>}ýÇLa9Aó…9·€1­÷ŠÉH?£ NPúJźlA !)§ÁJ2ú<{I:)9ÃŽsUºŠŠHš¡5]5‘¸6meî´„Â5À`„ÝBú 2®#[v}Nvhöj¹»Ô‘f6ua×p`Að”úùùpØcB¡–ØMA³ƒG¶-€îŒ &‘é­#­ª€³`Q2§¦–ïí·ÖcÅÕQŽÕë9¶RUty%†D^Þ\PoðâË5ÜÂa¹Ì¬Í¦$p bº9EÌyVU½`PF§‡kGàË…Ê;ˆF3¥‚A`˜!¢|AÑ÷ØžbÿÔÂQòŽíú#40§ª„c„¦ø÷8Cã%\îêI‚TŠ.ž"f=ß‚ÚóT­†ÃÇ ãÁ®ºP>BzïãÔÅlé­¶e+p á:‡Å‡§žÜ?̓¾ Ÿ«™þŒî*°Ÿ%|’ïõ¬©4(ÉšÞÌ\ãH¨òœªhcoSoßKççÁ3û(j)ÈÛÛÐiùÒóð1Ì«³\WÒÅòÁ2t_Hó5ši µ~Ó¥!« º†.µÔ«é8, <À¸ߟŽz+õÍJòÈ 6ôŠ )y© lÆ‚ˆ¥#çœÕmÝS[E~Ì{®³spÿc¯ˆ¥'š\Í©¬‘Fë_1§9¡ì)Ÿ*û*]PÓÓb—Vö5ÚŠàDcÍØTM±Ë>µ¡ƒ¹›Eº 0€”œ¾|òÒrºõËh‹üÈ´„Έ8¨l¢à¥–ŠKñÃg©-ɉT ,¸{f,àEÁáEä½*ÊS<@î3.”ÀTÏ5«Ì7?VP>É(y–Ì’\*uh×ȱñ È&Ë– ¶S!7šOúÔ¶œž¢|tzjVà;£b^ ë#­*L @wKÁéi§læßõÁìÕÍwj˜1g½å˜š•L½W1Þ¹:¹˜ ÍâúÌ ÙW…ÏuˆÑUçÍc¨>9 y]Ñ„ë=¡O…^o·ðÔ£Ñ×ü¯[Äã„tŠÖŽg Þi<Ùlx²I:¼â˜Ã+Øc¬žk¶çÑ»y’§ÉŒª¢]%ìR¿©#¸öL2é§ÎEÿ“Îç¢÷À«’˜@V2sB¥žtç:F#Çðù€m…êäªç$4 Àz/0{k¯ª`¬VnØb]4¹PØÍFÇó²ëê÷hŽ£^âÆ*«{ÍÞ–€³Ar%Ò¿lƒë©O:þ4YÄÀ+J†\X§R»‚“pø9ˆô¦½$†J§æ;WØ=á‹RY¯ û ä¯¯·˜ÖÚK˙ߵ~ ß¡Q¡L¼ÑÚŒLà 'ŽŒ¾çì\3µž¾ÉÚO0eµÊ5œaÀ…v˜ ã )h¡­W‰jd1/=ÃèûŽóx¤ãHæHR/X3< ^;È+JQ¨"°Ä_Þè²-!ÜÈ3·l^%¤Žlv6«ÞQÁV 娄Z¬Šsó v|º¬Ã>Z¾\7DeÂ.^Šì®˜Pج&ÍH—«¢úÕ>îÍÍæå¤gîˆÙ!ZÍK Máê‡å~q5»ô;T›©ÙÁÌÈŸ­Ò£íĬ´Ú^´â£ÇOž½8yáõóÙÂÛ»/6 ñ W|¬k «[JÄ+"*õ½!`…²¸ñ €íßà±Áú}0_öûí5­é>no3½òŽâVš_ïí½1/x]‰•Àsï1'/£;¥# ?o6Ìv{ÀØ n¨%ì™1,K2»HólÖ;K­æ³¼üîé“g^~W>Žª?^Ë"™ÓeMfh××kÝÏ›7þ:ëf_¿:yüðé«—ý§ÏOþvòäÁã¾~^U7>³ªÍúžKýÀƒ» öÙÏø/#¡ÛÏ^¹®³ñ@Csݶ٠]ë1HIkŸG}¬7hæ¶÷ûÓ8õûÍꎄÞу֭öævU!±#Œ;`5ºËÌÉ óÉ˸øG˜ÐŽî Š[/ 1l}dëËסž¼÷õ‚ÌýRb¤ÁöiD¼]¥B”}9™ð¿c²€è€¸ø‡%aÂr¤mF åeÐ!’²“-,¦…¡òа‘€®!4«ÅÛ¡xÑÁjåðà×añ7Åã8÷VDÂæSðÔ¥?›}õu¤GyžA)G?a}ùE!©à`wý¡VŠ;X#Æýz‰ŸÖîmªö\CW?¸–²~pmýàúêúÁZñÔÑdŠ:‚ó”Ã̿ϴv‹ô¥€E*Ù·I½"ËèWçq>_©Óã•åñ}ئ;ÇK!†(b±œbÝ SAŸ<[œ¥ }ÄO¬æð•å\9À´>¨ÚƶÂ| p…æçÕ˜ÓFö; ¦^%æÁj*Åð’ÒãlLõÁ •@PZ³K,R‰|Ùòì|A^ñšúGÑ4I”=¤²¸Ip*ÞxÌ|Äq¨êC¬»5È‹’ç ~È ‹F ÒE4^æX“®‹Ûï]G•Ê"Å.{%CPâá9b$å^éÂYêñÇYj–/##ôÑ? 
§¨(T`߹雎˜[U.–êãuQÀ-¦ÑÍL“äH ôi›þ‰÷C&hÃEƒÐ¿A¶™‚—oèé—RtNnØ1ܨp±‹nV¿IÏ0G$‡|‹–”çœ2µÞÒ°âSYDðà IœÏ%\ ³MÈ’P5íTå¹´ƒ¨£oŒ‹Tâ^¤|¡Û ò»/aQܹ…+å€qB­)%UGªJ‡f”F9<Ý8Q Ð--º/M·\'ÅB á ƒ‘â©£´ÛÆöáZ+!ÕÝ®ÓS­So¯¸Ãî­ è໵J9Až¨Z¹vT–j¹0ùÀ–{tLV¢S^ 9•¥±ôîlžP[@ÏxÅí<š§©Œ ûòòÁ¶€He)œ³+ÞFhO_H“˜¹ b¶ÙPúJ=I(€¯N5bVÛíâìÐÒXZ~¢T>.l":ašt ½pN ûçH)ÐJb‰þd ·²òÓö,Y¨‹WYýPÁ<¬d™ 9q9Z<¼™¢õ,’ŠCß?}v=çØ†>$ïɬp€bè@,_ª<„ý93¥è^óʽԱ!6Ön¿w··'JÅ,OOW暺[‚»51k „Çæj/´ËÛÍÚ›,|â.ØrÁw»+ßÞ(…´0Ûqº( ™Èk&¾sUB×Çinˆ˜Ù›¹…2‹ ´Ç4‹W…í| ùP(‡JTn-wv4éÒ’xñDÏ8ÑêÄpŒwDàÀhVˆAz0–^wD!ïØT0äIàÊM±Câð­!ŠMNWÆl@éEÇÑùb1?ÚÝÅ?{l:éAk7$X¡ÓÚ4ìD ]-äÀ_îhnP‚ôzY~¶[¤Ðã.Ò¹¬á¿=-tQ.&2;Eâ .û¨ºÑr61òQEò`K}pˆa†f¥mÂÈJ¸ÕŽª f%-EÝR½vÇ<ºcÃZÜkò)‚ÈYîã¢.ì ­OE&;uq§6–ÒUCaÂ,ô„×ÊŒ0ƒt7Éš“üCÚÞ¸à<¦ Pí…ð€dR6;Ÿï}¬uF·IÉ5éa> J=‰àAO®¼rˆÚÕ!6iñöfF¸ !仞ÍWS4‰~ ‡ƒö tYïÉM 8§ýÞÞjÇíƒÙ•™”÷µRk;p(odð%kÜ–§€~hþÎ_±ÈGïµ:m Uä‡À½Ÿ9är7w½À¸ vv»`¸ „û ApßðB ?÷BÀ„f»x³5H½ÎJãÔ(k¯rÑ1«Â~_è’˜.‚>›UªþFeA*pìËv³ã#]ìPÖ¯ÙŠ*F/ðŠÂ© ÕK#rs€õf-DÙ¯[Aé, öq©”n·ç~#ëú‡¬À¼YµÝ@¸T ø¯~]´ï.%l…úÞüe5‘ûeX(²}9ŸSr8!!i¿j—.Ž£M 7Ýѹμ=xüàm}ÀÂ%/=€5†YVsSm[3­ð½›Vß,žÚ†Dóö0Õ¦GÖšvB–>÷ Jc¢Òè$ 5ìâñBE]† –0øb ÐAJ×°ÜïU}g¿„¿Ùt7Ÿ|„½„XôOIIÚøe–ìŽ m-Ø3ô‰"‹²+u¬c§‹0V³_7X³mx ‰9ß§™Â)œwYCó0On6Gð.E“ ¹"u¼¿˜fáy i[£ˆlP §_¯øYSæ¸Ô•‰&Óîhæßˆ÷HÅF)€Å0ßá[ ÉöšÊ>6˕蜸ä”ÈaM{%ÞÜv#u‰6jœqn¾¢ªÊ$±®ÂË’éÁÜ\8ùŠ­ÔQaåukS#UºtÖsPIb¥xt@O½õâx .%$S×£ ¤„:‹XœëýnÚ˜¹_¶¨œQö2:"˜„•óÃ!Ù¸.à \:à ¯Ý×oÞÿFæJÖÁ5«¹³ˆÝ2цêJ‡û“WMÇh[¾H!\ 2Z=‹¹N4ˆºû„P4ñ,Kã!xÌ“KsâÉiinÏ«ç£i z`Neóèyœ¢Ów;Ô¾ÚÜ9B¯SŒÐzD-ê½6íZ>k^rç(¢& €ãO&tÚ‰NÅ;ÓÇ¿]ÌCÙ›qjÝ¡xTÂ0¬æ< 3¨ðV fÐ=Ù'÷$\}s„Ó·Htæ)´•BëˆË$žsÎÏ0Æ’ÀõÄü_„xª¾mp† ™w^ÆÀ*¾iÐèx# &ïâáÜ0R œYð#Ü•Ó$.®¤¶êé ²-‘g}ÏOtÇ æ,Á‰!ë0JÎhh–±¨Ã4¨'Ráöðy%Aäs5PùR&„a—ÁiÌ•* J§ Ï™^Šj|q½tüCòûÙx‰EfÉzÑ7gî<o â =%ømÇ“MŸ›0¦jy¦!Ę=À!sQÌf•Çü%O{8§åNÀdk€úAÓù6 «›7œi@\GÔMOíçB%ˆL¢ÂëâØ#ñF6 0©…¡+÷vª}”Ùã €fI$I¿í- ú'›N—3¨\q)†°“@4ë®ëð/§ýPpÅY8üÏhuúY´!ðLïºvÍ-³lÅÃT±É¡©½úlÁoý"É/’¼Z±¦úRèr…3Ê –‰¬BÌ£?"$v<)Z«Šˆõ)/Ы»2ª6éàíݽÞá>:çï„’Ó¡ I¡ÝýÞAmËQ9Ý}øe}Ÿžb\[ ëÆ1ˆnhCì¡z~SE´þP9<µ‚Cô›½f¶BA(þ“ºƒuIãY5g#íÁ°²fÑsþèV† p«t~9Ù jsºÄˈývðùDuøašívôLÛ&å|XD›Hzt½j¸cÈßÖhä/º3%0ŠlŒzuQœfu‚1œA§Hg¶È[UXD™™\÷àÕâ¾°*± ˜Ì„þ휴Hå|Íîþúãh›Vƒ[uí×™zŸÂqÿcS] ¨ØMß„ Ã/q©i<úûxNQÇÓ£Ð'™Üò‹l2òO‡¥Dh¹£óÝW×=¶záz áIv‰hðÁËKtQ_Ý}[5ö)/ à6ï×]è}ÿóWR5#x¥7¹îûCH Ã,.áMce$ŽQ>O&s³T¨Ã+¯…Û”§²‘J¦š`$…K\v»_%@G×8–¾Ž£îÅFlø•qÆ”·WÈy¯w¯vV–²åPªØA±;¸éƒ|Pû‡Ó¬ï\°àØŒ^P.ÇÝ'xç©ÖcòÈÝ EÌ`Éô!×¹s=££ò(÷楅¨qcp\öRRR‡Îå¾…Ä¡î¢Ñ•á´ð®àh°GEÓwT4%îPoSÇ™£nàÞÞ¹ÍîÜÚëVe„9@ô‚W©k¥Ù»7P)Qu ãÇp08­'ò’yI©?·nÕN˜èB…Ø#WøÏ$;@Kô‰)€&rAJã„•êK~o’*Ç‘éª'ª¿§rí+ "aS‘9½]Xc·Æ*yÉÒíé©T•TaKåwÒå¦ –ÂÓ™FÉ"NÉM$áíEO ‡}’ŸÆ«'Ç) ´LGxIO…óÈ¸Š­˜ÂL4»£0Sì‡ñúÕ®Œ"ÍÙBâ7ƒß$÷å§a´ðÊfôýûԤXN{51“`ñ0wô»—/ŸEä (»’8ZŽÐ¡SzQàÆ"º*ö҇෠¨jŸFÖn‚RÝcæPé\`ESbzA<€ eï½Rüè2Ì.ôÒ²S[wE¬ èçIq®Õ‘ú&`a‹á/ÜE]ucŠeNQ§ºà|å1—È54—õá 3\XdnÒ㼦å/Õ¦˜WÚÜªë Øî¸â)Ý (Ͳò¹wŽU[$®z›¥0Íl‘Vühsxe›ŠšÆ"(²ô2ï)ÛèØ~ßòÏ#ÉdÉgÕ×Ûò$›%«W›oûáyjÄ,Œ±ÆA T{…Î]ÒÙƒÈÜsê Üi×PBɼ4öêß•Ê0SPÝ܈¹s€“çšqÓâjHȯh{žä¶2R\º.Š5aâ®QwGFË\æëù±×l*a§§^ˆ)‘=rTâ K^Bûæ©ì¾gqe»š‘Âf–-hÞ.OyÄ*à.‡Šr d c ù‡¥Âá$³C†¾Lç “Jx”® ä…BT²P›òâבZ:7¥ â:…Qó*bÆR¡3(NKôG á—ü@™ØPgÎ)8×*ãaœÚ2©$ç)F ÔÌŽsü]+A;æh P D8•ËhãvaG]¬Z™ ¹®=Ö×çWGáó8Ë]ÌPG¨%)$dì:â[¸Ûµöw”œBè‹QÍÅ(#AI”ͪÍEŽbîÝù$Ø ÄÑÝÀöE'ø-#­ãs¦¡<ˆ æöêGߟÈu!GgåǼ¬¶zçÉ»Qzfޤ¾²kiÅúëæíòCç¡‘4`·•Ã|™ÇÃdÉ­iV@ Îb‚€•F“¸ÐY·ß°¬Ì›¢Åçhš©¯U„&ǀ㉆à}{èÂê iÂ\BÙ €˜V‘!Ruø•¤|¸k£@ü@Á€Ì4°A$lBÃ×¹R«Î˶‡dûKyÝaþšgƒ e@µÈ–9G‡X_Z‚Þ ,=6,Zmå-ªhl¥ù*aóARZeBÑ¡ï+ò’.}[–‰>¥>qkÝ€:Ž‘%£2i¯ÎJCßõ{¸†¨¢mßÖ@¹YiÁî–ï3 Öî\;TµÅ1¶Œ­¹ªZÍ@Á𴇎t} $·½9E}¯ÃÿÊôT® bÇWOË+…µàDªh)ËIÒÑ'ša$A(6ŠŽuJžžB›ÓSçÎ2+4Mt†Ä)Ö¬1ŠÍ¤Œ×{o6?9^·Mµ›ÑÑͼ` öåêô­°ü‚ ß¾eŒ*‚G뽩›¤/»8ÝÐа’«8[QGç!^&†Ébb¸A– «Ÿžâ;Õ‰dF7#œ H êåvÒÇ¿h¶7¼·øß--»þ»(°/¹Ø,·)ì‡ÿOýÔh9%;NÇúV׬¥K§¥%pk,àœ*p± F‘§|…ý»+–” #pdˆ¨Ù¥IÑ ­ðÍšJÛÁ~NÛ–6±æ!¥®Ägq:ë”WË•`0Œö÷2ì‹Ó°Åœq>}Z§H¯µÚ–$°ßÔ®”<µ6B«$¡“`îŒè^¦ŠhúÞt¼D1µ7¸Ü—Ü;3}”íŽÞG]úÕ¥_ž\…—~Pß]!8 ÔÞÕæ*L 63Ê|'%–3‚fRr|ž3 Ë"àÿAPìåĵòµ$ÄMkwŠ!¬¼ ‚ÃÚÀýAínüFa·»ŸÂ½ú×*RÆLëî–·vJvn³÷€ä Åh»)j’Q¨>3ò+ŸC 
û³vìKÇãÐm]mQä›^CµÂÌéZž¯[5mæ^ûnT>Êë¥êkñ¼Û½ÚFÐ9ñÕÆŽv¦ãß’qU—HöÇˉT>gé( q†‡ak­›ïw½Úíi}A9f{ï±gL*¢#R%D~#h³$HÝ .]ãë\…—_ C¨èðÕ9}mV~¡†¥z7ëøßX׺®zÞ¡ †§}+SÀûÒžúu}?Ƨ8[ÓIŸoë+’eâcâ^œº’›ÜæOPXÞÒ°ÁmZm ²¦å j Ü]R@Ô|ž@VtÈTÃy¤žãS§hÓKÕ]%‹šÝ)]Y<¾rl Ì¾É,²a[Ä”ùR[4\‹Šâá¥Tèr×½x36•èåêÕÇÂQÌÁ†_,]¯~Ïx•îÿ½ÏO‘ém>Jäå”#Ëðšš­.¹ R=Ko“dN@s£ä,OÐ@ˆVÃ^ô-¤¼|–‡`䘌ͳw”ÒúAÐ#ºb´çh·Ö¨L åõÆ"†à~;ƒgfKíÞ½èaÉ;R½’¡¢YÝCœcAãa;ØhEi(¸A¸PuÔã:Ї‚ÖÜÚ©Ùö¾âíï¸ ý5¯i®\艞+4|ò±’<•ð…`9Nˆª'!+Hb šÊì¬Å÷\¬(ƒèY+XÑ †>ñN¥‹ˆXÙ B^]Ã*õ6èF†Ò–¶¹ÊÝ?¬÷ ñMß-¨fu ÈTªÅN¶l‚ÞZ€»)%âè¶Õš>_:s&Fåìd…¨xk÷æ8:4Í(Úlá¡L‘ PYà_´+øVÓ¸¤d˜kšgÙ"ÄenΖJ—å­¤ŽÞÜ9}OÑ.¨9À+'7c|!>cÐŽÄJ!xSÕ“z3\\˜fÖˆ³Ì„™ÓP% º#¥òhúRq´bŒÓ}i󧢄\Š %YBÇpuÄÂI XV™Ó¨Æ±ãödv«ÞãÝ­S%%Il½ð}¾7-Qà@Y¸L·.Gé3”bI9_éR‘m,¢+H¶S@!2’ÀÉŒªdðDØ# w߈SfZ F¹UÁ"'.Ö÷åÛ0€>b=T ö‚ï}5¾{ÿ«Ãñp4Ü¿¿ïþÁýñýƒÑ¯îî5¼÷å—£kò‘þ¹ñ+=6ƒ6æË¸ I‰õðí'“% .›-3ºlóŸ8Ž3ȃ¸?ææLf+tdm*ÌÞÂÖä^ ØXÇDÓbl|0/ðð{çë¹bÄ EGÛs<\`åv7 ñÒ¨7pȼ»ˆÒ¡?N[É Ûö6H 6ªÍO¬(SŒsìy…{|ý1oµ£ããH«o X—§[ËÓk‚€_†ý™UºáŒùàý¬¶å]%œöÆÁ6¥ÀÈ_ë&CÓáWÂ6w¸Æ;“6†|F‡æu¶"È[|¿‹áŽÛ±V–}Ô$‡¡Ä‡¢örQ®Cãr†`׊ ÀÛWœÇƒò€ÔR»»‡÷÷F÷îÝ9¾sçÎý»_Þûê˯âñ—Ã;÷÷ª1ðpFü.« ü~Ó»TšÁ7êýŽ-}“ 5`IiT a’[!ŽðM¯qT35\ê^xé~í4œknö3ؼH§é„ì'¾¯•7±šÕ{/ÉRpÛ"$% ƒ¶ø!„·³d±fá*„GÁ“êoå+^MeJ]~Ão0XÑ:÷êÅô×Ar¦7"ó.²J£›9D‡i•”é1=`l%‚R©Þ˜'È@TDÈÀb³À&Áã›øûë…¦u$7üd]Ö¢ÊC°i(—ˆ°cÕ?¿úˆ&¸ ­ZK­ýb5ÍÍlá6šõ9úÀÎý߇óöš[ìÆÇP´}Wbm¾¨©]»ÍORîá™ vuòÎ\%Dë.Ð.i¤Ô<½P‚(Ž-šdgéÐv‡1ìªîâÂ%×}ˆ"ãS<òŽpÖ@]„à¤r *ˆÀ­g¹—Pþ'Wʳ’º2O§¶üÔiiö‚õ„ 3A+’ }s!Eëá#ó_)l^Rõ˹÷2§&NºXäKð#ÔTr²!a,ðhë´äU>=õ™„J”ÈV¥2‡x߿ʉGôâ_<·âp:M¯ëV„>jeÊê”Dg‘Ù±hÖê"Þ‹ ŠE–TŠ%·“p“eŽ–È*"¤Aví Q™¹ò;.2bìC‡™·A ìõÖ·)ÃãO~Ùva«Eêu»[ NæjÆå•˳Mp†­Î/Iì#ÌÔŒeýtÿwr…¥T Z@vÓXÈ+Ûd’’1ÌrB*@¾À´êfîKü»¤‘–#3ÔÈp¶ÄOTdÂ•ÎÆF'qŠ…ºaÜ®VOO­ •.`Òý–Å2žœ¥Â<:\­Ì¯¦`£—1ºº4RDM(MÓD ¯‰°ZÍ­–›g§ ;ú%,€ô¸ÝˆkßC£ÜôE5²uý›ª‹ÿÁ'~åÌ“ŠEBM aüÀ›‡Ùt€òéŒ(H˜Õ4J÷µvßЧ›˜ËµGí8n`_–nïÛ2yCêVTÈÆ:”®¾‡z¾òÞ×»•N‘.a|Œ»Ã›¿I¾õ†{  #~QÔÉçÕè¦&:vSDÉR!n?ž”<HÀ$ ~‰Ù¨Žj¯W>‰ì¨‚Â[”ÀD†G`q<‚Š%›ìûm/9{PÖìX`“l¼¢>Ä¥´7`.uÒÝS9cž3²à9q½q—L9òª…Õ§Š0ˆ Ùä+ë…½ôt$Oè]­4%W#¡º\«#±{Áü"ô@Ûxc¯†•Žg7]Ÿ x~aº%   ª¥xo¨Í%t1It¢rÜgg¿.n°~di¯ †$ù”>ÕŠ®†ìUÑp7¨)ùCâîÁð=|Tˆš£HvŸj\Äé$f ºŠg-Ž7ø(ôJù»ùçóÅtò—?²ÑÕ_ü/âè·ìëi{ñ>]ß­¡¹×û9S/þg[ô±ècË>†¡^øÓÕýIaßL»(}°úi,O¿»óúwu[ ZÒø0ÜË.žø?ïâñ÷¾ª«MÌðÝës”¶¾ÄZ‡åw8ªYDgµ^qOK7ô‹&½ŠÿìóK5 ÿöèet°·í{M›ÍzœbMÖ3¹•uX(>áâœ1#òJ¹Jt;o®[£E#üi•@Ô·+÷: ~¿Ü9vS[(úE¦(ƒÅ|µM_4ÆšÎ]œ&Pê”ôG¶YÅ‚it2\…f¯îà©‘B°%™qžÍA}´ÅT+%ãç*ûÁ¢_—±€ ÒÈ$­ËiC½Öd¸ÌÊ<îÕ':Ú Ð[5^…’Y°ƒõYdúšRë›Z÷ŽVù‚¡>Ǿ\#¼Ud5…ßüÌ¢<é–oåÑû–²ŽÆ[²_§ŠõØ[˼ü¯ ¬âoð¢úPú‰‹³®¨’14ÅA2Œ9ÚWsý4-®Wc,+² „ê:Âu€Ý‚†•úflƒƒSRç`b¯(*‰Ú—%-Šª`€¼[i+m:^.˜¬fâ¡›«‡ ËœF'oaqà§1ºé@w÷õZ†R“Q%æWëW– ô`º±Ì‘vêêR(LÜ~–:€ÈNi¼Ê²PóK¿ºWÙhP µ¦\¤g3°Í`‡xø6†ÊßqßÑ0ŒË¬ŒÙÈ !V`r*SE¨YX¢ŠHë ¹x€Úid@Ëð ªù«ÒÆß–žXêc\ÞOT£]~åwª²£k•¿iyò(C“~· ’^øËõ|ÂØE4¥œ<ÐM¥£&Z blLL¹{~à [µ_ !F‰1Aât¸4„LÅ.¤+xbb©CáÈ1dÌ`µE.ÊãÁÜŽW;#´Rº’ÌÆDQü\•íÑ@&Ù¥TWDÂlIišOÞriȪi߉+Uüq½?0 Cý’S–A+ªÿ<ctÚ¦_ÓAZeð]QÈ}z½ª>h‚•ƒNn«þl_õçk#Ê 5~/]€b+q2¸îé…FSL%+ŪtR“0çkˆ÷omnEÊÚ"^Lü7ÚÙÙù±±†Ù*Fë}1«Çf¹÷©éq%ÒUIØx²þP‹N&P~±alB"˜‘x]¤(Q¥ŸÎFºÈ% cT¤ÎâóÁä©«ËÓ 1á苨u3u,8÷*ÔçMi€pq•ÎÌMgh¿’´[™žy"HØ^©x3w`,J•Ô±V¢Áü}N¤áD…9:€ÞE¹( ŠÚ‚ƒú¾‹ÀÆaÐer Áô ÎÉÂ8yMj:鬊CL»É;åò-Fe0Çã#¡Úê¨|@Ô|<þ?o(~ÈS ð½3•ö©Ì|抹¡ÎMCrlrÜà‡°Ì5ëE=o«U%ÈäAyiÈL5,x16ï¨ÑÍ5dçCRŽšYIüàÑZ­Ušà ’œá§PFo½bÛç—­ŠÆSþæö—sÙ¤¤°µ%å©7À¬Ø¥Ëx ÅVïvŽ5 “|šr]ÊÉfÀZ€ÊSNò& /Œc¨ àKØf½Ù•:›¶d›5‹a2k¨ü«TÐ|W”õ6ÆdnµI=/%ÈÎ`+õ¨ˆ2$M;=LYÀÓÓÿ·k-W»<' F§§@Ò}aÍ Âü%¡¢<àˆX‘A—æÎ\—j’ù⼋kÚ®ˆ£âœ ÈÜd5{ðù8VÒÕTæ{d¶k¦ ìBÖ<£ÆîÇXÅE«'àØHéÙz> på`‚Ž|•ÐÁ rsÃÅÁ›5Ág$‚£©m®€ÍFv“ÎÆ¦{°~œg—eŽV„Ü…U8Œ‡û{²©Ù@Š×W)<ô~·åV¿€›!Aઓãv;Zâ»ò€âY˜ɓ«‚2ù\¤Ù²J3—Ûê§iÄ’!pox»»„¡¹êÉ4.§PØ Ð[«;ѵê.ŒÄ—Ý ²¾Õ®0 °8ÒKÑçÑœ µ-±f‘ÌŠä}÷‹È“ ŸNDX9=S šÃ¹‘{Á”E¨¡Ú=.Y‘T§¸!ⵚsø¯„C¦:Ô”âÀa ¦tÛ./±x Z ¬\Od€\+›9BÛÍ0ÅÙÑt¡¯†„g³l)yþ¹2c9KÉ €R=ø´ÏÎC1â2øð-–{eÊr? 
GWÞãÛ)Q\°>&…Î÷XQªW0*gÆjÔ0hÊ*µ‘w‰Á/ˇ7&°ž‚ò5°-;9¥Àae ÊšÓÉH\X:ˆ¢çr¬½f/<ñ„jÙ€ªáþ Ë x™5«–SMâ»§åηSzººV›b`¼%³‚þÆ(@V 7 &ø+˜]°_Vô’ÙEšg³×Íïž~ÿ¨ù¦Þ𠛊ª„2'$½—ùhlXÒU_õ²ÒŠäfX¯Ö­qå×0{Õ¼Þ¸µJï}ÏQ…/B`uæßÇqÌà*¿ŸU0¸•¶¿ã&úÀÖ‰í÷ækÏf¦;Qh f|Y|B¹Ç:­Èu}…èË‚ÊÇ$³‘ÂU°碅\~@õ¸(IDÅÑǵÜNŠR×¢ý½äþýƒáøÎÁÁþÁa2¾7¼w'6íïÞ½{ÿÎ^Åܪ{ò¬®Þ+Ö_«7°n‡+3 Lbxߌþpÿà~r/ì}&ûãáþhß¿¿߯N"4þ ‡^>žk~2Kþ#L3D,P qb_ªÅzðµUãILÓ"™\sE¡ü2£ÚdaBýЗJËü;ÑèÒhž5’TË 1V€Á";ó^ûÿÏÞŸö·q$ù¢p¿½ø5ÐÑ`I-nӦϕ-¹Í3²¤+É홑ùƒ @‘¬€B£Rp?}?û“±eFfea¡ä­[ši“ªrŒŒõDù¾ôŠª5ÆŽ#Õ÷ÉÊÔZ—JýJx É'nJ¢˜c¤DBùp™]X5½“Ëÿ·¸ºwòîÂÄ.~©_ö¢üU¯Áæ«“Ðýyª~{Wfö›_6÷ïýy˜Žݽÿùý»Gãìîgwÿ|84¿gG~¡ËfoŽ]wÕìg¯W/øûÚæ=ÝšJ‚`ШVv¤ÚRÀÉ&yÕº­ŒìÔµþMˆÛ°¹ùR{ EH³‘ô3-–±(Nv¯1´üƒB«u±yóŠ÷¨•8`̘rZÊ¿¯²ôíºGþ€çëå%U(ð¸Ê&ÅÒ‚»ŽTG‰‡Dùkxpb3"3˜ú(§-ñkÑa¡Ã$ùšwë(¸©ª ¨ÏXr<E‡°à¤Þ2»E;0—n2ö;A•§UðÆ%¡UÕí :0áVsÌu•0è1Þ¯.Ö-6¥ê_*+[,¢PŒP1%ÍgkêÑ;¬Œ9°Õ&íPÉä6)Š·Ú X’ѸêƒA‚*À*#¸î+¾q”ýùzGD ³ZBa†²?‚Õ×´Uy?mciÖ…Ÿcអ™Ê­+—vÕíç€\F1”m3„y>îxË܇0êe&ÅUœh ·±kåㄊrd¶w4ìÈR•¤@Þf\8 ANø¾˜aŸ¶h­Nu[te®§Ï^|ÿðÉéÿ<üøÝé«Ç/Ÿ?üæ±÷_Þ¯äTYžî`¥6,æÂxD« )ÏOâ®jéœB‹!DÁ{„Õ=+攸bïE«}‘3Š”.¸¬[¾é &Bàîjض¨•rgx÷Gî¿Äž~-H ¡«š‹)ú xßCQ©Bã‚áW…‡Å…ì»zVÅ9”Ü5bÔbIAqŽ‘bªùÂù4&éÏëä¸sÓï{+}Ó•íßÙgm®–Å4]æ#9ƒ ZÙ."RM¶¸>’‚Īšžp™¶d(y QÈÛ¤Ü/ÃCƒ/9ÚR‹væ%™› ÑL¸*3æy¹š]WD8&„F ºTÓK!‡‹»ág*Í̼5·ébœ-Ž7’­~¡Kçz©µ‹>ÿ¼ÿùç5ÈsãD/J؈úªwx`ÈpKŸöŸÆìôÃu]þ.Ú¡(ž$¸AÕj¸ž¥z¯‘æÒ­ÐÄÕpË-HF3Ü)@$ò¶'†t4ÎÞíò @+fqÝ$û*Ɖ ü\ÝcÕéx­n@DÚÔ*«Ðã|´ô&sâͬ³’@q2¬…„]"CÍF—݉Ò7ækWiOî ×ÔæôÅ´*AèY¶:û”÷~ðw\ª‹ÑËEó·¸u'G#!ïxëÓ…˜ FnXí2›œw0ë ‹g³¥~ðÈå;÷Æv¶¡Ís+'¼¥6Õn«¾ ÊVù»¡\oû/HÐY¨oNV™µ¹§ÎƦ¸ %=Ú Úäê[3°>0¶é¸Óç}ßÙ†>üv¼cf€'²f1Âéì€[º3¥Ö+1EÉZßd”Ž%$:m”7O’¦^³}Ôìàb=˜ù Ùã?<29N^‹Ï!`^ÇtDZgÿŒ7ÛÙw¥Ÿ×\^º oà ‚ªBÆ?ÊÍ!SMT~/È+ ×I´<ñ¾pǵhnzõªpnš·â¾+Hœ:¬ÕVEý–jìrŸÏK24¾Þ TÍ{Ï}Ñð¹ÂyÛ1êjíÝÇÇ‘·¼£W‡ççÝË~Ó•½hÔLÚÿRë+9åá Õ‰QŒéCÉ=Xbu™Ú•3÷nLT™_$‡ƒ »‰=7“k(K`wDÀÎlS Uˆóš³¾c”9w•Å1Á#;s(Ððfßîp©pÂG”Í}.×s¨Ëå}¹w±ôz>1_ÚýévÔ“ófôJMÅ¢X‚>à4m,cdÍø¦£U™•AÁÎ=±oâÍ8E£–é„0êë˜Ê®æÎ#€ì-ÃêAµ¶0t‰Œõt—l)äÁ’Õ ÅЖšñÖ #6¤mf‘í³Þ 6€%Òt$±m°iÎ`% %l¢Ù|8ãbXLÙ.$˜ûYõŠ˜ý¿üúÉçÿùäôéã¯*m¿Ì ?¿ŠÃêÒnu ´ïÒ=(¤Ù/Rÿ¸â„  U…aÀ~݉¨m¬nµª®´ †ât>ÏfP³Øë¤SyA­hßL«]ðûP"þ*-T›Ø*´jë.ue&ûºòü«Y‚˜)kÐݧ¡}ÖþU>Eö+Ÿ“:޶„bTŽ®ü‚Ð8'˜p oµãÛ‚ëzŽy¥ú=Ó§Ú0Ê_ç #Lš6?݉&J"üFí !‰¨Õ¿T/'›&(ÁÕ÷ C€ÝʈÀÙä¸I6kû«Ý1Œæ%Õ”ÓF›J[öÐn&¼ûìF~ãÇuÛHçû¤rä¥%BZÐ ]¿}Ì{@‹¥ºpš'VÞbUC=³Ý†÷XÙ„dØú3ÇpÔ·\Ö*¢P^ø4Z…×2L`kD”<ïðåÕ:û|ÙZ°Í³t1Éü¢;¤L<áÕmEÛmEžÙN¯“äË$8­FG uË“ng·<ù;3U–§Jôˆ¹?-W(í5Y±k¢ãfÒ`ÿ=qÀ÷P… ðÓÁªÐAÿÁnŠÐT[DcGEG]§È˜¡FãB9ºÁè+«éÐ,¶9ç#Bp0,”ÆJ:¦‰ÎD¤3b3\…ñz” i•w?’ê#7ùÃYž·ùÎ ¨ûG8Ïh!2j4èìårAÖNøéd˜œÈóhƒü¬fñ`e>æ3ç­%§Ÿ€:†9A…®ØA6EœÉÚlàHÏ_ÕFˆ„þÁxÉ?ÀUý£Î®Øo`{Ɇ)>{!|×4XüCßnï‘õä&obE—›¼È|¹iàd~ZÈ)G`gŽ{C¨%VRÄ]T%:8¨fKÐlegKÄAê2 ÕŽÖ»i–2|ž6ëéÄÚ:#ŠÉ8-<›r†n°a À‹Äxéa!Ðs°Ù»9šXQôÔ¬@N5õˆ‰!B`Ÿl̈âæ1ÂÀœé*è¹U y+m‚:P!_ž`Õ`[fâ‹|œ1t9V•6R'Â9ä¾ï$ ž¸6à‚:ƒ¢§Š}qnÄ…A›¼ÌÓ|š¸ÊÀÚ '”Ø(·vyE²¿ ê|”ÿäÀäÓ>á¤å 7ò†à ï;ø!˜6‡êÜÜÏÆ üž¨ï{èëWŒéƒ÷úô0þ*ùú'~ µïó÷¯[àjB¸SqO"çß,T%u>ã×îf8{ݲªo묻gƒÁðö}½ó¡Ãâ­·7ÉúÅ.ûDéöáζÃj¦Ô¶™Á|ÛDb± 5Ñs{œ´_?2Q‚yÍ: Ùˆn~<ÀxŒkãw<|hTRË]B*ðEŒ9Ê—¿¹CÂ邼†µ:%@»ötÎm­ž(OnP¹³c\ïH—_35òÎêü|ÇÅ>­‰ì$›0B•ñåûž‘™9 u±‚²2þ¢URÓwz‹3Ô¾ÏÉ>B¬èØ>|\åukf륃¸á¤)[! 
, p/ýä;#G´ØÞî @+DöÄÔ“öön4MÍW@v=’#9÷¸pÿm¶.#AÚÐ/¼§_%w~ÝqZ±ÚùºýùÑýÎ``Ҝ啕üƒt¶¯’E åxcŸáäÚÍpŸo—Ç·K0¥´•`V øj?;0ššiÝñ¨}x¯#×`eDû_|Ô/ßÜöšˆ>t3òµHGÖ/kO ¬º3"pysØ÷¿Qõ-€6ëv¥Ù%ÃçáÝûG胞¦ù¬¢ý˜MK—ËEÛq¶ë`Âm.ÎOÞlŸ?8Äa`‹uIçíšÆ·‘—]ß{çº?¡í<½£û4=wòª“ü€Ïke/6½)´õ%›å’ùxᮯŒ>ßr ]‚·é8Û%74-Æ«I&EŒ ‚1yøü”D­”zÕg@r!éß#O¼—¶6»œ6 Hœ·FåBaz›»µ9K訶ñy@€„ ñŒ¿‚A[ œ8¾ÆGe=ì/³åó6|ÚÙ·†Ùf³ÝKLæ=_ÍF\Tu¦+Ç¥b±â¤_yÎþT{„‚Àê¶Aâ2̾KÆÁ°jfË„ÃAÕé _:íC÷èóGH •1k£~°è¬¼Ž3±eh#ôüæMЃ*ÜE+GIÚ>Ö„Š54rd uHòQ²x¤`Ù†N( 4ÂÀ´¸½ªý›7“²ý ]=ÉË¥ ›/ñ‚êÓép‡K]È…[ËĦ¤%%[ûBõà&À1»Ìx,¹>œ\X#­õG ™Ñ?ÊËù$]G¨=LÝüëŒCmfÉùl\\›rUTà,0öšÓ`ä¤a\ê2`ÆŒÉo­MÍFÎ=?Ïß G»Ì&s¤h8%Iº@¾”ç¶(ýLÈå‚|)\ÃþšÆ]|²D—ÿÑ.‰ü6BèÖÑA¾@ÄòßÅ ;}ÇQbE,¤âW—Ù¯6F4vV»Ûü,W8’ šY‹¬ÆŠ$UIS€@‡:­™ñ¡ vZfÍ 2ôšC`kþÉ "£:a5gõÛÕpìAEVŽ%=󟳇Û/¦“õ T¹¹‰—äS5XÖð=vF–è@¯p½u)nŠüA285e­`7¡^ ¯);°¸×h²IrÖùøK‹sh+þÜÓ½ªÛA$|!Û2é²Xr˜·ña Œ÷!¤q{²“øžN.ˆÁïôë†*)‘ ÞßY7b] ÖÔk®*¡(R'žÆÛ@.Zºç;e‡+Å?‡>¥n>“Ø:h.Ü„/*%ÉcŒNˆí ­ '­V8¡Çï²Ñ gÄ>= ` 19›0t'Rì.‘×ZHÚ³qºÓ+¼FfÇàý69{h÷/°Ò%}ãk,®ú"fóá}óÆ(Rm€a &÷0µ%×ÙÐ <êo6âĸæ:Cµˆ‡´¿µð#ºæ´Ä9kXÃ)cÓF§Jtâ~ ˜2DZ0ƒÝáN»0ÒL¶,3³ÿÅõ¬ g®z»ÑSxy¤‹<è')O–D s°JoT˜l{Ìùï€<Û±e@õ–‰‡ë«s %¢ÆÔ{åʯËPËËÀI‡Üx‹q P’xû5õߪ˜PP£ªÿuhiÔµ,æÉ¤ ?wNQ:h^S¨Ï'u7 šGáh9 ÓO[äšå¥puŽQ3’ã½ZçåÍ£Ôë´ÈóÕDŠñBÍaý„9ÖP’R®Ü4hîúìÓ†d ˆŠ’Z1WdY{7pÑZ/èj¸ö’ЈˆXuΩ °‘1IGò0…;ŠVǼÍ$´k¾tz^aÍÔ‹¾B”dÙeQç"s0k8 |æÇ£ú·=Ë5qåU°Ãd±2¼c(áÓÂ!NßÖ•ò/&¯!FÙtýQéÐÝ—2>[»¬;,å,¾üá²î¸”œ9˜ã¬Úª.Þ·9(y‘©KJ9M’ßîrà…E(Ó€·!±6ú÷šý GϲiÁ- 2#÷¢xŒÈgh 9÷ÆXTIÔc Çè%O¾ kÊ™‰RyT܆«t‘ã”ØÍÌÉóÿ~õݳ§·í²•ýºfŸY·KaÐÅO4Ø©jà§¹_4cföÃ,×õ€(éÕ„ƒ Ès‹{AˆÄ{t;#ŒMÁWæV}·ï28k½½þÅlºF^ÃÛ Êç­1¾ ¤Í™Ð¢VJÐfNõê)•XY!0`oÿÜ|“Iô¤ˆÃÈÂ-©S½×D\“’"|qAõ'R‘,Þoª«ŒXókdhq,†4XaÔ‡RûýFB+j%†/¬Âš5E0ñ¹ èKÆ*9ÞŸºp²¯.°Òò úSu‘»Rë6p\óX¾ðò:²H•TðW4놯¡M˜ÃÓ DIbÙk@mÚLb4Бá ÉI BØXï ̰Ö7k„–Îa ïOXõö˜-÷±WK$&І€åÁ]Fð¶PˆU\¯c?x-D'øŒT2±.à„v˜O SÝ9W¤”-ŠVï#4ßÒ’Ä…iõÝ6‰9KBó)æh—.#Ê)¨&D†ùB±AÌŒ»©DXÒe0 €†ºRø~.جˬ,/ üõâñS÷©µÊðIƉöoÐTú6˜/ÐZÍj±*µ:_@7¬7r½²:•ý]½e.â49‡ìÙd²\œ\╘‡ ¡çº¾£{UðÝ0f|“Þ‘‘Ãâì:ûÍ·o|óåžo€ ˆÈ­j]¬ó>N”Hœ«jÊ2Õöƒ.@i LjÂ*Ù<²šûÃyF—å&"V@£"Œ}¼Ã°}v4‚éê ‘!Ç3SI„4Yö1\WË'u-âÜŽtC\Ž6Mœ¿ÀjæÓ¾‘/œ+/}>Ðð,;¼ÒîZ‚*˜Ûl–Ývdl1¬áÅ+H¾ÈIOƒ¥Ú²â†…)Ø_ñ§öüù\@D~ é‡|¥Ü°“bAþ[( bº5`³èþ‹Œ„Û/Üð(žDñµ¹Ò˜{s¨w€gi¹ÈUz¾1ªÉ O_aØ.ž+»Ó†áöÑIkÛ•&ͪ.×b°dÞ \*–›C“ƒ 0‘Õˆ»âî^“½Ãd@Q¬Îµ—|½–´äÉVþ†„i‘4’OÌ>ñ„P7ÅâÓÄá¶"‘«ë†‹ä†Ò (+Æqã\ðFé ”íO‰æá@<À·,™äŒÄ®5x››4k“—ùÅ%¥=\a¢¼d„¤e^ž¯œìDvTˆd|ZêHÌ`tN±$÷8Réo#+Éâ”éUæk#7 TnºÐ‹|šé¬FŒ(3Éáä}À¼SCc(¾«¶Bø½M ¼¬ dì…ÖÓ[e©^†LѼ(‡ãqNdì °}ÆÀ” «Ÿkó¦jJ#>ìGR=¡Z–³f È]Ù’ ‚ N,EÛˆ¢¬dÏèœ#µg¼ëäMzîbÜ …èœ 4Y»ÝŽ ÷lÅ”^­U€D›#P";c¶Q¦µÀ^Ç:½ŠÌVT †t«˜x»Ý½,V‚öãpt¯q£š§´…Q= Ý'x0ÄKm” ã©Yn4¬ FmÃþ4AzŠ9)‹Z1íÉ"s~TIíÉCK W>öÛ£ =6)5'Î_el˜ÁØÅ‹U½YʽSz¨:t4Ì^¿ÍÖ×Åb¼qÏ1¹á¹!J³²x6M[?¼xb.p*Þ ·t7AÀ/'ã ¸û¹ïq¡¼f(ö8AµÌkn~°ÇCɌӞÉJŒ}§ õ‡.1Ö+skŽá~]Ý…Sås"ˆ+‰“– 3Ö˜¬{Ññn;‡t=V³Ÿó¹?†óIz!â<ÍÙ†Mšž3ô9üx =reLôMzX¹*3Ò¥"£Uà”Þ`g×…Î&XäB{år¡•6·¦µë„ªË,ƒBî)ÝG±ÛGäº,ð#˜qAéV40w/ˆyAèš¾H¯éfì+f.l«P½WW‡À¥ü]š[ê¡Ú‡ÚÎëË ù¨OK Ô]ëTžo(Gm L3®÷§–Cý#++Æn±Z &a_ñËÜçÀåé^W\/ðtª‰2Ìr°^þT”H¼§[ÿðÅCÄ»r[ÜË|ŽN ¶cÛˆXQ¥é2/•M¶²þÕbLXb Ò…X? yؑ٠$U¯gu»7T‹îÚ‚õY?h!’x‰øôóµ™sìžë½´²m¯oQz€L¸D\ûeÒ„…ïÉZ5QމÌr‘:x•UxŽQÆ]™C ºÆ`òš_¦Ð­ø ‹n mÁÃb:ßbp4\SÐ<†ó!>út…´ôY)bhH”Uéã·®©Ønï&CfŘã`@©Ð—¨*\W¯ô±jfbù?¹ì¶pŽm5%jø§Þlħ¬Bo–8ž%Hõûòr9|õå°¯ýÜš/ÓäÒÐËIs˜_@±ÞAÿÐÕ¿j~ýøË;©ikQÓR¬™¶qiãhÏ6îFÚ¸»g÷F±ÙxŸno‡(¥˜’FÒ£VüÏöhã0ÒÆážmb­ð§›ÛÉÞ-qÍ䱉àƒÍo£fv§ùýÜü,^>°@z"Æ[¹ƒÿå$ÿºjn¨véxŒÕÞi33).›cUXËÌ7Ž&ÈpM ë„´›M[ÙªéòY÷´dFÆ- Ñ´891ªuF®æxæ? '¯o9ë’n}¢>û´E›/̼͊ößùH\鸼žLØÑУïµkáØêÔ?fÎäa­ç¬˜i­ÑÒ8ÔR3a2sy¯‹¡¢Ê}G?îv4;;~YsEîœ2‰þ™ùÛh’›À#Ï M}eƒ® x*¨fb6ÂlLåC¾ä ‹‰î.”eU f¾Ä;©1aËáÉ~7ñæ$€kÄÐm xµ²Csìe*’å5©Þ"±'OÝT ’Ñ(‡FÕ@(¹i?è"6OP´…ÓJÎÓIBR² G‘-³! 
„šMPïË,[Ø—ÄùMìä[˜wç7Ü®IbWq~®4a6=¸\äŽô³|þ·[ë½’¾ptóµöªó1Øý9úPÖÜ*ޮнÀæÃ¹×Š @µ‚oÂ,_9(~ ÔPœò÷º@@¸µào!”í„ý1⌠Dce¼·ˆVNo¨M±·l ’ZÑ µ#ä•ÑKãWGW!onçÿ½‘êû_UF¯¿¬ŽF‡¿Ôý°ý˜„úÁÞG)öÜÑèp{qbŠ‚Ë,] ^_Ÿ| úïK¤@‘þÒ}ëÝgZ:@G¡älv˜#ì;ÐÓÐÁŸ:xÄ\y‚EòLÖÎÉx•§*4Ö³ý˜>§ùÅ¥!¯ÂF¥Ö—õûEU7i¡R¿¸-?91:Î/-ž¸Óx×û ¨‡Aï„üAÎçÁ¯rŽqyxÝj: ÏëU\Ždc0ƒKkîð’ ˆ˜íÝšÅâ3N±ÎOϼ‘¿—¬ªÚ] YUF§dÕãuRúÏå ·vg5äß{³>ðyþxqýúä%IüÀ)]oéç0h?, …Ù»ñ¿ÈpŠi†‘2”K¯’ÍY3Ù>Ȇçâ9Ц̖|[Ø€³®›¦kônê|¹IatIJ–ä¸ U]D30˜Pæy¸ú4†¸të’JÍ.8;(—ñ˜%É%‹S,úÒÝ9줫 3¿¬Œ5žÛø»?mv'T½>-ðútÕ9i¢Ù©2×㾬ꇤúh”¯¹9lã,–Õ*aƒLd×ÚQ“&^vH]R_€¤..Í+ª–Z9…Ì ×§sÁº°SW¡B9“üÊV”õ ?Ñ€b_ÛâË÷k » ÆÔ*‡r æ¼ S È1Xz@’û@7#|—Ç8’ŸÃÅSq|—ÎÆ?šº¤Ï •„¶[uÔ\ïÀäATÙ9}Þ7³{R@‘‡M÷Í©§`‚?©“+?Ø©K¾ÒŽÐèÑ‹¾ý+Ç=ÁC”òSE…¥Q0šöשç–Ýir!¡º¥O=þú‡¿ÔëµïïÖàã/ž½T‡½ 3ÀPuˆ«‚rßä?[º*ïäaÿA÷ug£?šdé¢GaŸÊŸ±Ètì}êl)ÉÂ^/\þD€”³È$Ó-Ž“€užÞ ÓÐu ÿ_çW§«]I*Ö–„g!æ)Ü~|‚Ÿ2öÞ½=Ç­=&;êIÅ’ºW[RˆM2µ=¹uóf×àpm ¾¿ê ó>Ð:³ú$ÕžýÕ,Žÿ~·Z\£ŽÙv|¡E°ªzü†˜ÃÍ :“âz@ï¤ÍÝÕÿö&7dçÈž>À]ûƒé’/× Gû}÷h/ë›Í`œm& œ'2Ãý¬” Œ©ž Eî&ô5TÖÚ”ül®M¶´-±ÍXÀ s?”ÓÒüM¨<œuÛJ_Vâ å­ü3šÈi½2Œ©âV eÕ‘öÃI‚z¬ÒüùBžÝÚk8l϶!ÈåZo~u Ö@3Þc£áµþ³X8éª2b³‹:¬F$0?0Óðjœðç";æL@XQ½üC=m`€»jÖw7hÖÏÎ!•lQf¶lÍjVò*WvBÕ–,E¥S‹yåƒÑ?þy2ú×ßÃÃØ>ÒóþË܈ÏÃ4™=ˆ,]„‹†N7{ïT›ž¢Íwui*xw™¡xßyIM$©ë”ìM9b ý9îR&KºTгZAê¹IâUÿ€‰ªš8l˜)‹[¥&¹ æ!˜†°œ¦¦_ÞÊ¢›‹š@á°©EÑ.fËEAi¤ºnþ*¹è:úÜÛ¤å~4Ië&aBцڻH’\^ x a-)d :IÈ0Ô[‰YÓ^tO„YšGšó5æ75I· ˆJŒè6qñ“¦Â£°AŠ ¥ÂÀµÆ÷6$œ•Ç ´¤$R¦ø_'¬»cá%ãR¹„¨÷·™ý0.[LP¨ŒØg(¬6€W£›x»5Rté[óá:¢ñÔöЮ6´}ò>B–>ì=~h“ƒ‹âçáâ{¿‹ƒ/¨³®72{œi|ÙY» fAHÍ™Ô kê³Kb€\¼Ð+;›ðÑCŒ1ˆ”ŠÖ˜C–q[¢°yŒÂ MË‚Õud~ ]pü«.mgÃ`ÍóÅ$­1¨@Õ̱á„¾U74OŽ½Ë¡ïQq¿úpLho—ö{1¹-ëÚ¶ÑÊ8@ŒRsÅEι‡5‘v>®ý«ãUw®X¦a1Ž“o ²”ö Uptë­S7ÞiHÍê!ï¿goœrw¶$ü‚×ÿ#—$äghíà2w¨Mœ`lÓâmr‹‡Y)µÀ½Ü{eMð+‡[ƒå?ù!HšïÛJ혽›«Øp–.ËHovDÌ !!çzbuÝð1h¢‘´}el9§àˆÐ‹£YЏ°Šè‡=Õ.cR*ƒ ‹7ÇÚ/¹LÐ$x8 mp9$´ÊÉ%˜SÔÃ&xH® >j2F¯™j*_m8#ðä¤)ÞŸÄÒv‘¶Â%&>½7ê‚àðãD,ÊN9=‚¢º„Då%·øP¬Ò,•Ôb(ØïÎi¬ëˆ¥j gsíç+C§oâ2AÎùQòi²µhº(¿JŽ’;ч½G[­:–H™:ž@¯¤!,ËÀɯZÊð×ioŠ9ju6î¼af¥˜qì¶o¬ì.˜Ðìt“HSÓPsãÛæp’h#ÈÑY§ŽHÙcÎv0Gul7k¹îîNµ°›Ä,tÊuJz‹ló+I3²¡¡(³c~ÆÝ˜ ênMš”=*ñOý¬‚Þ|ý´ÿ4妊¿˜s†)×N\Wºàj¶yÆ®>RììbcòYÐBíåàvšxB]ÌÍk%\X×=ˆIù@È!^—ØOƒ Á7êü@`þ‰jose•àºÞÉ<û…ì ‘žP$jdK4ÖYr’;ŠIÒspÈWNUO°²ªCò‘Èo¤/H¼ª\€«\dåˆY¬ˆAqL?²Aå”_°a–:‹A7ˆYŒtx—ÃǦ:–½‹i„`Ë0¿Ev‘.Æ“¬ôJW˜:‰ƒVÔ•™¶b T-–aµÑu+©v>ž¨NTüµØ6Ô&øöHELäà q…°Ù„JÕ4ðÏÀÏ!Jx"¼½Då¢BQ]”UEs3²mZ{û¹Î…f”1h?{7Ÿä£|9Y ‚<îÅ^úÕòÒªÕê&rqÁ'(*Å:Æ`ë°=ѳq£ÔóÁØÀ6cÜGªÙY¦Ý œ-1,T^ï‚¿IA W6ÃYÛøïr¿€.šZ„pgø’­Êe[².”¤vÞ¼éúÅö §¡gÖÝH¨F ¥,ˆE‘Ž.)Ük&•Ù„ŠÛÉ€8W¡‰¾ífP¥¤×Ëúý.ÖÄ"+kÁþ$Â5ôàx=y¾‡¾ŸJõ°Í$Ì3üÈRÕþ¬PJÇ4¥ªN䣷9þ•{u ´übFñl0I3a/²EYã,:¸®Ù©‘ƒ¦F+/$ rÚ¿ø MpµJT=]Õò]5”t”OòeNÀ6N{IßrY\×íÆêfiSB`:@!éø*-aÑz¯CK…ía`ãv "é"@°–,@ƒ0 ÙTo}óZx {+¥*»PýCIÈ4jÆÁ$ãÌ|9ÍgS=ê× Šû"W yæ 1²Îß^ U;-=½ÎÈ»ù(DVõ'dLÊO»)%ËœŠ¿Ózt0½ÒDðtŸ‚]¹„ÈÌ•› ÚD¸  š»NI^ÀY®!vÁÅçxîY«Y"ê8/G«ÒÇFÆêïä7 ¸&o, â¯ÈØ ÍCŽ]ú¬®ïb˜ÉMêÚú(ýÂ0' óaȰh%¦÷•±à^ïH+†d¹•}àÿ«ù·{S®@ù¢\ŠBa«ÁY„Y²xk·6\Eè?µEÎn3É‚-cØæh‹Sݹ´ÅÁ‡ë¦|4L…œ¤¨ßeP)jCÝÐl »¨Í_­¸ÍT‚*mef¹DlI”Èjälé¶ôýërÃùL~XùJ:Ú 3ð#Ûø°Ùºì늾f5"‹A5ÀÌr幯¥"‰E¦rØÊ~x>Ý Ö=„ gd'€ `ÚS8Ÿñê1ÐÄw„j&£J¹([£š^AáÀ¢†º´¬Ã*Lf„—Á¼žd·úÈ]*å ¨B£@MÍ~+^ :~h’ÝI˜×Ü2UEÚˆŽ7æ€{¯’êã³<ÏÀÿ%B,SU }Pì¶SzA´XýúIÔ–€;9IZ×ùìîQ+ŽždOÇIòZ—¸‡ùq.Ð{«³Ñ¹C^åøë®_«sVi$3jÄG67M8¤ ³û?J-Á#Nð†3ävKÜ·µ’ÌCþUáõÁ¹Ï¹Ý´ØR$©/bª/È«à¶:¨÷šÏ®Š·X¼HÓ\‡Õ´u1•¹ª9 äÕ3óEÆòN,çiÒ‚QryÅÚˆNs¦YôA©.£DlQE)²kê[™ó(]z›–Ü22Õ<ÖÇɧOŸ½øþá“Óÿy<øñ»ÓW_>øÍc¯…[ÿqgU.î`a•;¦™;Tè¨ÏÇUþúÉçÿùäôéco¹æfÀCbHêõÁñÁø"ƒ8œÖÄâNÞ!P kÞ‰>TÞ<Ûg*¦õØÍÆÏ“Fõ ȃ©áyƒA«}ä5Þ‡Çb@‘`Å¢’¬ˬV´GºTnÖ¿ïéb˜bÛçvƒj¥sÖcåZbŠÕ”šK@„«Fßk±žìZ)„Jµ`Þ-TîÌ ˆŒàEJ3.ÓY¬([7( »ÂÂI »ß%\ñÑÓÓ³ÌQ"ºuÏÐJÔ} “ࣹ¢¶®’¼[/`|ª©ÉÈÌ=dˆx@E•$¼P@ÂÜ\f±ZDgÃT˹¥¼·P×¶‡Å·dó;5”ÝeÀÜï_•û=gbEJïÕSz‚6ÙxMAMä¡BÖ×®Ë éˆÝŠàDF`’TsR+¶éÒ#ôQó*–6”ªÐN¨­@≊+¡’’Ézœ5Ó°Š?S®nû4 8ºäö…¾ß¯ÍÕAÃ’.-fç±ôñéÅÚŠï…Î Äñ#ØÛÏ3,êŽ#]ý’ i›–ÙÕËtÒiz`µ]þl¾Þ tü®µŠ52_¿Çæë_T«¹á˜vS‡^a]k9$’á»âD‡ާgåÓ²“øCü=\u¿ÏµÈnœ\‹uoîv-ôVð |ûM²Y¦fxùU'ù*9¬^|.åÜMæ)`cƒ-Þ`Ðn^dKó}³Ó§_lc¯ Ãjå£ãéqà°‹ 
­ê2dCUÒ†îM×W餢¸Œ¦:N¹ÏÍÈ„zy+þÐ.Kcoý‰×ähC“À”qœ»44ÝÐ^BXmXõú‰ðž¼>«Dm—9ÌóµÙ-ós@‚|»ö=zÙL¨›üãŸÝ¤)ÂQ³K5s!“.âÕ®£FÙp|òvâ[æ=€ýD6zèõA|òãlRo£nç Å¶4ÜÙYxTdTö¯žhlÚpÇÛÃtf„–“¦Ymdq'“bhŽI»‡tCû•rH†'±6#xÆJ‘Ç:’¨òy–n„ \äˬÝJÙÇgx«U¹šdú-FÍÊW$û\óu[Dò®ÝiøÍ4î ¼cFЩh"h;¸?m%©¸“a2 ïÔ×zz)ügÿµ|"k;ežs¯ÈìcB¨È|õïãÆ\æèK³€äÒPVúTâÌ ¢ª+Û¦èôn-´W»¦éùxN~x_ÇðMß h¬|m6ìÌK@»Y‡HöÊà3מ_+Çbòt]†PaC6'ÀErª ŽaÀ—­;¨:7%9lh›¯V‰u©S‹ü‹«è6Ж]ûíîáO‹¥k¼§B±ƒ>¡Àþ Õµ:_Mþ£ÚÍ…oóö/$}ã2l¡·hUæõN]ßÁ‚ÜMww>œâd…n‰ìÝr‘reÔÜ–1ÝêéÞþ¯ ÕÄ®J—êpßS§Û¯í*Fô`sä^ιó¢PÇËüõ»8]QRÇÅ¡ê¶'¯ÍHÏv²#b{͈Õ7a¯ÿèÆÅúãýo/çĽ÷:ØÑÛ·)m7·ZéäÛ8ÌÐs-Îh)Èèç+o[ŠbO3 M²ËëŒÐ¯]‰3°k.!þ¡ö᯿œmîCv»F'­ÃnrÔúÏúG×Àûž^ØÅýN0;I>ä?“¿´Â÷»›«ƒøo­iàe¦éûwt¢ù3;iÙH™ŸfFÞ]‚8Ð4WD³óñè}è£Wø3 Öû_ê¿ÈŒ”_eàzó£j«k}@q!Ò•cà°˜¯*ƒ¡ :/.@ÇÇ¡/^Îm¹#7÷¶ˆ ¶Ü¡ #Õ‰p.»nÁÓ'©UY.„ï «t°~0*¦†üЗ§f„Á¶nÒM!©ZÄÆ$üaªŸH#OLßÂwÂbÊÍOøQ¯‘¨Tâ/.L £ò) Ðùø¿› ®îMt¦aê5“.ê7Bçiô!Ÿ¶O†Å¶{BÆþ[*eÅn¾-X%³Úòà}GÒ}oèXªµT¤Ã~Ú¿Y:Á¬9=€*®i>îü’:'Q…qÁ‰ùè:Ý]^Š:é:Õ Ö0—Mí!QþѵRC‹AtÖ¦ã×̆͡ԡ¼b€h%°_"ÎËð21¯BÄŠ¹y {Z’z™Ú‡’šp@…8láL.*–wáî+÷³ ‡§z«Ü^ý Y%(S¡q’öø·UI,Žwç1¿•ÿüߌÅÜ4<à_‰õ| +øVð1¬àß!¬€5ÝúTÑ%wÌœ$T#Æ[Í;7*ç¦1yÙè—R çËG¶ñt’/×R;/®Îç~Ð*'O*_2zäÃ| ÐôA.û•ñ‘ i9lú>/ ÃÜ"¢¤Âbº#¡±«dÎy¶@ØtCÑLˆSßËÄùC‘Üò¼‘K²É¹¾#8²Øf—÷Üõyæ=(¨ˆƒ´T¨#Nrî»áº²ÓÌ4+Y¾ÞH™€S„ÍxñÊ|:ä2ŸÐΪ\¥“jæ`K,¥žÚÆQm<v(>d9 bÊ0_N 7†<Ê2wù‹°šç«‰æ"C÷†T‡Ô3Ö) 0•èØnæµ­^Û„'îeÃp®†µ¾‘¢% :èˆõ0‰ŸާàŽdá˜oFF´-¦ùÏFáMªlO×0À†fv%TñêG1oP€÷ `¡ˆ\Á’@ºCø²ú(äÉ €Ó^f 'ű2¶ÛÔÚ©iS9¢85&å¨ñë\‰X¶D¥žš&Ösª…GƳU È€ž…  @J›’U¹ð-)ØoÖMÒnp¿Wï*ElãpLïÊ@¦¾‰•ÐTïáØùm/Œ¡Æèäõ: ¼ÊÌî]½%ÖaµíMFºHƒ»ê™ X0žaveW«Ó€X³I40ÛV.Qh䬔[?ÿ ËR"G˜¶ö‘<~òäôùËÓ—»xKBF=ÎÎÃUn¿×3rXE¨»Ùl>+ri!çÈØ Åá- /Q>¤üƒA0¬® ® †MT`â雑UZ”1°ÿ­êÒGƒæu®î>ïF=Ñ΢¶}Æ fPc6ë„Bÿ2"¿˜kпT¥!ÅÙ KE¿gúT€7‡H3˜ÌR¶ùéÎf+=¹¼ ÔÀؾQû‚ê±o6¾­Ô‰EG¬)õÎN´^G]>ÑG„N ¡&†m¥°}«ç…mÍ1 ”ú %´nÒ¤|zô .»w´Ó\©r ü†^Ü¥ü…ÊiÃRuÉhÐæ yÏëVx€¶ÝÁÁãfW66°ã±Ýç¼ží¼Ø×j¹Lv•NŒ¸¼Ýxí8µ¹PéÐFÊøÐeÓE^FÅ1.d i]¡Äk®­‹‚¢ÄCpõÁ9ÀºfµíN\‘pãžeæv,ÁEÀP xÙ*¥™ºx 0ÌoiŽFÞå~£f„#…O<Èð¸1ê ˆ%byŠðF ìG›"@æ­¶/y|GSY}ë{Öæöë_ˆØpS6™:5- 8¹àîçŸ*C‹ åð–_·¾}ö ÅLp«'Ãôgˆ_ù'ý9¶ÂÛõlÿꆑÔѵìì­æm Þm•÷¿*l‡ÂÃ+}5à’J·[¹\È@«ö¢–J²ý%©‘èG¬½†þQÒ×/ãJØüðôô¿D¢û?£èX¡¯\ S†$ùÞºj‹U‹úM…ÅêOÛžÀƒ@ÏÀ.Þù½éû¸Àª_Þ@¡Ãœª¢Qç_Ý>¶zfhe'ÔÐYŽú¸Q¹¾ix.P¤"².;@²RZJ€Á—4ëTácsCá%/UÚ¼Ÿ£ µx5?*Zÿ{+ðÛ8Ã/Áöâû?¼›IâF±–{G\Æ Qý÷ƒaAPsYL7ˈ½5ÂÌíaذåß\ÞâQŠk7™c[_ðÏ>ýh‹"Ûiîf;û7ÐTƒx8•ebx˜åsZ€”ܵÍyƒ$Gi^2îmz¾d?²_båFÂX]0B˜˜ô‡Þ¼ š¸žôYçã]ó¯j,Þ¥"ƒ=¸øGüKqà¯Ò>íͤwZ>æÔU¯”ÒÂÙXfû°\@inA{à5%ŽsI*UK -뮘Y à³2[`‰Åq:»08Vä2YRL¢¹JÒRv)¶Ó[í†×~ZB…œ+eÄ Æ–áX[É«+Q©.øxÆyà¡–£1š¼_æ ˜àäÝt’8,µçE¹¼Xd/ÿß'x'\P)´®Ÿ÷ìD‰!Žs ÚÙPï¸*SBJqV¼«§ÙÈL /§j`8½0Ú©eë“i:3? 
êûëu¸nøÌw §²Í ªU—Tñ/†§Ÿ©[É(|èQ…D$s(|Ê~eê8¥ÊlÇMqaõºqA©sLm~™%"-*ì&NÃɦ`ð¾(Ò‰;SæKç÷-5ìÚÂlNj$¨,¶÷=i¶eå‘^"5Ü¢BfágùXÑ˹lál”uØ® ÁÌvP°Øò¥KU× üW¶j2Úò¸ÇÑj1{àë˜çœaÎÕÂZRØpŠ•i [;Ï î>ɇ>@ͦÉȨ栰ÿ×÷OÂOÑÂq>ŒímRÈò%Vu™ —Eá…ð:nQü¼Y­‰‘g3çîWYâÞ7j‹/Æby8>K¦WH20/+`âfH52Z®0è«@Y,Êì³G™nMj Oó‹Ë%Uõ2dbVrÚåbÃU ó$h øüÜèj²ì[ó+E°ûl­Š+æ…Îw}¨@2!æ\®qLG{ ›1ÊKX³Ø’>rmÕ/¤Ù•êá~Õ¾¡>UcCŽa‘”£L*àª|f²#?f鵪U ¼‚U‡šÈ)H–]UøìF9‡Ú»u$z˜="ΡZ˜=vj/ÉfxÈ@œRØQ³fLãþ¿©}™élÀú-Y$ßícŒË/¯¶¸²óùÔÜ4CaþBa õÁ ƒ‹£mO0iÙ_(Põ_r \›i¸ÓQ®ªÜL áñÚmïüÆk”‰€O*4--Áù»R¿ò|¸ÀhDF~}è'AE§¶@¼ëŸyÚÒîÜ\”M +é‚c<ö…J‘N‰ˆj“¯°Œ†1ÉÕª0~9ˆÉÈè&VÌø‡(?@ÄG¢LñxGŠ€w^aŒ¯4Òð ¤NÒ¢Œ4°(V—x ƒL‚TñÕò Ƕ9l%#Þ³ÔlRƒ›oÝë §°³]8sÍ«f{Ú-t,ÔÖÀ‚ªY\î•D•âšsî¨Ô‹¬#ZÙx¸>Ъ elø¡P'ù?r­t¶Ã!›±Z蠟›¤C NŠî Ÿ—4½¸|¶±FeùJ2-t¼Ì æ³Th6kâ®*ŒZ•nQÄe8Ô ÉÌÆdO¨nhPê¬Ñ`0ók1ŸšePsÇeˆ@39YFëë¨V3JÊ ÖàÛåã é›1•œ9£Ý^äF"¢Ò¼@Ñ¥„hˆÞÅÀ“ó…ýèÕô}󯬿dºîÓÚfoÞÈ”¥J³ë¡ë@ÄØi(P­™ê­²bý/">”MÝ´ý?ß3Ôæý“@»IËߺíi¡Ñ檩¢Ý`Ϻÿi¢µ¼ß·–×2K JÞõû­ÆVåÃo¤*‡Ê‡¾­@©´Pm‚ÔüíÅúºü(£>{0bÏÁöV*‹üQõý}¨¾Öl¨ ­ÏÅ P7Ñ41@úž?êÏlýyß0éíjß{ÜI¿„²·ý|(·ö>¬ñ=¼âž2霷¥-Ufäj²¥†Œxž/lViœwÔ8!]Ä6&‚¹Ä‡›Qp¦  jM‘LŠ‚RÉ#Êê$¶ÁO†™y.ûÂP¥Qùi@ò÷çæt)hQ'˜ðÜ}Î&Œ^^Ň­3L,zƒþÖP—-©p©… ô4”hà ó=Dd ‹«Œq½°ë•*O5B×*Ú—x‚\Ð7ª7U!—i’½SÞ/p‘èYî6ù\¢®{v5÷€¡=¨÷¥¶KDæ- vÙy¾¸†¯¡p§}tÝB}¼…}S >*#¢÷{øßRµùñÝ/ïÎû)6ïLþQ¯ù¨×|Ôk>ê5ÿ†zdZd£—7¬.°R¬E÷Î ‘âÀw\3·×Ë—VF°LÉ7Œ.„m dMãGduðÔ-u Yåß±‘Cê~¾;ê¨TL±aÔƒ¾>ÎûÃÖ,®€®¦ƒë±VX•|˹y¬ÿ^qÒ jfXevü‚¦·Õ„Ås®2Á"e‘œ§ Þv­5aÈï  6l }Ù5Êk¶. àÕ «;˜8½¾¦Ñ A8Ac„pjÆ:ö€{¡&Fz]!ÓXR¤™ä(›/mJ&À Ï2cƒ“€ú¦!‘%C6Â{VÓ6D"+‘£Nùf=›9€ÌCðU„ 9¡ ï˜NÌò×`ð©'W膙eKŒ¼§ýßÚöu“V¼Èþn¦Î+Õ‰¤Èèƒ'‚š)È7SÜ\‰Í>Ö©­½¶K°%c¦Éyv]­¶RW<&àTëò;ÈEZ9mcj£¡Œ!Å3ʘyleÄŠöµ(' ¬à®îP¼g¥Ä: œ*²‚ôLù˜3­…—üËlTÌÆ]ž‘-—¬‹L×èVÛYô£Jq…ƒx2𤠒‘(%Õq°&B fýxIò™^3£†ªˆý½˜½ ÞΩ| Õ¦Z%ËW/f¡Æe5)ãMA½{4 MÕÖír_“óf«Í%³Õt˜-Ü(ÌÚ†ÒblÑz©bÒH¥ ûB[?¼xÀ ­a˜Q7)\- œ$2‰Ó­`³è€Ð‡²[í- úM Ó+tŽÙ æëD²`ŸÉ®uK(äÊÜÛcx£ŸÇ/çSö^ wµ¡ú9SÔ)tä]‚—ËåüøŽ¹ˆçyŸnã~±¸¸Cöé;] 6çð¿%æŽí„©H‚eL!·ˆ'Ãøˆ¹ì{h°„׋Gßz…›ˆóºGHe#÷Z°àd£'9:ùc8…Ùšœed¾4:˜ùq&ë$¿˜ œã©§U˜†AóðÖ‹Å \µXcý¢w§¾˜ï+ÛB½¶`˜ÏZƒ%•ÏœäË¥¡Ç¡Ñ2ÎÓªfV·ÝÉ’!!7HZ-5H ^ýDY ²¡›¢tRÜFׄ·†Œ4M—£Kå ”˜¤ê Ciá¡Íå±9ÔÁá+³t1ºÄÎPxœ¬{Ññzdéj­†¬qXGl…„ ×ÌÄ6óúŸ¡æ»ƒaGÙý£ …ú+§»H•b ÚíeæQ­I,!ÖŽí‚çéj²äÚEBòQÖžø”œÒü½“B(?öÚÂUå ß`½ü©P]IÓwÉŽ­ëGÖŸ©¢¬½^¦é|ko¤‚¿Œìv@ðœéËXSeýËlbM+>9°3BׇUn4¡ÓºÉÚ±#!X%¤ZbÐ[&nyæsÁ½ÚÅÖÏ”Öo¨ßœˆ²çè8j6¾%.Ñ/¹šÓåWÑo¥™Ë¯b­”9<2gü{¬Qû(;çjµåkPþñÏ/bo™ÇŸ=:}zújðíO¿Q_€Fʶ¯Š|¬åù2öóõ³!’Ä'Ó/‚¯¦¦ëçëÁ©ií{>ï¶›Ül³+ì&Ífç oÎçvóêÇg•ž¨•ÁÃñ˜úlOÍÛ†?˜Öž¯OgËÁ·‹bú¤˜]´:~›Yµøú>Í=þ¯Wÿ,lÔð$M—ÿŒ‹¡N'ú+[ìhVöšÝŒ²á]Žˆ|@s£!{ ßááu­){G9¼ ÁdGÐUeƒ‹ÀPà¡!8~eN"\‚”:ê0 ÐÙ° §>ÿå ¡¶øˆ1—ƒåð|ËÂD¬*a¤-†KÑŠÉ"…µWøµˆðö‚zZ¥ìj•B2€QÖ€±×ÓhÄ8Ö†å¨5tá§1«–,G«‹NÌÐôY½žYë²uì»ö8)Ø"L%n3þk-p°spÁëÖêO.á;<üÞaÿôV³ü]/ðç"®w¦ìP`_Éb)É+^ö´ÞN_͆¢ÊÀUO¢·4n;Ø?mäÈLÎãÿߤlC‹þ¬{äcèô@ƒghŸ¸[ó·a£Ø6·ƒ„ŸÛ²ôuváÕ ¼„b´Ôr1LÕã>6¬57립 aоø——Ëéä«/‡Åx8 Óär‘Ÿ4‡ù¯°Z‘æWÑ¿¼“š¶5-Åš¹aG‘6Žölãn¤»{¶qo›÷éöviöÎçÔŠÿÙmFÚ8ܳ£Q¬þts;êhPÁ»¾}?|ûþö·‰q6¿¢Ÿ›ŸE×çͯ"Æ[¹ƒçåË;xxjdu ™‹pt‰yràV%–™A 3‰Â¿Ñ×W#PP–ëî\b#í#ÖGƵ\ñà»âËp—"ÚcÏ,¸ìÕÅ`髞òÌ æÉ›Ü®•îbÎéS¼;À@B2Ùð²Œ!C·w6ù~vöýB/¡_ÐyoðkAWJLÖ´Ö”16Égè[®c³ÛaV¡ëg©­t"¤s™ÒÍU¨\Ôq‹{µÛã?„#úélûQ Kä7‹Ó›n)LOÇfË?[‘9¾øôôÛÇ/_Õ}Þ°ÌW/?|ôýãðSëË¿úÝ͈Z=ÎŒªÛ/fMå>bwŸ!>üÍçïKÝ-×(;o[qÖ†‚‚¤Ê9§Õw&ƒhôÿ0Ð†Š­ÂÞ¹ë1 ;YšHª— á8j©ž©’sj§ó¼¿–«ü–§jÌ_Mª¶/¶yÑhÜf0£‹E rÔäì.œ¯-Z&)˜@^ª¹á i¶( ?ZcH ³ŽÓÉuº.=¿Ê|‘]aá&¶*ô)¨É­·ŠÝ§Û^Å3ˆ%–fTõfÌõefúM‡b[ˆM3³!G/>ãʯ„é;µá?:—YW^rä‹Âó”0È–È û’¹F¸°Ø0®Øl´ T¹+Ì2LÌ0%!U7*âàÆ”2ŽCûÅ¿#žÒúÛÎ#€6þ·Sç6ã¡iÔ^ Ç›âú­ZÚ‰$õ”K5 ÿökÅ46ÌÛŒ"}2*„”CR/š¶2#li!qz1ïY¹²Jl쨚X°gÃ4_Ó§-j€ÿpÁüåñ«äèà ¹}Ø¿ú[5r]Î"É“$´Ý‹öÞ©æ''ý#È`¨“{>ˆ–ž0{¬f?çó±]Åʧ÷îA‘RhÆ{'þ=ÿn‹é-Ò ÝKšó}Sÿ¾¦Ó`íxì5 xÃö¶Án÷<-–T_´ë íÜš¾Iæ½»›5w?l|šqO]<0;_à,<º:vœ`p/ìf¯­Ñ‡•íys‹“WÕ±:™Ýhñ€MWéÐ4:W:;î4`hFd¬Út·ígîçsü Wâ‡q`‚úwM> ÏZä­ÏÇ;æ¦wÌ–»ãã¹> »ÿ=siseniœÁÑdµšÊés¬ÎP¨E@ÞÂñÝÐ iph ¢#ݶ7 ΂œ²J:M0Òç/É~G`ƒ¯5båˆk¤¬!sÑS¾sŒ&oÓݰ@VC‡ü#J,È'†èb»«kðN †]$ã]ì4{xx6BXa¦eN– 
U94°Ç.2sÿ”ˆÔu:C½¾ò¬Ð<ª¬f."Êâ½PenްÝn\šU^[‘‰k£º¼Úf±aÒà(K΃8R<âùyrÞ7M/–%lN»e'ÕªyC…Gð$Ï;8$ð—1wý‹Ü¯¿¦T¦H¸ˆó©ðx_ÌBª‡ °ÿ} ·‹Xf¦ò ‡UŽy#§£o†«sè$Óî‡á‘0ËxýÁGîFdiûÕIå߯×$h¼(æ†Ìm’Ž0ñýK‡\mDML€ÀMg˜ ¬?²eÕÏAÿFƒ?YwO•É+T Á\yÀ^ÏIoÞHTûUß¼1WÀ€kÁ_öæ‹ ÁðŒãšíÿ_ ej'úíD÷TC¹h¦rrúlìéç“p\‚yŠM= ð¬fF‹¦™Sýs/W„ßû’UŽ<¬G>{ó&êR^æ#~RÈF©BÒ…”–ÞX\™TX)£Ä„kB€àËlq.&”Õ)m¨a¸=ˆ9ŠÐ‡ ?ߢ5ûCŒLò[@ÃÚ·ÕÐwhEšóc»à¯pTb“ù:9@Jÿ†œT'¢ ’|g^Ô~ÎO¼Õ×ÀÞv”´zðÿ‡½NvEŸ1¤j¦ãt™&­ÕbÒB\Q—€0àÇœ”]L®t,ŽvÖš%¼sÎ,#‹®A/9ýþù³¯>}uŒq C#PÁÆ÷:]ŒKËòI¾\»(h#vŽ%}ôÝ…´/gæt›ë"æV¼ÃKVìÉ~;ŒBXõ’¯…‚9äºtÐÆ¨³é $Ûš @¨)ÅÙ#q~vVH§#ÂÒ¬gíPF¾¤ ìX4ÆRúî"GÉ w1K?ËÀ†Ð´(Ì¢ⵇ†j2: d]S#(—.ôB‚ÐÍXÏó‹Õ‚U@QHpËà–A±¹g]´\ü– ˆùÌ®QÁÇ?6¨\ ˆÜ`¦o¶:'Š„ 0#ày:bÎcùPƒ £;%M<››ðÖ#E =½ŒTá¨)ê8ñ2…Õë×)!ÕI™ àSCòÆN;6$ »`t?Ã#½@ÓqFqÈŒâàϽ£Ï÷cO³k)ˆ³÷’—|Çû¢ ¸0ä ×p¶eÏ5"PBþ|@;¶ z—cÉ 5âÑó1Nß’è BfnèÐÃ1_d=íåjÈQ+„x@ÉJp()›(´gžY"ÚlÃ?–  ÿz†-f;› ¨¡P…Ù’C?æú17Öyþ.G—®›ÒÜ7CóX’BØÅ<÷gÙ?ºóàèîýÏ’c0#йØç ׺rZ‚P¸Ô6B‚‚lóI>Ê—`ñàc‹w*>dMcª½ø#™%“ €€–kÄ™‰ÞýÀ$ª%PËÌç¨ÿY”×|½vÐ_ëÌx´›‰ ¨kàÑb˜­Så¡JÜâk+µ2Ãë·B~$wôpmUõÖõzÀÞçËž…wa²'»ä|’^$mÃS{$¦V½XvÈOÄQDšÏXÁVK®—ÅB  çÂC|>µ6A¦rT a7×0#¹ Xõ„ÃeÒs°xNÓ±­iq¾‚}‹ÞAOŸ½z|œ†Â(§þ@ HŒ8mD‘óåuÊèªT@ÕÈ®½ñtz±è»ýôà?G—EÁ*gþ¥Wi>A-1‚¡"CñÆ·È.Œ3ãn¾·`6úF´á,ª¯ ÿ¢¼B(P3ô®Õf¼ëì$£K#²­õ¨½<ÃÏé EYòZãÌœå‘hA8lÿG<‡¶‘kªàï;êDÝb&`ÆGã±ô×I<ä®9ëp:ˆ"äí2ȎĨ¥† À†ÙQöÔˆ½|¨fÖÎþŒW ¸à¨§FRÃ-2 ¹J'†ó“@'¤ÅVß™bMh–é›Î2%1þ²Ò0f’¬ŒV BoŸg„E9k%q(~„Õ•ƒ´!ãùnGðÆbÂÌR&!vã7oölU:vb±Íjn¡è4÷¶ˆ*±›€ØV ¦ slD#0$õ™·Y6ç.­î€zÂåjá•q«þ9WzH±‚æãí fwø>³ƒ{îÛM÷ÜÃ1ÕÊ ŽËbô63wB>c+HY0€~‰v5ä„|á0Yy7:"ãÒH(`Hiü)¼$õ\Ú…áQi'z©ZÆKÝñ}POûpyˆÿÄg@–óðŒ|u¦ƒu!'p–Ò‚ßµ;ÉC3ܬ4 \f£·x ·:Á• ´®-Ue©ØL,[.·&æ™6yRÝ`Ê$…Œµ7/$è>du»ý¡tŒÝ²qïÚ· NªÃÔXUj´þh‘0ú3(e¦hÄd¨tUgcoÒìI:añf7ÚV¥è|AjÍ£ürt0›í;‡äVOgÏ«) x>àåPIî.å^¹н­8\-‘rÙO«š’¡lB±¨ë×ÛCréÒHFÙ‚åìì{Pí½_?è£ß½ÏòŒoŽkÕ 6 )%Ÿ¼ËEc—»ZBmD%¡ +qÛÔC{b;VÚIÙTèí ß.Fæ²ÓE«U!a‰5ôî L…N©eåE|HÐæ]e±Q+èΨÒ&£gò¼÷eî0¬øÇ9ò×X_ˆ Eòç&vÿ[ϘFÃJ·Èp@QFrº.^ö°5TòýÓžHDÍ,Îȼ?³³É”ñÙÌWºšâ­Z(hÔí=Q—A7|³ƒ…aѨ“ºîÀÏ}û¨-„qÐLLWÊúý®3tÎ/:5Cü¦Xpý¯KÈÛRɳõŠÈb– TP˜-‘ÝHøW€‘©+σ6§d—6&Tü0\Í–+¹PÊŽ_jiƒýH1‚;ŸšïïÜ;<¸ôçMÊ$tj ]|žïÎZ§•£/÷@ÓÐî4ë¶“í¸R9ÔIÒ~!6ÖŸàyFñõ‘ŽFëqtèTû”‹¯çùJa{î]lÊÚ‘îuÍîÃ(qÔÿì [º-A…¹TßÅ‚¥ÔŠ4¥FàëYpWÊ3,®…qÜ¤Ž Í­zuqÃÐ@Ñ…ól#Ϋք®!½Å¢Xø†$.ic—IµÉ{Úœ]Xop}€Åw6Z5sÑyžôæ« _š¶Å`àZ¤sÃ’ÃþÊý!ÍZïÌ›7x ÷æ¹Ñ—Æ=kª8IÎÁ oF «¢/pÔ¬@=Ä0½QŠ÷=°^µ[q)H°¯ãb„VÈpÏ­ûÅÒ‚°j²ÿC cþ³ÞfíÅ1T5•¨`ƒp=†EðÛjïêŠêÀx¢¾÷H:3¤•¢o}d™4jæZßßÓQ)á ›ËØÞÈVvfÊæj‹Tàó…~~²QaÛèMs-2Ÿæ/©=¨ãiŒf+s’¼µôð #‰|´¡ ÄÃÇnâqIk&ò‹ÁòvI¢·ì‰-Äf‚ù!¹.·OòÚÌY0¹¶fI $ÁFÇâû'–˜§ta?0BìsŒ|!c') :_TuCŽá_˜‰ V D’GŒ8’}—ô}Ê2—õ*´rAvÐ5öMó‹K‚jX-†|?Z/ èKÆÝ*½–(ºg‡AÒ3kÅqÖ-¹Ê±0¬¡|Ay4â³a€@ NZÜĵ’O]ØÇ%ṳ́.Ž]i,RBÒ_¿ ÄQŒ‰ÿ½'õ»ÄÆlÁRAak½"ʘÿ¶mºVaô“ªuZ‡¼yØ?rüànïðÏû¹ãÙW6¨šð@ÁéLKáyÊÈÍŒ>Òšq¨q}¶ï¸äð Di—õ^?r¢xr‡¶›ŒkªòÚe¶n-øÌGï'À9RS{p³©Y£÷ñöÍfÐYµßݵßÕS¯9à É9ÎŒ ȪëtŸB øÓ]o]ò' ¹’›ùLÂÛHá±@§Ö¢-Ü .cdŸ ^Ñ;ï`ÔÓÙ'cl‰ûÏáî³CuÌŽ\{+hÀÝøûP¨=žãÆavãbTê0»I>„ôŽ;jPTâÅ]@X]Qöp ö—#û[T!úâ?ˇ‹rƒùzÔRÙÉ3‘°Î5iÄ  üŽ¢MÉ‹·ø­D¢ mÁqöøÝ•—Ë c àñÕ­ u "jR§Øýõ¦›I´.s̹MüHv$}*ÎÝ“¸¤òÎ'±~S̤T#ÔÏ$ÄqIÿ2·ó¥Ù¨þt|ŸÆžC²°‹]`£‰¿ SÖ»ÒòÒÒI3ŠpêÏâÿ¡‡ø¬áÅaE‡®L4öã b1¡ 7×ÿñ÷¸rv!áì‚8ðö £¡ËB3²ê"KßÂì&Y|Ìi Û ‹Èid[6í/Š·PÞp=ÅŒÒö1$·ŽîÜÿìn'ÎáèRùsïà³ÞÑÞ—Jer8­±ÎŒ+ÐgÁ&ëoZk×*&‡#Ñ5‰Š“U“UÉO ›Ëg{R1FÔº<1Ú˜?´ÿóÓ@/^½ ìÀ-Øáç7½íl<•´Æf{çÍ9œÞËešƒmcÔÆ÷°$¦ùÙƒ0’º„^òja8 ÄX•‡~ME­2ÒýØÔˆËh^7: ¤Ì©€I½ºcz¨Cq¸†ŽñGRÒ76Ã;ñÍ×[ Ézê ŠD˜a$‘‚Î’eHàšQ[V ›µG]ÍT¬@ÑËÒYygôïߧ³ò;pY PˆÖÊëË–6ÓÕÆÁ&îc»;ºûùÑá—<Í©¬*-üÒ‡f7² NŸÊá³:®Þ§åÕ¬ÿ³¹ ñ2Õ]IööN¹™O­sÕþb®àÿm&}òçÏî|þ/Í/GŸ}ö/ŽèÏË›bIl³édÛBùÝâšc4z ´.îÔ+2[»¤Ÿž$½ÃùÌ%úhÁ)aï,âi°<z-KÖjp°]N2ž{á*¬ ,Â:7ú×@±Ò÷Ó ¸ö’ü¸äÌ„déE"aèŒ-‚¬d ]3w¨|üì‰xýºn¶’<Çá²]®²yÅRæ7Ø‚£L cÖÞ¤çû±Ñ:rY1×zãÞ­¨iÂR8E³Ù|¸dGÐcà'Ç.ìO,Ô_¿ÄYk–W’šf‹–iì;G ÔdnÒ² ¸UIŽ bcØ©ókHŸ…dÔ\±ÐÓ[[SµfÕf{,Fñ—b¹<_€)è/é¬,ÓÕEÖ1Ìôo«ÙEg£ï _ñŸƒ›]ÅäxA2 …zsJ€4ÝIiPq:FÀÃpî½Ïpè¢Ã¸µt*®x@”PþßUÁX/-'(·H3éï‹5ÎË˪‡‚¶XmhPÌ¿ç%ϲ|[çpD]·,t0l 
_ÚÒZªïHÕ¤ºßO•ÃÉ7-&i>mÁ…%ïðXƒŽš=²cc |™*uå4V@~Z [d˜c+Ø´š±§!ðÄ9!^–šzX‘vX¼ò‘Ñ(–L­ØYÙ´AB椶B®YU~¥ÚÜx88X0ÖzR[êblr}uu¼¶’µÁ¦s­Â”jQ¤œ/KßÉó·5¼÷ÈùÂñ_ÁlΩ¸Ÿo¾ìô­•)è´¹bE…î¬úY£rB=YŽiÍÊrdÏ›Óùò¤©Ä ºPÃ'ΨªDe ²¢ƒËjˆ»n½ë‰ä´wî¢ÎR9ÂS`Ë6¤‘â ã—Í»„ñ‚›ïG'±P'|ˇ }Íè Ðq¨þT¬¦Qó>úô»€3„ß“L¯Ÿ"מ‘…áA2æ’ 1Z* ‡£ÿQo<: ©¬Có`UÉŽxÀ÷ÏÎÎ “ÀQXEЦÀ8 RSù¥™ž›Ë"xÔ&*êyÔˆçfvK‰ÅñEÂ>›|Ä:ºPÇœ5FS¾ŽÌ-¢Ý•£Á@® ¿):Qš‹ ŸM& ¤½®ËerêÚhâD>s‰ ®÷!ž+'B|ûg¡ninÿV%Ý­³ý°Ñí‚#T\ܓ֟Ů0Ý–e÷) šç±Îù521¸¢‚]‚CĘ~hP­üá¨:ÃòJÈ ÆâÏ&È :Ü °Gð4xÿ^%ú<žï<âÔÁÙû÷JÀÎÒt*iAÛá·T;lDP},™ŠŸ,r_h ›e½ Ì俊êmiÉb6²xæP‚q~P,Úê¾Üë2¨(‹;¥—’7ãq½œÇ´Ñ–©—yË”_P¶Z“üöO?¤pݶ©ÿèl35! j͆C™U†Ž%ðÁŸOAj¡Tjt *êÃêEA_7X¹xP¼o7ŠÈôcòÛSÛOÓ>F;± ¦¡+ï\!¿†† ó×ÎRB‚ØÝhÓ˜ð}««ÇÓ7“úTC课`Û×_°^97XFÑÈÿÉ»j¾EwÏŸŸ\»› ] a-õ¢ù<ú0ÓÒµ¦ôg=¿ªû_iç® þ¿üþßz¸µƒøÿ[v·n?ÞDýÿÖΣ»ûÿsü|u«?ü/|’Næ§Qï7=åQø/8Kð<ÃܤìFˆ'ˆí./)ÆÑîƒèú¿ËÃ×1y1 ÚT!Gé§gS„Àßg½“b>U±§—‰ößÂ'ÔÞ«Yo˜ôÃç HÆy܄ШÅv{+¬ÿëÕóúèt¾ôá«ç 3b&¥MN ¶ƒööä; ¾Ç/Ÿ|¿ÿú „ß_½~ù÷çOÃýcø{#Ü?z ÿÿOþ÷ùóðà¯^‡/_‡‡/^=?ªŸ½†Š^4©ƒÏOŽ sPÓ3èß~øjÿõÉá“7Ï÷_‡¯Þ¼~õòø€ÖâvwËÆÆ†ö!F*~8è„Ñ }óÍ7›´Z›ß´6†ÛÛ‡;wþþ;…¿ÇjÉcaÒÿéä°&I“›FèZØ›[B–%KÉJ~K-æˆrbžpƒÍ£hR‡‚l{qúW*ÿ!·Mõëš—Ñÿ ÷Eú¿»{—ÿá³ühÌ Æ‰™ IÏØ€Éƒý±6–žM:6{¦K„¡¦žR’³óPøt–1ì2)G¸wÅ`=4º6æÎÓYí2T6Ûæ‘êsÞGg,…1w sŒ8´ˆÀFƒã{ 7S3œÄÙE4A3íD}Û° 9q÷7 ë7§‡ÈT d 1V&·_æyH÷Ä%cgY ¿»hÌŽÝ @ ÝüÑoLëMÛ˜5†^ÚJ)…öãˆÕ<%¢ÒÅ{õ"NÎfCðÉ“éŒ/ëaòÍÖ8ÌiP°’ã GîôRu„>žàÅ·T› e+~IÅmðMñrd™Op{0Ò¡? 8û‚5þ\³Óm¿8+† Á™€¼Éíáv@Ô¸0Æ |ÔwS³ßž[W;8ICØì3ŒÛ˜*+?©´ Ïìœ1F¶ô„b°™µ«1,ç}Ç ŸÒ/,¡Ö$WÑwø(ü>͆ƒßm,øôõÁþÓøÛ „Ë[ŨJ*’i“ÑG‡Ï°jT÷6¨ò’1UaÔnlµ7K/m8]ù¾d/f°Ÿ²½oÓÞ†ÿ]7EÉpo£—öþ¥|ØKŒ±a|;¾•26 8 š¨`êR°ÎRÇšœþ¤õ³ •ߥNËx ×†MC ó7<F礃ƒ„£ ç?+uÖ¿@Ø'L®”3'»ÝccħWâi¢Ç¹²jÌ›¢Ö‡¹µm°”ìªVȵ›q8Ûí]tþ»3üôÿùõ…ÿeüßÎcúÝÑÿC©Ç[wü߯EþßmÁ¾ùò”›wJ€/@ PPås´=ê^•ÕG d1ç$Kð ´ÿ¡úmòἛŜÈV?Ìbõg¼ ,_xù.â :¨¿gc„æŽx¬và°ÀJQñ*‡°a¡€Ä·©b w H·§›Ç; x< (M±õr/¬½{'±¥ö‹¯÷ÂmIQ,¢Q÷ª;NÇ]èò¯ ±¯n´#ÈßÈ®G©ƒ\UÑ)Z¹’™†“HI‡ ÿMu)«YTJ¼¥˜Yü‰nÆYBÕÅÜ?·…iüâ ‰Å!ì~ž4Gå8ëåþS8[.“%}iQÍc!? O(*T=ó„°–ŠØ Üæ­QŽÙ{Šƒ}â_ÈæYâëùËÞ8 :´./k…0ÖŠ€Õ@Ìi›9ø)JÍúÊšxŸeg®7KŸbš’ñ²YDqN­~kôñ(ly¦Òž1š/œ Ïâ«°Ÿ.|©› ý¦Žta­y¸¯ŠåAsCD¦/Z¼/ÿôv~ø-/¡3æ¸êÃÈ'™Ý¡L¬VþW‹Ÿš ?~¾EÒbÅ•gÕ+6|ô Hó[÷O¦$ÜȦîŸmZMá6š e¶~®©¯ˆ®üØáÙ Ø'ŒR;%e/XUT “y•¿„¸´G¢öBu‘¥&º—™Tª÷4U1«õªhI¾¢OõAœëˆw]ÃÛSÓÐ¥ÙÞ«)Tzí̯©F¼+³¢ˆ­@kp~Áe×No+O¡þâ^X纥ãf8À„®Ó\äéU÷ê"5Õ"rN#,ân’w£.¦&ý©­j<$Ô,ا‰ í`|0WÀˆp‚p©£Ñ:ÉXåuBÁïck(²ÅUÌ`‰i8Eˆp« 44J1¢ófZu뉈0ì17=gêŽÝp`÷ÍVzVìòÁá'óÚr¼€·˜\¸··ÕÞ¬Y “u[üá:-na‹˜ ZùV¶ëäÇs_ìŸ>Ñp 2—òïk,®—à²^(-g´Ø‘µa —i0ÕîY­Ü7ÌBÎ[ Çpkq\½iÉÜ«*K²Ð˜PÑ ©É†Æ»Œ;…æFï˜êUß2Û®ÐçÍš 69¬À¯¨å³iù†ÆAËSÐÔ„©”£)Zñ®0 Œ }õ§ª.BlÅTLëƒu¡ò{ï&g¼·Ò×È´Úø9Ϋü«œêzŸýüFëŸ_}PÞdbR,[ù€;ŽQ…œ« ] à*!3ð,KÐü±¢ñ#tÀK‡@üPMŸ.aJ-/ÝŽƒÄ§½éf _r³¨„¦i’Ÿ!Fp¹­WIÿC<èØLÙ^¸Ù~ôI¨ÛÒrÑŠåzzk{ɘ=ÙæÜêÛuá,«¾µ¼ÐRHÑgMqJt9PR gÖV»´Œ W°‚ä—ºàL”¯é¨Ø´wŒ½Ê1:;À×@o¥æj¦¿ÐËɱ–ùiaX»öš9N¹Ö+¼TÁ¥¦”¹ǦœÑ¨!\V‚ÈŠx§œìó±U×|uYÐkšrqÊA‡cÚw RçvøÌÊ´XYA¡Ã§5Ê(©¬jÈ\SmfàÔx#fÚa]or…þFïâÒ|»×ë=çâ}פú^âð]cÔ嘘Öó¸õÖGÑÎÜIŒ~i¬ ¾œV­U2Ó9CˆVùÒ%LØ|Ù•®“«Î¦F˜³{šŒPüKgëú¡“N›¢Àüв93A0"Ô^,nƒ$:§˜ß¦ùpãBF,ÌŠÍe4œÅ”Ô&oF [, é+0.Ò÷µ.Ùížóí"â±ÀïsRðóŠT4ÎËYn’þ†Nгaæ±¹Ámº5ôV§?²H}Ýè%çX>bš-¹haR ëìÜ:2™$**œÙùsÖÍ•Êb|WhœÄ®,'$ü”²ü„4­-Øœ”‘sŠI•®'|²6ë¸_œAh¡¬ï™â6„ÿ \"³éd6•­É™ap?>O$´iãàÜwŽ:°WI_³PÊÅ,]%`WŠÑú'¼!ªþ•â¯ÒŽÊ€°Cú¢`9`ݥš»e‘åïŸL\ù»¯»f',í¢)Z®Ê/TÈF)ÔÍ>æ7²âýጨÔÂS\ø„ŽB®Ð2ö ¶¬¥lÙ}Í 1vè¢Rvé9ë=a  ZþKÂtƒ†Ì¡ü§Eúlþ ¶¸ƒ˜ÕB¤?B Œ)/çÖPc¤³ojÛÕö­j[j°µxÛ ‡á0Þ¤AÞ—š»²E»Ää2ã½ 'ÿ¸I–éU W ¯P8"F°L‰§Ì|¨ÍÖÝËrÄ‹ä¤üÖ ô T0Γ–|˜¨ÇÂr¤œáäypÙ~Ä R”þ„郥Р¿‰&"„êv‹z¬Î›€ R÷z ÷޽o0Ä/ ñ cæÐvØ©—‚÷höKzh9üŸˆ¨,ËÎD1OQ[ÅgÂŽdŠÿt}az_Šb5$ü·zD¼e0ç«Ë;JÆ/ÂÙn?¬}Ú DGô9ÂWY°_ æW¯vâÅl…ÎhΩ7‡“›™}ÎÆ+6уV¯lI…K»[´s•—Û¢ijØ9Þ—O)™N³ÄsŠ”›hs“\èH+l²S¬êŒÄ.ïm›ª¨ÑtIRõ²kÞb&Ð7]TØí“$g÷l-;½(ûi‹]±òáÞÖ³Ë5‹Š¡ Õsס Û36ð(jgË7]7r›E'‹ZEÔ™3¾7ÈQ›¿ð­ˆÁ§áÛ¸Ðó19™À_ ÙHÿ¢²væ§þ´ÙN˜w¼/Cè_3lµpôå¸áÝoÅ¡Ö~q³RÞ2&Û=1ñÇi<¦#ìÆà_Dhš“ìi(fùú7åÙÄÙ(¥h Y™YI–°ð¨Èÿ6Ê“>Ó?ü QñL9ùẑ‰›Ös` r/¨¢å»mpmAÜêÏyt£+úG¿XñÊ3Y½Ÿt"ão&‚ÇùDñlFš3·©*Œy™…­VžÆ»Öt?®ós±.ÍKmDcMÙfc 
{?ï~³ý‡o¶¬ýßÏ¢ü"æë²Pœ^uGùhìçÚŒJ÷SLGòwËNc[Õ¸šÃ„'ŸÆÝ0½Ù+•¹;_È Xa¾+ð7cUD2ð©0.`^DÞ kÒC÷ž!Ô~­·"G ø"ç©Kz¶œNѧi·wî N›:ä0(«)ƒ'Êõß¶tÞöeÝŽÜÕ~¬•WáaWΣ·öÅMqøÙñ‚>Þ&,‹ZÜ1%J)/+°ÇåeŇŸfYçŸ~Y‘ãÿüK»¤ Îèµ—çí6÷ÄkΧ“Žã•ÃyV^€¥“»sÏóÊ:ûsЊsü)¦“| ‡ìF“zÓ €ãªsý9P÷ ß\Ñ``#ÜÁ­4ˆ'ñxû‰ *DŠ@. %ÇC¬ç%b™7-ˆf® GDs‹â ÚpŽšÕË$›Î(¼³>éÚCãˆx8fÐH ¤ÝÔµ³Y"u¸¦9ÞÓ¢{£~ÓE)ÙÛÊØQ’OAfC…i3˜¡k¡v¤¦(ûÑ#‘Ʊ6) k²t†þ×!» /I¨åå±°«¢ycIJrhôvûØD‡/r>̧ø_Ïý­üÎá ÷CY%ĽÄ#‹@â§ñ«´Ü¯¢l슜^Âzˆ—cê*¿Ã0m5b‚_¬œtSu»õšSYk´eÅÍö¬wé÷n·âÞZÐQÎYõqåSábÇÃÚç床Zöò9"ÿV:]Âú–2]N¯üc­ÖáaþTQЮLME›úqºÜ7%%;௃m$m3‡+ÚÇÌœV&{Ê3”ìjæx²å^%ÁØo*@„aà#U¹ðš8l[žJ^.ýÊE¸·¨t[cJ*¯Ræ÷Z•½†:x˜Ä=•]ìûý×G‡Gm,H;V]Y£=ÉÒItÎ?dí6ˆW,€hÇDL/QÀ# öBòÆ­$²ñ‹– §P…€Ï¹{f*Šºw¿ûÉiƒ7ý[MŒ, ûiqs˪2Ê}ÅpCY Q®>µÃòųC\o¸©ƒ«®½ÞlZŽxÓ÷  ‰²“@7`»?Œ£¬ÇôåØ0lôV0ñúÍâsA£‹e™¼À´ßbrš·Yð–ÃîL³PwÍ0Õìˆñå> ¤˜a/ptJŒÖ æÓäÜn“Ƨe »¤v|²ÿú$lý¹öGjŽ›©VüSq¥{‘¨E›Ã²úk{Å÷¨×™M5™#&ŒõãkP¹ÎÝ1¯>æMšlâŒÉ…x„ÞÔpZDÈd¤v6T0ëIì1sºtž£2SÞ~VuW[„‰!Ž„—h‰ª‚{õ9XŽ_€Jãžlè 9.²ækš/øv¼îèDvÙ·¬ö ­ËÎ$Ý€ Y>É8›DÞ%ä]YøÙ¤ ‡ÄqÅ:ñsã;Oû³| §œ:æ¨Õ-[#l2;]Ù<œGUÂu>´Ë8›‡y2JèʧT…JHG.æ  é+Ï"è’®:BAá†.³á7R}EÁ´q%BJ8P²9iü«èÇàÒ´©ÚÄAÌ ø?šÿ¬°( ½•˰LsÕ ß1 rC`ôên(WYÖZî=¿®|ºJ€P¸W…µ#>Ð+‡ÿµõ‘ç=ü¥ïq*ª#ݹÐ:©ºŸÓân7ŠqòSQ1óë‡ .¸oÅ…þø²W‹¨ËäðÕ¡vq ì¼"AAˆâø9»MΊµÑ½M9oz•bê(ªx¸@šE« íbè9´Zte‹·Q/äù'ˆ9Q5/dñªÂ_÷Ð+ýY çç§Åá%¦ÿ˜FiIXû3äoÆq©Beþi…èä[VLç"3`§Ÿò‹°– *†ú&GgéWâœpýª¶-´€;½l—6ïáÔŸ³ë:ÌÕíxi_RÇܼ¶µhãì‚?óhˆ`%g,M§ˆ§Tb>b»IGI´äŒ¼ýpóáÎãn/K?Ä@ ¹P—2øh¡°qCrØ›Üh(B'Ѳڋ§Ó8 ç1Ì "o̦Ê. ùP9ùP!×ËâèC b¥%Õ©Êô5Èõw4õަþ²ijy{¦£¸…{»HšÒ¼-ÇIäß7 /üb±¼8¼Ê' Çàé aÚeí숓¹ûW_…!«g äT)uìôߪÏf.ÄDìØAyñ„¤‚U„:ù\ÉfK#äÖ‰Œ»¼LWŠ+ª6ÕAðóàEz D™Mγh¼-° 2H)­Ü&w_¡ÂºŽt}œŽ[Òf#D!UçÐÀùruLúZ[vÔ¢÷Zr, {7SÝÉ.·VÑÜ­O%?ý eRÑ,¤Èéõn1Ma;¯¥žeéÕƒg“qÑ\\Î~²20Å‚ˆY¥§±ÓüªÅ} §¾]ŽúÌFö–ú½Ü‡C ‹@•&K'þK¦í[F“´=í\ÀÈüúWbx¯tQ0ŽÆŠH;ÕÀ{R¡ÁÝ“^lµ½—#Ÿ(9lŽƒŸLòô"¶(J'øÅŽí_>éØþÙIÇA0Oª Ÿp?ÊŠñ>å Xó°u´Jºƒ#uÛ»lGá°'‘¤ah¥«Tœ®Tñ·ì F7½ia'ä×ÇÂWw>Øn—µêº›‹HÃ6­B"“=Ã¹Š…5QYZ¹h râˢ˜uè6)ñús¹U`ry7w£Á€BTŒ5œtÿ6·ø*š^äBÅñÇ: |Ÿªb‹ø”à}9hBOS–öKèo“Zvy¹©Í׹κ׼ÎЭ¢^£ ˆcVë{‚¥ñ°OPDÈ:&ïÛ¤T2+õ÷Îâ.PpÈ€pwÁË TµLÊý…<—rèc§*wÁZãø%é@ncp‰Šãê®Ðë¢Q­­ §4·~çi.sË!Èžö‚7Q×`5-ú¥s¦‡ÿÃÙ#þ+@þÏ€ÿ‰‹Ÿj¥)}J<„¦¶ÃôÜÎ&H1VéTc1R’>Ÿ†HÛ‚ E!б#žÐ“RxÉÇŠhÇa’*]ËsÒ(º†ƒ.zQ¦âEÉÕÇÊB;‹âQ Nu²Å*rCˆ¯ ~{{›ímBö+øä… çÏú?mrÖ=ëÙý=zP>´j +yÛJÿÚ§ß¾ùk9õ†Õáöê,Dn÷¶›q„mŸ·ì‚uwöL—QØ„]ʘi¤C…ZÈo8[Á çá(Ê>P²¼Àéà²iõã^ÔÿwÖÁ_˜j[&µ¾ÀB·ŽåÛ\ˆ~U.Ä{î—ÕÀÔx™,µãWº4ÿ¨~Ú¶}8; ävÚ\±¢BwVýìèÍ·Ë€VãT¯€TM“ŸÐÞ ²6úys›£/²nZÞ,p™£!®ÇÎTÓÈU}¿Ö̱­†áMƒNžÀn‰¢FßBVå|¿i¿?Ë0“Ã`FøË˜“öY]×í4!¨1 —¢´!1¹¨åÍËXBËÂú(ÍI&‡=F.Ë!Ë Œþ=‚ý))–8• ;^ÕÒ¼†>ÍVŽp—_!êk—p±A–$Oj9¦¶?›'n¹Úc¬léü‚(áÇÚéuQùíßÖæ¦¢C¯çÝíF„!U‚õXŸ"¹Ø˜º—„¯ßvX{¡æ&òGŽø…(ûC×Mwîüõƒó×ï·Õå¡vmج¨V­ëñ|‰KÖTOíCõƘcbˆñG޶çUœBŸ8£!¥>»ÀŒŠ)ìtØêã1Æ6’#t × ™C; #7?H‘‘Jö jØ‹ò8yIrÌÞÆÈÎÃ4ýÂqÛ®!§Œà[tàéø f”LU:‰.¢3ô³4Ôt~l€tçiÏÊ èd+œl‡“p²ëÉ­·µ€û¤u÷|³}ov|cïϧ»«6w›ú¾r„ͤlÉl—í|ª¬&ª}óà:­ß¯™«”vÈ^í&¡!Ÿe {l›ÂWæLú}·ó ;Ÿ ߯aãí~ö³`|1'¡üp×÷û»S<3VàdÕ ’ÛÅ·ú%ªÅ›ý¶VÕ7¿ªCu·Ö·´Ö+‡£^—ry €MÎî.Å/ùäz©ºç8/¾ªŽ²eÆaÆ£$³(ãê÷¢åù¶œRJÀINWæŽîb cT(Q† üêÝ»ñ U7 ¦ùJ­¨ÂK\­Žt¼À‘DÔ [K\L½×7xPb ÆË\Ó»§[u^ÙŽÌ…”®:Ói§^kc#ÜŒ}uˆö1äÎ…9{g­í_ÈŸ/t:&h¨=q3+À ~‚õa¯Ê§­Žm^7…|)=<ăד~ª‹RÑ/¦†'ÞÔVJÃdÒœ/ÈDÎ i%"çT&ž½ÔÍÝt<O£bƒ‡5>»µzš'ƒX'೦õú:‘A˰ ¡ £µå#´«¢lc*¨§c„œ…Ô„_å)”.Ê^¤3¨t/Òqš¹ tSâÅ(8C–Áª~!7‚²- € \ó@¥{ =µI¤jÝGÛ»µÛ|²kYÓAÅþ”£„l ûV32,'Õ‚!ð¶A’3ûØJýáî …§­Ûºµùpó[Ýcò³Nú”åCe«$ÿ©äœ¶…à©C72IqKþU&^t¨úV™MGÉùÅ4ä ÂH|¹“>[{N²Šl³~Çq³Ô¢Ú¹’#¿OÆáCDˆxiçùöM+‹ÃÞ^:мö„þÎ&ðhMÁþà^/ é3ÕpàŒaSòi9‹§}Îwï|ÁР‰”ýz–Ç_SXÐÁYÙŽÙžM)»]¡ŸÜîõ±êv’ÝŒ뻤>®{¯ÑŠÖº2šE2þqÙ@À$æ> c½ÙÞío­ò!ÚÖâAás~Wʶ©¤2ÁÆ­LƤ+ëf€Ü ·Ék¢+ÚÖ WÜ…>¶¯µ ¨å­ðYÖp+ÜöÞ½¹Îª¤è*á™LxŸE—ð,=ëæ°ô¥Ä”>¼:˜ (ó³f†âNÙÉ%¿H¯ƒxŠÎœ7 AŒEûïUÚ3B J1ù¹Àg@ݺÍÒ”“1f´ˆ[ª7ŒC€Äì .Ä)å›IφëréäM‹@˜qÀEÍ`Ã+Â)èIL“Ì|ÖË‘§nïHR˜À¦kõ¸}ÞnºkHÅ*=¼u{àŸ*Ç^^ˆÛIµÒCUo“¯-« jPã¡€™^|³Å©UÚV²—_D¹M8ÔÇ[í-ËBO9$Ú“ªÀf{æ•rgà"ØCjñp'õuוC@W˜Äã/Þ¿‡3É'•Ø)N@üþ=퉜ï¡Ù¨+z Ô5uº¼ÿ(}yÿ~« ¿‚X† b·u^Ù fȼjrûD¦3ŸM•S^Â2i íœEý)û¾ÒaM {z§Ô>›ó.2e赇 
ºáORkðaUÜ_]Þ5C›øÎ7ºm¡*¤ˆ…Õ»Âï†ÈäÓx8ÍÛÒ±?J=îÓvW§ ïn(BÑæÀÌÖkx¢Y²e\XÚÆñÇI*;ŽxÓda*ý‰÷_Zó䑘uû±Í‘3D2æ{XßEÛ0úiÛ­è\ìÖÔ1Ä4hמw¾™]>‚àUd`@Q¡°¶ÕÞ¬ýdFh#E›Ñº~ŒÑ ;–¥u·õŸ·)ãzŽ[¬^l³2€5‹ËÎnŒÑBÌ¡S ìÍ ø6>K3½ú@à›rØòiAáç&äíƒ*èÅ@ú{@ÝЛ˜Õ1Ñz‡È´M {—ó'‘¸˜â¡²ï¦.¦ƒÖ¸ªÎmux†<7Ÿlê^–Æî`ÕnŸèä6••ÑHa*÷,žÍ†|h0é}‹)KËì&ª(PúŒs}ù÷æ¡›óWcE¥–ÊO.T@u‡ó ŸŽzɘ‡—(™Ó¨2¥¾=¦«D9RYõxn•† éË÷wjÓKí¨Å¢„¦òYl`¹;ž­Ó J϶äÚæçœ9 ƒÇ7Ûž½·… † ú¥’YMe‹nÁDu ³Y¸bü˜¨23‰tkÛ[L^´)Ž9T'³‚Ïi;c\m>ë÷ã<‡=:œ+-.‰Í†ÙÁŠëV^²ö'ÐWV‰–fÂ'"ÀŒb4àdØ8‡êàQ%²¦ë(=KÃôš'~/tÚ6þlèç=Éâ)EN N¿`ŽK_ øŠ"È–`oˆqrΑ¿#þòPZ‰ò^gvϽ]|;݃!Z½éT'}­®Ní0_uëŠr×Q躣B}ܦ%•©ÜŠõɰ*ê³)á/N½à~aï~ÿgÚ<âÃgÉXg{Çè- |ƒ¹"fÀð &+Å@.Ø3¯2ÌÆ"è5\ ¼€n@Ió¦`‹ä³á‰š¢…Ãa×öIoÄRÈY:„?ѽDtŠX¯RaèÉ!muÑÌ¥!Ck§1íïŒüp,.“0¾_´ç©g‡RØb¤³éVÅô‡}»ç=u({#AÒ°7Í76',F fMI[öš’ÊQž««g•[ÕQUÀh¬v¿n„÷ÝrËhçï\,«ÚC‡6˜LáÅÜ^G]SüZkà|QDEöê¥{M÷ñöÊŠaŒQOQÖ D@DÎÎ@*„X[÷”¾Ë[Ab42ãøJóÔSÌÅ£:H ™7xØÿ ÒqÊhŠ¥8»•5ç\k2­ÊÓ4šo%ôët'Ã4BÂFZŸ&òŠJ›Â®eÇÊRxÇú`¥È-8û(;Ÿ17öè¶üm3©pÈäýŽ&ÄóŒ&„:ú0Àß AÓl^–j™ý„ï}9ªŠ…ÙÈ…M"ÖE%šê÷”µ›vËV¦eµì­þëäm}7ÞXg8ð›Ö¬72õYU8nÑÉÅ:Ì{÷²wcdáäIUËÞš¥;e‡gÊy¬Àù?R…æyeE»PïgÓú¦ÚB•_°¾Øå¥!kJª$7šÊ×âSSPû4«ËWç¬]ú '‹3; ¤c«®Á›vÎÎ'Ö\ë M¶ïºªkn¡« ,7<ÊLºÌÒêšò¥ÇKòF¦WKÁXev¥ç+ÛWý{‹ØºÆ‚Œ¼æU•ÜÖî&nïH)ñZÅ€çSþÏ#å`á2O¨a‹P{_!pzÅtúD¿ û¶Ñ®»ÀfŒ´ÊzíªK³¤±[{’ñ®íiü¦vŒs\VÔéèg+yêd+~¢´?È‹µ1e};í3ò®8Š4Ëbº4âʳÒÖ¶@x2$gU%+¢î&œÇ’åŽQÇ$¬8kqtªw[ì%4Ö*‹®Çø&±Ú-ÉW»\Çü ¥]FC˜‘†xyD¡Îÿà¸ý ÙÑN¢ì®$ð2è–Ž¨l–©]¹!q3¦¶ª£c’Nn öw–þå[ý$M§¨4›°ª ‰²=8vp¾Œ AÇË$¢®S/»1 ½úcÈ ðÀN§],o¿cU«Õ¥ÔÅwJrí æñ>PÈ yœYWÓeç‚&•(«5ÍÄôÓ(YÙkùòvo»Ópm#šµ´è}_% ˜ÀNQOqG|Ë9~Ü9ÍÐ¥y‚fNýÔ33”Åê_¨KØÁl}ÓLB}áòÂìÐA†>¦¢ÖÅúЏ\cå+ ‘èVÙ'¾§=ƒÖp˜…~¤@m|г¤3V830Ô1Ÿ'XA:¡ax¬™_bj–õD˜e º!í¦ZøÒn=Ö«ºVÍž›R€É¢²Mû²0ɸ‹ÎEüã§¿† ÁyÓ‹½·dÎZ8 ×÷­û’oòØW‘åð,¦šºkIQmn2“ÎnÛ™¤3M³>;¿ £w„ÔB“A™–PZ‰¯-‹¤Û Õ¬ [Ï|v1çÑöÎÃo6m¡o7RötÌ -Åðå2%bJ.£ uE6Ds!áB.$X“ «¸àZ\ˆ}­’Ë5ë”Љj’ÅgqfÈÙž'þ œÄ€ ÛFRI…0³‡y`FÒ Õân”uÉyršÉ(-Âè¿ùÄŽ&Öe¥ÜS#ål…ù¥È=Üô»Õ"$W`œ3p6MGpaôq¬ýG³Ý’+Ñwy gòJÄf¬c­ˆñ€n³ÅÚÜ”ÄC7a›Ñ­‡Î—³ó7c;À[t-¾6©¾Â Ø Ôm9»¤;¤˜Õ*üŽÉƒRÙÅ^ñNü¿æ-²š¿õ-éšr»d°¯Ò ÔKŠ&Ñv¥'k,Ö˜„×ä]…1áuû& Cö6¾85^Ë¢àE$AÕŠµc¼ˆd݇î(NAbŠ‚ð‡xܘ ¹²AF ^@|D:(mÓÍÛÂÒ?CF‹äbÜ(rõ’Þ&B^nK6@fMpLFiÍôýSî¬R#—š{Ê¢Ú 8–h8C&ùpª”$9cß"ýÊ{<Ƶ¯kššô#É-оKã¹vòÀþôñÞñtD´–2bò^ŠúSqö{aÊuj|¢Pda±­âƒœü6EØZ6ùp#C'ò`ázÊ÷/Ò<ã$-«7`Ål<šL甊Eº3NnnAœúHBd‰Ç^l÷ŒQ©À¹^¨µet†G²¹trPÐi_ÂîW‹GdÙ„ ©E¤°åá¾0ü1—IPUìóW§¾8§’+´Èì@2UÆÛv÷+ÖFÿ:•‚Îó%zïmÆÓª~J}ó¡2Xâcz(nzHA:¦í;õï'Yé¿`û_ŠUÆDlc«Z0%ØemKÂÝ2º­ÖöçRXòFNœho¦.ûú¾ÛŸd üÌ‹ÿò ó³.í\ŒÿF;Al’ÐJÂ~4(Å-Fý÷ͲÝr°›U¨Ê-_n-]`q .ÂÓ@k«ÍHN‡G‡Û¨°ï±š^Á¸±î™A¢9 a ™«^þ E;š¦2ó‹ø8$ÄzÅ3!~1gc ¼ÒÃ!¼=ù 8ߟ™b:šë%ïÚúÕ.+ðlE‹Ä¸-D4µˆ¬ò®3˜  Í€¾Vô{„ tî‹Õª ¥u‚{!$‡£ØCwt*ÞW¨jøìZI+ìKÜÆÕY±ÞÔüåÉoU¾ªN"Vh#Ãð»!˜-k¢—Œ} ”ë4uÆvç©hQ®¨`Ù«*ÆcXTNÏNãæ ‘T œûû³_•6¾ÊM™Û7¤kµnJ:[ö±çmþËœú³¾èJÍO5˜™$$áÞ¯2î°öÀšé8ìòM{Ô>²ïV¦eh{øÍöÃí]›„}/v%rÝX\ƒOÁŽ0v†ž‚ÿgmF• ÅŒ’K|ªlÆç…‹²Ém®\ƒ‚Pöè…xŒ <†×UVKŽ)Á$…›£G ó6 “Û( ë?€•fTB–{ƒŒæ €í hÃ8© Œ_J|@®YOVá÷þ,Ÿ¦£ä|ØÖñ³t&©Q:#wÓr—°*ɪ+^–c|D‘ÜTA †ºT0®ËæõiÉ‚Ì(ok~.’¦Ä~bŠáÁð ÉK¥O®bI£Ü¹ÿzº&lŠÿù¬2´&ÁdÝ2šÍükšY5#_qÞdÉDN¼ß™ »à ˆ­ç[ƒ+0ÌãböâaeenB-Q蘩.*mr+‹g«KûÄÁå~¸x³cè&y`óÆ óTzGWÌ Ž¯|Ó49A™`~:Wšbôñû`mBÝFWBÌᛇ8•ÝòãéœÜù²HÂ(—tÐ r?|–Jnßq©6Z™Ú00ŠR8—Aݱ€[m©»eÁP@y ô‰®v2›DŸ&‚ÅÙW¿bˆŽóÐ}è—®ý,‰‡l}ð•̃üi+‘̳ Ž;ýЦ›ï‹h­y2ŠJA 3‰­"ÒÎh¼ÚZ:N¯v€(9N|ƒ(:{2æåL,s”ó»Te<¯.‰$ÛXš‡{˜ž#ÞKÎMÓ›“‰Ó†?ÅʤCGÀÿfh:ÂØ’²ÞO½(s:Væùfi2õŠïœ¶jµõfñ)ÓJÐ*¹Ò€ÌmH‹…ªB–ób^s^cL„AËC‹T;ý©(&–@™, ¢K _û¬ákÌ—ƒÊ¶nh-] aŠ2‚À3o¼mW" ,ìÒÛ€«»žBž ªbÿh3x0Ô® ^8Œ½ý´^V/ŸþO•V“vÕZ@ÍkÀ Qå ²_º—áD÷«o‰l6v¢»)FY#õc‚ mU;ÿ6XЪÉ3+°w„×Òà.’~…§²­‹‹ì² „Œ‘‰‘Æoëã3dzçHåáfþjBá!Œ]Д‹N.9Í𬆆³ê¯nlúµãØšPk¢ÝDÁiýõ2Í• HU84R, ˆŒƒ„1p±2LØá|‰@¥æ}4³Ý®b\šŠ¯Ù`p oî`\V[z Ñ-‘§ø€KÚòZ–+÷{-V¨êËrbÖðfµÝ1Z×f´Æ·ÅcUæ¡•Õö3Xe¨Ix©Vøt1;µW@¸,u¡Ôr ^¯}5T&áÝ®·±…ÅĹDÖ‹ )†ÙÁfpù&3>ÊÂbÅ}6Ãß5Î=ËôE1Wîþ¸9uÓû~,WýµR–[ù ×»Öú$g¤#V˜žðç°¾Ý w…¦!¥Ã-×E=rCÈu,¯xT«¸ŠÊ~ç-VFºþÜuŸeúƒIŸŒMàU:‰Çu<;ÓA2޳߾:h–>·àÌ—\üŸk??>yúòÍI£Ü™65ÛîÓ\yW—wP)\{ä++KÕ`ê:5Rv ë“öU”Lu 
MKpZܽâ³þÇ‹ÕÙèhËáV‹Ç·âû“W–“Šû;íhq^¾~±ÿüð_Ýï¿;<98~µÿä@ë ~¡Á'8|4/ ×R$Þò°ãj˜V±íUйC¨^k¿Åß#øpŽ"=• ¿¸y¿¶úaí~U-öªÌÔËiñyõC‹Ëï‰^o¥èy)U³}½j‚…{´õ‡o6»ÅÖeü;HÇSàâQwš’™Rmƒ”×DùE—D=y›»ø‡Q–ävjJUL¹kvE|.”ËbÔÂmwÉô—1(±3˜Óó7²B#ëè7ó324i•Ð6—0ÃuAŠèÂEÛMκ(³—B7w ¿À‡Ã^¼³IÃí_ kc‰º é3­ŽÎ1~è S”™=gÕj7 £Ävìˆ?ï?Ö7L4ŽSÇÑCÎ(]®ÈP©\Ú!Îó„`éœTòSNE…Ź’ëüñ²PtÐu%*©rw“˜œ“bJQ†ß ›¨£Z\zÜOgÈ}Ær^-gšÀˆ‡«¦ĆÿrWìF£1»žu‚âñæGíéÛÈФVXàOð6¬aÏîå¨NJ ¼Í„’Õ­ÿfh¶ÛU¢ÿɿٕÎÎi1v‚ÙCvË|Vß&‡–·ð°p•1C8q2 °4ÝTmq²©šï‹§¹—5˺cxXñ%ÿƒ‰6f*ÑÞF´QÈM´o4üUÜ qL8%Ûçwpr7é¿[ôßmúïýw·Ä Q9Oª|¼^ª¹¨ù vhiYÀP•µUJm­Tj{¥R;+•Ú­êO¥mO—¼>ç([9 (Fó^Ž’<ŸÄè:ñ—Æ+¸8ьՋU«*RïJˆõ}¥°OñðC=4P xÒKpŽÒ›öU6š« +ƒ#m)ÞÎ’}‚,îem‘ß«5|GüPäƒí@Úþ.®îkÇû1ƒè¦S˜ìÈ\•S>*øÓÜÓÖ«¤ÿÆg¨=à'ý ö=œqÿ¦³×‚DZpÞ™hmêW;VÈñ€+ÙU*)ý¸7·?]ÚÄîÒ&vVj¢üzwÅì,íÁö5{°s;Ü^ÚÁ­kvpû³ôkiÿÖÖibœºé ˆR5¥ƒoÓ_9éwxv47±M’ŠÕ˜ñÙ÷xŒ–hðY³s²t'q6šq†ºzü1¡4Mº²‰á•S¯©BHeÌq±ÜW0zXúHesh¸ãŒ¾Ô-Ë‚)®<ÃfU” zàHÓ¦6‹lÔ`:F5‹p˜žz+Ó}_±6k¬L»ÖF½%”ÃSb‹ò{w@Å@9µ‡ÔyÅôè´D «Ä‘g6†ó`%êå]©éZŸx'›é/QEÒ)û‚®ÇC(ä“‹È[V!^H( ìIæHhëžÝê¢C©$øÄ~ºÄ¼_¡µ¡Q2y“3oWŒNÏF=Ä$TñßQFŒLJ¨zÃü‘¾n‡z´ÂïÑp(¹Å¨W"ÿ´(§‹,±ÄãøŠó™Íf±6‡°4äí¬õv´¥î3åáVUcéx8¿V[µoCðT5EVµÖ_¹µ·»§ØÜCÓì‹pwA+<üba½}„ =.éñ‚¶(~lý¦þ€MýÁÏÍÚpp­ñlmb+ßX­lmªfôJ8-­ßFm‹6n¾-϶ÛZÜìÆB“×ÛýÐéþ¶í;ðxíÎð̯ߙ‡º3»î¶Å7k÷‚öØúx¤;ñ¨Ð‰GØ &I¨¸ñP¥¥ºC_%CÛíÓ³?`2¶oüí›èµíMocøø¶ÉÚö-ζŸ`ããO@â¶w°Ñí]Oƒ»·Oå¶Rk­½·ýðºíÇÔÎ#»Ç·Né¶¿‘µmîío¢ÏFäv6Õ‘†ß|D."·³­;³åÒ—íÏFäv4ÙßÙ)tBs"ëÑ2Á›-¸Î‰œÓŽÄ-Br‘ϸüsÆBôZèqnG,Щ3K Ñ‹ ÁD‡üpzzÞ+…”ç'ê(p´N«{œŸ¯Ü®vÛñÎY%ŠÑTJž”×ñþq4êeuôgtðPãQΤF·‹ÝþVeÑøî1>0Ì«#–“ †øBÝxN¬:LNPÊV<5è tV¢lÖéòf%ñÖ}¶ ­ÃßIŠË3í’3szËX½ø#‚O%S8u0¨)·¬´_Ú¿€ý²ÚÎÿí쮂oÇ’ÉY•GT˜*’w~ »ÃÏkþR6\8«l»ØÃCþ,Ëíã;ÆÅ¾.o¹x˜•̦ø­sœ£x!Ø|¥¶Óñgb¼·Å*’àø¶i²NÉÑÉ8cY[ˆ€ÉÃZ/*ûEÅ™±ÇéË÷KǤãÐʵ}É6 ¥O MOÉ*ÎkìUŠÙ_âF‰+ß,»h¡ìEçS=ûaG‹e f¼TÄÜÌÛ–ĸø ­®Y Œ1Öp\y—ISºˆî­ün˜Ù)Ïú]Ü¥t‰,X2ú¤³Q5ò«µ®ØÒPnü(›¾t‚A<Ë~ì÷©a0x·Ûí®»¸ó9ÞH }ðñ ½´Þ¡¼Ê1²Í’’½á©ö3É»zÚ‹àüú¹ZlrYÓOÉm ém½ŒSbƒŠ¶%%3n 6Þ#u¯´ûÍkÞD˜W¦ G-ëVËpv”/‰…âI/àA͹aníº´‰ƒs[jÄ|£ˆ=ý«¹߸5!¡e¬`U‰Sw+ªPÂ{–(E-–f1¬W¦6(³®¯¹=b¾yëë.üÒE·TtÖ¢ë)/tcÅc‹›Ã£¶Ä)ŠìíÙàÅ8³[E?ðɼ߉°;™§0"™ïúi†w/a@zp ~FÝùƒÝH¯»Pãge&ÁPðù 욇•¯óéR†ÂÉ^Ötÿ,DIøÒa‚ÀöÙlÜ'LÉvŸ—Iý¢Ï¸âöÙ’/WeÐiö~X §ì>ëGº±³Ãìö-øîˆdò«¿×‘*ÛÕµÈ'óe¨—\hš·Ñí‘.bæ\:E„,Ú&¬ž¯ùºáÔB^ˆ}âéK1¡Cþ׿¶ž½T,NE¿¾í/~ú¤Qœ«´3S8úQ6@l4y„É\(‡fŸå>ì•{œ†– Ù ÿ£*ÔÑz`sŽ‹˜X÷@.#¡îTÄ'H÷ò‹3,ESÄÖÃ< ]%×8¤OÍÀq:Š$%'-q4¦X‰ÿG$wrÖD…ÑØNßn åÁ£c~ó¦âmåÚP¹. ‡Xe¾ÆÍ9ËWß q@ŠÉ%›ò )Çnš?±ƒÚÖýöEø…ÐTèqÞ šèP&š_Œ¢iÿBüö{)&nú„—‹‘Œx_BÇ‚â #Ó]¤ 2“K®˜ï ôÍÎWLz[Š£êÜ¡DíˆÕÊýP !0­Â»Š²qwS™³°ÅGŽ˜,÷ÔÍ1ãRÞ§]ɃV}ìœm”A#ÚIZÝÔÄdºÖùÄ~P5(ƒQÄ,™:EëžHãT_o¿­»× Û‡€úÉìšP²1®šˆëOiLEŸóTDáy4ìç8ÑòsÀèa¤qÄYJ%S2QÊ[LKÍüq¦’ukzÔ—ó–vLû–ÎOTs¡Ôô–Ë«i®…õ1ñæpŠ«*Ú¤ù,ÓÒƒÀö1·Ú°Ë±ÅócãŠ! g’ŒÈ‚+Ñ`eDŠ7èÅá0Ím v ˜²4v|Ô”Ô ¤½7š‰öf)œ±«“Z°«¬ß» ŸÃîÝX+ “C)I§®/h@¦Ê»¯¤ðàyàtTjldsSfjÍ“*a0HÝTkC¯ Wéƒ q®×$,N0ÉЪ3àHL‰Ît€:,!Ÿf®X#/”™0Óá£T2ÞþÏ À…*¹¼,»ÍLS QacœºÇx/Þ±4Pò•8Rk‘‚n1í\•fNŒ>⣣@6r¦©ç"«À!3¬o ‡² ±8ã?ÙÍZGq¼”#G«Ç€1e‰›t‡~ƾmþÞ¶ß²Fo×™ý“3y«ƒ©Ùh0 kmëÝx;ìœO󧳌j¶}ýjm7¶×’hG Ø»¢LÙÝÞÚÜÞ^´Q“³ÒJal³8à¸ê¬·Ä7¬PЧ³EcÉJÍCAS«mü(6_YÐÛüZ›UGöðmbN(³êëÔ},õƒo¶àÍE4*ïçÂW8Â7jn²VS]ZaZvˆ†iö§Æöj'& ?"Òvön £ÇQ¿Ã`K‡’¦VUiún>­™3«ß¹µ]çð¦ãn?"†ºˆ^×Û¡_ä…qK$áTå4àû{z¶p—Ý߃•«-'âaË‹+œÝœ’¯]툯nåÇbÊw´“9+FLÞ¹,ï·uòŠÞ<7–ÀíK£œ…¿VF´(Wg£U,K§E§¨Éœ¬ŽnŸ|8oÚ!îõzMþÂFld3,<ÛR›Êûƒ÷h6c]Ê?Mª15J µ¾+&UW˜úa ̶ª·Å$‰¾æÜrÞìˆöq£üöZ)—w¿‰K¸úp½=kWL•ØÕ†úîÞ= ÿ¨P&2O[¥¥ñrf!=‹Q;<óÇ2ª÷§T ýî¡9±‘ךþ²*•:L(¾Õ:¬Õ4+U—ˆ¾¥Œ•u4|7LÉfÿšW‰ä7¯9Þ{5†J•H´<=g>áÝ`¤žÞk~²§-RSÃxw|[¤B­xMyT)6÷¦U™ãµ€ÎW“ÚkÛ…äVú+K~Ô†¤E M«òü©/N}RÄå\P´1ÎFí 7–€Ül:Î #ïÀŸµ…=+6Çk|³F¹Ž¥íþ´èý5©¿|}Ó+ z—®Eóo@ÏÜ@œÛ"f\ë/“’-ÉH]±$?µû"šµNÊêÅ[;¤p2GÃ_å;ôí¶ëYÒ7¡Ú’&uÒ³%•:Ù°—ÔyGÅQñB"·U!È6l¤eM5ÍÅÌow¤Ñ"]ÚU]+W=JÆÐsT—ÏÎÎ’{µÚ:$3X‰pÙüžQÎf¯Í÷îÁÂ1œÕñ´ñ‘BùÓé³]y°Tê\ùèlØ–PÇüå˜#=ËÛª¦jËÁ]q/¿—[ÖÒÒ®²jY²Áî…uï::è|U»áçÙ…¥*zZKVóùbòTÛ8œµ´_¬¬L0°Zt¦½.œí¹ýÙÏ´½n¬Â–ò¨œ-j—-äÏìÃÍ™$!eÒ»ÁF_öI1=èàD"q]ekoòÖÆ-ºúÖty8[öõˆ½X”>K³Â&®L·P¯&‚ÍpËRaz 6Ã[qùUø=–Ñç9©”AÔnwÛ7àe|žCºÂ ò¹¨˜ž’bÚ€Íî¹Ø³kÓ•nþi£ÛQþ¶f5'. 
çÓ™KÄ®¾ kÞ ·‹°²´+¡²doo›–´ÖߪaldÍEî]v¡&å¾ò ,P¦.ä ×âÚCƾšRD(~EOŸ•ØX~– UQ‹iœ6fqajRÌJ©&è…|EÃúÞlÜp¸›á°»î^X{¬>t/ÕKÎõé¿+-Áñóí+éVyëTÞ^ö½% wï.S5Ÿ–«®¾¶jŸdC.Íf‚œb‡»˜K­œ_)ßô?½š[7n_üÙz&¥ña€pI?|5CÁÁSdhè·ü-BAÿøãÑ›çÏúéAE^>=<:<é>{sô$@A\*ª_¦É üÈáãó—½Çýiøõèœòªy5ïbdò šêú†|¸ÑTÁDo4þ}Vahë|zð“§/^rãArü­XM+íºîd.³k"Ihïâ2â’äí~Ši­ýÛ txyÀÀãñø2É`EÎã)§Ch©Ø Å3#¹ñ:Kiô^¦UÛÛBM§°°ú¤`¯Í„›(Ã{ùK””êÁƒ«««öé$n§Ùù†:¦*MèSþÞÍŠ•þ7m¦~:ÒŸ@k¶ø®çÑÙ§rfߪßÛ} å!¼h¿h0貓ãsùúW'n«4U‚ëŠ9»ëغÔ-U™uÑ«ò§€3qÕNMWå*>ÀS…áß %è™â#lVÁ!³zœŸÏqãÁbMXÚ›ÆhêÿV»­ÒIó•x±tøìàø¤MQ]Š^|Ýî£øÞyäHÞS®¡’“’tq`1%#‹ˆåØÚ=ާo&´ör<¼¤TX…j¡€û×Ùî¨puUÃp“ˆNJ‡Up38‰w­ásèÐ|-¿ômùrŸ,_üÒþ¥ø4ý²î_³òÒØ¯YÙµOÈ0õQØ©8“[í‡ÊµQ )NñõšPã]³Îç%Ô]Gv)*úœaLãhL{½V»Ùú&jý°ßúW·}z¿Ñ*=˜ÌëïoÛ§ï ìÖïkUêÂF›b´ÿÏQ|õšA3ê“,Å‹¦^å*/Œæ¾ù¦ýÍ7ŠÂK*±«œCŽ—EK½² w¢¥(ídÎÙÌôàë6'K›²ÝÃØ„SG¼k´Ï³t6QFuŒÜ)4¸3Zð“ù=Âj á[wHËßÔ£†N^£êÁt“ÃbJä ŸÂZ8eŠÚIކÿ’Dö¯dòŒª¥Ö¢šS£0Š›NEEa>xõ7þ¥À–⧘²î)iÒ¼mü‡Ü á.…¹%Y=Û…á, 5y%pwdvä›ÖšINa²h6“ñYª|Ï<+®Ç¼¿™çsüÃtU ž»Ûž¨¥“äö‰SÁ§r¿ëõ=‡ýÃ}Øôß Ñ9@^ÜÜ4…0¿ª@›NÕéÌ‹Y$“Ë&`mÏ!4Ù)œ9N"·òÄâ“&‚ §’--’ ²ø|6Œ2l\!êp›\QÒ"¡ê2<ÛnÒ°éÕ䨾á$h™™Ž„¹WüúéÁßž¿|Õ}zx|âh¾| r›&ÆþÒ8Iȶ˜ u¢‰‡åí_ êNÚžþ…}GÃïjh>ˆò<&¿O¢«ñ° ÏÂóWÝï÷OJÙ*%q'V‡Èn”× ¬f±³‡wÝM_åRºÖÓG硃?Zhd æÄ)Jñ=F¨ðœÎWÿ<ùîåÑ«ý“ïöVÑXð³ªÆ²Ê†¸•‹æ£{q³¼7pÿ4–©€„¡sVV0‰ ÐWáë˜aîìÜ•„exï¼t¨ ]ªsL¥¥XÁ.Ñ4ùŽè '|’»(oZN™kY´Ÿ—Ð4Ä‚høn#Ï0­íbû(_åL ™=& öšœÿ8¾êÚ}^Öß.äáý×ÈÛ=´å׉éqz%!ó”ñRpC§ÆKY€ƒÆ%²;ѸÆ7èD²¹ÙÞD7’"LágÇi2¿f“¯§^RÕàV½äنŶ-sã¶ eVPÄ¥m\%Þ.‘X˦‰uvßò=R E]ëß®ÒÊ)=-9¶-]^’`é6Æi6B@C 5ÑøÊÖ…bhé~ó~4.~õ»ÚýêËbý YàAÅfyö€\+¥{0!½êv{WäXõ‹d[»¾ ²r¢üŠê¶4ªEÚ$ Û÷ß)ã$LU Ì„×ÇzÄ_ Øt:bÜÄÈÊGL¶bý`x“)ÁXùn·xvq¸2VS T"$Pš:(÷¦ QÄV·Õ»^ÿËŸöÞðQÝ Ï†Ñ9)ßà,ü1ŒÆÑpþA™dý‹ä’`·§aÞn·iÄ<¯jÔgIì ç$œ Çü;ú åOé©Ë!v™á·ÞþOíô~-|WEó^Ž’<ŸÄˆQþî/ï(Ô ©6ÛÍg( u0OPuª›ðÏöÓ´Òîñ¬œ›ÔÜì ‘ÍÆÌª¿ã 0Ѹ¯ÕëµkB¢´·ôºu?šÆQö4½/üîD ¹Ÿö/âþ‡8ÛƒÕ”ÓŽß¼>82Õß®äÐn¨ €k~LI¾îgÌ^­ÝªkæË¿ÊÛ•„¥ü±—„”‹9ä¨k‚šóä|Me¢K4‚èo½;¾ïÂZþå±ùñÕàtÃ]ÜøïŽÃÓûLdß]Ã|µíCo'œïø‹¥m½m Nᑹ¶ÿ×LoÇû¶}Ê9»= š¯Úeœ¶¬õ¬cÕ¿påuºÿñ7R1–¶B2Ò[˜->֏Ћ~c?^\‚í¤ïîåôºÃ)Þ''X%½çÌäKëRpÒu:¬PåfRß½Û‚boÿ}YÕYÍ«,è8ŠjÖßà _ÿ£ýöòn¼{÷öݻӿÐl?X:!ßÕ‘^å„ËLŸ¼îpú’þ8::ZþÙdþ®Žç¢AßMæuøª±Ú¦ZgW±f¢rKYŠ‹;æèS0G®4ø 7rí›oàšF“l§£Ã£ƒ“âÛæ×¥„¾nuæ ¥þbpŸ,÷‘³YH°k-Ø×ƒ6ü_%7×åJ ‚™‚ÛmMn¨jRêÚ˜ó—Ðé_Õ Wá¬A¯W¿å޶J3 ÙIÚA­0\|×]ÿ–´¿Üx7~3&x7Ò„B©ÆÂX³Q«›–íÅy§ ¾kïÚÞ óèèoFhýŒˆ«*ÿ,å½×Aª£‘iNê$i®hùEŠÔ@>"4iýÇä<‹ŸÒŒ¾’—•ww„þç ô µFŸò^· “ù€Ž§ràÊÓ/Iî\S,â_ý:ð®g„nüPŸFåé«è«PíQÌ 4Ž3òp¢ýPËCxŒõi¾9:üGgYŸÆÊ›rÖ›di?Æ€Gê×»åB¾õÍ»vÎë»wõ(;¿l¢Wåž(§ß½«SV_Æp3Ôuà÷Û%æ_…OMò ¢R9»ôæ*S_{qsïZah“#‰,<)“êû4H/]:þÖ·›áÃ9§¿õÝN³í¯RÉêCqžƒ0›'ó¶¨nŽYÅÝ\¡žžàN8ô@^“) «×¨K’]Ù¸º,¾®RÙ)úã¿=m-ÈëÝdêÖR× Öµ¬[ìóÞ3Ì ¾>ßSý?zùúÅþóÃt¿ÿîðäàøÕþ“ƒðõðT©[¸°œ a]ZÙüøöþ|Êߟ^¼÷Ÿ>}}p|ügϺ¸z*v[&« Þïôê;ýq·ùð'/Á+}ð`Y ì&xvúãÎöOÒ¿§Ã'ß<ùÛñ›¾^ã>>]mƒÀ¿ž vÇ—ü²´ó«3-go˜ø×³ñØÑôNæ~¦¼üI•“PYL«fQZ·­_PéºZpROn@}pÙá´økåR Ь¤ef…vµ~¹òC”]WW›ûbEº“jéï·Ì/~uÁôP[£Gakn”0ÐÝqÍ•kZ\ÑÚºè¬ö?o¿>m·Û4`þ×ñ}Þýfûßluµ'C·ŸEùE,éSZ&L2¶Œbqª2KVvcc¡Äή¢p¼ÄÕ½B_R‰úZªóÖñ‹cZË·f%‘އ¸ãsaÚ¶ÕÝÿǤT\é»b™çÎöð+º½¿D£9+Ìñttw¦ê;SõÏlªÞzü ²TßÈÞ,D÷ùYòÑCø¯Gˆ¿²ÜØÖgÈŒkdålSÇ3â¬=LÏóÙní£Ùpø]4 ãŒÃé1Š#2)×­#å p¬aÉÞ1Þœ&×>†»8­Þÿw {à_úS¡ƒ·æ3WÜ.¯Ú¨Ë¤ˆ›™Âù*LÇÃ9úG›&s ç™dq§’úL±’X{õ”½ñ=¯–¾õ°‰x—²Xt»Ëv?ͤw_ <±¦@…îQà6_cƒF.$Vìás]ªaX¬_QTO«(ƒ´ÇÎ@ö3jœà2g±º0ì½LƒÏávŒ³.*—Ðm±Ž{×Pc~KÄ0ÊZXÀln٪ΊG6ÂØw¥Ð½çòEã°a ëöpŽñ…,¿Æ;Õ¬ˆ;#¯UWiv|!Õ•/ß¼~î!¯ÿLgDHúPF& \¦4çÞA¥@Ý&*N˜ 4oXª‚äN¨½ü0å*nQ®¨ ;õ~-“BEIMJ-t~=HòŸ3”‚æ¾ÏfCê|‡é•áFñÙŽú†Ô3›ÃÕ„àR¤ÀÝ´\‘–1êO¨•b¸A€Rfâ”fOD²÷ÌC8Qè¯ ÁîǬ,‰½Þw˜¹²Ï'cªIg±V›ÔYES®ÈóX;Çq¶>n¹™¹×øÍ2ø‚sa"Æ\¶lä.á0c2Z‘î””põ©qG¡§g…©»ÓXœžŠ©AhŒ.;Ûè¼72Ï5#ò ]–Lç5–ä»é¤ ¹ÚDMÃaò!7¶Û»mkkk24Ë%­âYŠ1@ÄîL1:áœÈ3§/©Ù2N䤵Âçiúº‰+i3+ØeEI ÈH ÒLÚŸ¼­ªy©e€&P©òwbd}ðŽ'P}=|3N>6QTÑ;ˆ‹X!Cðï\íüG ´A,´fjë+`ùZkýVÀÔ6†þAĵ#„=§`©Ö¹ºñÄH1’Œ´O"6ñGôÁ î3ŸfI¿9ïÖ´Æhñ)‚yR@.w"JÕdË4ùÀn-à¾Ñsá)‘ÀáàW%s`®pÏ÷ìŠÍe{ÔÞD=%: ]éº,)‹·+2… gú¾í6ÆÚ-nî%_N-ºyó >»m¦zDQN)ŒÆçCIo޶j@IÓãÞ²ïɪš¨ÏÛåÉâ~‹B£·Óœ$<Å>#T'¹{Î’DeÆ 
&´´T£ê‰H†x×ã×êäÑì¾9*LEaæØÍ3g"'ÝÓ¥X¼)qÐxÒÇ,M¬×Ì'pöÏ’˜ò€a‡YÝ~ãö?+¹üMÛXlÿÃß¹ö¿­G;[›wö¿Ïñ£ÓiÀzÇÐíÖkÈl­ÑÄ} ÀqWzê]ú]Ûìd_g¼ œw<ßyü‚ο Š¼>¾Éó¿µµóÎÿîÃ݇;=|çÿáööÎÝùÿ?O™{&¯\­¯Â@Ÿ³àëÊŸ ø.à¶jY¬pOMbÈ0ô¯ùˆÛLÆ `óN´Â79*¶úÙ6¾·¿æk'™™’ŸA«ÌU_ŽÐBŒY Ö!ÒD̈T¤¢–‘$Lu̓Ll±±7RUJ•ıZâoŒÆÈÿÉ0é'Óá\d¹6äiŠfQMþ˜¡zÿ^ƤŸ?°Ýxß¿'áÇ)`¸Ö%½„zïo±É,ͪMEð£Paaújú”PšYýNË&¬IgÊÇöG¯ú¸\ÌAª¦ç"¥T`DZ›YAqÕ“y“nûTUwäõHÿ§™îƒ[¢ÿðð£èÿãÝÇDÿ>úÿðŽþÿüëoC2þwÛâÿ=†õ¼ýèîþÿ,?_/ù  ~xp~Þ.«aYo,­!h·µ-ÝD“©nf®L<å™Ä=‹.Tß9”-Î1¿ˆ“šÒµA›ë{ɸèo‚9ÎÄ £} e «"FpïÞÑ}þ•¹êÓñÿwmYüÿ.žÿ­»óÿY~¾ºÕŸþ>I'ó,9¿˜†õ~#܆»?üW gûIš¡¾|ª”rO€&¹$Íò6|·üûkü.ÒB–q`­±BÒ3æéÙôŠÔ–9*›É`$ºCÒ&3Ó+vjïÕ¬Œzø<éÇãäf ®ýÐçœ8A5N)XÓY~WÍâš4hJK¡¦Ü¼Î>ÜZéè6jŠ&ÉòU¨Rÿ¼ÛO%LìC<¿J³â%mˆvˆ4 ÄiÈŽê£fÅŸ?¡ß׬@9vP r(Ö«âÔ[ê'µË(-{WïÜhA['ÙLjWéöžŒ«ôd¿5ùϺª?‘ü÷xwg³(ÿíÞÙ?Ïøç¼†ÙU7Än‡õÙX’4ÅQ ­¥oÄzOÛ þr;¬oonm¶6ÿÐÚÞñÙ‚k~´»WƒÚºîÙË+Ûjs¥[«T*ᬠŒ½ƒðråRWÖèÍÉJMæœýÛ@R½ˆÑZßâSÛIÿ"ø'¯"2Á_ñà6aÇ7Wêø½ Ï8ôã "r/Bç™ Fšlµ·Û;½- ~ ušô”»³ÕGè8ÝÔ8#Íï¶¶¿ñ5Ïx-M²ÍNGñRí;zÚ´Èñö…ÌM¢7N@¹cñ묅Ò9¹&DŽҖKŸh{l~ÓÚÜimý¡jBž%ÑÕ¸ÃÍ)Uz6Q‡ÀZ79Ö€nû´´µJKÖÈíHC”QIyIZKQNÎQ\»2XŒA,ímZí=®jo€ )îù÷Ú¯Pœ®ò÷‚@ Sù½#±Í÷S¸Þ7.´ydfùM‘g-ì!5J•*¥6¡ç„1U(S!†Ô)×®dGaåU ºJ±ò¢e¡YÆÍ€7ãû,ž¤?ÄÖ©zO{ãýd¢c÷èxÐK?¾gwOÊ/Ér$‰Ãá%ùU(ð Ê/ˆ!œÁüdyU)2iª 1i1ÞŸáý¾mæ5ÒËÖâ%SJó‰óØ´ÖS™²kœžÍ 4«¯"¥™3/1Xî-Yî?´6·¶¾Ya{©W`Ê…êáQÑ÷ðyU½¤!>ÅØÔ£ÖöãêSÌsðÞ2徭 kFê¤{ ¦ÉW˜ékSƃڂ\5.$ý°ó5ý§½BQq=Þ›ìEŸ§°Ì9Ñ6¥nÁÛ2]0Çp‚PÊI¸µÝ{(­onɬê3&)g–%ï^5¦¦ïM4`êÉ1VM47¾«ߪ8Ák5Ž®t‰ö’dWJ¾¥°fý¥—½£Ç|dˆ^i_ãì~­Ý#Xg#ß &G–j¥Y0(‘üN}‡Æþ¨µ¿{ã±m_Û–÷6Gv¨Ø~OEa#p1š/ß»M·|¤l@D^é`¨ž!àw.ÿ„ÂÐìî¨;VÜz£3 ÚKé^žDÙ”§™â å#¬Ž³Õ+¶=^D±¨° ´Ÿ0PñT…F©6¨Y˜$L”†‡¨‘޳¥Øajœï8~\'Ÿ>~ƒ“ \ÆmµŒ›­­GUËø†2­’DÔ·ŽžõçÜe ƒ§ºQ¸ê)Mõ¦Þ6[ÁâZËï¨o£À¢(§Yı 1{(«µ™xBs㩽žß8ã77æeñM‡º×€P.Ôk"âHûôèk}2×ÕUŸa¨LÔ¿ (\ôLn-l/;9Y ÕuEêÖÀõnòBf³q`üµ59híœûp%;VÇ^ÆN[ª0pÔÚ9·ŠØRqsÆzbsY“(“)Rá;eç N‡ôBá/'ºM‚ð}£Pê`[¹1©õÊå¨*Ï*V°°’†‰1$éÇŒ|’¨j­Qâ˜òÇU °¡È)»JCœ´7º@mò¶¸j-ظ16¬ÇUï 5.Ùå €ÅyÃ^_µ2,…œ‡Ê6 Mé…5‘N¶¤‚鈟ɪgXOk2è[Ï’#š=‹¥ô/¡9i+YM¬¢mCÔµ!µn MêÕ8pX¢“'dØM“aPá.tÈìUMn,RuÈWs8a½®\¿€¥úŸûG‡ÏŽOÚÉø“ùÿ“ÿkÿß¹Óÿ|–9žáרà `ág°ÿAfRÏó¬/ïâüÄÚ&<±v¶{?dÉ9H4ñ¿ÿ¯Èÿ'ëßÔt}ÿÏG;¶ïü?¿˜õÿáf[àëÿøñ£»õÿ’ÖŸ=ø<ëøŸïÖÿ \xôà3¬ÿ|p·þ_èú¯°˜ÿߦ×åÿ··noßñÿŸãÇñèOÏüN€^Ý¡G™(¨fûtl…µ Sk!§`ØY:ËÃé5¦ÉY€] ÞßvûgXŠ(4¸´*oAP—³ ‘ð¥ÒçªFYQUÄþÓ®CâJ4òÃÐ9hûu=‚š4š‘•Cj#[O"TqˆOÕøè5E™Ÿ¨^Ó©7¯ŸçM¸°Á$G,Ê ­3zcMÔHY/·Cx5ÔÒ¡  ¨Õ G)Ž碸¾Œ†É¿h‡1;d ÁR;¾[Í!VL0KËšÀÊ@¼)íéÅ SÓ8£VŒûk /µÍ:…‘£ ŽUá¢WD3„áW¤•RNuçΙÄ²ŽŽÐDÇ‚¨Äh‹Œù3Œ²ósÏ<‹c9µ›ÀQ7CÔÓ”\Å5øˆÁ¢T?P7šLm”S<_³Œ:¬pßXµ(hÓìi*£1¯QÁ'Ó)b%Óð,B´&Ø:Z”¶€ÑÔJªD ¯Ê–¥£våâ]û˜Ä•Êì›Å=a(¬ríˆ dCr[¯¤q¶W¶gÕi–IÐeÍk%/"÷¼;ë+Øbá7Ép È«ä>ñRüÓÅt4üóŸzé`þg~…Y|¶·ÑKÎ1Mbk³½ÕšÌ·Û;ä¸ýgïã?=ˆ Ž¬Pƒïó5¿Ýö|»½â·;žowVüv·ïëµó´ú{>n-T¬þLøk÷Ù ßny¾ÝZñÛí¾ïkyêÿ>þ8¥AnµwùÓÂÿWD9lü™ÿõ—!2‹·'ÔóÐýúíÈ?= íiŸ‚ XøæØ$ð(g¹Å‹àŠ0*x\Ã9[»³ÏÅiŒÐC'7…FMð–H‚ž¤ÇEôÿ "ÓÍ Ö Ì¢ÊêÊ^gWâ4@ó0¶‰Œ¿ÆY¶±zÉØì+íaÒÆ½ŠÆSm£ÕÈÕ½¹íD¢["j"†ÂÞ®oNç ¦ÐÒÓ/€H¯BîÞŒÁ« ‚®i:¬•Ò¤c*.ü Æt RŸ8Ë1°%J›’_]o—Y4‹·yÁ•‹ÛËÙÅNùxFC[FË}‹-÷®j½AoÏñT²,^î4²ê{ ÞØ‰¦Ó,§§|±‰ §ñq  õÈXÈé§–J"°ØðRÐe„XJògä\‚-,û’0K Óp²ÿ5Š’äÂÍU2³sbDKË‹„|pT:AÝ‹Ùñ„Ю¶D3Ìg@‰£¢ž%*<¹ˆÖ)P ™ÑºW¸oJÒå쥱N[#¢Ü†¸²aGíö7$QRXÇ’‚ªÓ¡Ü^ŸVg¿Œ†3MŠz,rÑ®Àj~ˆ³TëE¦˜$Në>è/BJH¥ÉÂ¥êWB‘$œh>0²Ú^Mð#ÑÑU2M¼Ää®Z´ûêA¶EL•nðXPá"Âj99 ‹ÔÝ>0(ÝzÐÀ ª´pYSY8ÈÜK¢xJK`úˆaÅn‘¯Ås›”µFè|¼á:q{]¸£„ZâÅm{osGII¤}Š]46¨™7¼>&¥‚g¡ÊAîí…µnw%ãn·ÖјoN%m|­¯K`>AAaYÖƒ!CúzãÝÃx5¨.g%+̓æZ¥SУ‘×:Á’{+’””DmK—nœL£Œ*Sf»¢ÉŸ›(4Ô¼›ÏЧ0ú¡–œp80Åãe2˜EÂI°|Nê·˜Öܱ|_Š´rGÏîèÙ­Ó3K–U¾#®ûˆ‹¡´fFx`OÓQá­š°• ám:RÉè—ù²H1ËEž„›íëù³¼´üð¯™Â8øHQËPÁÞü*è„üùÇ)†XÅì<èˆ È´€Fø¨?-¢‰|Ök^âVtfj“Åñ‡ßý ›€|Ú£Á"¤›PPúÙéQ¡Õ÷…(?é€!¨“EÈ4·;³ë‡Ù]vf×;³»"ìÌ®vfwEØ™]ìÌîʰ3»^Ø™Ý;Ø™ëÂÎìú`gvNØQ"BðY=Èɼe¼$öBjFq‚mvuuUʘYæA´ wj© 4ÝAžVJì†÷æq~áZ~BÌöBWà¼aˆ_SU·»G;¬kÆÍÑÒÅ\S­ÁiÞî_À ÷ACÐÙ5‹U‹è:#gc…è>ßjÀ8%\œÕaqü¨8Ÿ1†ÐxÉ[÷'ÞӴ眠@ÓϸÐï.u“·Bª¼nò—¹És`_âRÊib’ñeúÁlOIé^èÑ«`†Ññ0H'¼ÎÎv͉UãÚ»’Þ›VZ„V &5s–‡y¼ì»šnû w{)#yÕ–·ª²Dßü"îE°÷Â3JSŽ’U½Ñ&-v½á 
“K¾íìœÒ(¿úÝÃ6¨­-~±áÝj¾©ÝWÅw:ðA¹\*¼©=QŸ9±g’^½¹šºí“èÓ¶oU›ösÙ¥×ÜÓkî?jíûF£zç_³öš-ƒßmnws_¹›½k§´Ú33K5„-²óËFøçpËlë®Jòvá-²=Ý.OA·[߀{ Þo4Úü‹®äíVç6PÒïŒ:V¦ûB(ͳ¥cÎÂ:6M]Fêb»SÉ6©ãiL1W7¬}+©uJI§«†ªÏêЩ¢ï©÷õcч#χöàlᬕ;(sù¶œ0Ûž^`ç&s ±ž»¸^÷§áÆŽ6Ãj†Š4m4÷· ÚÃð[•Õ|b‡ÜÁèTŒC¹8©—o7ÝÁ âaq÷ÅÆŒ¯T%éµ–Óô‘ê·Þ¾D»0L *VšÜÉÞÌY‡ö·i¶aݦqgíþ0ͬ_þ;"fLæ·–ÿiëñ£í"þûÖÎîþïsü|u«?ü/|’NæYr~1 ëýFˆ)(ÃÁ5Ï38Æ¿ŒT(¥YÞ†ïöAyßåáë˜DÍA›*¤Èˆ<=›ˆþ>c¸7‰  ÌEbà:jïgt{žôãq7¡¢¿K&PJý¯WÏmÃëCõ‡¯ž+H´¨OÙµÇsF;°…9ìíÉw‡ÇáñËg'ßï¿>á÷W¯_þýðéÁÓpcÿþÞ÷žÂÿÿ“ÿ}þ<<øÇ«×ÇÇáË×áá‹WÏžB5ðõëý£“Ããëyzxüäùþዃ§ÍððèÉó7OþÚ ¿}s½< Ÿ¾nàixò² 8ð×óòYxrxòü ¾8xýä;xºÿíáóÓ6Ãý¿îŸ@ÕÏ^CÅ/ŽNšÔÁg‡'GÐ9¨éôo?|µÿúäðÉ›çû¯ÃWo^¿zy|@kq»»eccCo“P¥~8è„Lk­­‡ßln†&}:üÚÙ~ÔÙ|ü¯ð<Êæáï±® nè ìž§”Ä}˜M“@I²¸þL¼«’vÂôò£våó®è´‚ è£< {ulˆè(ÎÝ<™Æ]=”=†½Š]Ñà«\²ðèÈ×BÏš¡axO惘¹b°\ÛÒƒ8Î º€H’ô»è¼Uû O]³À&!Ôb]_‹µÿÌâlÞíÄ 7v·ç qoã¯Bô@% U´S‹*y’„|@ÑĦ´,Nr#ÿÀí*·W³×FãöÃúËŒLÄÕlÓüXã,¾-ßé›êú¾ {r|Bº§ír%6štugªJy:5p;UY¿§sº6­üV[0§Bë5~éžDyf­†™-Æó ÔéçON݆ ¼lZ"Ož„{ÀÞ›]×±$ALð‹”\ùÉ{_éeL-ÔcƧâEQ˜ÄÕ3‹•Ý”=SU&ò¶Ý\P¦‘6éó<çîq¨ž‘€\£kàŸÌQÊØR6cMÙt‚ÈÍòÖ¨G_èγnä½â…].ýõ×®,’ÅÓY6ö¦&ìsu¨]‚õ[—Eÿ‡ *ŠlSF± ¥ñ«ý^X”d8jàÛ(ë(Ô(iæzÂÉlguS#“ïF[WTUE°:ƒ]¾˜{ɸšá(¼ôð=—Ï(Ö¶ˆ½ é§Û¡«|Ý Ï1jØlu’ɺ´Ê×â¤|-š0¶\pN¼÷í©}‘‡PÁyÛͺ²D±…)Å{ßõìØCqª¹‘Òe±¡ô¾v†ªú‚ QlЛµVZÏj~Õ³uôÊw«Æç¯yÁ]UQ]­æ”BHïlê$ÏO<°‘yÜ¥ð’.‡íÁ'mܼ‰­­¯ÕßþÏÞéýÆ^ýÝÕýú_:oÛ§ðKãëF§øà÷¦ö(šö/n‰FUðžæ|i]g/Èãš³ô.3F~ÖZࣂª½#j’ÇúÚ/Žº=S¨*>IÜ-I®4o½²a“# :nƒÍ‘êPïÁFßjàê¨ë, ¾¶2³N-ë|äR+(É!/ŽçêÏ›MÖ®´ëêžÞ&g¢'Â/;Þ~в»ó~…ëD+´í8ËÒ¬^)]o<‰ÆºþR€Ý½¼½Q9‡Y”ä±Ã¯¡þð€ÚÛ8sÊL«¶r5¸åÛ„ù<¨óÛçY:›CŽ5 >f²è`Q¹òd}¥Ý}ô“”F*a†–èØôÔìÇ“©“–Ä76RûäÓ:þ^½ÊPŠm^åþµ­$kO²•~]GYÜ+Xþw@ÞLר\O/p¥94¦¢¹â¢ÐråQ摘ú*J¦H—NαO¥9'›,6Ãr•¦Ç_…dz±b¸`h]§èQ´7˜uàíy”NUxÀ›³Q`är‹ýrx¹Õ{´D# “P)ÛH…õ ¸ÜÞ ‚E/æ–’vöT„VÁ’«’½"‡â–´.ƒ½ê[¢ø|\üÌ~gëk\Ü»êǾoÜ;ÏÇ/®`Lˆã{jWè]R¥Wæ’¶úe €èùE“>óÿ|EI²,G¬ð,´†@’4B— çì!ÙW<5¶eQDn%Q푤í(ÎþÊI|¤œìÎ(Äâh,Ðüœ÷!jCŽß¿—Šß¿'‡UI*d2"‰ª1¥8Â>ø•iPƒbàg¸Cr¸,}¤3ÎEŠ §Ü:eÂý”y#óD¥ŠG»ÛOÇ—Àiò*p¿¨`Ã,´[\¤›/‹oJô ›~‹¿"sÐF­ìy'pæE.zaymvGàŠ.Š=0Cº§#ãá×Zx/tºÂK宫½Fl >ýÿþýY>MGk¸€,ñÿØ~ôðñmmmo=ÚÝz¸ýè1ú׆ïFN»sSs¡YÎÎA¥‰Ô§&]`i­6Žj¬¢Þjo×@êz´tIWâï–ºØUã<³ðUÖ;‰NI?ž—ê’ݪ,+¨]™ÈlÅÚÉ4åÖâ¿iç@>ë Ÿ#‰)ÙÂ’4ÅY<ÉêÅ:A¥ŠŸðTÓ1Q‹;+Á†Ù†ÇWÖZÇœ_¹Ë–ZömRƒ{;õQGëˆ …çä:{ Ú*?—êÙ—èjœèý’ƒ…»IÂ^ÁqIuyÙµ¼NO½úF,:äžOùªn£[`}ãÄÍ%… *ßA„jN'—£Á†mªZ6SÍjø]ñÓ(™ãX“ƒ,mëý4›w|ËJå=]ìVÐtˆfá.ßB–OE «4y–ç žçjË¿u*9vÀ5„ñüö2¸Î³¸0wÖ½^œØÂá‘Ù.ÑÊ_E0QžÿÏ›J¡2®¤—%K|F–\•îøç¶Ü…·ðé)G9ËSzRpz=$ e:D%4÷Žò\FY‚‹‘sjU»§âa‹aiq¡2; 8ÇË누¢AÊÐähf‚°9™9‚Ð^H¾^«ºaò!ÿ£øYšþÔ)šÆí¡Á`y÷¬7ÖRúÖº°œ²R¾%ªZšŠé/¯S¹·‹VnºU:Ùv Ñ¢egí6YlþÏI寽±5çû”¥ÏÌú–nx2äÔN‹ñûÌB_µŠŸbS›^¡Ë(®ÈYõ`Q/Ö¢¹+XH”¾s‘¨Ä#„sƒÚhMÒðô⮺ ‘=§)d®Èö0LzY”Íõß.”­U hûFÔu:,Å%4‚Œ%cKù¬j]àA¬"-Ù8—^Ÿ÷ÎQu‰DýR-ò¶)×|jí"\†·z–¬#Ǩ؋Û`•o8Î÷*9/¹JÀ3é)º6À_Ï©ÄBÌ©S/̓êÙ_sæ±ÆÒ®ØŒÆ„;­DgfK&±ìÏñ‡ÇŒT Í5Ê?ÍÐYŸj˜ Çþ´ú“‰þ=,¨ì“ï}ßö`ÓL`Ñ .| úÿh’,…|[Gÿ¿µµ³SŒÿÜÙÞ¼ÓÿŽ´¿fø¡ýW‡t$ ¹@æÁÞ’Ÿ 8$ƒ'œ°‘ÀÜH̪ç£>q+˜)`8Å p•(‰9«sJŽ|^ôjÁ¤•µ–Õ§–\ÐÝóE`”È0@Uò˜ÆDFF”ê™$ÿkÁ¾_ÂG=!Y‰åE“’¦™'Ÿ` z©ˆLdùIrZdQ²*raB6è™Ö›‡ý,ÐÚq`ZdfeÜ9g¡ô¹–s‚ÌÒXLð­&ßÖr¼¥RJW[…:k¹•¢N8íØ Ð¡NгÀ{#X8JÖ8&kê1Ón&Ã!!\5N«ˆC² ĤyZƒc/ãŒ,*ãpÍã,Œeî¤[ˆ<£T(¤Üô¤ÖX #&õoKâG+ÛН¼ø\ºøâP+ÒÛk±Ýv"lȳÃéG[yÿ,äÅK5‰G–Öµ” ø#!}Còp×vmt0ŠL¶øçµSGm'ÈÇäc;”â˜m§R®¨ü=¬Ô^EÙ´c+lª Ðþ3mvj¥’:^É)ç͹Á•fU}oRE;ÛŸÎ_û ªv+ó©:N›å å™nåáæ[ç’Ûu‚¬D Lg¦ '¥vµrI2– ƒžE—”%]2ÌŽàd)©æQôý‹Ë¿oüßoÿýŸû½×˜ïgÿooÏ‘èªÕêïçãÙàüÃ`¶=x6¸ÿýÿ=|ñ¨ø‰×Òà^ZõjÑü~öÏøIôlötëo¢o?þÇÉ¿îK­ÝžoN{’3°;X6ã\,öÓ¯wìEÍù}OVꂦ-¯Æ|nÃïVd\Ù †Ô9‡n 0}ŽQíÇ›·¶ÇBECMµ‹ÓR¤ç’¹^ËŸ/ý¯÷²rÒ5ys4Y?”µæƒ½ô²;Á$ «)xŸ¡Ül%ø²bC5W«_u…â;;Á/ò2³!F¤šÌÚið_w?_þÏõý¿WW/Öÿîawj<ûø’—œo²P²).W /Ñ1¹À8îÇyŽhn .‹W癥,%a 8Ä "àÉ›'Xá8°´«¬‰Î)y'@,-dSÎT¨º¨®ÎóP›œ0âG¤vJq·9á‰sF:2;–"¹G7:çä%‹–ê0v"pf‘Fh¦4ÙýY–é†þ«Ô$¦HexOØV[$ÉûèWÓ¢”I¹@ (`“<8„yJX#ŸÇQÖ¿ i‘jbxp¶ý÷5ë Ä:à¥LÁ±CdR¾A݃ù8%}· ¼€1Ù@B^Äáñ÷‡Uæ•'÷ïó‘ ëVÌᓆ|– ÀsÙ–ü)mZÜ«:?\NJSµËØfdr9)}D Ùéšú\÷Ç®QOBüX;Á*P¡ÖMüS)jÏ9ZÖ?pec›“¥¬»ãsEæeÔ:¦`ßi¤ >­êp¾yýœ wC¼q(d½’Ä8H`A ¥\5¨F6¼ jŒ†#Ę!æ£98¾h‡V’2 
‚Þ¨M;ñÆ+I–s¨!šDÊì_Äqç餘™¦©lw03ÿLgNB­ˆ$r‚ÝSÚê/rè“ÜVÔ)ÌÕ¥„P–ÈäðÁ– Ø©Á¼bP™z‘ XF”g“5c£}îKkòQ"kbë‡QvÃyãÓ50æÍø# 8P!º-Ò”°ÜEøxºx»Ž&Óyh4)[¬d]²w¨lw„§ ˨Fè Áü^Ó)&?J¦áY"X;€g½DôT1ìC!‰J1 Yåâ»)r\£±µ¸Ð.VQ®=Að4¡j¹S¿’J$¶ {Mk)†aY΋Õc+* mwY}±µU½o‘o¡L”½ø ɧ" ÐwxÉ”Lû³)K­8(3G$pÓ_¹b¬ÿBecå˜°ï ²C¿‰Û_)æîO<¯í‹?;O%?>†ç9^\}X‚dzŸ‰A({J^vÄRðúåÓãÓî³7GOF Šê—i2@¥ê¢é{5Éæ×£?Ê£TõjÞ=„¯^PWwê*qØFS5Ø 76¤¾ž¡öäû—ºFþª»?pÝõ”J_¿šާÝg°Ÿ§ãóúvƒëˆMâƒu>?øÇÉÓƒ/U%ãA‚üÑOÌ"› gl“Od$J–Æ;qØkò¡à o´†¼ö99°e±J³MíTœ¤¹¥ W-·‹=á¥'?„Ë(ÒáMd »®òœ dÄöùMƒ/ ´eAö4î'ïe8F|sð  ¹B²ST%Ù“Sò‰PΛ%ý¢I†`žÐ ÅàWrºdRÃÝí‚í{î7ZaÅì+o8sdé^†³náÓ¦}“ñ––òbYÒ5æn/…±-«‘õ\Ë\Nÿn%묕ÅL­;=ÿ!™tóè öÑ0:ç+(žþ6b4œÿ@w1p>˜œålTbÏpCŸ§|§é ‚èx?Rì‚çðˆJ{\E”RÛâ騥+"—ŸZðRÀâøÕÁåBxFu¿Ùfë} ÃúZ;hæ…êj2µ)‘Vz)ŽÕ‘²Ù!)(4¡„Œ‹ÓqGý,Íóp„>iöqϗ妷 gw†µa{Pk6N>¶’Gxäd<ÛÑ»U¡9†²VÁeÞGéTnÆqjC¦)ZÜOä¢66@` 4Ÿ8å,¨5ºz•ŠšksÁDpN1¡ì´¨>Ñ~vȧ+';Shr;Ó‹‹Bt¯ÐÍŠZ™ÓS;UAy‚€“Zjb%©öhŠfÉ„ã’qaÊä—“Qi/=mr"·¥®fjEÎTœ«ËõúáC¯Î´JdˆtEnÁQDD+ܳG\éˆqck”3!²þ"iûµ³}MVÅíbCö©Tæ´WqÝîÕ"ADO0“²ð 2¥Ûç&ÏUžÊïÙâ¦3Š*/Wæªv·¹ïNjJg4äO¢ÜG$‹ ž@õ;pÓZ»z]¢$cÔ AR‰¼ÿHß8¾ò]93°£Xn6Ör8ú¥ÌP÷Êl2É€ñ‹’-¯ ÍhÓqK∑”Jl2²´(|…ßK"Oq Ž¬n¥<¤²<Öd1k×UbJCÙ¥‹\ñ¶z‚ÞâvG–Ù0ï×ÂÖQíæ[J?åGËM?A£t–"ó‚’?:”Ó^ãö—T¨îEþYX‚oqE+zjeM‡5ò> ^^q<‚6ïeÔ‰b}$0(•ú µ”2Ž7 ¤“n¸é°’Psp) =‰ºªOõíѨkqÈžMq Òöév ¼¶‹Sîau¹›m«'l®*nN-)‰­?Æ1N©05Å´*±"1ªœd~A\{ô«ª÷ö¶¬,Ý¿T¥0ŒÏÁ§ •º•'>ÎÄy\ÅÈÜñ¸ Îè³’›‘‡.xõÄÁýe?¤(FmÁ2…4VÞ[ïÞ•êJÔ1KŠÓ»Hú¼ËDôk¼¿gãS@&%ƒB>ý6v’>L¼Úð &ûÕg˜¨]©Ç„6¨ŽDd“h/†RAbËáTIÔ ò°Ö ˆj:óüP6àÅíG@yâ–Ö HZ4Ž&áº×Y°æò1ÃølJ)é³ÂŰøeÍEi4ªBqÞžVް Vš(ƒæìklÏh"-®i"Ó‡—§ì8Ùy¥X“’Êâ Ÿx4þ©*?ƒŸ­Œ³’­ ™ÆRÉZ´à¥©ØÝ{ ÙýbT#¡ ¹c ¯<5+ž!È„vd̺Jõ ê¤Ü.7ñ‘ìùqÖ¹£—ù6¸5·üBýÕ + çHgõˆ¢,ë‰&´ ^lSª}{HïÞcÜ1 «îëËÐ2žÅúæÄ›;‹©Â4LH{œ#F”.Ô“6Èßr³6”Ú-Äž \ìÃü¥Ø¥½~} ]ÂMÒ\Tþ ‚Pd ªÞ@(…ìS¸c]¨owG.Öb¬½ö”·~•ÑPååI1žÚ§gÀNždR ×ÊÓ |9>Š)ÀöOPõ7ÅwQ¢5Éú0 /UG¥\S^íŸ|GŽ Y¬òcV“-EK­Ì°,_…{/ü*®ˆÎ½{uçq#¿»5¾Œ[ã¶®‡ŽŠv¹¹Vë³ UºVúBómH˜HÀ>máÕ>ôª“Ú®Èn¥ÃX\J?ÓYþÕ¹(|²sánÂϤŸÑÃÉFËDnW±aé\”=½‹W‰ˆ?L•>EÅ@8ñÊ+—­îj·‘Ù}ÎþŸÒ¦«—²uØJ.{ ™¯»/ å oû(GÌî)”å>;§i‹ò ‰ÐI.ù˶u`†ºúÛw w w w ·Áð+uÍ®ö‡Fòùɯ¸¿{!^í¬¥£Q6L`5½m;¯´àÅþÑ᳃ã“âßm1ØÀ#†UQ©·¯(õÀišãË y"5ð|×%XUV¯²=íšýezI{j¶Ÿ ÚñÜ©áȶEœ8m´÷Þ [b~$ŒQy»ÎÄ|Í>Ù-#Ý×.£a­ÉߨëáøѸÀÙnߦ;ÀÚÜämí^Î2×uœ^©¤CˆÚûÝuàÀ®lÛ³ÎfÉPw}[^¤0ûäÜáX¡FÆéþ6ÉÌ@°gý4gà/ÆbÁ<-S§oÅÿÄb¥sBøÂÝÑá5æÀct|²K5…ˆó'«îx›¢–U|}×´íà‹ŒÿW滕3À-ŽÿÇß· ùßv¶6ÞÅÿŽâJ‡\i¯P¬xÉ&þ•ž”žœ¿áDKME~ï`@~mø¨–Ë×Hÿ¸ ÿùÑÃÍÇEüç‡;»wçÿ.ÿã]þÇ_uþÇ@I9Ic~j”&rF¾DDx*_Ä„/>>}@E1ôòWïfÀÆà³¦Â6”Ë 6*«.µ,fÏÿäª Í Ø«›Ç ´þ ’³Ðy‰ÉóÞIÚ0ûÅ×{ÀÞQQü¯šá¯Ì½-“°˜`%›¥4‡;;€ªCõD×ÙJ[[ n Çõ5P˜ç{?ç à:Êàh°Õ@éìlëFV‘U¿ÇcJ3£Òx /ê:á„_?öìë_¤MÑ}1ìM„~x³qüqBjèÆÄ¡-RL £„Q u!ÙÃ#Ǽʎx蛎†]ŒôSIÈéudá;¨(·ŒƒÔébL%î5§dëcl¡UCüMS\“<à$ÓØ Rå¼|óâ)SÐà˳höa4»ûG;]cC~@¦­ö“û¡F»4_{—¨;GcÆ„ˆºÈuÝò¥«•žj¦$¶n™ˆ¼y[s@&Ù+âõ¯Ð‚ÿe°ìƒ^zUóW…³áô³†¯aRŠÓfƒÈvüÝ1S§«p׺šÒf©bímX‚®mÖž.Ú&î ¹æ7»1†3·æg@¹nc@À&óå_åÖ7½h|¾ÊÉ îj.;î*Ó¡ghõŒH#Z¥ê¶[p½õîø~ã/oÛ§ÐÑ:݈¿¨˜×!Û9»˜ uI+o[ƒSxd¼ÿ×tµÑzû?­Óû-KÖó?ZËÚÉ(»è;øaM zé#&¶üµäÛ·Që‡ýÖ¿N;XÁYšâ?èaƒÕh˜VO-_…Oõ`BJŸËIØQE®òöâéyײÁb©ÁAáI©‚S7‹šUb…ó-I¨î÷MO%zmaÞEI¯€î]~IY!}¥OªNÚªíi¥}ðõâ¼çv6|÷³.™j üí»ãðô>߫®¡Ü>ôöÄùŽ¿XÚÖb<ówƒûüÔjñwØüÃæƒ¥mWB¤c³’ºÝ×våwÔlå‡7¢#ÌøŽ”\ƒ”T|¬<îºë½xÃlÔᮨ£m°ñ‚à;£ñ—ÿ%ü˜ÆÆÂûÑz7€ÿÝ 5™7äW‡Øðn¶wï¶þÞþûJw¿63¾·ï¿{§Œ'tèÃÚª5@&s¾ErÙÇæcèÀ’¹0eÓÞ¿ÃŽÆkz~ô·Ý­ow+ dÈšJW3^/͈<ƒþ(úŸ³$˧.vÅÒž Û’–V®_´U)™Æ£ ôñ äýh4x´ÛÚ†¿Þî>|ôø_¿±#ÊcüÕ *aä ÖýÞ¨.Z:ï¾°·ˆŸØ4W 6Fhù$4ÅjTÅEc†×yÞ– ù.¿ßv¶OÃßí…õíf¸k¥1ý*|‰Žhèm¨å;åÛDg+CýÕv{·‰ÞjýØú¢ µ_YT¾&éZ|˜˜|ë8m $85î¬,§ÒÂGK_["Œ–iva޵ˆ[.uƒÉ¿>Q¿.a¿q¿¾ xã«¡Dÿ [ª‚­ \תŽ#}h…™þ½ª¤ÓZýí [ã/ 8IØIÕ­ZrÛßtÅÈy¤>›­‡­oNáW"öOÿ°½R Ö•o°êN® Æ.j‚«'~ïÆm\Kˆ¼AÒG—=l}ÃI¨L)@{M·‹¶dí÷ëGO¹nWL6Z E¨l2 $a{5£»ª5‚ߊýmߟUü6oü¶wvwîìÿŸã”¬ Ýn½6ùpÞÍb Ëkö î#z)&`õãº:*;¯žß˜ÿϵþµÏÿÖæã;ÿŸ»ó÷ó3œÿWûkëðèÙËÛkcÑùßÚÞÜEg?ñÿ{¼ûøáÁK wçÿsü¼ˆ§f”m‰o\‡rÿÁ)ï8POÖkØ$Áñl4вy'<&'vrˆEãèÜŠ€¥ètŠ|—Ž0Qá9T, ^Ì_RÌgòÀntï²Nø“Qø ït,Z1ðãÃNøïdôßä®òC Ž~râÛnoOcn©ß_Ë ¾µýnð§TBÿ‚ÐVÜé”_cÜÙ·:H,KI?%Á»¹öUÂ)ê+X4™ 
E“f‰Î1©Ø_V‚Éìr쑤ëea˜Â€ó<õ0"E×dÇ`Z³GSMÕ"¸¢’õ*É1BxßiRW7ŠæaÔŸÎú–ãͬ³ô<‹FP©Ä!úªêœä¥¡¡üqmÿL^¥Ù ÜP{`k"&;Å ÌŠª ? ä(0Û®R.£®$Èãš. Òñ¤b2œ3X_¥Á:S0¿ì*~wgèÖ–rþ$ÎA‚jOÙ!´/Å=%ôû,ÍÌ2³wOS W±‡fc8 ÀáÆ³sPDâ*מ¥¦_f¦D-×O‡C‰}„©q4ÐøMýýÞ3gä1‡Áá¹1E°£¬ÝI'µa;GO\>ë_,«Ëšû(7"ÿP}[‡Ã<£ð,އt,ÐsL5A±¯9EÈÈ5ƒt4mÂâMLåªk#ì@}¦U‚ ÷]ÃF1¨ڨ‡åYk®«§ªÃ÷OÔ&¸gž¦ýGqUø lö8÷£ë€ ÷þŽŸÍ†’ˆ —“ÚØU6Ct‡žV æ*îåècó'¡ÂWWWF¿døÏÞ½¶À,,bY.x °{” ©„µyèæ{ׯôOK®SºhkvNÜèêpJÞ€Œgˆ¶$ùUœdvvf!\GÎe)ùL:Œ[æEQ–¡DYRÚ[µlrb‘úKƯãIÆ©s5áAÇ"CWq[+ŒÂ ‚;ٚݡqa)ºo8åQnÑ$hW¸¥^µ}S_ ý[2ÿ¥òî"೚쎫²Òs¶&\Ê0͆v«ÄML3jUÇ™½T M]qByâl÷] •Må~²k[>GM¡N¹ÎQ$õ™ønù±‡@îÁöH[ì,&'Ê)ÊFN\ä²9ãÛ ê³Ùx Í­xBÌžƒbU‡g†©6Žšü¦åìsÜ‘|†ìZôqZÜoäËvÈP‚ÛtÅ®;yzïVêx~섺,V*y wÈ ×÷Ô:WUu^W*&gÓwU`q ;ašf¤¾b÷¬O<³+,wíåÓo1BQÄ Î‘3½…Aèä=’›Ë7ž–nÞ“þu5·þ]œ1S…¼À•Éî—²Ÿ²‚É;“)‡IÜ#itM˜Ž ¶.â\e c†‡÷²—¶ž`öÁÙT¸õãØOé3”gcáE#ñàuÉdÜ`‡ì N$?¹¦7ÿLg5DÊø+ M mF5 q±QlŒ9¥À¡vÀú'—˜Ÿz@fcF–PäY]EÑ¥€ïÁÔ*ŒaÇÉd64rÌ™^‚L¡4 £„¸™Ar6ÇjæˆÎúJƒ{Hoè¤ —CUP¬.‡í2Ý4B]Œ7£’, fÀ‚‹ÂsÆè$rÃèOuu‡Ã'$†EP u51~Åh>q‡j¨^Ú.ÀŒ(ßSžúC+e¨áÙà·J´Pè* }¿¸Sýtõõl\È+‘Xiµø>gT=íx=Ïzå9æ”(AKcM©­…ÛT¿T4›š¢ìœ8fÚùœ;eʉÞðÎ*Α³’A“b®ì–Z!7)n*õ«j¸ie²1³Žó€UÛC¸Sn'|‰RÝ,Ó{ÛΗãY3£¤5Ž,’¢hð¹J¶VZ°C³ÂZÝ€êÀ×…Öá"6CÜÀq¥yìÉÎ×… ÓÝ#ð$“1õôåû4ûÀ‚µ°jÕKSE¬¿’J4#ÈÒ-ìoRZ¡/HXÃ93|s!Jìf„c. ¼z<­v¨Of¹íq"DI«L?/ªšL‹÷N¸¡Å„é²¶ VºÝ­Qp“Ĺp¾>$”%ȧjª¨YÔÚ£ŽºÕŠ’=OÓAQƒÁ¤€ÁÝ7Ÿ'º·ùå˜"ê+¿{€Œç‡~“níCBsÖSù>”BMÅÜI;²”‚j§Ôæ¦ÈÕVϳ¾Cl(³S9k%|€ Y’/v n€‰ÚºNa¥¾ªðÅznXõ2ÕæYðíø"úŸ٣ȫ…Jbd+N»—e’ ˜ÇfiÂï£üS¼9„ž‰Š mX‰«Ê’Ž%yhDBIó¬°ºÔ[õ7ÃCsþ4*YdŸ7•»Hœ×ƾmâ!,¨M;”D^Œ4 {@í68‹ï^ú±‰3€7YšN•‹Ðcô½Ä<–奄ßÒe³rR¯‚iÞ"äššËz©CJ-…„;=Xš£˜¹ä¡J¶­´cÞ…yòj_ù(Ãô"c|¦|JÈk¸Eæ:ó d(¸:%+F*ù´á½LÒY®3ø@ ÖÌ4Íž5‚N«%…ßÒìÙ—)ðŸÕ0ü½rKt¾Ñ5‡[í­öŽ_ýÞ^9µ%ª(›Fx"òf‹V I CT8Âl’ÜL±ÌñÇ]ej%=¶øÔÚ¡˜Þ±úfÜg¤.{jß»‚Ëû®–rèNЉ@KÍÒY¡ ýÿ{à^éu¨ýÍ8³“*ž1^ÌK%“ÜbËõÔ åK2L‚Kš~ÈIÈñíUYï³nÀȦ±N²CØí:á;»:…ïæa¢±<ÿÐ 1š ¡ñi®Â¢j:?5Þ½¹EÝéhUR¨<‡,Fabi¶)5—ûήMN–ñþf¹Rº#0ÆM 7ª/Šò¼@;QK±d5j¢þÁ¦’ªm…¼h/žîÚt8Àt³Y:MééˆáC»‚sÓKZ9°WD!ìí_eÊ€I¶ŒX«Fnm»Ã¸®MZ2ì..ŒÕÛk·°|j¿¢êri&«,ñÜÅÛìÍ4&S#ý)E¹½|M™ÄVŸ‘°í!)BÕǤ ÿN{>Öãÿ!cIÆjìÓ·³óðuŒVsÛ¦ýuõgéH!½‚êþãT޼ ÉýLÊ Ïa`|ÿŠi˜‡»£üþ5Máò×…ZyrþßF«²š‘é¿7;Ï‘Ïâ.žG³qÿbñ¨O² \Ù/Ç`8U¬=ާ×qk÷7l2JéVûá*s[5Ãx¯@‚Æë°txOÀ‰›õcC^iO^Dc¿=ê°œCßEàá\p*gѰNV]ãÂÚZjɵ׸¼¤®™õs--ðÙ™3b)>ÄñD´¦ìïAüê=š!ò 5†ò90¿8·K+E5)ÐØêRÄõÎ;ˆí<í ¼ ýmÄ£8;Çý¹ ÓŒ¶N4CÒ}œt›?@?{gz¦‹8°ÕÞ} xàðôU² w5ùÁÀ²’.Š#‰ ga·½ëT(—’DÐÓp6N¦*AœŒ³p6Ò˪¶ø#^ rý"³ZÜ-ro™XºOI¢D¾ÌÇH™\—e)‡-n9l J– —DJûх’Nä ׳ÜPy)Ø´5Øy‚M3­"0ÿñ ʆ#ßP½Ú°¶•`N×ÕÙ J¯ öáé™H1mìqœt-õ½Å¾ÂdB™Ò5èn®î#<æ„”ÛB‹q£Bψƒ‘Ã!+±§> K*ÆeH!nm/6§¦¤@¢›}é%çç„o%₞;Ùƒâl†¤ ­tJ í´ g$õh›…BÐ5•UbpÕ½´{« ¸¼uNf½‚àÆ½¯HXmè?û¾¥ÜæéYTž(â ÐËäø’¦‚f€f«ê†Ä£ 7Ëó¤=VѰÃᜪs%1ÍrÛ] |ÿ¾'(™ÆÂÎq¸´ðJïß³B‰1¥ Î%IfXªÓ@óæÚÂöTõN ÖqÌ‹÷^||Ž-<·ÅÂQxßÕç¾ãF9ן=ÐÿÆ{:zCD¹Ö~ 2©È¢Z4 AUÞžÿ›Vͽ¯°ä—TM`º=Ø ö´¼xTTßoVQI'o0âsVP nÔGÿL{–™xò Ó dïéTòÐEºí¢.·¡ÑM9{!:âµÀ–cÏ4ì«èaÄü’«´'Åq°W0Jß7Ú¾Jm‡S²h¢§Œåüa»’˜¬pú’†ItgoÉp–‹';™ Ùr”š \œ+˰J¬»e‚FsNÓfs,g*<- \Á~#bN-nÄ”*Í o£.U¯Ê2ûy’s¿'bx”óBŠÉ$c8÷ âìÙo/i?2û6(:ªû-Ö(úo:|˜5Üitnb¶Ôm[OT3yŠ®Û93ÛèSNÓl£Çˆ?¥ˆcº‰w<° #\¸Ÿ!.pÁjðs2Î Ï|ª”á—ÑIØ aze;^åÆ_Éšä§òuPpt»ä“Dï¹G‰¨Q-ãWZŠp°«N?ÒR‡! 
»à%ç*C=5or¹-vÞì¯*·/ËÊ,o>;)PîMÛé}ÀèVÄ,Âm ÒjvܰR»% ¸âJ1™o4 Räû÷_ýî÷?ê\õŽ=ÕOpÒ ~,õ`–g”kH¥ÅcøÂ‹gמÚâ|†u$ü¬EÏÐë%›Åtø­°Ç—÷7$¶ÉCWi¬G:ËÕK¦bQg¥2é–Ñ k*éôÖÀ†³A¤!€ZˆuUywØ·Sd’aV¸]êÍc˜èMä&ÄßÛ±ÈÇQª's5_‰o L‚Ó˜°#÷i³‰ð-¿vQ/>Wžwêf2^Rãd2‰§,Ô‹ÕX™,#`ŒGÂn ‹gŠÌ9º"Û7JW‘QTùcuæ»pAðH}àäIºÁ„ 2É^q2 «ü@œ“c° [Ç®¿È|ù·œíÜ"ŸžŸœ’Äi 詹ÀïÃ9‡û®¾Ù„:eZ>sîþ—êÆÌi?5¼u'Sp uÈZÌj%ÏúÏeú5• 'äI¯_©C8Ú*L˜WQbÙÎW ´ìÃJm›Ó¿P£}êt"Ó«4<›åHꢨ:Äbj$^:‹«v!oÛ€ñö8 Nctv\ÓÊÙD•¹¡KjÛ;Ưü6þ8Í"Ë÷êp$톼Wó ¨YT¬¢~ mE1:ÈÞJÔ9’^yhÇ$¸g1”ïårB®ã?6P_#•†ÃHJ¦sNã=G µav?ÔTóÉOÌ¥>ü~°¾ºeÝATͪf¯(‡Y8G@*N"k÷ZEû²&OºÖKj78T¿ªç„ ‰0_™QÛ“=±NP=2>Žðj퉖ŎÙá€{›aÿRvÿ2†Õ<ˆQUP¦_€Úþi<&·råeÉÄR¦¤úÝüÈËÏ’°Ÿ¡.R/\ËC…Z›îÄA»Ýn”¿ª¢ØÌ•€Hd’¥¨¤ÃÅb=+¡ŠÆÖ3FNKè”'¶€’ž¨,>”Ò‹v×ëVWÐäNãFl¬0EÊ©óº3dnÏU©˜á ÷·åùÄQ5r6!Ô«iQ§3y{ôŠ-úÒû¡î^;š «¾RöÙC½ú8òvÀ²’NŠ'ÒXÎÑâ°\Q¦ärÜ †!umUr¯Ž÷ï›S’{aÀ{C³\šŽ¯€È]Åè®»ÎS+Økxz@Ö³`ÔHèˆ[Ò…­Ù'³%J®k8.Ë(ïÖlGãÊ õ¯ëTòOÝÝø2Û³hƒÒ*Þ›cè ÐvNŒEšœa|NL»ËeJò Êê¨BŠBã¦9ÚúÑÆ£lÜ ÙG6©ö¤åôlm‡"#nC¡³Áêäô®²¼Ðí§é]­6Ëï£áU4Ïݸ5mjE¤mä‰l4˃ÆD7."áמ‘\&¢ÑØ:ŽåÂæ£Å:_S®¡‡cÔ"­qZ°Ó*»#[9TÈÜ€t Ñ㊂†]“Wc/Š÷±¯Š–>15áŽÓ6³ÈvBÄ“¢p5ÕJ ¿È@Ä*b „ªhãÔsžÅb³[ Y`$·ýÔfwØ•D”¾Q>ÕªAÑzî+nAkYßÃ>86mû±<*¨ÄÄëá·dÉ:‹2×Ù%³,¾À|Ô—q¥ÝóÛBЬŽÄÂC¥#œÛ œhp G‰w€íW=s«ƒÉ€Eü·ˆ\»²ê=Z£ CkNaÏ .Õªse—1wdæðjîÞŒ)òûý{knËé¹êfÍU è4e+{PJOS$Æîˆñ6t‹=Û×CºîñtFHèJˆ¢q]KmQîD˕㳶‚×R SõM­ åæ#*<ž‘‡¡Üô¹LÍUÖm{ñj7è­ÏÅþT’¸O9Mv”ý‘ íì÷M^õ -"0“eàc|«²³!œrùfGcV5µ‚5ßʲ.ÖXG߉šôÙpÚš¦ìÁÇ B[yøq4ăLéü;®í÷ˆ×û›h±ö/*¬ØP:ŒÐíŠX m5GÜ­™_!’Ê4ž°±”ÿ¶·€{¼Ê†¬êqù¡¤–xsWÃlxâoî°Ùî°Ùn›d«jà\™"%.Åø¸øi%VÑ„æâ¤žO´¯-_ÑÄ•âèbô°r6 á2Õ¬z?ô„t¼Ž¯*&S"ŽõWèáAgÙÖ@+Ÿ ‡Ë Jš4÷ŠŠ†VèjTíôê0ÐE|'\—*˜u ¼F/V~AX³ rÑ {²;¦ç\9…Ø ¦†ÕöE "S‰ÌV^ Pt-”Fª³J CïDîðDF¶­r4" N¦,Ãè›fÍ\:¢fþÙëߊø®+¯[e÷²ã”)&%š:qÿ8EΑOQó ei¯š`¼¬to ï‚v–.Eòs‚ºŠ¦•±Eã¡0«`‹…]!ƒH—ÂCTñV8zÐyfñ&rr²—NJq[· 0Qa|ˆ»çc&¤ì¯v^qžáT F†MuHÓ";Fä¢ mr†Ñ“A2w6 ‡\zþ3Kú†bSú3“°``"/<âÉE\ &óQ3ªuÊh–H(´ ßàZ)”ܲ˪|íx‰[,q®½r¾××Z…Ð8gjéY”—â èû”)|éc E?9.öMž¨£êU/ÝXÐ ts‰ô~¥šÚ“yÁWªÝn‡¥gâ:LÏωD¥ GÙùR åKß³­ù6ÚYZXùà)š:rCM D OGSù4:Þ ´ .¸Ó85T~o¹UL/–8ÈØßù|î9ok¶ÿÌšº·îçÊòŸ˜$'Çù ~ÇÙ«Zo,Xf2hë´&>‡_ã¬nœ…Úq–¥Ùòù¬=‰ÆØ19#÷à`ßËÙënj_ˆíÚò¹(N^¸t| «Ì" ´=€ßÀÍu@ãª2¼\TPScÙþ¯<r“ÐÄ-˜m 8p¢Ô *¿Y¸:¨YY¼8µ'Eª;‡•©™IEKÍ*þµ`*µ•«,&™T¯Áe³ÇîâË'/Ï}ÑC¸Ÿ0‘«{„\E¼ÝǶ<|ûÕÙ ½º»\¬+ÏO»^8/âÝ8p*ÍšÞã‚^à–/yq²óóåVƒ$À5x,:5ˆòý„¶ü³ølh³Q$¡’šù8V<15—Ïf2D‚Ê¥ÈÆ )ƒ=˘%p’À“¼ ¡[mÍŸ–ŠŽDy%è+UÓ#€‰;#'[Šó /2Kž1…høÔê1É™åßXè å='g¹Â•£¤.ê?Ùü ‡>;<ãΰ¦Ü?©MqUtãZÊŠSâød­ŒúÈYM Ýt"Iéº6â<>:OP¼ “Ø9U&Þ©÷n ŸF\!ä}—V~—KñFŒ…,(Ô´¸=aÌ•„8šÈþFIÜÒðGW¾]áwÿÊ–"ü0v‚]UkYëm3jòØ £Ì–UØÀa]´Q®XÇs¨œHÆp;F¶â ÅdÜÖ%QÞàpÝ|ÖË`yL z95'1%¹#q·<Í5 ŽÛ­Ÿ›.à«j²å¿©*ÛqßÓrJo®#; Ec?A×Á3O}(\¹*,çÄú¾Yxâ ƒ]JÎd¬èÙØÉ„l¦ÌÝêO‰+i¢©®Ì¦byJ(ù4$×YZ^íæhºÑO ‰BÍ¡ Žv‹ü;í±pZžwäÏ”»€Eö”ÑÎVÏR ôgc¢Ï´¨“ˆGi`Ìö\òZ̰ %A³<®ºô¤ïJ¹¹£ ŽÃ8­TÚcÛßHº6&Èddâê Ñ%öCu|Ñ·„R*(_‡h*yë-Zpx¦C"˜2ÝP«&ºxL£| Â[¼~y8Äòðò“6cM&Êשxë±^‚œnT1ØjÈgjâ¤tÛiΧáÏ]P&/Á®²ÅêFÐô1&÷ÄÒS§Tøä¬…6ƒïàEÂ'qÙòqHZé µÈ:Bÿà*–ØCÕÍ´xOiO ­î˜‘S€ÃìÊÜ0;0˼»Î×wàósöéF6ÈRaçÍõ%Š5‘ B„ãyÆ[÷^´ÐE%¾b^ª¹[Y,,*i ’£4kìBÅ£ôÒ¨ÍõFdô~!7—¹/ôê¡Óa_s 4 ¼Ç2ÖldÄ_c*–ž€Ðћܓm‡#Ñ5!åÎ%gŠcu-ÖÔõ=Ù›:—.•ìý©våÜ~m©Æ@ºæØNSæ}¹²$Íëë®Pšõ'€¼ˆ]™a±DàñIqnUÎìJ6U«>v«SîêM N‹c¬G.+B §ÁµàªÝ¾²'-)ÅEÖ2±•˜·¬ú€î÷r!ž Þ—%Î;&‚3ä—½=œ»Ê0®Ú5ޝ|® ¤h x®9ÄŽ9!}8ä0,“ó¾uг̻hˆ±j»Á<åSdÅAÌ(r6RXúð¶1óÌFN­W¦ù".éÛ¯tGºøÉi—/"¼käÐèsr%7¿ªKdmûÞ#ÎÚ`sñPýH죓0ˆLо9°È¶Ž©¸L"v-Ù n¤„U•­¨„e¸8ãç£öñÉR5•òkqèRß 7¤g~åM\WRî…?Ö,eW­¾e•2&ü"-/ µÓŸü•5VÔ±¼DWkì¹ò~ÒìÚØ^Évx`'F•5³o1æŒI;YgìC&¦¤E&Éu[ò1^%ƒ¢åÕ¸18^‰¦"s±¦]ÙÈ 5“-©¢`áRJÔÊIòj’Êè6γ¶Mhß»(—»ßñ1@ò"5ðW^Â`ŽT¾>Øúâ =ý8ÕV‡Ì@>D—iBl.*bÉkò*ÊÆ¶Í‡6©Æ±F?81-ÊPƒ–#ô•¾Ùq2]‡¿6Bß~:)~H–'¡—¶Ã&Ь×ècU^í$WX›ñ•Iꔬ³ª„Ê‚âz;,=úoUÉrkBK™¶h·zš$•òhËzv¶âÙe ß9^'°õÎ%~›1sÈ=²‡a `+ª&ÂõîžÆ*9‘å «ù^}=‘8žý®6Óµ÷«†Iæ»[lÈx¡ëøç–¾,¼rQd†ÂÈoZ>о¨,½-–T¼–2k9)c=‹Nî|¦”>"B:TB´IŒ¬ÂtŠä9åëm[U—åd³ç0÷‹*,¹ ¤º@]Ÿ„§î*å¡•Ä™²âÙQ¼ƒ¦ÈÇV¼´Ùl;œ·”¿ ÿbX'ª¨½zðyåtSÊñÕÁ—€HÌ RËtƒ%*…=KÜ^ĨrŒ¬7ñG{U©?üEô8DãÕ 2c;+.'b¶-?[$â¼(ÊH0¤$ªkÌFuY²TP• ¥è•òÕ¢®VÐÌMx‹òîr˜Â‚›…í¶¦RJMíX…ÛÆQ}ɰ, 
‹Ø”¥#õc_=éä”íËTÑRíî¸}$µ ³ñX»]S›ëðj†ÜuÂZ¦yéMš·û'‡F©¤&dz¹ßŸØºHkåº&ee'{Q]7é^¶BB:aí7¡Áø(&Á)/4dµ”-ìÞêi'ô˜7+w-9âZªO%‚Íw”=D½”:â¾å­° yÿÌžç>Æ ä.+oóñÎ#Ö,4ʯšFëN±7ë –x >æWÖ]²¶× Û•Õüõ']™Ê®dÏ,mM{ªÕ>Eöº†­víàÏŸ¾}¾ô·ç‡G^…:wõ»¦«]_ÿ|Kk}­!Ëèké«ÿøò»¿?Š?>}ójþàñÙ¿÷£þûÜŠí´ÈÿJ-.¹M,þ~j=ËcµgVì¤õv×·íßF_+¶G}Ä€êNH`vooÄEºåCòºb’뢼ÇfÈ_WÙ1ß“¢ÒN wÀ§èNØY(ì`M+Ê:ŸòžÒzÛê›ê¦W™3Ò_àíSZ©õ/Êg¢«XÉoÖÄÅx\ŸÅ1ƒŽ–›.‡¯m·XåöÉF~ßQÞgbŠ~¶ÍF®réæÞk¹?þ•#´²;³fëjêß{7MšB ܤ—ÿ?{ÿÖÝÆ‘­‰¢ëõðWd³Vm6‘ÔÍf™î-Ûr•V˒ڒ˽–—™d–$* Eרç錱_Ï/Øo½jŸ1ÎÓ~ë·ö?9¿äļËŒL$u³¥^]–ÈÌȸ̘÷ùM hT-йÂÄðW³qyuFg/A±”¡²ÍOé+ÆÃÖG: HoìptI•º§¶ ÿµk}àr Ù¤¸ç÷2®¬³p½xR…1#ïéFM^Í'Xø´SÐEáo+*CÐû´Énpåi…k¨-°ï Ü •›]õg¸ 4^?”qaí'æ×£Ÿ5!ØÞg÷£Ôbmêõë×·6]c†â|˜&(§²ò2Ÿ÷ß®ÐS<ÄËùUëëã?bé¿ÞHØ -ís<¬üæÙ9–xâ,F`€:г/¡þ$^½ÃD§·2 ˜þÉp’I /l㈖% ‡Ë²JFË’L%ä-@§(|߆ Rd˜Bв‘³5!;1É+´re9$1ÚÇcåd÷£—§]›cpc~,nxSÅ ãýÎO’DC.n,Bð±*rÀ”òÚo×GÑ*2øöLõªË©A6tcÏ”ý¾‘cùbgEÝóEϵº#iÏ?_»H‚ÒÝWV#ÜDuÄMWH¼*‰kVJÔ¶ÐÔöãª×ñt’‘޲û&J0ª† !ÛµH>`†Ö¬.ݸÑò«•pTﮆƒrÔJD&°2ÓÁnáôºÜ0ÅK¤CÓΰ·…§îq Þ!ÒÚŒDRQùÑïÒ¢ÁŽ‹¢æ0¸²ãäQ1P½ÃäÓ‡?zöüÑó÷Û@Åú‡µBÙm˜O“ó˜!±¡£R'!ß´ä<`¶³mOÕu Ióãb™0†Ïü¹¾k#ÙÁšxó8˜‡Ä5öiµ¨o‡”øô9ëÛ?™ÿΊdÿþËä[…®wì”!ø☢\æí{EfóâJˈ<ò»Ò^É$Ctö,ŸZL¨~¼cF Äß‹r™5¹Çùkêr9/>ò–Õ¼¥z_™ËÍrŽß3¸V·.ž3†&ã4ŸØN•@JÒoXu¯„^ë]yr‡W=ˆøËÍþXòÿÑ*þh´Š»V±XR­÷bQ^¶SÕ†¶õ Ù×7nco`gû·iP”iõVbþD-™ eÆã­¥±6‡~? OtÉ•†uñ·ó%´ÌÂ$ÈÃ_ÞþÑâ“J~ÜÞ7„÷cš/|D¬E´³Q^ãLË~òÝ% ¶-1ðxat¿6ÿ y¼œ-òÉÎ6Ðû)ôN/Rh³Üî&Ûÿ ÿó{øŸmZêöî6ƒƒÍ€x©–!MN @˜e©&g˜¶ØKŸéœ](¾â.édnŽf9ÍJÕÕͪ›œ]Ρ­7ÍÅi^Œbû,kôøÉmiá_ Qô•Yœ¸“”‡<‘†z] õɵ]ií» çK…é§ÔшÞ3 ¢öë¸üAf]}ØÖ)E'Èð\_e—=¼èF€æåzcšˆV\—Wb•ÓÜ‚xа;Â8Ç×O¿{öˇßÿùÁãv“o~ûà‡Ç/øŸN‚<ýî»O¾9Ý“~·»i Ó§Ææ—Œ¹ãcê}òé“§ß÷àñ£{xü㟽xøüÙƒ¯®©7 ü9ªýY ®„¥CñZõ£Ë译2•Ñ<åÓª/z«ýá ÏZïÌhÖæåO6~qn®^6êq³O3Ä"Ô&Ö¥¹ƒùfS2JŠë"vT3„Ú_fÅ6®×ŸºÐX ­ê õ„ë£WÌÌΡ6ŠǼ_‡³ÙpÙëlˆ‡CýàòŸ³†X.˜ó¨f´¿i„Ñ5ª£ Þby’Ô†³f7 QçÐ0Óló[fËÈÚ*îÛǘ§=Òj6|ktw”*¼Â~åÕ!ʪ=f5ô$¢¢Çã%0ÎÁ’ø÷0úµoûç¿Ù¸¨T¿Œ&þø=Ž7ƒÞÿû-3ï÷ÔíGi’Ë„Ú32g=¶}‡:È'Þ pÒBÃ>ôP&ŠpHúÍeÅ‚4~ÿ…}ÙPQ JÝÞœ^†Ia·útB=°´C³K=䨟!L)¸ç\V¯%Ú"èfèˆî7Ä©­{=È®A]Ç'«ËQ( ¶–þ¯O/µQ?ÒKÔA„ê÷‰ïRÛóµkÅ÷$~§ð3=b\JáB “›ˆùݹ™I{M—ÀUÎBÓx@ñÈÄ”¢.`-KC12“¤cÑ¥dÖbÜ…\¢/«Fqsäõ6vã®Ý .e9Eôú^è)ìaÉê¶ ÄVdy%‘À!&·PB³[r¼Š=Ü{­ZÂî` >x/è—ö•ÎvñèW¯‹¸Þ­’–@ñ ã~ý>“?ã±[ UE_v³À©ý–›Óµ­Œ,ö7‡ ~C«ÃÓe5»NÚç'!wì ŠÉøB®¤¥;˜¶íŠÌåyS¨+ŸÅÕ"ÒÚ0mž=éAA aª X<Æ0ê½FÁF_ –ˆ:šÝkî¡ðš°x)ܘH¢ßxDÅtš3“Ò­é°áet‰Ü"ÔWgW—Úøúýä†ØØ Z r€D6Ùœ´#($dzÿæq _“œàÔ×è£ÚùÖÕÎ6ÓÜôcö¥˜õï¿yðá:ï^Áöß½Þ#[§¦õDÂ)záÓÙD3×r”•+׌ó;=;¤‘aEŒ«D®Ü•|Héª#Ü®®Y£ÖV¢JÙ– ¯RtÍ?¡•ŽëKôÆÅ榣íX|cìsÇ­Y(»“ “4B›E"0XB˲ü;Ǫ1õÄ+yÿåG³ü­°;/qÒ×xàõU¢.1‡™åÐ’e ¡]F­;Æêœ÷wØ2#~GW¶‘p&/uuµÝ#>!æ÷‰2±9QÍêôOŒ²^êoxæÃs# ÈË,ÐÓVý¹¤ãEV†G"¾v¦4Ñ›6&d«¿%¶úkp-üú™æCÈi)ìô*Ù™¦Ã²P¡ª´âV7ÝQéÒž„ØE}“êGØZ’;d;È|ßÎË%Ÿœ|qtrÂf½-Û¯(Ò¨uZUï%Ï‹yîÔb|ZÃÇ[ Ѫ¦³¿rðO3œªu›§ƒ|’/.µûÂŽ¥ ÆQW:s*7aÕ¶Î\f:œÈ1½Äö»õáà´Žanûõ1¿h² ðyr88I²_OG)&E‰Sm0‘ƒ 'rNä q"ƒH"E}&|VÑi¸íŠ¥W¸5|øÞ‘BM¼eþïi×k{NÄ£ÄÒ~í×ú×õÖk‘]\O”IoKkÏ[´«Ž–JíY*®9„0‡¹P˜S@!ÃF_µ#m…9Šy7­_7"_FÜKvä0^Âì¨ÍÜ8ì8Æ2'7®Ä>±°D )À-ãÄЧø´²½ò‰ h\î—pýÁ{Yù½É¡ ßÏ‘¸ ¿7ƒÜ ¯NÿÝÇÿ=Àÿ½½š¡à+Í,»ö Þæ£$5¤x°ÞøW°Ÿ ’Áídpg½¯Ü¾ÊW†ûÉpõ*â,Ðif—Òä`½qÛéªî·U°R’jM jtr^›ªl äþÍЩ@]B¯+·ëw|Wak‚ü„UtÇ󩡯ÛIzgOYÓö¥ƒÆ/õ˜ÒÖYÔdчˆÕÄë´Šé;Ÿ o'Ã;Éðîš!ú­óÓÙ4†ñƒ»ó²™®Ïö“³«Òõס`„ž®C‹7M‹>•¿%мûöÈÑ0¾+žÞ‹*u Ë‹=m’4žØ¶ú-!³j%ÅhA=©êåÔ4ªzU"-€%¹™,|¦N#³G‘Æ‹MT„ó×Oj…$k³§“ÆÓšIÅSù©CKê¼Ää3J1'çäÌuÖääëäß[²¤ ÂÑXP\¶¶Ó:»/7T¯WíÉÌômȤ.›ÒkaRÛq4+/ßöo9e©¬s˜ü,Ý”ÓâÐüµóòïññÖMÓ}ÎDÝK€À”¨u_ͽ)Ãv¡I×ÛŽÖï/ït›v˜_ßðCn¶ê¢F›4åî2py¢6÷Ýþç|~\¥ã,OÒSªðÏ0.\bÀ6-‡g`Û@*Ž¡Ð*¤ú?Ks¯ÿÏûõEZBùý¡ø 'ùà·ã–ù{K ðÈÔaÌó· <64_‚W–DœÜƒ}xß Tp´=á‹ñƒ˜ßÄõàQêJÂü©ƒæË­j™æ?wñqgrÀ?Ñ6¸5Å ~„jUçåÕˆ¼¾@ÒÒª3p,^œåÃ3(ż­j¹Ç`³¹ãï£v…(Þd¯’ª@?¼³åŠýŽîUÌ–­[ö¯=ŒYHFs¬?Ù¢¿¶?ÐT'yÅÙµn×A|»šLóõ¿ÚÛpOzïϞ܎ïIÝ‘5øÞÙ¼Á%߉/¹ncF¿×0g‡( ÕF‰mU^©q•>"µÝÝ«8Pnd¨ƒp(û—ï¤Ao¤bx­ü·X€¯>ù¢HðnæN‘»6‡ÓSe4¤‹Ý5½Kú¦=ë,TQ2A)”¯ËzYƒ» ªbÅÔ8˜ I›p±Ê >ya¶Ä™¦—5 í)%àoºÊ§£¶-OϰÿúÐ|ÒЇ—C–ÒXvJYÞý¨UéïgØô î‹òÖu–ØL¼þï<•ä °ªˆ×W³{oÎg~Í Ä›Î„¡ÍJëZÞ1Òo2°T°qšÌ?ÊsI %ÞˆØ|Séõc¬dvº™úéš{¸›l—¨{l_—Oƒ¶P6k 啵²›Lv¡Yî߀žP6ë 
åæz/yYB,ÆréGz3Vn\g8Éeñ~Ô͹ýþýŽY×nu«<¸Ö¾ßŒDÞLéÈíún[=E«Ä„Fo˜¨/eè~ý¦—1YY£ßõ¤Ú™àË#îŽa™~GÆ‹ÌÊöŒñ6ÚÅŒ¸.B®ž5câ ÅŒk_‘³Rfj¥ ¾oE ù oö‡­¢¬ž8bã@r?bKIì¡!,;Îs܇f(Ä)pØð jó|ª ¨ ®õA\˹­Åî:\¸/.²÷³¼UŽB[pZvÂL£cþ¢8¿ùé-µÏihßãŠÂýus·›ïòªriч\ƪe±âIH „t-¯TÇ)r£lœµõÚú8JÍKcŠ'@_`«PžCÄ漋¤•°GDKÃ<±+•kÆË‚û#f# Ä}a³MkãÚ­­uÒ«‡¨Ûéºíã¼{¹¹AoE2A¯AK´q3!ÚŒÌJ­þÓÓïÞê«l  ¯Ë ¤¤ƒ°¨fLù|S‹H½;øDp°=¸" ~áÒ$gçyY`¢®­­  Ñέ}}>I‡œ‹36 ž`®¼‚ÉQ6ɧ†×—ý¶|®Éèø¬@hÏà§Ì+HO§ùI1RàŸ CéšðK#Zú.¤AÕÄ´D{›0dbVÌ}óóû›§¿{9ÒÅ4û耹ŽãþUÐ_t:&2y¨—'Íé®Q7éýÐc_š|v™U•\\¬uŒäðJRÿ‡ÎÇ#¾Ž­õž ¯Šùް­ÎþõrPdÊb‚Œ”ªS OæÎ«À`Ye'EÇ"ÂaÓbµQ†ýx$dX)ŠƒÞ­ÉÔü ZTHi*\¦˜¥L‚>h(öÇß?yôäï:P§ëxÃ)­æ½ïu³-| š^SãKZ3qÖëvw˜ÏU–Ay˜›d‘©ÊˆªmôÇô'èŽihÔOéÁ¾bª‡¤kdyŸÐ«l|e9sˆ•æ7j9Y4[Fe“,Ž­øÜÜIèEQQHzyXà…ÕY†¾ïFXè, §£‘²tY“u:f^3ܳWtÛÌ.Ã;£%–òóJ(^ž<6KVêôˆ].—Ôé‘̲O¦Êö÷w¦P:Š¢S›`?Z‹Àch?eéÄ‚TY0³CÕÁe±d>uëu")¤´~ø8³Êöùµe«v¯LÂÙÑÀàýäR‡8*–$îªb;j¨ÆXÅ#à´SCzy%(§PB,ãvëŸÂM—*5™û£2|I!ø Ñ– ƒ¿0ÂmºàæY8GêJæ·¯ÃRö´¨‹ yX`SÙXˆÂ;w{ÀÃVÜÚÁøáëUå!±±§ËìÜÑÿ§éå Ã|~•+†ë iáWfÆöG\—EH6pÙRC:•»^¸f£–ÊyÑCð´GÊÜóÍ…:ænV;Ÿp\[©&.»øµX%«¼¶4Ȇ=AžÓk»‚¬ßÔåìvÈY_#ÝïtO- :/o¨GÇõZ´¯ü/t{Ìô´³Î±F¶DmÉêÝh¬¥d˜±Ê* ýM—ØêšX7üzÊìU·lè'ߥ¯40MI ?ú¨ IJ\ñ|MŸ…w·_Þ.³¿.sö¾¯´Àʦ ’8-2†Ž÷KHqÂ7ºü¿Éž#¢ðÉß™­öG¥ûË5¾P»!ï´ ‹/мë‹BHëbÁ#†ô5:O½+t¤jÝãä×h4·9JnAVã­Eq‹~²nc 1,/ßãr¦¼‚0dýiSkîm¹Ij šíÄkÛ¨Ö~o/Ô–œ¥•Ù‡´ðqâgwÛ ºƒñ z£«3 Ð.¶7Bÿín¶ øE·²u{ž€ ‘i{Ë%|Qæ§§äõUÜt¦¡ƒzž‘÷ñίuçÓ[£|ŒqðÅf÷ÿmÝÛ(9}¿œÍ<›½¦T„÷Ô³¶û{e^°b×!|í7P„ßÄn[A…ز8˜öMGæ}Ô¹‚¢Úû­¾É•Yw)—ðah17Ä>#cgÒÃó'mö9JsKN²à Ní`?SëÇüçs, ÜM<Ô>Zµ4ªÛ0À½häâ‚bªàñ™Õ]f)÷>Â]ÃLrÌÄ]œj i`Ï( ýË®PÏÏyÆþ˜YÜ?Ê ¾BÚrvKªÒx‡–0þžÈˆÃœ q9:?ÕÇ ò¡°6ɼ¥å$ÏÊë™l0óMl6¶ÑŠ*j·ÓhÇvåë™í=i tøwƒ³¢ÊÞ3ÉfPv Þ®†vwÛ\î|Ô©ü¾Â¶q0$¹p .þd]Úh-+îµcݸb6jÞ‘¹å\šäÃ}R,P….WH§ù9ëòè!è t0Ó®ªŠ!8 |qæá˜¯hûõÑœ÷æ|ÍxßȰ—uÒe; ïÜ;uˆÑb F¡O¨x|eaø? Ui%D·S—nS¾ùߎ~ö}ÐyRá9íà}uÔ;I66‘lØç7©ÃÕ ´¢- Âà~þMŠV‡—t¡]umœ0ÔKípü²ê¼äèFénïð„T¾žYU€"½ÜͰºÑÞÚ-!n䆠s’RµÊ5¡f`T ˜ ²Y6Îaœº!Š" ©?*CWP†Þ¦ò ÊþTBà2®ZJc©…³åt•)çtB[7Hju0 gNå/*MŒ~…ó0Là0¸°#ýd 1ÁMdá{«—ÉO2’ý‹ÜË—q÷´3¯uy'ä¤bÊF±Õ³dÇ\^ï,›Ìý†¬iVùP3¤wN¿!w.ÙÀî hƒmìwá§`¡ëË+:=5W3Ê{Chµ 9¬ÐüŽ¿¦zÚ²›ºhôúÚû|NI@Æ×¶­;,kë·^ÏÜÑbø*[C¦†ÎÐßyΓCç >žðãðÑ*3SÅøMïêt»ñN%ô6ZmpD#ô\8ʹ%E+嬻ZňU…jüži¶ßÌl™ø‹UX¼ÊjѲZK՘ﯓ‹ô² µãùìð §}>l8íÎõ|‹m·‰ û3 ­-aƒ'õSÞÒsë‡ÍdÝ@6ecOÒÊpNé›/0VZy×–m°›Àžu ú:ˆ±ýè0ž]8-ªÃN.éæˆMi$tǦöã좩«x1ö²é5ø|Ä3H1Wnbuýn‚ßZœ•*GqŽ3—7)³°•é/BiÃIù;z-g^ Ëyôôu27éÃëµ»ŸŒ’Ñ­¸Fû×"Œö×øÊÁµ¿²FÛ¯Ñíkåöû˜§ò.«DWºXõëûA‚IÕÞ®¤gÎÑkùþv°ýéAô§1ÈÆ‘ùy>«=ªJøx°£ð“ƒÚOn×~¶ÉÀŸ¹ÖÂáïb?C¶þ0äQÑÆÉ1tRnm±²ñûè²ÜÌ{¡î#ÜäØnʱ›íqÐ}G=öaºX–<$›ß{–?ú6úÙìòîå“Û~û_Æ/ö^Ÿ},Ö`ZÍ3ß_oÚ‘ÝKa¾êckȃæ\yÅosÅ7·âÛW^ñí·¹âÛ›¬8¶ª‰ãü=€ÇßXûö“Œî¼½,]¤oG53ZG;»sÝýóߌÚͳ{ÙkcQ¼iður&®'²Õâ[+ |‚4pö˜°zç –¿„ô‘~nŸävÿºZ[ôw‚1î4ŽqœëoGŒ,^ýê=PŸ’ÅŸ9¶èe !cóó‹Ôæ×Ð30uû·p}àâ|í†N/þY‚?ƒœ(FA¥,ç›ÙþÆõÛPÀ|Tq?ª¸¿-·Æ3ßð¢k߻ֺï\gÝ11òæWûêúʾvÝ«0`©G¾Ð5ì²6 ƒr'HúàBZ§ ‰{ÿ²5±ÜÖ%-+o5†øƒ†kÒ¨[*[ØÃL¢Ú'ðÄDzŒ­Òg9¤ô† 0¤A¶L„½ó‡ïÒ×xóü_î÷ߦ—òurm§cÍÐq±¬@u¿¸‡z†¦ˆÉI X9€Ä«ä@âÁï¢Ò#Z ¡‚ÀUÁãÁÃûšv“S,¤k`|Êní,>íö';²‚ãÂðý¤¦~ÂNo­?qÖò›GerŒÌ^wq1ÄçˆWifDѯŸº”°Xc¾á¿Z™ê Vœc —Ý »pšO¹-Ôm„5ï*ÓÓk ›š1uW bXæ‡ï¾mÙÚœ¦7‘"{nç¡•ÿÊÜ©WÙ¢Þü,‰¼Òø¬!ŠàQ*9®v¦< …¤‘GU—…Xµ¤·Bvqjspõ…ÆZƒæ÷[_ä5Æßä_6¼Š•³Ñ÷à7±—v¯÷‹È$+•Í—oÉ|û½…³ÝìÞÒÍ^òϱƒZÆþúN§ ô_Ɉ‰M³¬Ýš Ãõ5ˆ•ø/ùÜÂÃÁ¬ú|ý~„R´Ñ®·a´žÐÉOu8ýu¢œó­mÍ{Ξ=^“¯ZXøïwxWw×gã+¡ùäQæýòïøcG+’xä¦ÄÙÕ›f>F~lÈp§6|$ÚÕøN/òê&lg­ûß®*GÔÜkkœ+ù­û lá1§*2òu“RÓ>Ë, 3XI˜óøM›Ó!#ÜĦñê‰KfX%E"Œ·ý‹­íÐoSËÊwáË”>ë½ UQ•Ïm2§ÑR§©€ÄÚôµ˜áHÎÒ˜ýÉ úkçn®íã °e'Eñ*AØÉäv­ÆÇ䛜&~øáj»:}û ÞñÓíEúº£C&ã€!±[™1z\Gmì2ø6ûÔsŠïd9Ê‚Š%¡j1•\˜Ú³ü2Ì&m&J§3Žº‚h€×&‡°‡j1.(škR)·â+Ï)åö“Ÿ ­Ú¡ë¿b7Øbª/3CwÉ`¦iš-°×,¨‹`èú`/(yôäÛ§\gv=Q†]‰÷?+3éò·uŒöú€_ñ(ƒA«2Ò¹û–°P < Î%^Yèÿ\e%ýÚ–KÑYæe´Ù¤t±Ô™°U&ù³ˆ?˜L5ЪzUÄôIþ+f‹þY÷!´ç e2ô S­’~ÉB4Õ¼ Α.RìdvV\¼{„¿•o÷*©÷PN±Y(P935z»-"Ç‚ð@ùù1šnmtC”ü·þ‹¿Ceði Ê©tEÈèæO?ˇ¯²Ñ¡.V?JþÜÿóZq ¿A+PÃá:aQBy·—¼[Û+³ ÔhÀç±êÌD~ﬨÐ?ðIäws\GÏVÞaUbýAó ºb€ö ‹Ìzótø ºzÔÝ05æTÓ ›ÜC­áÞµ¤ñ-ïÁÕߨ øl¸ä¡‘fÅ”À´j»D(nX–x”Ì/g…¿H¨uD55¼f,‚k›ZûkåtÃtx–EÏ\™Õ×¢ÚkÍ£ö 
Ñg@Šp‰"ÓåÂÖècì¦î‘«Í¾î©žcãËÉÞè—ð$šê¥7fŒÒø¢Bõžªûs^Î~ÎçÑ׌ë²9t /åà뻤µÁƒ½õq?L&,‰ªÌ WV€Xfš/c7‘H+O£í.C¬î$åJ5û¢Íih ,¡ ¿«â8h@‘ë°Õ;äržg„ôŽâ½XuÊᡳÚEfã“öÍòvÍõf#ØãĬö"¥^±Ú.€ÏcR AŽxwOWèðYލ-Ý*VÈ•TliÁt1ž7¨Ñ —A@ºypÈ…Ð4½$´iä£È<ûè÷òq8AìtR *ýmÛ‡¯è{ºãe ]:N'ªO_ç“@¡0¬+’‡‚Á'¡Ø góÂYF&×lׂÓÊÞ‹¯E†¹Œµ_®”\µ7H15'ØN÷s1ÏúŸÄ§òBãƒ@gÞ·ØŠ½““¨89qïxf J qr‘^6mô£qËØ ¥fD@%Þk''+öͼmØÈTô¶{ä Êä<„Ô‚A'”… ë|2÷ QäNç†oÂb¸•YÑv§ùãÄšgŠuù”Ç ï+ô†¡ã……øòU÷%F×ë¹ñ!HA úßyÚX‰ùœ£ôÍ“eä¼Gén  ü:2°§ámy”®#«¸IpÐ6HÇÍ‚‰ ·BË㎶g5a‹Zº¥¯uPc—¦¸Î¹ÖZøUDa1¯'$ÛS—¢‰ÛXãÅE Iz#ù©\Ü)C£»°7%؉-_€l°)-;Ñ´øzÈ$€EAÖJ°Föy¯è8ñÑ“/u¨[ÁÙ)Û_X?kd¶W9/Û¸Œ>_.HÂ^cE0J˳àÀºCå9 Ë¢ªìB~æ³ðAja Gúdf¨¦üuZŒÐ=µâ¬íoy’øZg]7e†yuž ŠJz X>X†ëY¾Æ2M¡×Of@mȚ‚®¡[-õŽk0ïħ£ÝJc³‘¼ä±„W°ÇØ=×ÏÃ×ó¬Ì³uE»¨ìRпi ¸öÌ2éçΔ$ÿ–Ïçb÷ÀP¨’„@¸9¡SO>ºNÑɱ|>`FŸ@e¦:¹ì; M’°ß ¬Þú«j«µ›¾X×#M.3‡Ùñºì¾ú#rÔ[¼Õæuo8Ûˆ±d—¢ýË1¸‘ŽÉÆŸf‹dEàÈ…}Z ·«¸‡ßƒLo:ûAf¸tn~Çê û¡'|Qjû ñúfi£¿4¬ünŒSøgâƒÖnd}äØÙ{ÎÏ5SûéÇ—¬ÿKVëRÃ9\j‡¹0.¢Ö_3}êŠe9¥„Ó.Ý*ìLÎó”0ÒþJpk±5añÛ†‘l“üÞÓºpàAQ, QIÃÒ‰XJ†9͸òÊiR®õº@»ì‚Ì 53¿+vŽ Â/fì­óœ¬/O ™¥¨ëÿôˆ›Þu¬€8wKîƒd=ØíÁ f|«ÛÙ4Y¡± 3þAÌÈΧѤUõU‹s¯ˆ‡„ܦÔT—Ê£>pË ¿~â¨7Àf©­Þ«›æ¸z/o˜ìê½Û’õúG«f°%߸j«¢túmå”)#¼ÛÖÅjÈ‹–U­ú3õýÝ7ž:Ë™°vÖ›Îòˆ eì6Õ­5%±;ƒRÂn7~ßòÓ}w£«DÖÒÛëßëÍ/ú·A# ŸS,©·ßß‹zŠÙ ögéHç;‘Αå^²f|Býݨ¬² ÐD`?PÞè²-!ÝÈs·¬ß%¤‰mv×ëÞÑœÁVK娥Z´å¹y ;Þ_Ña_ /× 1DY°«—&»- ¢;õ¢iãrYÕ5†÷çæðÀsÒ7wÄœ¨ ; `Mñî‡á?=J:=ú;t›i8ÁÂèŸ;Á«»}ÈYÙÙõ²>~üèÙóGϽq~÷Ÿàë½çk¥øÅ;>6= »âˆªLýÞ0 ðBYHÜtPÁí¿!bƒ?8>÷åññnÀMƃïn²¼ðDñ(͈?íî½4ø©–+tß1ƒ—ÑÑc@ÒðçåZ“Ùì ›ÁM5Àžöd³ó¼,fýÓl±Óyö¯/þôôɳ/þ’£÷²ÊætÙ"7´ë'=ÎË—þ>ëǾúáÑãožþðâøé÷þøèɃÇÇúEúTÓüÌ®všGÆFOü³¿ã-Ã~öÊõœ4×m“Ãн£œ´ñýqrŒýÃÜöããišÏŽ;õ‰}£Oï쮟`WW»"¸Óv£»(l“¬¸œ¼H«H|d€íΠ¼õÐQˆië#Û§X~Éû^?*Ü/$G|ŸFÕÍÛu*DÝ—‹yÿ;õ 8ÞˆK|X &¬$p@Úf–Ð^"9ÙÑÃbž0\^16Rp 4„nµt3Ôˆ,:h7Þˆ?¸)ÿã™·#’6ŸC¤.ÿÙœ«o#=,ËZ9‚þ û Â/‰i·Ö°µ¸ƒjÜ»+Dx¿No]³÷à¶úÁµŒõƒëXë×7×Vª§Ž'SÖ90œ§œfþ]¡ã³”/E<ˆPɱ¸uúéZF¿;‹ùJŸ¯-Ã6Ã9Y 9ÌÀ«åûaš ÆäÙã,]ÐèGð„ÄjN_YÎULÛƒêÙÔv˜Wh}^9ídqÀzê•Ðb¼¦Ò / ^ggªï ÞRÁž]`“2(ä+–§g ŠŠ7ô?J¦Y¦ü!µÍÍ¢KñæcÖ#CÕ*âÝm@^”:‰CØ4b/’ñ²Äžv_Ü)xïè>ªÔ)uÉØÛì M $ÃKÄH*½<Ò…óÔ#§çEn¶¯ 'ôÑ? 
¦¨ªTbß™›HÌí*7Kõñº(áËèf–Ir&Æ´Íø$û¡tËeCÐ9¿A·™B”oœVá·RtAn81<¨x³‹^Ñ|HϰF¤„|‹ž”ï¹djµ§=âŧ¶ˆ)À¨²´žIº³-È’T5NíTí¹t€¨«oŒËTâQ¤}¡; Š»/aSÝŒŒÁ‡rÀ¹©ÖT’ª3Ue@3Kcž¥nž¨Œ‚è¶C‰fXî“b!ð†ÁL‘ê¨ìÆ„¹}¸×JIu·ëäDWë„éíµa<¼Uü°VPäùêge¹–K“¹ÇÇd'ºáÎ`Ê©låpgËŒžÕ9ôŽ×ÜÎãyšËñIJ¯ Û •a¤@g%v¼‹ÌÐR_Ì’˜¹b±—ÝPú‚‘$À®W—±¨íõpuèi ¶Ÿ8”‹X†ŒN8ƒQ¡—ÎIiÿœ)VI*ÙŸÌAáVÖ¾až=ÍêâÕv?Ö0Ï&+YaBA\N†–o¡x=«¤Ð÷©Ïî§‹ÛÔ‡ìõ"›UP #ˆåK‡p<§`æ”À{^»—:7ÄæÚí÷ïö÷ä¡ ™åÉIk­©»%xZ³—Àxl­öB‡¼Ýª]²ÉÂgî‚M0 |¯×úõ­ ¥…ÅŽ³EIÉìB]Ó0󃫒º>ÎKÃÄÌÙÌu*”ÙTà=æ±´-mç[¨‡B•8Ö¢rc½³«Y°–ÌË'zÆ…VŒÄxMF‹BL:Г±üº+y×–‚¡L‚PnŽ‚‡_ql|ä¤5ß` †L:J΋ùá­[øÏ>»NúFѺS¬0hmì& ]-ìÀ/47Ô§$½~QžÞªrñò¹bËÿz^é¦\Ìd vŠÔAÜöQ}#“ålbô£š*äÁ–úàà 7.&‚cÂÌJ¸ÕŽ« f­,UÝ _»ß±i-n õQä,L÷qY–DbûSÓÉN\^Ɖͥ…rÕX‡ }ñ½23, ÜMªæ¤þŽ7­¸ŽilTû1< ™´Í.'Õ•ÉZWt IJ­Ië(õ"¢„ž]zíu¨RlòêÕÍÌð¤ßò|¾š£IöKt:œ´©ËúLnzbø7€sÚïïµnÌ.ÎÈ$<×Z¯íQÞÈäo܆T@hý˜Î_óÈ'WÚol¢Uâ§À]Ír¹MŽ»^bÜ'»Y2Ü&½‘$¸¯y#Ež{)àB³Y¾Ù ¤^ç¥qf”͵WµèXUa‘žë–˜.ƒ¾˜Õºþ&¡‹ 8Že‡Ùö‘.¶©ê×E#7¸¥q*hõò…9À{³¢ìÝvPz›@‚\:¥Ûã9‚¿‘wýMv`^¯Ûn$]ª Pü]@†_í»G[±±7CÑÄMä~žÊl_ÎçTœAH(Ú¯û¥CÆq¸NcáŽ#ëàGÂ{ã„·1Å[^z[ ³¢sí[3Oáwß6¯¾Y<µ5™æGbj ¦‡Ö›Qvbž>÷ *c¢Öè$ =ìÒñBe]Æ –0ùb!ÐA:B-„ (=k’/Ù©jæ• T*±}á¹ ð-ä„ܳK8.˜Ì'ËÓÔCªšrîœÒ°¦ÁõÜC€gØ-zA±ØV¿£ÄÞ?„»”ä磴¸G3©ã„Ø–Ûô'´¸°1jɾUЬ$vw°Aó¨;9ÑÙ?vŠ''‰ùàB@M!X"ËÙh˃|Ã#µFGo#Ø\iz8p·”e˜WgY¥Î­™Õ’à(榼zËÙhã¹E¼Äe‹<ˆ#¸ÓC¨‰§Ÿ;xL¦Dõ¾ m¦if?ØÄ¨1×{Z¬™y—ÿ Ùžëqkc£Á¢¢j<³”xpÇļç2$暣Òënëì 嬬½9„˜¤*9~²ÒÀ§¢ðT,}”lô¶ãõHûÇ|/’¿5¥Øt¢»s˜ü‡…VÏ´8„s{Ù]kŒà†#ñY5ö÷øoÖ=ͧa/!6=E*©)¿(Ê‘=Qá­;a†>S$f†RçiÅ6v¾ˆc5û}ƒµØ†·™“ó}Zh œÊE—54Ëf}ï ›ü ÙÌ5­ãêjš1@†g ¬m…!²F=@-~µuâWMrijM®ÝÑ(.¿R‹¾ÃWR*ì5?•blV*¸â”ÄaM{-ÞÜv#w…6jžii¾S¢êÆ$‰®Ê«’éÃÚ\:ÅŠ­ÖQå˜ukK#U¹ ÖwPIœb¥xvÀO½ýâ|T .$%S×£´„‹Xë·GÌÜ/ÛTÎ{…ÇLÂêy„á­Ýð.È…ŸÜ¯_^ýFæjÖÑ5«¹³ˆÝsÑÆúJ§ûST]Çè[>Ï!]2Z=¹Ntˆºû„P4é¬ÈÓ!D̳ Cñ´4·ç‡ï'ÓôÀÊîѳ4Ǧvhü´¹;r„Q§¡õˆ[4Gmº:´}×|ä<-QEÍ,:ÇŸK褛œHtæÿírÂhƉ gÄòaÐôš³T0Ì _À+šÁðä1…'áêÎ_!Ó™çð|ª ÚGÜ&‰:?ÅK×÷Câ‰Bú¶É‚Zd¾y‘b«Ä¦!A£ëÍ€f˜½N‡ ?ÀL1}pfÁðTN²´º”Þª'-ñdÛ"Ïû0ž%Rt×Læ4Ã…¡è0FÎhh¶±jÂ4hfRñç=àóZÈ'*k öK•˜‡]† 1wªd6h2Økf;Īáß«;ØT4^ÔkÆ1ÓK kV}P0°æneÜ9аJ×—“ò°±¦K¢Œ8nßCÌc´ó½Kæñ=Ù*quhuDàaFG»Ô;ZSªNK§…¡™kˆ" ¢ef3(²ÝÌo€ÙÔ•…ep#jÆtõ¢ªrƈže¨ÇGü±Ðcj{8à %SæÄ™àZç†ÄÏÒ†Sð£¤eÓ°*K½jWÇË~Jå»3kGÛ€àN¬òQ¶ Œðx†I²*ÍnS°À&¢%%j/J”ÁÚ“÷g¤ËEzexÀV›¢} Þ–´dî9báœë—ÊÀ*üdPD6+T ˜;ã:Äã\¬íª7Ý]w–•CüäD5 ‘øPY’ Œ¨,©Ñ[= ùÖ…±,ä‰篵¾ý¹yñ{˜9æå6 ZŸ ÕQ²ßän8è¼äþ'ÌMu- R·|K†_âVÓHúûH§hãéYhJ¦°ü¢˜Œ|ê°œ=wD0|}ßSk®¶žˆ½¼ÄõÕÝ·]sàœÊ@)x·ùèh¿éBïû?ÿAºfD¯ô:×}?â‰aa˜Í%¼iìŒÄ9ÊgÙdn¶ mxµp‡2ðL62‰ã\3ò̤p…Ëîôë èð Ç2ÖQÒ;_K ¿¡6ÎXòöö9ïõï5®*"R6œJ}; bv7MÈM¹_@œfç‚ÇnôŠÒp9ï>Ã;O½kG.é!(RKîb ¹)œë9UD¹?6¢!ŒÁyÙK)IB9—#| ‰CÃ%£K#ià]ÁÑàsœ5Î_SÓ”|¹Cýu玺køñέwçV^·ºÐx#²ç¼J]+-Þ½‰J‹ª ˜?¦ƒµv=œÈ –%Áxnßꃰ(Ð*ñG¶ÄϤ:@kô‰%€&zANó„ •úG~mš*ç‘é©7ê ¿¥zí DÒ¦C½ÝXc“°æ*yÅ2ìɉt•TiKá7érSKåÙL£l‘æ&’—ðöb$…Ó>)Nãõ“ã„Z¦3¼d¤ÊEd\ÇV,a&žÝU˜)vÊÃtãêPF•—!õ›Áo²á«ðm˜-|²“|÷Í]z¤ZNû 9“àñ0wôO/^td¿ãÓ#édÉWu¬åI1ËÚw›oýð,7jæØ SÈ a•FHªM½Fç®èìAbî9GnÈ´ƒk(¡ä^{ýï†2,T7?ŠÄÚ9ÀɺfÜ4€¸ò+$Úže¥íŒ”WÃe±fÌ\ 4êîÈhYÊz½860à†C%ìô¼Âë9%rFŽK|jÙKìÜ< •[FîY\Ùž¤ð‚Yå<¾€OyÌ*.‡Žr`d cû‡­Æá4³C†¿NçM“ZxWêB!+Y¸M¸ùMl‡¶Î-) ‚¸ŠAaּʘ±\èšÓÿQJCü#?R%6ôY:…à:pe$Æ© Yí ;Ë1K auœ˜ãŸZíX¢7R1áT.£ÍÛ…A@tñj!d&Ôºô\Ü^CN\^ÆéqV,zXyÞYÑL‚j'à3‡Ô²çzvÇÔ¨šöÁ‹¢˜+øÿ½UìH).²FE-‡Y®Ðü@@µKæ6¾¾¥†¢LÁ†í²;EÅQý@‘ÐŽ”P’qË©߂ƽÛèGÀ„¾Õ©P2”dÙ´.JsïÎ&ùÀ&!ŽîFŽ/y„¿e¤u|Ï<(ï"¨¹½úÕ«3¹îV,ÐYûc>¶W¶úgÙëQ~jHR_Ù•¼bõuóNyƒ©óTJZ°[ë4_”é0@qÅδ¨ Qg9A J“IZéªÛ¯YWæCÑês2Í+ÊÔ×&B‡sÀ‘¢!yß]Ü|!K˜[([Ë* DªŽ’ŒwmˆP¹ƒ6È„Íbø:Wª^6%’Í/åu§ù.iƒe@µ*–%0G‡XlA¿K‹–E[}‹:[m`>NØLHʪlâQ¨:ûF€|¤G¿ u¢7Ài`Ì#åxÃvPÕî$Ÿ&7ó ÄkT•k²76Âò‹‚|ûžpª­÷!änR¾ìòdôƒ€†\%Ùúˆ:*¸ñ"Ãü0,Ç Ú°ÜXýä¿©$]ÝÌp. 
maas-1.9.5+bzr4599.orig/contrib/maas-http.conf0000644000000000000000000000245613056115004017067 0ustar 00000000000000
SSLEngine On
# Do not rely on these certificates, generate your own.
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key

ExpiresActive On
ExpiresByType text/javascript "access plus 1 years"
ExpiresByType application/javascript "access plus 1 years"
ExpiresByType application/x-javascript "access plus 1 years"
ExpiresByType text/css "access plus 1 years"
ExpiresByType image/gif "access plus 1 years"
ExpiresByType image/jpeg "access plus 1 years"
ExpiresByType image/png "access plus 1 years"

Alias /MAAS/static/ /usr/share/maas/web/static/

ProxyPreserveHost on
ProxyPass /MAAS/ws "ws://localhost:5240/MAAS/ws" disablereuse=on
ProxyPass /MAAS/static/ !
ProxyPass /MAAS/ http://localhost:5240/MAAS/
ProxyPass /MAAS http://localhost:5240/MAAS/

RewriteEngine On
# Redirect (permanently) requests for /MAAS to /MAAS/.
RewriteRule ^/MAAS$ %{REQUEST_URI}/ [R=301,L]
maas-1.9.5+bzr4599.orig/contrib/maas-rsyslog.conf0000644000000000000000000000013113056115004017602 0ustar 00000000000000# Log MAAS messages to their own file.
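# (Illustrative addition, not part of the shipped file: the rule below routes
# any message whose syslog tag contains "maas" into the MAAS log. Assuming the
# standard util-linux logger tool is available, the filter can be exercised by
# hand:
#   logger -t maas "rsyslog filter test"
#   tail -n 1 /var/log/maas/maas.log
# )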
:syslogtag,contains,"maas" /var/log/maas/maas.log
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/0000755000000000000000000000000013056115004016534 5ustar 00000000000000
maas-1.9.5+bzr4599.orig/contrib/tgt.conf0000644000000000000000000000006213056115004015756 0ustar 00000000000000include /var/lib/maas/ephemeral/tgt.conf.d/*.conf
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/commissioning0000644000000000000000000000002113056115004021326 0ustar 00000000000000{{preseed_data}}
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin0000644000000000000000000000002113056115004017754 0ustar 00000000000000{{preseed_data}}
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin_userdata0000644000000000000000000000430313056115004021653 0ustar 00000000000000#cloud-config
debconf_selections:
  maas: |
    {{for line in str(curtin_preseed).splitlines()}}
    {{line}}
    {{endfor}}
{{if third_party_drivers and driver}}
early_commands:
  {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
  driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
  driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
  driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
  driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
  driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
{{endif}}
late_commands:
  maas: [wget, '--no-proxy', '{{node_disable_pxe_url|escape.shell}}', '--post-data', '{{node_disable_pxe_data|escape.shell}}', '-O', '/dev/null']
{{if third_party_drivers and driver}}
  driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
  driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
  driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
  driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
  driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
  driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
  driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
{{endif}}
power_state:
  mode: reboot
{{if node.split_arch()[0] in {'i386', 'amd64'} }}
apt_mirrors:
  ubuntu_archive: http://{{main_archive_hostname}}/{{main_archive_directory}}
  ubuntu_security: http://{{main_archive_hostname}}/{{main_archive_directory}}
{{else}}
apt_mirrors:
  ubuntu_archive: http://{{ports_archive_hostname}}/{{ports_archive_directory}}
  ubuntu_security: http://{{ports_archive_hostname}}/{{ports_archive_directory}}
{{endif}}
{{if enable_http_proxy}}
{{if http_proxy }}
apt_proxy: {{http_proxy}}
{{else}}
apt_proxy: http://{{server_host}}:8000/
{{endif}}
{{endif}}
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin_userdata_centos0000644000000000000000000000043313056115004023226 0ustar 00000000000000#cloud-config
debconf_selections:
  maas: |
    {{for line in str(curtin_preseed).splitlines()}}
    {{line}}
    {{endfor}}
late_commands:
  maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null']
power_state:
  mode: reboot
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin_userdata_custom0000644000000000000000000000043313056115004023245 0ustar 00000000000000#cloud-config
debconf_selections:
  maas: |
    {{for line in str(curtin_preseed).splitlines()}}
    {{line}}
    {{endfor}}
late_commands:
  maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null']
power_state:
  mode: reboot
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin_userdata_suse0000644000000000000000000000043313056115004022712 0ustar 00000000000000#cloud-config
debconf_selections:
  maas: |
    {{for line in str(curtin_preseed).splitlines()}}
    {{line}}
    {{endfor}}
late_commands:
  maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null']
power_state:
  mode: reboot
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/curtin_userdata_windows0000644000000000000000000000051613056115004023427 0ustar 00000000000000#cloud-config
debconf_selections:
  maas: |
    {{for line in str(curtin_preseed).splitlines()}}
    {{line}}
    {{endfor}}
late_commands:
  maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null']
license_key: {{node.get_effective_license_key()}}
power_state:
  mode: reboot
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/enlist0000644000000000000000000000047113056115004017757 0ustar 00000000000000#cloud-config
datasource:
  MAAS:
    timeout : 50
    max_wait : 120
    # there are no default values for metadata_url or oauth credentials
    # If no credentials are present, non-authed attempts will be made.
    metadata_url: {{metadata_enlist_url}}
output: {all: '| tee -a /var/log/cloud-init-output.log'}
maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/enlist_userdata0000644000000000000000000001403713056115004021652 0ustar 00000000000000#cloud-config
{{if enable_http_proxy}}
{{if http_proxy}}
apt_proxy: {{http_proxy}}
{{elif server_host}}
apt_proxy: http://{{server_host}}:8000/
{{endif}}
{{endif}}
system_info:
  package_mirrors:
    - arches: [i386, amd64]
      search:
        primary: ["http://{{main_archive_hostname}}/{{main_archive_directory}}"]
        security: ["http://{{main_archive_hostname}}/{{main_archive_directory}}"]
      failsafe:
        primary: "http://archive.ubuntu.com/ubuntu"
        security: "http://security.ubuntu.com/ubuntu"
    - arches: [default]
      search:
        primary: ["http://{{ports_archive_hostname}}/{{ports_archive_directory}}"]
        security: ["http://{{ports_archive_hostname}}/{{ports_archive_directory}}"]
      failsafe:
        primary: "http://ports.ubuntu.com/ubuntu-ports"
        security: "http://ports.ubuntu.com/ubuntu-ports"

misc_bucket:
 - &maas_enlist |
   #### IPMI setup ######
   # If IPMI network settings have been configured statically, you can
   # make them DHCP. If 'true', the IPMI network source will be changed
   # to DHCP.
   IPMI_CHANGE_STATIC_TO_DHCP="false"

   # In certain hardware, the parameters for the ipmi_si kernel module
   # might need to be specified. If you wish to send parameters, uncomment
   # the following line.
   #IPMI_SI_PARAMS="type=kcs ports=0xca2"

   TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX")
   IPMI_CONFIG_D="${TEMP_D}/ipmi.d"
   BIN_D="${TEMP_D}/bin"
   OUT_D="${TEMP_D}/out"
   PATH="$BIN_D:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

   mkdir -p "$BIN_D" "$OUT_D" "$IPMI_CONFIG_D"

   load_modules() {
      modprobe ipmi_msghandler
      modprobe ipmi_devintf
      modprobe ipmi_si ${IPMI_SI_PARAMS}
      udevadm settle
   }

   add_bin() {
      cat > "${BIN_D}/$1"
      chmod "${2:-755}" "${BIN_D}/$1"
   }
   add_ipmi_config() {
      cat > "${IPMI_CONFIG_D}/$1"
      chmod "${2:-644}" "${IPMI_CONFIG_D}/$1"
   }

   # Example config: enable BMC remote access (on some systems.)
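   # (Illustrative note, an addition and an assumption rather than shipped
   # text: files written into $IPMI_CONFIG_D by add_ipmi_config are handed to
   # maas-ipmi-autodetect below via --configdir, which commits them to the
   # BMC; uncommenting the sample section applies it during enlistment.)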
   #add_ipmi_config "02-global-config.ipmi" <<"END_IPMI_CONFIG"
   #Section Lan_Channel
   #   Volatile_Access_Mode                   Always_Available
   #   Volatile_Enable_User_Level_Auth        Yes
   #   Volatile_Channel_Privilege_Limit       Administrator
   #   Non_Volatile_Access_Mode               Always_Available
   #   Non_Volatile_Enable_User_Level_Auth    Yes
   #   Non_Volatile_Channel_Privilege_Limit   Administrator
   #EndSection
   #END_IPMI_CONFIG

   add_bin "maas-ipmi-autodetect-tool" <<"END_MAAS_IPMI_AUTODETECT_TOOL"
   {{for line in maas_ipmi_autodetect_tool_py.splitlines()}}
   {{line}}
   {{endfor}}
   END_MAAS_IPMI_AUTODETECT_TOOL

   add_bin "maas-ipmi-autodetect" <<"END_MAAS_IPMI_AUTODETECT"
   {{for line in maas_ipmi_autodetect_py.splitlines()}}
   {{line}}
   {{endfor}}
   END_MAAS_IPMI_AUTODETECT

   add_bin "maas-moonshot-autodetect" <<"END_MAAS_MOONSHOT_AUTODETECT"
   {{for line in maas_moonshot_autodetect_py.splitlines()}}
   {{line}}
   {{endfor}}
   END_MAAS_MOONSHOT_AUTODETECT

   add_bin "maas-enlist" <<"END_MAAS_ENLIST"
   {{for line in maas_enlist_sh.splitlines()}}
   {{line}}
   {{endfor}}
   END_MAAS_ENLIST

   # we could obtain the interface that booted from the kernel cmdline
   # thanks to 'IPAPPEND' (http://www.syslinux.org/wiki/index.php/SYSLINUX)
   url="{{server_url}}"

   # load ipmi modules
   load_modules
   pargs=""
   if $IPMI_CHANGE_STATIC_TO_DHCP; then
      pargs="--dhcp-if-static"
   fi
   power_type=$(maas-ipmi-autodetect-tool)
   case "$power_type" in
      ipmi)
         power_params=$(maas-ipmi-autodetect --configdir "$IPMI_CONFIG_D" ${pargs} --commission-creds) && [ -n "${power_params}" ] && power_params=${power_params%.}
         ;;
      moonshot)
         power_params=$(maas-moonshot-autodetect --commission-creds) && [ -n "${power_params}" ] && power_params=${power_params%.}
         ;;
   esac

   # Try maas-enlist without power parameters on failure for older versions of
   # maas-enlist without power parameter support
   maas-enlist --serverurl "$url" ${power_params:+--power-params "${power_params}" --power-type "${power_type}"}>/tmp/enlist.out ||\
      maas-enlist --serverurl "$url" >/tmp/enlist.out
   if [ $? -eq 0 ]; then
      msg="successfully enlisted to '$url'"
      echo
      echo "=== $(date -R): $msg"
      cat /tmp/enlist.out
      echo =============================================
      sleep 10
   else
      user="ubuntu"
      pass="ubuntu"
      echo "$user:$pass" | chpasswd
      bfile="/tmp/block-poweroff"
      { echo "#!/bin/sh"; echo "touch $bfile"; } > /etc/profile.d/A01-block.sh
      chmod 755 /etc/profile.d/A01-block.sh
      echo
      echo =============================================
      echo "failed to enlist system to maas server"
      echo "sleeping 60 seconds then poweroff"
      echo
      echo "login with '$user:$pass' to debug and disable poweroff"
      echo
      cat /tmp/enlist.out
      echo =============================================
      sleep 60
      [ -e $bfile ] && exit 0
   fi
 - &write_poweroff_job |
   cat >/etc/init/maas-poweroff.conf </lib/systemd/system/maas-poweroff.service < /tmp/install_udeb.sh && \
    chmod +x /tmp/install_udeb.sh && \
    /tmp/install_udeb.sh
d-i apt-setup/local0/repository string deb {{driver['repository']}} {{node.get_distro_series()}} main
d-i apt-setup/local0/comment string {{driver['comment']}}
d-i apt-setup/local0/key string file:///tmp/maas-{{driver['package']}}/repo_key.gpg
{{endif}}
{{enddef}}

{{def preseed}}
{{preseed_data}}
{{driver_preseed_data}}
{{enddef}}

{{def post_scripts}}
# Executes late command and disables PXE.
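# (Illustrative note, an addition: the late_command below is a single debconf
# value whose steps are chained with "&& \", so a failing step aborts the
# rest; the first in-target command installs a passwordless-sudo rule for the
# "ubuntu" user at /etc/sudoers.d/maas with mode 0440, and the wget call
# posts back to MAAS to disable further PXE booting of the node.)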
d-i preseed/late_command string true && \
    in-target sh -c 'f=$1; shift; echo $0 > $f && chmod 0440 $f $*' 'ubuntu ALL=(ALL) NOPASSWD: ALL' /etc/sudoers.d/maas && \
    in-target wget --no-proxy "{{node_disable_pxe_url|escape.shell}}" --post-data "{{node_disable_pxe_data|escape.shell}}" -O /dev/null && \
{{if third_party_drivers and driver}}
    in-target sh -c "echo blacklist {{driver['blacklist']}} >> /etc/modprobe.d/maas-{{driver['module']}}.conf" && \
    in-target sh -c "for file in /lib/modules/*; do depmod ${file##*/}; done"; \
    in-target update-initramfs -u; \
{{endif}}
    true
{{enddef}}

{{def install_udeb}}
{{if third_party_drivers and driver}}
#!/usr/bin/env sh
set -eu

REPO={{driver['repository']}}
KERNEL_VERSION=`uname -r`
TMPDIR=/tmp/maas-{{driver['package']}}
mkdir $TMPDIR

{{if http_proxy}}
export http_proxy={{http_proxy}} https_proxy={{http_proxy}}
{{endif}}

echo -en '{{''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}' > $TMPDIR/repo_key.gpg

# Retrieve the Release file and verify it against the repository's key.
wget -O $TMPDIR/Release $REPO/dists/{{node.get_distro_series()}}/Release
wget -O $TMPDIR/Release.gpg $REPO/dists/{{node.get_distro_series()}}/Release.gpg
gpgv --keyring $TMPDIR/repo_key.gpg $TMPDIR/Release.gpg $TMPDIR/Release

# Retrieve the Packages file and verify it against the Release file.
wget -O $TMPDIR/Packages $REPO/dists/{{node.get_distro_series()}}/main/debian-installer/binary-amd64/Packages
expected_sha256=`sed -n -e '/^SHA256:$/,$p' $TMPDIR/Release | grep 'main/debian-installer/binary-amd64/Packages$' | cut -f 2 -d ' '`
actual_sha256=`sha256sum $TMPDIR/Packages | cut -f 1 -d ' '`
if [ "$expected_sha256" != "$actual_sha256" ]
then
    echo "Packages sha256 value mismatch."
    echo "expected: $expected_sha256, actual: $actual_sha256"
    exit 1
fi

# Retrieve the udeb and verify it against the Packages file. This method
# of locating the sha256 sum for the udeb within the Packages file
# relies on the SHA256 line coming after the Filename line in the udeb's
# record in the Packages file.
filename=`grep ^Filename.*$KERNEL_VERSION $TMPDIR/Packages | cut -f 2 -d ' ' | sort -ru | head -n 1`
wget -O $TMPDIR/driver.udeb $REPO/$filename
basename=${filename##*/}
sed_expression="/$basename"'$/,$p'
expected_udeb_sha256=`sed -n -e $sed_expression $TMPDIR/Packages | grep ^SHA256: | cut -f 2 -d ' ' | head -n 1`
actual_udeb_sha256=`sha256sum $TMPDIR/driver.udeb | cut -f 1 -d ' '`
if [ "$expected_udeb_sha256" != "$actual_udeb_sha256" ]
then
    echo "udeb sha256 value mismatch."
    echo "expected: $expected_udeb_sha256, actual: $actual_udeb_sha256"
    exit 1
fi

# Install the udeb and load the kernel module.
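# (Illustrative note, an addition: udpkg is the debian-installer's minimal
# package tool, so the verified udeb is unpacked into the installer
# environment; depmod then rebuilds the module dependency map and modprobe
# loads the third-party driver for the remainder of the install.)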
udpkg -i $TMPDIR/driver.udeb depmod modprobe {{driver['module']}} {{endif}} {{enddef}} maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/preseed_master0000644000000000000000000000624413056115004021467 0ustar 00000000000000# MAAS - Ubuntu Server Installation # * Minimal install # * Cloud-init for bare-metal # * Cloud-init preseed data # Locale d-i debian-installer/locale string en_US.UTF-8 # No splash d-i debian-installer/splash boolean false # Keyboard layout d-i console-setup/ask_detect boolean false d-i console-setup/layoutcode string us d-i console-setup/variantcode string # Network configuration d-i netcfg/get_nameservers string d-i netcfg/get_ipaddress string d-i netcfg/get_netmask string 255.255.255.0 d-i netcfg/get_gateway string d-i netcfg/confirm_static boolean true # Local clock (set to UTC and use ntp) d-i clock-setup/utc boolean true d-i clock-setup/ntp boolean true d-i clock-setup/ntp-server string ntp.ubuntu.com d-i time/zone string Etc/UTC # Partitioning d-i partman/early_command string debconf-set partman-auto/disk `list-devices disk | head -n1` d-i partman-iscsi/mainmenu string finish d-i partman-auto/method string regular d-i partman-lvm/device_remove_lvm boolean true d-i partman-lvm/confirm boolean true d-i partman-md/device_remove_md boolean true d-i partman/confirm_write_new_label boolean true d-i partman/choose_partition select Finish partitioning and write changes to disk d-i partman/confirm boolean true d-i partman/confirm_nooverwrite boolean true d-i partman/default_filesystem string ext4 # Enable this if you want to override to a specific kernel, such as # linux-generic-lts-saucy, but Debian Installer should pick the right one based # on the boot kernel. #d-i base-installer/kernel/image string linux-server # User Setup d-i passwd/root-login boolean false d-i passwd/make-user boolean true d-i passwd/user-fullname string ubuntu d-i passwd/username string ubuntu d-i passwd/user-password-crypted password ! d-i passwd/user-uid string d-i user-setup/allow-password-weak boolean false d-i user-setup/encrypt-home boolean false d-i passwd/user-default-groups string adm cdrom dialout lpadmin plugdev sambashare # APT {{self.proxy}} # By default the installer requires that repositories be authenticated # using a known gpg key. This setting can be used to disable that # authentication. Warning: Insecure, not recommended. d-i debian-installer/allow_unauthenticated string false # Lang d-i pkgsel/language-packs multiselect en d-i pkgsel/update-policy select none d-i pkgsel/updatedb boolean true # Boot-loader d-i grub-installer/skip boolean false d-i lilo-installer/skip boolean false d-i grub-installer/only_debian boolean true d-i grub-installer/with_other_os boolean true d-i finish-install/keep-consoles boolean false d-i finish-install/reboot_in_progress note # Eject cdrom d-i cdrom-detect/eject boolean true # Do not halt/poweroff after install d-i debian-installer/exit/halt boolean false d-i debian-installer/exit/poweroff boolean false # maas client packages {{self.client_packages}} # maas preseed {{self.preseed}} # Post scripts. 
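# {{self.post_scripts}} expands to the late_command defined by the selected
# preseed template's post_scripts def (see above).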
{{self.post_scripts}} maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win20120000644000000000000000000002363513056115004027115 0ustar 00000000000000 OnError 1 100 Primary 2 true Primary true NTFS 1 1 NTFS 2 2 0 true 2 0 false OnError /IMAGE/NAME Windows Server 2012 SERVERSTANDARD true {{node.license_key}} OnError en-US en-US en-US en-US en-US ClearType true 3 Work true true true {{preseed_data['hostname']}} false 0 true all @FirewallAPI.dll,-28752 UTC {{preseed_data['hostname']}} 1 powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? -eq $false)" 2 msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} 0 maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hv0000644000000000000000000002364713056115004027456 0ustar 00000000000000 OnError 1 100 Primary 2 true Primary true NTFS 1 1 NTFS 2 2 0 true 2 0 false OnError /IMAGE/NAME Hyper-V Server 2012 SERVERHYPERCORE true en-US en-US en-US en-US en-US ClearType true 3 Work true true true {{preseed_data['hostname']}} false 0 true all @FirewallAPI.dll,-28752 UTC {{preseed_data['hostname']}} 1 powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? -eq $false)" 2 msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} 0 maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012hvr20000644000000000000000000002365213056115004027716 0ustar 00000000000000 OnError 1 100 Primary 2 true Primary true NTFS 1 1 NTFS 2 2 0 true 2 0 false OnError /IMAGE/NAME Hyper-V Server 2012 R2 SERVERHYPERCORE true en-US en-US en-US en-US en-US ClearType true 3 Work true true true {{preseed_data['hostname']}} false 0 true all @FirewallAPI.dll,-28752 UTC {{preseed_data['hostname']}} 1 powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? 
-eq $false)" 2 msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} 0 maas-1.9.5+bzr4599.orig/contrib/preseeds_v2/preseed_master_windows_amd64_generic_win2012r20000644000000000000000000002364013056115004027355 0ustar 00000000000000 OnError 1 100 Primary 2 true Primary true NTFS 1 1 NTFS 2 2 0 true 2 0 false OnError /IMAGE/NAME Windows Server 2012 R2 SERVERSTANDARD true {{node.license_key}} OnError en-US en-US en-US en-US en-US ClearType true 3 Work true true true {{preseed_data['hostname']}} false 0 true all @FirewallAPI.dll,-28752 UTC {{preseed_data['hostname']}} 1 powershell -NoLogo -Command "do{Start-Sleep 2; (new-object System.Net.WebClient).DownloadFile('http://www.cloudbase.it/downloads/CloudbaseInitSetup_Beta.msi','C:\Windows\Temp\CB.msi')} while ($? -eq $false)" 2 msiexec /i C:\Windows\Temp\CB.msi /qb MAAS_METADATA_URL={{preseed_data['maas_metadata_url']}} MAAS_OAUTH_CONSUMER_KEY={{preseed_data['maas_oauth_consumer_key']}} MAAS_OAUTH_TOKEN_KEY={{preseed_data['maas_oauth_token_key']}} MAAS_OAUTH_TOKEN_SECRET={{preseed_data['maas_oauth_token_secret']}} 0 maas-1.9.5+bzr4599.orig/docs/_static/0000755000000000000000000000000013056115004015231 5ustar 00000000000000maas-1.9.5+bzr4599.orig/docs/_templates/0000755000000000000000000000000013056115004015740 5ustar 00000000000000maas-1.9.5+bzr4599.orig/docs/about.rst0000644000000000000000000000375713056115004015463 0ustar 00000000000000About this documentation ======================== This is the documentation for Canonical's MAAS software. If you aren't sure what that is, you should probably skip everything else and head straight to the :ref:`orientation` section where it is explained. Like any software though, it can be frustrating if you don't know how bits of it work, how to achieve certain goals or what to do when things go wrong. Amongst its various sections, this manual aims to answer all those questions and plenty more you haven't even thought of yet. Getting it ---------- In a cunning move, the current documentation always lives, and is built from, the main MAAS source code (in the top-level ``docs/`` directory). That means that whatever MAAS package you have installed, or even if you are really living life on the edge and have checked out a development version from Launchpad, this documentation should be the latest and most appropriate version for the software you are running. However, it is also possible that there have been additional sections, or more helpful and clearer bits added since the package you are using was made. For this reason you can always find the latest documentation online here: `http://maas.ubuntu.com`_. .. _http://maas.ubuntu.com: http://maas.ubuntu.com Contributing ------------ If you have some extra information to add, or think you have spotted an error or something out of date, we really want to hear about it. Please `File a bug report`_ or `contact us directly`_. In addition you can talk to us on the Freenode IRC channel #maas. .. _File a bug report: https://bugs.launchpad.net/maas/+filebug .. _contact us directly: https://launchpad.net/~maas-maintainers/+contactuser If you see something wrong with this documentation, you can help us fix it. 
Download the source to MAAS by following the instructions in :doc:`the hacking guide <hacking>`, make your changes, and propose a merge against lp:maas on Launchpad. The documentation source lives in the top-level ``docs/`` directory. maas-1.9.5+bzr4599.orig/docs/api_authentication.rst0000644000000000000000000000504513056115004020211 0ustar 00000000000000.. -*- mode: rst -*- .. _api_authentication: API authentication ================== MAAS's API uses OAuth_ as its authentication mechanism. There isn't a third party involved (as in 3-legged OAuth), so the process used is what's commonly referred to as 0-legged OAuth: the consumer accesses protected resources by submitting OAuth-signed requests. .. _OAuth: http://en.wikipedia.org/wiki/OAuth Note that some API endpoints support unauthenticated requests (i.e. anonymous access). See the :doc:`API documentation <api>` for details. Examples ======== Here are two examples of how to perform an authenticated GET request to retrieve the list of nodes. The <consumer_key>, <key> and <secret> tokens are the three elements that compose the API key (API key = '<consumer_key>:<key>:<secret>'). Python ------ .. code:: python import oauth.oauth as oauth import httplib2 import uuid def perform_API_request(site, uri, method, key, secret, consumer_key): resource_tok_string = "oauth_token_secret=%s&oauth_token=%s" % ( secret, key) resource_token = oauth.OAuthToken.from_string(resource_tok_string) consumer_token = oauth.OAuthConsumer(consumer_key, "") oauth_request = oauth.OAuthRequest.from_consumer_and_token( consumer_token, token=resource_token, http_url=site, parameters={'oauth_nonce': uuid.uuid4().get_hex()}) oauth_request.sign_request( oauth.OAuthSignatureMethod_PLAINTEXT(), consumer_token, resource_token) headers = oauth_request.to_header() url = "%s%s" % (site, uri) http = httplib2.Http() return http.request(url, method, body=None, headers=headers) # API key = '<consumer_key>:<key>:<secret>' response = perform_API_request( 'http://server/MAAS/api/1.0', '/nodes/?op=list', 'GET', '<key>', '<secret>', '<consumer_key>') Ruby ---- .. code:: ruby require 'oauth' require 'oauth/signature/plaintext' def perform_API_request(site, uri, key, secret, consumer_key) consumer = OAuth::Consumer.new( consumer_key, "", { :site => site, :scheme => :header, :signature_method => "PLAINTEXT"}) access_token = OAuth::AccessToken.new(consumer, key, secret) return access_token.request(:get, uri) end # API key = "<consumer_key>:<key>:<secret>" response = perform_API_request( "http://server/MAAS/api/1.0", "/nodes/?op=list", "<key>", "<secret>", "<consumer_key>") maas-1.9.5+bzr4599.orig/docs/bootsources.rst0000644000000000000000000000673713056115004016711 0ustar 00000000000000.. -*- mode: rst -*- .. _bootsources: Boot images import configuration ================================ The configuration for where a region downloads its images is defined by a set of "sources". Each "source" defines a Simplestreams repository location (``url``) from which images can be downloaded and a ``keyring_filename`` (or ``keyring_data``) for validating index and image signatures from that location. For each source, you can define a series of filters (``selections``) specifying which images should be downloaded from that source. The following examples use the MAAS CLI to list the boot sources and the boot source selections.
Assuming the CLI ``PROFILE`` is the name of the profile under which you're logged in to the server:: $ maas $PROFILE boot-sources read [ { "url": "http://maas.ubuntu.com/images/ephemeral-v2/releases/", "keyring_data": "", "resource_uri": "", "keyring_filename": "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg", "id": 1 } ] $ maas $PROFILE boot-source-selections read 1 [ { "labels": [ "release" ], "arches": [ "amd64" ], "subarches": [ "*" ], "release": "trusty", "id": 1, "resource_uri": "" } ] Restricting the images being downloaded --------------------------------------- Let's say you want to add a previous LTS release to the images being downloaded. Starting from the configuration described above, you would need to: - Add the "Precise" selection (the selection '1' of the source '1'):: $ maas $PROFILE boot-source-selections create 1 os="ubuntu" release="precise" arches="amd64" subarches="*" labels="*" After you've selected the additional boot sources you need to tell MAAS to start the import process by running the command:: $ maas $PROFILE boot-resources import Downloading the images from a different source ---------------------------------------------- Let's say you want to import the images from a different location. You would need to change the source's url and keyring:: $ maas $PROFILE boot-source update 1 url="http://custom.url" keyring_filename="" keyring_data@=./custom_keyring_file { "url": "http://custom.url/", "keyring_data": "", "resource_uri": "", "keyring_filename": "", "id": 1 } Adding a source --------------- You can also add a new source:: $ maas $PROFILE boot-sources create url=http://my.url keyring_filename="" keyring_data@=./custom_keyring_file { "url": "http://my.url/", "keyring_data": "ZW1wdHkK", "keyring_filename": "", "id": 2, "resource_uri": "" } Inside that newly created source ('2') you can add selections:: $ maas $PROFILE boot-source-selections create 2 os="ubuntu" release="trusty" arches="amd64" subarches="*" labels='*' { "labels": ["*"], "arches": ["amd64"], "subarches": ["*"], "release": "trusty", "id": 3, "resource_uri": "" } Deleting a source ----------------- Let's say you need to delete the newly added source. To delete the source:: $ maas $PROFILE boot-source delete 2 maas-1.9.5+bzr4599.orig/docs/changelog.rst0000644000000000000000000011063313056115004016270 0ustar 00000000000000========= Changelog ========= 1.9.5 ===== LP: #1636251 [1.9] resolv.conf search path doesn't match the domain for the host. LP: #1603590 [1.9] MAAS does not allow link-local address for default gateway on ipv6 subnet. LP: #1657491 [1.9] Fix lease parser to properly remove released leases and update DNS zones when parsing finishes LP: #1553176 Fix issues when handling zonefile timestamps to match fixes from BIND. 1.9.4 ===== LP: #1584850 [1.9] DNS record added for non-boot interface IP when no address of that family exists on the boot interface LP: #1583715 [1.9] Ensure that restricted resources also perform meaningful authentication of clients. LP: #1584211 [1.9] Exclude RAM, floppy, and loopback devices from lsblk during commissioning. LP: #1585400 [1.9] Change detect_missing_packages in moonshot power driver to look for `ipmitool` instead of `ipmipower` LP: #1581318 [1.9] Append version to templateUrl in maas.js angular code.
LP: #1591093 [2.0,1.9] 3rd party HP drivers (archive hostname renamed) - deployment fails LP: #1597460 [1.9] MAAS 1.9 should only download filetypes from a SimpleStream is can process LP: #1567249 [1.9] If rack and region have different versions, the error is uninformative and confusing. LP: #1582070 Update the UEFI ARM64 local boot config to chainload the locally installed EFI binary. LP: #1597787 Send size with the extended partition for MBR partition tables. 1.9.3 ===== See https://launchpad.net/maas/+milestone/1.9.3 for full details. Bug Fix Update -------------- LP: #1521618 [1.9] wrong subnet in DHCP answer when multiple networks are present LP: #1536604 [1.9] IntegrityError while uploading leases - when there are reserved IP's on the dynamic range LP: #1580712 [1.9] dhcp update error: str object has no attribute mac LP: #1575567 [1.9] Re-commissioning doesn't detect storage changes LP: #1576194 [1.9] Enlistment via DHCP fails because DNS has bogus PTR record 1.9.2 ===== See https://launchpad.net/maas/+milestone/1.9.2 for full details. Bug Fix Update -------------- LP: #1573219 Deleting user in UI leads to 500 LP: #1508741 IPMI driver does not handle timeouts correctly LP: #1572070 MAAS 2.0 cannot link physical device interfaces to tagged vlans, breaking juju 2.0 multi-NIC containers LP: #1573046 14.04 images not available for commissioning as distrio-info --lts now reports xenial LP: #1571563 Can't override built in partitioning LP: #1552923 API allows attaching physical, bond interface to VLAN with a known tag (Inconsistent with UI) LP: #1566336 MAAS keeps IPs assigned to eth0, even after eth0 is enslaved into a bond LP: #1543195 unable to set mtu on default VLAN LP: #1560693 Migration 0188 dist-upgrade update failure LP: #1554747 CPU Utilization of postgresql thread reaches 100% for deleting a node from MaaS LP: #1499934 Power state could not be queried (vmware) LP: #1543707 MAAS 1.9+ should not allow whitespace characters in space names LP: #1543968 MAAS 1.9.0 allows non-unique space names LP: #1567213 devices results missing interface_set LP: #1568051 ThreadPool context entry failure causes thread pool to break LP: #1212205 get_file_by_name does not check owner LP: #1298772 MAAS API vulnerable to CSRF attack LP: #1379826 uuid.uuid1() is not suitable as an unguessable identifier/token LP: #1573264 Enlistment fails: archdetect not found. LP: #1556219 Discover correct IPMI driver in Power8. 1.9.1 ===== See https://launchpad.net/maas/+milestone/1.9.1 for full details. Bug Fix Update -------------- LP: #1523779 Fix grub-install error on deploying power8 machines. LP: #1526542 Skip block devices with duplicate serial numbers to fix multipath issue. LP: #1532262 Fix failure to power query requests for SM15K servers. LP: #1484696 Fix bug in apache2 maas config where it will reuse websocket connections to work around a bug in apache2 itself. 1.9.0 ===== Important announcements ----------------------- **New Networking Concepts and API's: Fabrics, Spaces and Subnets** With the introduction of new MAAS networking concepts, new API's are also been introduced. These are: * fabrics * spaces * subnets * vlans * fan-networks MAAS 1.9.0 will continue to provide backwards compatibility with the old network API for reading purposes, but moving forward, users are required to use the new API to manipulate fabrics, spaces and subnets. 
**Advanced Network and Storage Configuration only available for Ubuntu deployments** Users can now perform advanced network and storage configurations for nodes before deployment. The advanced configuration is only available for Ubuntu deployments. All other deployments using third party OS', including CentOS, RHEL, Windows and Custom Images, won't result in such configuration. **Re-commissioning required for upgraded MAAS** Now that storage partitioning and advanced configuration is supported natively, VM nodes in MAAS need to be re-commissioned. * If upgrading from MAAS 1.8, only VM nodes with VirtIO storage devices need to be re-commissioned. * If upgrading from MAAS 1.7, all nodes will need to be re-commissioned in order for MAAS to correctly capture the storage and networking devices. This does not affect nodes that are currently deployed. **Default Storage Partitioning Layout - Flat** With the introduction of custom storage, MAAS has also introduced the concept of partitioning layouts. Partitioning layouts allow the user to quickly auto-configure the disk partitioning scheme after first commissioning or re-commissioning (if selected to do so). The partitioning layouts are set globally on the `Settings` page. The current default Partitioning layout is 'Flat', maintaining backwards compatibility with previous MAAS releases. This means MAAS will take the first disk it finds in the system and use it as the root and boot disk. **Deployment with configured /etc/network/interfaces** Starting with MAAS 1.9, all node deployments will result in writing `/etc/network/interfaces` statically, by default. This increases MAAS' robustness and reliability as users no longer have to depend on DHCP for IP address allocation solely. MAAS will continue to provide IP addresses via DHCP, even though interfaces in `/etc/network/interfaces` may have been configured statically. Major new features ------------------ **Storage Partitioning and Advanced Configuration** MAAS now supports Storage Partitioning and Advanced Configuration natively. This allows MAAS to deploy machines with different Storage Layouts, as well as different complex partitioning configurations. Storage support includes: * LVM * Bcache * Software RAID levels 0, 1, 5, 6, 10. * Advanced partitioning Storace configuration is available both via the WebUI and API. For more information refer to :ref:`storage`. **Advanced Networking (Fabrics, Spaces, Subnetworks) and Node Network Configuration** MAAS now supports Advanced Network configuration, allowing users to not only perform advanced node network configuration, but also allowing users to declare and map their infrastructure in the form of Fabrics, VLANs, Spaces and Subnets. **Fabrics, Spaces, Subnets and Fan networks** MAAS now supports the concept of Fabrics, Spaces, Subnets and FANS, which introduce a whole new way of declaring and mapping your network and infrastructure in MAAS. The MAAS WebUI allows users to view all the declared Fabrics, Spaces, VLANs inside fabrics and Subnets inside Spaces. The WebUI does not yet support the ability to create new of these, but the API does. These new concepts replace the old `Network` concepts from MAAS' earlier versions. For more information, see :ref:`networking`. For more information about the API, see :ref:`api`. **Advanced Node Networking Configuration** MAAS can now perform the Node's networking configuration. Doing so, results in `/etc/network/interfaces` being written. 
Advanced configuration includes: * Assign subnets, fabrics, and IP to interfaces. * Create VLAN interfaces. * Create bond interfaces. * Change interface names. MAAS also allows configuration of node interfaces in different modes: * Auto Assign - Node interface will be configured statically and MAAS will auto assign an IP address. * DHCP - The node interface will be configured to DHCP. * Static - The user will be able to specify what IP address the interface will obtain, while MAAS will configure it statically. * Unconfigured - MAAS will leave the interface with LINK UP. **Curtin & cloud-init status updates** Starting from MAAS 1.9.0, curtin and cloud-init will now send messages to MAAS providing information regarding various of the actions being taken. This information will be displayed in MAAS in the `Node Event Log`. Note that this information is only available when using MAAS 1.9.0 and the latest version fo curtin. For cloud-init messages this information is only available when deploying Wily+. **Fabric and subnet creation** MAAS now auto-creates multiple fabrics per physical interface connected to the Cluster Controller, and will correctly create subnetworks under each fabric, as well as VLAN's, if any of the Cluster Controller interface is a VLAN interface. **HWE Kernels** MAAS now has a different approach to deploying Hardware Enablement Kernels. Start from MAAS 1.9, the HWE kernels are no longer coupled to subarchitectures of a machine. For each Ubuntu release, users will be able to select any of the available HWE kernels for such release, as well as set the minimum kernel the machine will be deployed with by default. For more information, see :ref:`hardware-enablement-kernels`. **CentOS images can be imported automatically** CentOS Image (CentOS 6 and 7) can now be imported automatically from the MAAS Images page. These images are currently part of the daily streams. In order to test this images, you need to use the daily image stream. This can be changed in the `Settings` page under `Boot Images` to `http://maas.ubuntu.com/images/ephemeral-v2/daily/`. Once changed, images can be imported from the MAAS Images page. The CentOS image will be published in the Releases stream shortly. Minor notable changes --------------------- **Minimal Config Files for Daemons** Starting from MAAS 1.9, minimal configuration files have been introduced for both, the MAAS Region Controller and the MAAS Cluster Controller daemons. * The Region Controller (`maas-regiond`) has now dropped the usage of `/etc/maas/maas_local_settings.py` in favor of `/etc/maas/regiond.conf`. Available configuration options are now `database_host`, `database_name`, `database_user`, `database_pass`, `maas_url`. MAAS will attempt to migrate any configuration on upgrade, otherwise it will use sane defaults. * The Cluster Controller (`maas-clusterd`) has now dropped the usage of `/etc/maas/pserv.yaml` and `/etc/maas/maas_cluster.conf` in favor of `/etc/maas/clusterd.conf`. Available configuration options are now `maas_url` and `cluster_uuid` only. MAAS will attempt to migrate any configuration on upgrade, otherwise it will use sane defaults. **Commissioning Actions** MAAS now supports commissioning actions. These allow the user to specify how commissioning should behave in certain escenarios. 
The commissioning actions available are: * Enable SSH during commissioning & Keep machine ON after commissioning * Keep network configuration after commissioning * Keep storage configuration after commissioning **Warn users about missing power control tools** MAAS now warns users about the missing power control tools. Each MAAS power driver use a set of power tools that may or may not be installed by default. If these power tools are missing from the system, MAAS will warn users. **Python Power Drivers** Starting from MAAS 1.9, MAAS is moving away from using shell scripts templates for Power Drivers. These are being migrated to MAAS' internal control as power drivers. Currently supported are APC, MSCM, MSFT OCS, SM15k, UCSM, Virsh, VMWare and IPMI. Remaining Power Drivers include AMT, Fence CDU's, Moonshot. Known Problems & Workarounds ---------------------------- **Garbage in the UI after upgrade** When upgrading from any earlier release (1.5, 1.7, 1.8), the user may see garbage in the UI. This is because the local cache is dirty and won't be refreshed automatically. MAAS 1.9.0 introduced a mechanism to refresh the cache automatically, but this will only take into effect upgrading from 1.9.0 to any later release. To work around this issue, the only thing required is to refresh the browsers cache, by hitting F5. See bug `1515380`_ for more information. .. _1515380: https://launchpad.net/bugs/1515380 Major bugs fixed in this release -------------------------------- See https://launchpad.net/maas/+milestone/1.9.0 for details. 1.9.0 (RC4) ============ Major bugs fixed in this release -------------------------------- LP: #1523674 Virsh is reporting ppc64le, not ppc64el. LP: #1524091 Don't require DHCP to be on if it should be off. LP: #1523988 No required packages for HMC as it uses pure python paramiko ssh client. LP: #1524007 Don't hold the cluster configuration lock while reloading boot images. LP: #1524924 Fix commissioning to correctly identify secondary subnets, VLAN's and fabrics. 1.9.0 (RC3) ============= Major bugs fixed in this release -------------------------------- LP: #1522898 "node-interface" API should just be "interface" - to allow devices to use it LP: #1519527 Juju 1.25.1 proposed: lxc units all have the same IP address after upgrade from 1.7/1.8. LP: #1522294 MAAS fails to parse some DHCP leases. LP: #1519090 DHCP interface automatically obtains an IP even when the subnet is unmanaged. LP: #1519077 MAAS assigns IP addresses on unmanaged subnets without consideration for some addresses known to be in use. LP: #1519396 MTU field is not exposed over the API for VLAN. LP: #1521833 Updating subnet name removes dns_server. LP: #1519919 CC looks for NICs with kernel module loaded and fall back doesn't check persistent device names. LP: #1522225 Migration 0181 can fail on upgrade if disks across nodes have duplicate serial numbers. LP: #1519247 Migration 0146 can fail on upgrade when migrating unmanaged subnets. LP: #1519397 [UI] Once a cache_set is created the UI fails with ERROR. LP: #1519918 [UI] "failed to detect a valid IP address" when trying to view node details. 1.9.0 (RC2) ============= Major bugs fixed in this release -------------------------------- LP: #1513085 Partitioning should align for performance. LP: #1516815 MAAS creates DNS record against Alias (eth0:1) if alias belongs to the PXE Interface. LP: #1515769 Failed to power on SM15k. LP: #1516722 Fix migration that might affect upgrade from 1.7. 
LP: #1516065 Failed to power control IPMI BMC that does not support setting the boot order. LP: #1517097 Constraints for acquiring interfaces argument should 'AND' key-value pairs for the same label. LP: #1517687 [UI] Cannot create a partition using the whole disk. LP: #1513258 [UI] CSS Broken for Bond Network Device. LP: #1516173 [UI] Prevent being able to unmount/remove filesystems while node is on. LP: #1510457 [UI] No error message if there is no boot and/or root disk configured for a node. 1.9.0 (RC1) ============= Major bugs fixed in this release -------------------------------- LP: #1515498 MAAS uses wrong IP for DNS record (creates against the bond). LP: #1515671 Local archive ignored for deployment. Works for commissioning and enlistment. LP: #1513485 Fix handling of multiple StaticIPAddress rows with empty IP addresses. LP: #1513485 Lease parser failure - doesn't update IP on the PXE NIC. LP: #1514486 Cannot claim sticky IP address for device with parent. LP: #1514883 Cluster downloads boot-images from managed network (pxe) instead of network used to connect to Region. LP: #1510917 Updating/modifying/assigning vlans, spaces, fabrics, subnets doesn't allow specifying names and lock to ID's. LP: #1513095 MAAS should prevent deploying nodes with PXE interface 'unconfigured'. LP: #1508056 MTU should be a set on the VLAN, and able to override on the interface. LP: #1439476 Internal Server Error when creating/editing cluster interface. LP: #1510224 Non-interactive way to change password. LP: #1513111 When a bond is created all IP address associated with the bond members should be removed. LP: #1487135 MAAS does not provide a dump of the config it passes to curtin for networking and storage. LP: #1512959 MAAS should not offer EXT3, rather VFAT, EXT2, EXT4. LP: #1505031 Network constraints for juju. LP: #1509535 Creating a partition or a Volume Group on the whole disk leaves free space. LP: #1511493 Should not allow partitions to be created on bcache device. LP: #1503475 Storage section should only be editable when Ready or Allocated. LP: #1512832 maasserver.api.tests.test_fannetworks.TestFanNetworksAPI.test_read fails randomly. LP: #1508754 Creating a logical volume on a partition that is too small almost works, resulting in strange error messages. LP: #1503925 [UI] Keep selected nodes selected after action. LP: #1515380 [UI] Refresh UI cache after an upgrade to avoid seeing garbage. LP: #1510106 [UI] Boot disk is not lighted nor can be changed. LP: #1510118 [UI] Can't remove / delete a partition with a filesystem under 'Available disks and partitions'. LP: #1510153 [UI] Creating a partition should allow to select filesystem and mountpoint. LP: #1510468 [UI] When selecting a device, ensure padding between buttons is 20px. LP: #1510455 [UI] Misaligned mount point column on used disks table. LP: #1510469 [UI] Align the individual storage actions with the name field, rather than the tickbox. LP: #1503479 [UI] can't add physical interface. LP: #1503474 [UI] Containers (lxc, kvm) data missing on node details. LP: #1513271 [UI] Unable to unmount a filesystem in the UI. LP: #1503536 [UI] Animation missing on show members and select node. LP: #1510482 [UI] Add tooltips to icons. LP: #1510486 [UI] Add tooltips to inactive buttons. 1.9.0 (beta2) ============= Major bugs fixed in this release -------------------------------- LP: #1511257 New capabilities for subnets, vlan, spaces and fabrics. LP: #1509077 Upgrade left a PXE NIC"s on nodes without a subnet associated causing deploy issues. 
LP: #1512109 DNS record doesn't get created against the PXE interface LP: #1510334 bcache cache_mode setting not configured on servers LP: #1510210 Administrators unable to delete users using the API LP: #1509536 Can create a VolumeGroup (vg0) without having created a partition on the boot disk LP: #1501400 set-boot-disk yields in a machine not being able to deploy LP: #1504956 Deploying Other OS' (CentOS, Windows) should not configure custom storage LP: #1509164 Add RAID 10 support LP: #1511437 MAAS should download grub from grub-efi-amd64-signed package instead of the archive path LP: #1510120 Fails to deploy with UEFI LP: #1507586 previous owner of node can use oauth creds to retrieve current owner's user-data LP: #1507630 IP range validation for too small ranges LP: #1511610 TestReleaseAutoIPs.test__calls_update_host_maps_for_next_ip_managed_subnet can fail randomly LP: #1511071 No way to disable maas-proxy LP: #1505034 [UI] HWE naming needs to be clearer LP: #1509476 [UI] Angular $digest loop issue on node details page LP: #1509473 [UI] New nodes interfaces doesn't show which interface is the PXE interface LP: #1510471 [UI] When partitioning, there should be 20px padding between the sizing fields LP: #1510467 [UI] On the available table, add model and serial to the name column LP: #1510466 [UI] On the available table, change “available space†to “size†for consistency LP: #1510472 [UI] when formatting/mounting, the button says “Format & Mount†this should just be “Mount†LP: #1503533 [UI] Tickbox on create bond networking LP: #1510447 [UI] On the file system table, change name to “File system†(lower case S) LP: #1510474 [UI] When creating bcache and raid, remove the empty column between the config fields and LP: #1510488 [UI] On the available table, make sure all buttons are lowercase LP: #1511174 [UI] Subnets filter doesn't show network, it shows name instead LP: #1509417 [UI] can't edit / add storage tags LP: #1510891 [UI] Hover state for networking doesn't work LP: #1510458 [UI] change "edit tag" link to icon storage LP: #1510629 [UI] Can no longer see the IP address PXE interface gets on commissioning 1.9.0 (beta1) ============= Major New Features ------------------ **Storage Configuration: LVM and RAID UI** Starting from MAAS 1.9.0 (beta1), MAAS now exposes custom storage configuration in the WebUI for the following: * LVM: Ability to easily create LVM. * RAID: Ability to create RAID 0, 1, 5, 6. Minor notable changes --------------------- **Fabric and subnet creation** Starting from MAAS 1.9.0 (beta1), MAAS now auto-creates multiple fabrics per physical interface connected to the Cluster Controller, and will correctly create subnetworks under each fabric, as well as VLAN's if any VLAN interface on the Cluster Controller is preset. Known Problems & Workarounds ---------------------------- **CentOS fails to deploy with LVM Storage layout** CentOS fails to deploy when deploying with an LVM storage layout. Provided that LVM is the default storage layout, every CentOS deployment will fail, unless this layout is changed to 'Flat' storage. To work around the problem, the default storage layout can be changed from `LVM` to `Flat` in MAAS' Networks page, under `Storage Layout` section. See bug `1499558`_ for more information. .. _1499558: https://launchpad.net/bugs/1499558 **Fail to deploy (boot) with UEFI** MAAS will successfully instal in a UEFI system, however, after deployment it won't boot onto the local disk. See bug `1510120`_ for more information. .. 
_1510120: https://launchpad.net/bugs/1510120 1.9.0 (alpha5) ============== Major New Features ------------------ **Storage Configuration: Partitioning and Bcache UI** Starting from MAAS 1.9.0 (alpha5), MAAS now exposes storage custom storage configuration in the WebUI for the following: * Partitioning: Ability to create and delete partitions. * Bcache: Ability to create cache sets and bcache devices, allowing multiple bcache devices to use the same cache set. Minor notable changes --------------------- **Warn users about missing power control tools** MAAS now warns users about the missing power control tools. Each MAAS power driver use a set of power tools that may or may not be installed by default. If these power tools are missing from the system, MAAS will warn users. Known Problems & Workarounds ---------------------------- **CentOS fails to deploy with LVM Storage layout** CentOS fails to deploy when deploying with an LVM storage layout. Provided that LVM is the default storage layout, every CentOS deployment will fail, unless this layout is changed to 'Flat' storage. To work around the problem, the default storage layout can be changed from `LVM` to `Flat` in MAAS' Networks page, under `Storage Layout` section. See bug `1499558`_ for more information. .. _1499558: https://launchpad.net/bugs/1499558 **Juju 1.24.6 bootstrap failure - Changing MAAS configured /etc/network/interfaces** Juju 1.24.6 (or less), assumes that it can manage the MAAS deployed node's network configuration. Juju changes /etc/network/interfaces and disables bringing up eth0 on boot, to create a bridge to support LXC. However, provided that MAAS / curtin now writes the node's network configuration, Juju is unable to successfully finish the creation of the bridge, but in the process, it disables auto bring up of eth0. Starting from Juju 1.24.7+, Juju has grown support to correctly manage a /etc/network/interfaces that has been created after deployment with MAAS 1.9.0. See bug `1494476`_ for more information. .. _1494476: https://launchpad.net/bugs/1494476 1.9.0 (alpha4) ============== Minor notable changes --------------------- * Various UI cosmetic fixes and improvements. * Do not create MBR larger than 2TiB for LVM. * Various concurrency fixes and improvements to robustness. Known Problems & Workarounds ---------------------------- **CentOS fails to deploy with LVM Storage layout** CentOS fails to deploy when deploying with an LVM storage layout. Provided that LVM is the default storage layout, every CentOS deployment will fail, unless this layout is changed to 'Flat' storage. To work around the problem, the default storage layout can be changed from `LVM` to `Flat` in MAAS' Networks page, under `Storage Layout` section. See bug `1499558`_ for more information. .. _1499558: https://launchpad.net/bugs/1499558 **Juju 1.24+ bootstrap failure - Changing MAAS configured /etc/network/interfaces** Juju 1.24+, by default, assumes that it can manage the MAAS deployed node's network configuration. Juju changes /etc/network/interfaces and disables bringing up eth0 on boot, to create a bridge to support LXC. However, provided that MAAS / curtin now write the node's network configuration, Juju is unable to successfully finish the creation of the bridge, but in the process, it disables auto bring up of eth0. The machine will deploy successfully, however, after a reboot eth0 will never be brought back up due to the changes made by Juju. This will prevent Juju from SSH'ing into the machine and finishing the boostrap. 
To prevent this from happening, `disable-network-management: true` needs to be used. Note that this will prevent the deployment of LXC containers as they have to DHCP. See bug `1494476`_ for more information. .. _1494476: https://launchpad.net/bugs/1494476 1.9.0 (alpha3) ============== Major New Features ------------------ **Advanced Node Network Configuration UI** Starting from MAAS 1.9.0 (alpha3), MAAS can now do the Node's Network configuration. Doing such configuration will result in having `/etc/network/interfaces` writen. Advanced configuration UI includes: * Create VLAN interfaces. * Create bond interfaces. * Create Alias interfaces. * Change interface names. **Subnetworks page UI** Starting from MAAS 1.9.0 (alpha3), MAAS can now show the new Subnets tab in the UI. This allow users to view: * Fabrics * Spaces * VLANs in fabrics. * Subnets in Spaces. Known Problems & Workarounds ---------------------------- **CentOS fails to deploy with LVM Storage layout** CentOS fails to deploy when deploying with an LVM storage layout. Provided that LVM is the default storage layout, every CentOS deployment will fail, unless this layout is changed to 'Flat' storage. To work around the problem, the default storage layout can be changed from `LVM` to `Flat` in MAAS' Networks page, under `Storage Layout` section. See bug `1499558`_ for more information. .. _1499558: https://launchpad.net/bugs/1499558 **Juju 1.24+ bootstrap failure - Changing MAAS configured /etc/network/interfaces** Juju 1.24+, by default, assumes that it can manage the MAAS deployed node's network configuration. Juju changes /etc/network/interfaces and disables bringing up eth0 on boot, to create a bridge to support LXC. However, provided that MAAS / curtin now write the node's network configuration, Juju is unable to successfully finish the creation of the bridge, but in the process, it disables auto bring up of eth0. The machine will deploy successfully, however, after a reboot eth0 will never be brought back up due to the changes made by Juju. This will prevent Juju from SSH'ing into the machine and finishing the boostrap. To prevent this from happening, `disable-network-management: true` needs to be used. Note that this will prevent the deployment of LXC containers as they have to DHCP. See bug `1494476`_ for more information. .. _1494476: https://launchpad.net/bugs/1494476 1.9.0 (alpha2) ============== Important announcements ----------------------- **Installation by default configures /etc/network/interfaces** Starting from MAAS 1.9.0 (alpha2), all Ubuntu deployments will result with static network configurations. Users will be able to interact with the API to further configure interfaces. **Introduction to Fabrics, Spaces and Subnets introduces new Network API** With the introduction of the concepts of Fabrics, Spaces and Subnets starting from MAAS 1.9.0 (alpha2), MAAS also introduces new API's for: * fabrics * spaces * subnets * vlans * fan-networks MAAS 1.9.0 will continue to provide backwards compatibility with the old network API for reading purposes, but moving forward, users are required to use the new API to manipulate fabrics, spaces and subnets. Major New Features ------------------ **Advanced Node Network Configuration** Starting from MAAS 1.9.0 (alpha2), MAAS can now do the Node's Network configuration. Doing such configuration will result in having `/etc/network/interfaces` writen. Advanced configuration includes: * Assign subnets, fabrics, and IP to interfaces. * Create VLAN interfaces. * Create bond interfaces. 
* Change interface names. **Fabrics, Spaces, Subnets and Fan networks** Starting from MAAS 1.9.0 (alpha2), MAAS now supports the concept of Fabrics, Spaces, Subnets and FANS. These new concepts replaces the old `Network` concepts from MAAS' earlier versions. For more information, see :ref:`networking`. For more information about the API, see :ref:`api`. **Curtin & cloud-init status updates** Starting from MAAS 1.9.0 (alpha2), curtin and cloud-init will now send messages to MAAS providing information regarding various of the actions taken. This information will be displayed in MAAS in the `Node Event Log`. Note that this information is only available when using MAAS 1.9.0 and the latest version fo curtin. For cloud-init messages this information is only available when deploying Wily. Minor notable changes --------------------- **Commissioning Actions** MAAS now supports commissioning actions. These allow the user to specify how commissioning should behave in certain escenarios. The commissioning actions available are: * Enable SSH during commissioning * Keep machine ON after commissioning * Keep network configuration after commissioning * Keep storage configuration after commissioning **CentOS images can be imported automatically** CentOS Image (CentOS 6 and 7) can now be imported automatically from the MAAS Images page. These images are currently part of the daily streams. In order to test this images, you need to use the daily image stream. This can be changed in the `Settings` page under `Boot Images` to `http://maas.ubuntu.com/images/ephemeral-v2/daily/`. Once changed, images can be imported from the MAAS Images page. Known Problems & Workarounds ---------------------------- **CentOS fails to deploy with LVM Storage layout** CentOS fails to deploy when deploying with an LVM storage layout. Provided that LVM is the default storage layout, every CentOS deployment will fail, unless this layout is changed to 'Flat' storage. To work around the problem, the default storage layout can be changed from `LVM` to `Flat` in MAAS' Networks page, under `Storage Layout` section. See bug `1499558`_ for more information. .. _1499558: https://launchpad.net/bugs/1499558 **Juju 1.24+ bootstrap failure - Changing MAAS configured /etc/network/interfaces** Juju 1.24+, by default, assumes that it can manage the MAAS deployed node's network configuration. Juju changes /etc/network/interfaces and disables bringing up eth0 on boot, to create a bridge to support LXC. However, provided that MAAS / curtin now write the node's network configuration, Juju is unable to successfully finish the creation of the bridge, but in the process, it disables auto bring up of eth0. The machine will deploy successfully, however, after a reboot eth0 will never be brought back up due to the changes made by Juju. This will prevent Juju from SSH'ing into the machine and finishing the boostrap. To prevent this from happening, `disable-network-management: true` needs to be used. Note that this will prevent the deployment of LXC containers as they have to DHCP. See bug `1494476`_ for more information. .. _1494476: https://launchpad.net/bugs/1494476 1.9.0 (alpha1) ============== Important announcements ----------------------- **LVM is now the default partitioning layout** Starting from MAAS 1.9, all of the deployments will result on having LVM configure for each of the machines. A Flat partitioning layout is not longer used by default. (This, however, can be changed in the MAAS Settings Page). 
**Re-commissioning required from VM's with VirtIO devices** Starting from MAAS 1.9, storage partitioning and advance configuration is supported natively (see below). In order for MAAS to correctly map VirtIO devices in VM's, these VM nodes need to be re-commissioned. If not re-comissioned, MAAS will prevent the deployment until done so. Previously deployed nodes won't be affected, but will also have to be re-commissioned if released. Major new features ------------------ **Storage Partitioning and Advanced Configuration** MAAS now natively supports Storage Partitioning and Advanced Configuration. This allows MAAS to deploy machines with different Storage Layouts, as well as different complext partitioning configurations. Storage support includes: * LVM * Bcache * Software Raid * Advanced partitioning For more information refer to :ref:`storage`. Minor notable changes --------------------- **Minimal Config Files for Daemons** Starting from MAAS 1.9, minimal configuration files have been introduced for both, the MAAS Region Controller and the MAAS Cluster Controller daemons. * The Region Controller (`maas-regiond`) has now dropped the usage of `/etc/maas/maas_local_settings.py` in favor of `/etc/maas/regiond.conf`. Available configuration options are now `database_host`, `database_name`, `database_user`, `database_pass`, `maas_url`. MAAS will attempt to migrate any configuration on upgrade, otherwise it will use sane defaults. * The Cluster Controller (`maas-clusterd`) has now dropped the usage of `/etc/maas/pserv.yaml` and `/etc/maas/maas_cluster.conf` in favor of `/etc/maas/clusterd.conf`. Available configuration options are now `maas_url` and `cluster_uuid` only. MAAS will attempt to migrate any configuration on upgrade, otherwise it will use sane defaults. **HWE Kernels** MAAS now has a different approach to deploying Hardware Enablement Kernels. Start from MAAS 1.9, the HWE kernels are no longer coupled to subarchitectures of a machine. For each Ubuntu release, users will be able to select any of the available HWE kernels for such release, as well as set the minimum kernel the machine will be deployed with by default. For more information, see :ref:`hardware-enablement-kernels`. **Python Power Drivers** Starting from MAAS 1.9, MAAS is moving away from using shell scripts templates for Power Drivers. These are being migrated to MAAS' internal control as power drivers. Currently supported are APC, MSCM, MSFT OCS, SM15k, UCSM, Virsh, VMWare and IPMI. Remaining Power Drivers include AMT, Fence CDU's, Moonshot. Known Problems & Workarounds ---------------------------- **Fail to deploy Trusty due to missing bcache-tools** In order to correctly perform storage partitioning in Trusty+, the new version of curtin used by MAAS requires bcache-tools to be installed. However, these tools are not available in Trusty, hence causing MAAS/curtin deployment failures when installing Trusty. An SRU in Ubuntu Trusty for these tools is already in progress. To work around the problem, a curtin custom configuration to install bcache-tools can be used in `/etc/maas/preseeds/curtin_userdata`:: {{if node.get_distro_series() in ['trusty']}} early_commands: add_repo: ["add-apt-repository", "-y", "ppa:maas-maintainers/experimental"] {{endif}} See bug `1449099`_ for more information. .. _1449099: https://bugs.launchpad.net/bugs/1449099 **Fail to deploy LVM in Trusty** MAAS fail to deploy Ubuntu Trusty with a LVM Storage layout, as curtin will fail to perform the partitioning. See bug `1488632`_ for more information. 
.. _1488632: https://bugs.launchpad.net/bugs/1488632 maas-1.9.5+bzr4599.orig/docs/cluster-configuration.rst0000644000000000000000000001463313056115004020672 0ustar 00000000000000.. -*- mode: rst -*- .. _cluster-configuration: Cluster Configuration ===================== Before any of MAAS's features can be used for the first time, you must have a cluster controller and configure it to manage at least one network of nodes. Each node in the cluster should be attached to one of these networks. (In addition, a node can be attached to any number of networks that are not managed by MAAS.) Managing a network normally means that MAAS will serve DHCP from the cluster controller. **Do this only on a network that was set up with this in mind.** Running your own DHCP server that competes with an existing one that's already managing the network can cause serious disruption, and it can be hard for administrators to track the source of the problem. Worse, the problems may not become immediately noticeable. Make sure you understand the implications of running a DHCP server before doing this. If MAAS detects any DHCP servers already running on these networks, it will show them on the cluster's configuration page. Network requirements -------------------- The cluster controller manages a network of nodes through one of its interfaces as defined in MAAS. Cluster interfaces are discovered automatically, though this may not happen e.g. if the network interface was down when MAAS was installed. When a cluster controller manages nodes on a network through one of its interfaces, the nodes must be on the same subnet as the cluster interface. This is for two reasons: 1. If the cluster controller is configured to manage DHCP, the nodes must be able to configure their own network interfaces using MAAS's DHCP server. This means that either they must be on the same subnet, or that DHCP packets are being specially routed between the nodes' subnet and MAAS's DHCP server. 2. The cluster controller must be able to find nodes' IP addresses based on their MAC addresses, by inspecting its ARP cache. This implies that the nodes and the cluster controller must be on the same physical subnet. Cluster acceptance ------------------ If you install your first cluster controller on the same system as the region controller, as is the case when you install the full "maas" ubuntu package, it will be automatically accepted by default (but not yet configured, see below). Any other cluster controllers you set up will show up in the user interface as "pending," until you manually accept them into the MAAS. To accept a cluster controller, visit the "pending clusters" section of the Clusters page: .. image:: media/cluster-accept.png You can either click on "Accept all" or click on the edit icon to edit the cluster. After clicking on the edit icon, you will see this page: .. image:: media/cluster-edit.png Here you can change the cluster's name as it appears in the UI, its DNS zone, and its status. Accepting the cluster changes its status from "pending" to "accepted." Now that the cluster controller is accepted, you can configure one or more of its network interfaces to be managed by MAAS. This will enable the cluster controller to manage nodes attached to those networks. The next section explains how to do this and what choices are to be made. Cluster interface management ---------------------------- MAAS automatically recognises the network interfaces on each cluster controller. 
Some (though not necessarily all) of these will be connected to networks where you want to manage nodes. A connection between a cluster controller and a network is called a `cluster interface`. Each cluster interface is built on exactly one network interface, though it's possible for two cluster interfaces to use the same network interface card. We recommend letting your cluster controller act as a DHCP server for the networks it manages, by configuring the corresponding cluster interfaces in the MAAS user interface. As an example, we will configure the cluster controller to manage a network on interface ``eth0``. Click on the edit icon for the cluster interface on network interface ``eth0``, which takes us to this page: .. image:: media/cluster-interface-edit.png Here you can select to what extent you want the cluster controller to manage the network: #. DHCP only - this will run a DHCP server on your cluster #. DHCP and DNS - this will run a DHCP server on the cluster *and* configure the DNS server included with the region controller so that it can be used to look up hosts on this network by name. If you set the interface to be managed, you now need to provide all of the usual DHCP details in the input fields below. Once done, click "Save interface". The cluster controller will now be able to boot nodes on this network. There is also an option to leave the network unmanaged. Use this for networks where you don't want to manage any nodes. Or, if you do want to manage nodes but don't want the cluster controller to serve DHCP, you may be able to get by without it. This is explained in :ref:`manual-dhcp`. .. _static-ip-address: Static vs Dynamic IP Addresses ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ On the cluster interface edit page, there are fields to enter both a dynamic and a static range of IP addresses. It is mandatory to enter the dynamic range if you are managing DHCP on this interface, but the static range is optional. Dynamic addresses are given to both unknown devices booting on this network, and Nodes that are commissioning. Dynamic addresses are allocated by the DHCP server and may change at any time. Static addresses are given to Nodes when they are allocated to a user and started up, and returned to the pool only when the Node is de-allocated. Static addresses are allocated by MAAS, and are guaranteed not to change while allocated. If you are managing DNS on this network, only static IP addresses are given DNS entries with the Node's name. If you do not configure the static range, then nodes will only get dynamic IP addresses and will never get a DNS entry. IP addresses in the static range are also available for reservation by users using the :doc:`api`. This prevents MAAS from allocating the reserved IP to any Nodes or other devices, which allows users to assign it freely to their own hosts/devices on the same network, such as LXC containers. Multiple networks ----------------- A single cluster controller can manage more than one network, each from a different cluster interface. This may help you scale your cluster to larger numbers of nodes, or it may be a requirement of your network architecture. maas-1.9.5+bzr4599.orig/docs/conf.py0000644000000000000000000002407713056115004015114 0ustar 00000000000000# -*- coding: utf-8 -*- # # MAAS documentation build configuration file, created by # sphinx-quickstart on Thu Jan 19 14:48:25 2012. # # This file is execfile()d with the current directory set to its containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # Import maas' settings. from os import environ environ.setdefault("DJANGO_SETTINGS_MODULE", "maas.settings") # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import sys, os # Include '.' in the path so that our custom extension, 'versions', can # be found. sys.path.insert(0, os.path.abspath('.')) # -- Multiple documentation options. # Add a widget to switch between different versions of the documentation to # each generated page. add_version_switcher = False # In order for the version widget to be able to redirect correctly to the # other versions of the documentation, each version of the documentation # has to be accessible at the following addresses: # // -> documentation for trunk. # /1.4/ -> documentation for 1.4. # etc. doc_prefix = 'docs' # Path of the JSON document, relative to homepage of the documentation for trunk # (i.e. '//'), with the list of the versions to include in the # version switcher widget. versions_path = '_static/versions.js' # Versions to include in the version switcher. # Note that the version switcher fetches the list of the documentation versions # from the list published by the trunk documentation (i.e. in '//'). # This means the following list is meaningful only for trunk. # The first item should be the development version. from collections import OrderedDict doc_versions = OrderedDict([ ('dev', 'Development trunk'), ('1.8', 'MAAS 1.8'), ('1.7', 'MAAS 1.7'), ('1.6', 'MAAS 1.6'), ('1.5', 'MAAS 1.5'), ('1.4', 'MAAS 1.4'), ('1.3', 'MAAS 1.3'), ('1.2', 'MAAS 1.2'), ]) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'versions', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'MAAS' copyright = u'2012-2015, MAAS Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = doc_versions.items()[0][0] # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = ['_build', '_templates'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # AutoDoc autodoc_default_flags = ['members', 'show-inheritance'] autodoc_member_order = 'bysource' autodoc_docstring_signature = True # AutoSummary autosummary_generate = True # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'maas' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_theme_path = ['_templates'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'media/maas-logo-200.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'media/maas.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
#html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'MAASdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). latex_paper_size = 'a4' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'MAAS.tex', u'MAAS Documentation', u'MAAS Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('man/maas.8', 'maas', u'MAAS API commandline utility', [u'Canonical 2013-2014'], 8), ('man/maas-region-admin.8', 'maas-region-admin', u'MAAS administration tool', [u'Canonical 2013-2014'], 8) ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} # Gather information about the branch and the build date. from subprocess import check_output, CalledProcessError try: bzr_last_revision_number = check_output(['bzr', 'revno']) bzr_last_revision_date = check_output(['bzr', 'version-info', '--template={date}', '--custom']) bzr_build_date = check_output(['bzr', 'version-info', '--template={build_date}', '--custom']) except (CalledProcessError): # not a bzr repository bzr_last_revision_number = 'unknown' bzr_last_revision_date = check_output(['date', '-u', '+%Y-%m-%d %H:%M:%S %z']) bzr_build_date = bzr_last_revision_date # Populate html_context with the variables used in the templates. html_context = { 'add_version_switcher': 'true' if add_version_switcher else 'false', 'versions_json_path': '/'.join(['', doc_prefix, versions_path]), 'doc_prefix': doc_prefix, 'bzr_last_revision_date': bzr_last_revision_date, 'bzr_last_revision_number': bzr_last_revision_number, 'bzr_build_date': bzr_build_date, } maas-1.9.5+bzr4599.orig/docs/configure.rst0000644000000000000000000002275013056115004016324 0ustar 00000000000000Additional Configuration ======================== .. _manual-dhcp: Manual DHCP configuration ------------------------- DHCP is needed in order for MAAS to boot and control nodes. However, there are some circumstances under which you may not wish a cluster controller to handle DHCP address assignments for the network. In these instances, the existing DHCP server for the network will need its configuration altered to allow MAAS to enlist and control nodes automatically. .. note:: If you don't let MAAS manage DHCP, then MAAS will not be able to allocate its :ref:`static IP addresses ` to Nodes. At the very least the "filename" option should be set to "pxelinux.0". How to configure this depends on what software you use as a DHCP server. 
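With dnsmasq, for instance, a single configuration line is enough -- a minimal sketch, offered only as an illustration, since dnsmasq is not otherwise covered by this guide:: dhcp-boot=pxelinux.0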
If you are using the ISC DHCP server, for example, the configuration entry might look something like this:: subnet 192.168.122.0 netmask 255.255.255.0 { filename "pxelinux.0"; option subnet-mask 255.255.255.0; option broadcast-address 192.168.122.255; option domain-name-servers 192.168.122.136; range dynamic-bootp 192.168.122.5 192.168.122.135; } When doing this, leave the cluster controller's interface in the "unmanaged" state. If your cluster controller is in charge of nodes on more than one network through different network interfaces, there is an additional complication. Without the DHCP server built into the cluster controller, MAAS may not know which of the cluster controller's IP addresses each node should use for downloading its installer image. If you want to support this situation, ensure that all of the nodes can reach all of the cluster controller's network addresses. .. _ssl: SSL Support ----------- If you want secure access to your MAAS web UI/API, you need to do a few things. First, turn on SSL support in Apache:: $ sudo a2enmod ssl Ensure that the Apache config file from ``etc/maas/maas-http.conf`` is included in ``/etc/apache2/conf.d/``, then set the default URL using the ``maas-region-admin`` command to use ``https`` instead of ``http``:: $ maas-region-admin local_config_set \ > --maas-url="https://localhost:5240/MAAS" Now, restart Apache:: $ sudo service apache2 restart At this point you will be able to access the MAAS web server using https, but the default SSL certificate is insecure. Please generate your own and then edit ``/etc/apache2/conf.d/maas-http.conf`` to set the location of the certificate. Choosing a series to install ---------------------------- You may have some specific reason to choose a particular version of Ubuntu to install on your nodes, perhaps because of package availability, hardware support or some other requirement. It is possible to choose a specific series from those available in a number of ways. From the user interface ^^^^^^^^^^^^^^^^^^^^^^^ The web-based user interface makes it easy to select which Ubuntu series you wish to install on an individual node. When either adding a node manually, or on the node page when the node has been automatically discovered but before it is accepted, there is a drop-down menu to select the version of Ubuntu you wish to install. .. image:: media/series.* The menu will always list all the currently available series according to which boot images are available. Using the maas command ^^^^^^^^^^^^^^^^^^^^^^ It is also possible to select a series using the maas command. This can be done on a per-node basis with:: $ maas <profile> node update <system_id> distro_series="<value>" Where <value> contains one of the valid, available distro series (e.g. "trusty") or is empty for the default value.
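For example, using the 'maas' profile seen in other examples in this documentation, you could select Trusty for a node with (<system_id> is the node's identifier, as shown by ``maas maas nodes list``):: $ maas maas node update <system_id> distro_series="trusty"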
.. _preseed: Altering the Preseed file ------------------------- .. warning:: Do not try to alter the preseed files if you don't have a good understanding of what you are doing. Altering the installed version of Ubuntu can prevent MAAS from working as intended, and may have security and stability consequences. When MAAS commissions a node, it installs a version of Ubuntu. The installation is performed using a 'preseed' file, which is effectively a list of answers to the questions you would get were you to run the installer manually. The preseed file used by MAAS is carefully made so that the target node can be brought up and do all the jobs expected of it. However, in exceptional circumstances, you may wish to alter the preseed file to work around some issue. There are actually two preseed files, stored here:: /etc/maas/preseeds/generic /etc/maas/preseeds/preseed-master The generic file references the preseed-master file, and is used to set conditional parameters based on the type of series and architecture to install, as well as to define the minimum set of install packages and to tidy up the PXE boot process if that has been used for the node. Unless you have a specific need to change where install packages come from, you should not need to edit this file. For the more usual sorts of things you may wish to change, you should edit the preseed-master file. For example, depending on your network you may wish to change the clock settings:: # Local clock (set to UTC and use ntp) d-i clock-setup/utc boolean true d-i clock-setup/ntp boolean true d-i clock-setup/ntp-server string ntp.ubuntu.com Having consistent clocks is very important to the working of your MAAS system overall. If, however, your nodes cannot freely access the Internet, the supplied NTP server is not going to be very useful, and you may find it better to run an ntp service on the MAAS controller and change `ntp.ubuntu.com` in the last line to a more appropriate server. One thing you may wish to alter in the preseed file is the disk partitioning. This is a simple recipe that creates a swap partition and uses the rest of the disk for one large root filesystem:: partman-auto/text/atomic_scheme :: 500 10000 1000000 ext3 $primary{ } $bootable{ } method{ format } format{ } use_filesystem{ } filesystem{ ext3 } mountpoint{ / } . 64 512 300% linux-swap method{ swap } format{ } . Here the root partition must be at least 500 MB, and has effectively no maximum size. The swap partition ranges from 64 MB to three times the system's RAM. Adding `$bootable{ }` makes the partition bootable, and `$primary{ }` marks it as the primary partition. The other specifiers used are: *method{ format }* Used to make the partition be formatted. For swap partitions, change it to "swap". To create a new partition without formatting it, change "format" to "keep" (such a partition can be used to reserve some disk space for future use). *format{ }* Also needed to make the partition be formatted. *use_filesystem{ }* Specifies that the partition has a filesystem on it. *filesystem{ ext3 }* Specifies the filesystem to put on the partition. *mountpoint{ / }* Where to mount the partition. For more information on preseed options, you should refer to `the official Ubuntu documentation `_ .. note:: Future versions of MAAS are likely to replace this type of automatic installation with a different installer. Installing additional clusters ------------------------------ In an environment comprising large numbers of nodes, it is likely that you will want to organise the nodes on a more distributed basis. The standard install of the MAAS region controller includes a cluster controller, but it is possible to add additional cluster controllers to the configuration, as shown in the diagram below: .. image:: media/orientation_architecture-diagram.* Each cluster controller will need to run on a separate Ubuntu server. Installing and configuring the software is straightforward though:: $ sudo apt-get install maas-cluster-controller This meta-package will install all the basic requirements of the system.
However, you may also wish or need to run DHCP and/or DNS services, in which case you should also specify these:: $ sudo apt-get install maas-cluster-controller maas-dhcp maas-dns Configuring the cluster controller ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Once the packages are installed, the cluster controller needs to know where to look for the region controller. This is achieved using `dpkg` to configure the software:: $ dpkg-reconfigure maas-cluster-controller .. image:: media/cluster-config.* The configuration script should then bring up a screen where you can enter the IP address of the region controller. Additionally, you will need to import the distro image files locally for commissioning:: $ maas maas node-groups import-boot-images …and optionally set up the DHCP and DNS for the cluster by either: *Using the web UI* Follow the instructions at :doc:`cluster-configuration` to use the web UI to set up your cluster controller. *Using the command line client* First :ref:`logging in to the API <api-key>` and then :ref:`following this procedure <cli-dhcp>` Client-side DNS configuration ----------------------------- When using a third-party tool such as ``juju``, it will need to be able to resolve the hostnames that the MAAS API returns to it. In order for this to happen, *client-side DNS* must be configured to point to MAAS's DNS server. Generally speaking, this is a simple case of adding the following line to the ``/etc/resolv.conf`` file on your client host:: nameserver <IP> replacing the <IP> with the actual IP address of the host running the MAAS DNS server. However, for hosts using the ``resolvconf`` package, please read its documentation for more information. maas-1.9.5+bzr4599.orig/docs/development/0000755000000000000000000000000013056115004016125 5ustar 00000000000000maas-1.9.5+bzr4599.orig/docs/devices.rst0000644000000000000000000000214213056115004015756 0ustar 00000000000000.. -*- mode: rst -*- .. _devices: Devices ======== .. note:: This feature is available in MAAS versions 1.8 and above. If you're writing a client application, you can check if MAAS supports this feature via the web API; see the documentation for the ``devices-management`` capability :ref:`here`. In addition to nodes, a MAAS cluster controller can manage *devices*. Devices represent non-installable machines. This feature can be used to track routers, virtual machines, etc. within MAAS. As with nodes, devices can be assigned IP addresses and DNS names. IP addresses can be fixed, in which case the device should be configured to use the defined IP address, or dynamic, in which case the device can obtain an IP address from the MAAS DHCP server, which will hand out the configured IP address. Devices can also be assigned a parent node and will be automatically deleted (along with all the IP address reservations associated with them) when the parent node is deleted or released. This is designed to model and manage the virtual machines or containers running on a MAAS-deployed node.maas-1.9.5+bzr4599.orig/docs/enum.rst0000644000000000000000000000064213056115004015303 0ustar 00000000000000========== MAAS Enums ========== .. This only lists the enums that are relevant to outside users, e.g. people writing client applications using MAAS's web API. .. autoclass:: maasserver.enum.NODE_STATUS :members: .. autoclass:: maasserver.enum.NODEGROUP_STATUS :members: .. autoclass:: maasserver.enum.NODEGROUPINTERFACE_MANAGEMENT :members: ..
autoclass:: maasserver.enum.NODE_STATUS :members: maas-1.9.5+bzr4599.orig/docs/getting-help.rst0000644000000000000000000000366213056115004016733 0ustar 00000000000000************ Getting help ************ Where to get help ----------------- The two channels you can use to get help debugging a MAAS issue are: - The `Ask Ubuntu`_ website. - The `Freenode #maas`_ IRC channel. .. _Ask Ubuntu: http://askubuntu.com/questions/ask?tags=maas .. _Freenode #maas: http://webchat.freenode.net/?channels=maas Gathering debugging information ------------------------------- If your question concerns a problem with a specific MAAS installation, we encourage you to gather debugging information and make it available before you head over to Ask Ubuntu or the IRC channel. This way, you'll have all the required information handy for the people who can help you. Gathering debugging information is a fully automated process that is performed with the help of `sosreport`_. If your MAAS server is running Ubuntu version 14.04 (Trusty Tahr) or later, you can get sosreport from the official archives:: # Install sosreport. sudo apt-get install -y sosreport # Create the report. sudo sosreport -o maas Alternatively, if your MAAS server is running a previous Ubuntu release, you'll need to install sosreport manually:: # Install git. sudo apt-get install -y git # Get the latest version of sosreport. git clone https://github.com/sosreport/sosreport.git /tmp/sosreport # Create the report. sudo /tmp/sosreport/sosreport -o maas .. _sosreport: https://github.com/sosreport/sosreport This will create a tarball containing MAAS's log files, MAAS's configuration files and a dump of MAAS's database. By default, the tarball will end up in /tmp, but you can change the location; see sosreport's manpage for details. If there are things you do not wish to share publicly, feel free to edit the tarball. The last step is to make this file available by any means at your disposal (an openly accessible FTP server, Dropbox, etc.) so that the people who will help you can get their hands on it. maas-1.9.5+bzr4599.orig/docs/hacking.rst0000755000000000000000000000000013056115004017752 2../HACKING.txtustar 00000000000000maas-1.9.5+bzr4599.orig/docs/hardware-enablement-kernels.rst0000644000000000000000000000600613056115004021705 0ustar 00000000000000.. -*- mode: rst -*- .. _hardware-enablement-kernels: ================================= Using hardware-enablement kernels ================================= .. note:: As of MAAS 1.9, this feature is configured by setting the hwe_kernel variable instead of the architecture variable. MAAS allows you to use hardware enablement kernels when booting nodes with Ubuntu that require them. What are hardware-enablement kernels? ------------------------------------- Brand new hardware gets released all the time. We want that hardware to work well with Ubuntu and MAAS, even if it was released after the latest release of MAAS or Ubuntu. Hardware Enablement (HWE) is all about keeping pace with the new hardware. Ubuntu's solution to this is to offer newer kernels for older releases. There are at least two kernels on offer for Ubuntu releases: the "generic" kernel -- i.e. the kernel released with the current series -- and the Hardware Enablement kernel, which is the most recent kernel release. There are separate HWE kernels for each release of Ubuntu, referred to as ``hwe-<release letter>``. So, the 14.04 / Trusty Tahr HWE kernel is called ``hwe-t``, the 12.10 / Quantal Quetzal HWE kernel is called ``hwe-q`` and so on.
This allows you to use newer kernels with older releases, for example running Precise with a Saucy (hwe-s) kernel. For more information see the `LTS Enablement Stack`_ page on the Ubuntu wiki. .. _LTS Enablement Stack: https://wiki.ubuntu.com/Kernel/LTSEnablementStack Booting hardware-enablement kernels ----------------------------------- MAAS imports hardware-enablement kernels along with its generic boot images. These hardware-enablement kernels are specified using the min_hwe_kernel or hwe_kernel variables. The min_hwe_kernel variable instructs MAAS to ensure that the release to be deployed uses a kernel version at or above the value of min_hwe_kernel. For example, if min_hwe_kernel is set to hwe-t, the hwe-t kernel will be used when deploying any release before Trusty; for any release after Trusty, the default kernel for that release will be used. If hwe-t or newer is not available for the specified release, MAAS will not allow that release to be deployed and will throw an error. min_hwe_kernel can be set by running the command:: $ maas <profile> node update <system_id> min_hwe_kernel=hwe-<release letter> It's also possible to set the min_hwe_kernel from the MAAS web UI, by visiting the Node's page and clicking ``Edit node``. Under the Minimum Kernel field, you will be able to select any HWE kernels that have been imported onto that node's cluster controller. .. image:: media/min_hwe_kernel.png You can also set the hwe_kernel during deployment. MAAS checks that the specified kernel is available for the specified release before deploying the node. You can set the hwe_kernel when deploying by using the command:: $ maas <profile> node start <system_id> distro_series=<series> hwe_kernel=hwe-<release letter> Or through the web interface, as seen below. .. image:: media/hwe_kernel.png maas-1.9.5+bzr4599.orig/docs/index.rst0000644000000000000000000000554113056115004015451 0ustar 00000000000000.. MAAS documentation master file ######################## MAAS: Metal As A Service ######################## This is the documentation for the `MAAS project`_. Metal as a Service -- MAAS -- lets you treat physical servers like virtual machines in the cloud. Rather than having to manage each server individually, MAAS turns your bare metal into an elastic cloud-like resource. What does that mean in practice? Tell MAAS about the machines you want it to manage and it will boot them, check the hardware's okay, and have them waiting for when you need them. You can then pull nodes up, tear them down and redeploy them at will; just as you can with virtual machines in the cloud. When you're ready to deploy a service, MAAS gives `Juju`_ the nodes it needs to power that service. It's as simple as that: no need to manually provision, check and, afterwards, clean up. As your needs change, you can easily scale services up or down. Need more power for your Hadoop cluster for a few hours? Simply tear down one of your Nova compute nodes and redeploy it to Hadoop. When you're done, it's just as easy to give the node back to Nova. .. _MAAS project: http://maas.ubuntu.com .. _Juju: https://juju.ubuntu.com/ MAAS is ideal where you want the flexibility of the cloud, and the hassle-free power of Juju charms, but you need to deploy to bare metal. ************ Introduction ************ .. toctree:: :maxdepth: 2 about orientation changelog ************************ Setting up a MAAS server ************************ ..
toctree:: :maxdepth: 2 install configure cluster-configuration static-ips ipv6 bootsources nodes hardware-enablement-kernels sstreams-mirror networks kernel-options installing-ubuntu os-support storage networking devices ****************** Deploying services ****************** .. toctree:: :maxdepth: 2 juju-quick-start tags physical-zones *********************** API / CLI Documentation *********************** .. toctree:: :maxdepth: 2 api api_authentication maascli version *************** Troubleshooting *************** .. toctree:: :maxdepth: 2 getting-help troubleshooting ****************** Command-line Tools ****************** .. toctree:: :maxdepth: 1 man/maas-region-admin.8 man/maas.8 *************** Developing MAAS *************** .. toctree:: :maxdepth: 2 development/philosophy hacking models enum development/security development/building-packages development/cluster-registration development/cluster-bootstrap development/tagging development/lease-scanning-and-dns development/preseeds development/metadata development/rpc development/transactions ****************** Indices and tables ****************** .. toctree:: :maxdepth: 2 * :ref:`genindex` * :ref:`modindex` * :ref:`search` maas-1.9.5+bzr4599.orig/docs/install.rst0000755000000000000000000000000013056115004020056 2../INSTALL.txtustar 00000000000000maas-1.9.5+bzr4599.orig/docs/installing-ubuntu.rst0000644000000000000000000000333213056115004020022 0ustar 00000000000000===================================== Installing Ubuntu and deploying nodes ===================================== Once a node has been accepted into MAAS and is ready for use, users can deploy services to that node. Prior to deployment, MAAS is responsible for: 1. Powering up the node. 2. Installing Ubuntu on the node. 3. Installing the user's SSH keys on the node. Once these steps have been completed, the node is ready to have services deployed to it, either manually or by using a tool like Juju_. There are two ways to install Ubuntu on a node: 1. :ref:`The Curtin installer <curtin-installer>`. 2. :ref:`The Debian installer (Deprecated) <debian-installer>`. .. _Juju: http://juju.ubuntu.com .. _curtin-installer: The Curtin Installer -------------------- The Curtin installer, as the name suggests, installs Ubuntu on a node more quickly than would be possible using the :ref:`Debian installer <debian-installer>`. The Curtin installer is enabled by default and is the only one supported. It copies a pre-built Ubuntu image to the node, with all the packages installed that would normally be found in an Ubuntu installation, and is the fastest OS installer yet. For more information about the Curtin installer, see the `curtin project`_ on Launchpad. .. _curtin project: https://launchpad.net/curtin .. _debian-installer: The Debian Installer (Deprecated) --------------------------------- The Debian Installer installs Ubuntu on a node in exactly the same way as you would install it manually. .. note:: Starting from MAAS 1.8, the Debian Installer has been deprecated and is no longer supported. While it is still available in MAAS, its use is not recommended. maas-1.9.5+bzr4599.orig/docs/ipv6.rst0000644000000000000000000001471613056115004015230 0ustar 00000000000000.. -*- mode: rst -*- .. _ipv6: Managing IPv6 Networks ====================== .. note:: This feature is available in MAAS versions 1.7 and above, starting with lp:maas revision 2992.
If you're writing a client application that makes use of this feature, you can query the region-server API for the ``ipv6-deployment-ubuntu`` :ref:`capability`. MAAS has limited IPv6 support for networking nodes. It works much like IPv4 support, but with a number of limitations: * Nodes still boot, register, and install using the IPv4 network. * IPv6 addresses are only configured when using the default Ubuntu installer. * Most BMCs can only be controlled (e.g. to power nodes on/off) using IPv4. * MAAS still uses IPv4 for its internal operation, installing nodes, etc. * For now, MAAS only supports IPv6 on networks where it also manages IPv4 DHCP. * A network interface on a node can only be on one IPv6 subnet. * A network interface on a cluster controller can manage only one IPv6 subnet. The web user interface and REST API can be accessed in the same way on both IPv4 and IPv6. To use an IPv6 address as the hostname in a URL, in your browser or elsewhere, surround it with square brackets. For example, on the local machine (``::1``, the IPv6 equivalent of ``localhost``) you might request:: http://[::1]/MAAS/ If your MAAS server has a DNS hostname that resolves to both IPv4 and IPv6 addresses, your browser may already be accessing the UI through IPv6 without you noticing. Enabling IPv6 ------------- You enable IPv6 networking in the same way that you enable IPv4 networking: configure a separate cluster interface for your IPv6 subnet, in addition to the one you need for your IPv4 subnet. The IPv6 cluster interface must define a static address range. Provided that you already have a functioning IPv6 network, that's all there is to it. The following sections will go into more detail about what is supported, what is needed, and what to do if you don't yet have a functioning IPv6 network. An IPv6 cluster interface can use the same network interface on the cluster controller as an existing IPv4 network interface. It just defines a different subnet, with IPv6 addressing. A node that's connected to the IPv4 subnet will also be connected to the IPv6 subnet on the same network segment. Configuring your IPv6 subnet ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When you configure your IPv6 cluster interface, be sure to define a static IP address range. Deployed nodes on the subnet will get static addresses in this range. IPv6 networks are normally 64 bits wide, so you can be generous with the ranges' sizes. It also means that you can leave the netmask field blank. (There are no broadcast addresses in IPv6, so leave the broadcast address field blank as well.) You may want MAAS to manage DHCP and DNS, but it's not required. In fact nodes do not need a DHCP server at all for IPv6; MAAS configures static IPv6 addresses on the node's network interfaces while deploying it. A DHCPv6 server can provide addresses for containers or virtual machines running on the nodes, as well as devices on the network that are not managed by MAAS, but it is not needed for the nodes themselves. MAAS will not be aware of any addresses issued by DHCP, and does not guarantee that they will stay unchanged. .. _ipv6-routing: Routing ^^^^^^^ In IPv6, clients do not discover routes through DHCP. Routers make themselves known on their networks by sending out *route advertisements*. 
These *RAs* contain other configuration as well: whether clients should statelessly configure their own unique IP addresses based on their MAC addresses; whether they should request stateless configuration from a DHCP server; and finally, whether they should request a stateful IP address from a DHCP server. Since a network interface can have any number of IPv6 addresses even on a single subnet, several of these address assignment mechanisms can be combined. However, when MAAS configures IPv6 networking on a node, it does not rely on RAs. Instead, it statically configures your nodes' default IPv6 route to use the router that is configured on the cluster interface, so that the nodes will know their default gateway. They do not need DHCP and will not autoconfigure global addresses. However, if you are planning to operate DHCPv6 clients as well, e.g. on machines not managed by MAAS or on virtual machines hosted by MAAS nodes, you may still want to have RAs configured to make those clients obtain configuration over DHCP. If you need RAs but your gateway does not send them, install and configure ``radvd`` somewhere on the network to advertise its route. Other installers and operating systems ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Static IPv6 addresses are currently only configured on Ubuntu, when installed using the "fast" installer. Other operating systems, or Ubuntu with the classic Debian installer, will not have their IPv6 addresses configured. The same applies when a user manually installs an operating system on a node, or overwrites its networking configuration: the node will no longer have its static IPv6 address configured, even if MAAS has allocated it to the node. However, as long as the address remains allocated to the node, you may still configure its operating system to use that address. The node can then use that address as if it had been configured by MAAS. Disabling IPv4 -------------- For advanced users, there is an experimental capability to deploy nodes with pure IPv6, with IPv4 networking disabled. To enable this on a node, check the "Disable IPv4 when deployed" box on the node's Edit page. The process of managing and deploying the node will still largely work through IPv4, but once deployed, the node will have IPv6 networking only. In practice nodes may not be functional without IPv4 networking. A few things are known to be needed in any case: Configuring the MAAS URL ^^^^^^^^^^^^^^^^^^^^^^^^ The *maas-cluster-controller* package has a configuration item for the URL where nodes and cluster controllers can reach the MAAS region API. By default, this URL is set based on the region controller's IPv4 address. To make it work for nodes that won't have IPv4, you must set the MAAS URL to use a hostname instead of an IP address. The hostname must resolve to both IPv4 and IPv6 addresses, both on the cluster controller and on the nodes. To change this setting, run:: dpkg-reconfigure maas-cluster-controller It will prompt you for the URL, with its current setting as the initial value. maas-1.9.5+bzr4599.orig/docs/juju-quick-start.rst0000644000000000000000000001031313056115004017555 0ustar 00000000000000Juju Quick Start ================ These instructions will help you deploy your first charm with Juju to a MAAS cluster. In the following, we assume that you have a MAAS cluster set up with at least two nodes enlisted with it. Your API key, SSH key, and environments.yaml -------------------------------------------- You'll need an API key from MAAS so that the Juju client can access it.
Each user account in MAAS can have as many API keys as desired. One hard and fast rule is that you'll need to use a different API key for each Juju *environment* you set up within a single MAAS cluster. There is no need to explicitly add an SSH key to MAAS when using Juju; it will automatically put your public key on any hosts that it starts up. **Note**: You do not need to use the MAAS web UI or API to allocate a node to yourself, Juju will do this for you. Getting a key ^^^^^^^^^^^^^ To get the API key: #. Go to your MAAS preferences page (go to your MAAS home page ``http://${my-maas-server}:80/MAAS/`` and choose *Preferences* from the drop-down menu that appears when clicking your username at the top-right of the page). #. Optionally add a new MAAS key. Do this if you're setting up another environment within the same MAAS cluster. The ``${my-maas-server}`` slot should be replaced with the hostname of your MAAS server. Adding an SSH key ^^^^^^^^^^^^^^^^^ While you're still on the MAAS preferences page, add your SSH key by clicking *Add SSH key*. Use the public half of your SSH key, the content of ``~/.ssh/id_rsa.pub`` for example; don't paste the private half. Creating environments.yaml ^^^^^^^^^^^^^^^^^^^^^^^^^^ Create or modify ``~/.juju/environments.yaml`` with the following content:: environments: maas: type: maas maas-server: 'http://${my-maas-server}:80/MAAS' maas-oauth: '${maas-api-key}' admin-secret: ${your-admin-secret} default-series: precise Substitute the API key from earlier into the ``${maas-api-key}`` slot, and the hostname of your MAAS server into the ``${my-maas-server}`` slot. The ``${your-admin-secret}`` slot should be replaced with a random pass-phrase, there is no default. Now Juju -------- If juju-core is not yet installed on the client machine, run:: $ sudo apt-get install juju-core Now, use juju to display the status of the default environment:: $ juju status As you've not bootstrapped you ought to see:: error: Unable to connect to environment "". Please check your credentials or use 'juju bootstrap' to create a new environment. **Note**: if Juju complains that there are multiple environments and no explicit default, add ``-e ${environment-name}`` after each command, e.g.:: $ juju status -e maas Bootstrap:: $ juju sync-tools $ juju bootstrap If bootstrapping on a version of juju older than 1.14.0 then use:: $ juju bootstrap --upload-tools This will return quickly, but the master node may take a *long* time to come up. It has to completely install Ubuntu and Juju on it and reboot before it'll be available for use. It's probably worth either trying a ``juju status`` once in a while to check on progress, or following the install on the node directly. **Beware** of `bug 413415`_ - *console-setup hangs under chroot debootstrap with a console login on ttyX* - when monitoring an installation on the node. .. 
_bug 413415: https://bugs.launchpad.net/ubuntu/+source/console-setup/+bug/413415 Once the bootstrap node has been installed, a status command should come up with something a bit more interesting:: environment: maas machines: "0": agent-state: started agent-version: 1.13.3.1 dns-name: kmhwd.master instance-id: /MAAS/api/1.0/nodes/node-5c5b713a-1afc-11e3-9904-525400123456/ series: precise services: {} Now it's possible to deploy a charm:: $ juju deploy mysql $ juju status If you have another node free you can finish off the canonical and by now familiar example:: $ juju deploy wordpress $ juju add-relation wordpress mysql $ juju expose wordpress $ juju status Note that each charm runs on its own host, so each deployment will actually take as long as it took to bootstrap. Have a beer, drown your sorrows in liquor, or, my preference, have another cup of tea. maas-1.9.5+bzr4599.orig/docs/kernel-options.rst0000644000000000000000000000230413056115004017305 0ustar 00000000000000=========================== Setting kernel boot options =========================== MAAS is able to send specific kernel options to booting nodes on both a global basis and a per-node basis. Global kernel options --------------------- As an admin, click on the gear icon at the top right and scroll down to the Global Kernel Parameters section, as shown here: .. image:: media/global_kernel_opts.png Whatever you set here is sent as-is to all booting nodes. Per-node kernel options ----------------------- Per-node kernel options are set using tags. The easiest way of doing this is to use the ``maas`` command. You will need to :ref:`be logged in to the API first <api-key>` and then you can add a tag which has its ``kernel_opts`` value set, like this:: $ maas maas tags new name='nomodeset' \ comment='nomodeset kernel option' kernel_opts='nomodeset vga' Once the tag is defined, you can add it to a node or nodes:: $ maas maas tag update-nodes nomodeset add=<system_id> \ add=<system_id> .. note:: Any per-node kernel options set will completely override the global options. If multiple tags attached to a node have ``kernel_opts`` defined, the first one ordered by name is used. maas-1.9.5+bzr4599.orig/docs/maascli.rst0000644000000000000000000004206713056115004015757 0ustar 00000000000000.. _cli: ---------------------- Command Line Interface ---------------------- As well as the web interface, many tasks can be performed by accessing the MAAS API directly through the `maas` command. This section details how to log in with this tool and perform some common operations. .. _api-key: Logging in ---------- Before the API will accept any commands from ``maas``, you must first log in. To do this, you need an API key for your MAAS account. A key was generated for you as soon as your account was created, although you can still generate additional keys if you prefer. The key can be found in the web user interface, or if you have root privileges on the region controller, retrieved from the command line. To obtain the key from the web user interface, log in and click on your user name in the top right corner of the page, and select 'Preferences' from the menu which appears. .. image:: media/maascli-prefs.* A new page will load... .. image:: media/maascli-key.* Your MAAS API keys appear at the top of the preferences form. It's easiest to just select and copy the key (it's quite long!) and then paste it into the command line.
To obtain the key through the command line, run this command on the region controller (it requires root access):: $ sudo maas-region-admin apikey --username=my-username (Substitute your MAAS user name for my-username). Once you have your API key, log in with:: $ maas login <profile-name> <url> [<key>] This command logs you in, and creates a "profile" with the profile name you have selected. The profile is an easy way of storing the server URL and your login credentials, and re-using them across command-line invocations. Think of the profile as a persistent session. You can have multiple profiles open at the same time, and so as part of the login command, you assign a unique name to the new profile. Later invocations of the maas command line will refer to the profile by this name. For example, you might log in with a command line like:: $ maas login my-maas http://10.98.0.13/MAAS/api/1.0 AWSCRMzqMNy:jjk...5e1FenoP82Qm5te2 This creates the profile 'my-maas' and registers it with the given key at the specified API endpoint URL. If you omit the API key, the command will prompt you for it in the console. It is also possible to use a hyphen, '-', in place of the API key. In this case the command will read the API key from standard input, as a single line, ignoring whitespace. This mode of input can be useful if you want to read the API key from a file, or if you wish to avoid including the API key in a command line where it may be observed by other users on the system. Specifying an empty string instead of an API key will make the profile act as an anonymous user. Some calls in the API are accessible without logging in, but most of them are not. maas commands ------------- The ``maas`` command exposes the whole API, so you can do anything you actually *can* do with MAAS using this command. Unsurprisingly, this leaves us with a vast number of options, but before we delve into detail on the specifics, here is a sort of 'cheat-sheet' for common tasks you might want to do using ``maas``. * :ref:`Configure DHCP and DNS services <cli-dhcp>` * :ref:`Commission all enlisted nodes <cli-commission>` * :ref:`Setting IPMI power parameters for a node <cli-power>` The main maas commands are: .. program:: maas :samp:`list` lists the details [name url auth-key] of all the currently logged-in profiles. :samp:`login <profile-name> <url> [<key>]` Logs in to the MAAS controller API at the given URL, using the key provided, and associates this connection with the given profile name. :samp:`logout <profile-name>` Logs out from the given profile, flushing the stored credentials. :samp:`refresh` Refreshes the API descriptions of all the currently logged-in profiles. This may become necessary, for example, when upgrading the maas packages, to ensure the command-line options match the API. :samp:`<profile-name> [command] [options] ...` Using the given profile name instructs ``maas`` to direct the subsequent commands and options to the relevant MAAS; the commands available in the current API are detailed below... account ^^^^^^^ This command is used for creating and destroying the MAAS authorisation tokens associated with a profile. Usage: maas *<profile-name>* account [-d --debug] [-h --help] create-authorisation-token | delete-authorisation-token [token_key=\ *<token-key>*] .. program:: maas account :samp:`-d, --debug` Displays debug information listing the API responses. :samp:`-h, --help` Display usage information. :samp:`-k, --insecure` Disables the SSL certificate check. :samp:`create-authorisation-token` Creates a new MAAS authorisation token for the current profile which can be used to authenticate connections to the API. :samp:`delete-authorisation-token token_key=<token-key>` Removes the given key from the list of authorisation tokens.
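For example, under the 'maas' profile used in the examples below, a fresh token could be generated with:: $ maas maas account create-authorisation-token The new key set (consumer key, token key and token secret) is returned as JSON.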
.. boot-images - not useful in user context .. ^^^^^^^^^^^ .. files - not useful in user context .. ^^^^^ node ^^^^ API calls which operate on individual nodes. With these commands, the node is always identified by its "system_id" property - a unique tag allocated at the time of enlistment. To discover the value of the system_id, you can use the ``maas nodes list`` command. USAGE: maas node [-h] release | start | stop | delete | read | update .. program:: maas node :samp:`-h, --help` Display usage information. :samp:`release <system_id>` Releases the node given by *<system_id>*. :samp:`start <system_id>` Powers up the node identified by *<system_id>* (where MAAS has information for power management for this node). :samp:`stop <system_id>` Powers off the node identified by *<system_id>* (where MAAS has information for power management for this node). :samp:`delete <system_id>` Removes the given node from the MAAS database. :samp:`read <system_id>` Returns all the currently known information about the node specified by *<system_id>*. :samp:`update <system_id> [parameters...]` Used to change or set specific values for the node. The valid parameters are listed below:: hostname=<value> The new hostname for this node. architecture=<value> Sets the architecture type, where <value> is a string containing a valid architecture type, e.g. "i386/generic" distro_series=<value> Sets the distro series of Ubuntu to use (e.g. "precise"). power_type=<value> Set the given power type on the node. (e.g. "ipmi") power_parameters_{param1}...=<value> Set the given power parameters. Note that the valid options for these depend on the power type chosen. power_parameters_skip_check 'true' | 'false' Whether to sanity check the supplied parameters against this node's declared power type. The default is 'false'. .. _cli-power: Example: Setting the power parameters for an ipmi enabled node:: maas maas node update <system_id> \ power_type="ipmi" \ power_parameters_power_address=192.168.22.33 \ power_parameters_power_user=root \ power_parameters_power_pass=ubuntu;
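As a companion sketch, using the same 'maas' profile, you can then read back everything MAAS knows about the node to verify the change:: $ maas maas node read <system_id>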
nodes ^^^^^ Usage: maas nodes [-h] is-registered | list-allocated | acquire | list | accept | accept-all | new | check-commissioning .. program:: maas nodes :samp:`-h, --help` Display usage information. :samp:`accept <system_id>` Accepts the node referenced by <system_id>. :samp:`accept-all` Accepts all currently discovered but not previously accepted nodes. :samp:`acquire` Allocates a node to the profile used to issue the command. Any ready node may be allocated. :samp:`is-registered mac_address=<mac_address>` Checks to see whether the specified MAC address is registered to a node. :samp:`list` Returns a JSON formatted object listing all the currently known nodes, their system_id, status and other details. :samp:`list-allocated` Returns a JSON formatted object listing all the currently allocated nodes, their system_id, status and other details. :samp:`new architecture=<value> mac_addresses=<value> [parameters]` Creates a new node entry given the provided key=value information for the node. A minimum of the MAC address and architecture must be provided. Other parameters may also be supplied:: architecture="<value>" - The architecture of the node, must be one of the recognised architecture strings (e.g. "i386/generic") hostname="<value>" - a name for this node. If not supplied a name will be generated. mac_addresses="<value>" - The mac address(es) allocated to this node. power_type="<value>" - the power type of the node (e.g. virsh, ipmi) :samp:`check-commissioning` Displays the current status of nodes in the commissioning phase. Any that have not returned before the system timeout value are listed as "failed". .. _cli-commission: Examples: Accept and commission all discovered nodes:: $ maas maas nodes accept-all List all known nodes:: $ maas maas nodes list Filter the list using specific key/value pairs:: $ maas maas nodes list architecture="i386/generic" node-groups ^^^^^^^^^^^ Usage: maas node-groups [-d --debug] [-h --help] [-k --insecure] register | list | accept | reject .. program:: maas node-groups :samp:`-d, --debug` Displays debug information listing the API responses. :samp:`-h, --help` Display usage information. :samp:`-k, --insecure` Disables the SSL certificate check. :samp:`register uuid=<value> name=<value> interfaces=<json_string>` Registers a new node group with the given name and uuid. The interfaces parameter must be supplied in the form of a JSON string comprising the key/value data for the interface to be used, for example: interfaces='[{"ip":"192.168.21.5","interface":"eth1", \ "subnet_mask":"255.255.255.0","broadcast_ip":"192.168.21.255", \ "router_ip":"192.168.21.1", "ip_range_low":"192.168.21.10", \ "ip_range_high":"192.168.21.50"}]' :samp:`list` Returns a JSON list of all currently defined node groups. :samp:`accept <uuid>` Accepts a node-group or number of nodegroups indicated by the supplied UUID :samp:`reject <uuid>` Rejects a node-group or number of nodegroups indicated by the supplied UUID node-group-interface ^^^^^^^^^^^^^^^^^^^^ For managing the interfaces. See also :ref:`node-group-interfaces` Usage: maas *<profile-name>* node-group-interfaces [-d --debug] [-h --help] [-k --insecure] read | update | delete [parameters...] .. program:: maas node-group-interface :samp:`read <uuid> <interface>` Returns the current settings for the given UUID and interface :samp:`update <uuid> <interface> [parameters]` Changes the settings for the interface according to the given parameters:: management= 0 | 1 | 2 The service to be managed on the interface ( 0= none, 1=DHCP, 2=DHCP and DNS). subnet_mask=<value> Apply the given dotted decimal value as the subnet mask. broadcast_ip=<value> Apply the given dotted decimal value as the broadcast IP address for this subnet. router_ip=<value> Apply the given dotted decimal value as the default router address for this subnet. ip_range_low=<value> The lowest value of IP address to allocate via DHCP ip_range_high=<value> The highest value of IP address to allocate via DHCP :samp:`delete <uuid> <interface>` Removes the entry for the given UUID and interface. .. _cli-dhcp: Example: Configuring DHCP and DNS. To enable MAAS to manage DHCP and DNS, it needs to be supplied with the relevant interface information.
To do this we need to first determine the UUID of the node group affected:: $ uuid=$(maas <profile> node-groups list | grep uuid | cut -d\" -f4) Once we have the UUID we can use this to update the node-group-interface for that nodegroup, and pass it the relevant interface details:: $ maas <profile> node-group-interface update $uuid eth0 \ ip_range_high=192.168.123.200 \ ip_range_low=192.168.123.100 \ management=2 \ broadcast_ip=192.168.123.255 \ router_ip=192.168.123.1 Replacing the example values with those required for this network. The only non-obvious parameter is 'management', which takes the values 0 (no management), 1 (manage DHCP) and 2 (manage DHCP and DNS). .. _node-group-interfaces: node-group-interfaces ^^^^^^^^^^^^^^^^^^^^^ The node-group-interfaces commands are used for configuring the management of DHCP and DNS services where these are managed by MAAS. Usage: maas *<profile-name>* node-group-interfaces [-d --debug] [-h --help] [-k --insecure] list | new [parameters...] .. program:: maas node-group-interfaces :samp:`-d, --debug` Displays debug information listing the API responses. :samp:`-h, --help` Display usage information. :samp:`-k, --insecure` Disables the SSL certificate check. :samp:`list ') .set('href', '#') .set('id','create_token') .addClass('link-cta-ubuntu') .addClass('right') .set('text', "+ Generate MAAS key"); this.status_node = Y.Node.create('<div />') .set('id','create_error'); this.spinnerNode = Y.Node.create('<img />') .addClass('spinner') .set('src', MAAS_config.uris.statics + 'img/spinner.gif'); this.get('srcNode').one('#token_creation_placeholder') .append(this.create_link) .append(this.status_node); }, confirm: function(message) { return confirm(message); },
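/** * Bind the delete link in the given token row. * * Clicking the link asks the user for confirmation, then calls * deleteToken for that row. * * @method bindDeleteRow */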
bindDeleteRow: function(row) { var self = this; var delete_link = row.one('a.delete-link'); delete_link.on('click', function(e) { e.preventDefault(); if (self.confirm("Are you sure you want to delete this key?")) { self.deleteToken(row); } }); }, bindUI: function() { var self = this; this.create_link.on('click', function(e) { e.preventDefault(); self.requestKeys(); }); Y.each(this.get('srcNode').all('.bundle'), function(row) { self.bindDeleteRow(row); }); }, /** * Delete the token contained in the provided row. * Call the API to delete the token and then remove the table row. * * @method deleteToken */ deleteToken: function(row) { var token_key = row.one('input').get('id'); var self = this; var cfg = { method: 'POST', data: Y.QueryString.stringify({ op: 'delete_authorisation_token', token_key: token_key }), sync: false, on: { start: Y.bind(self.showSpinner, self), end: Y.bind(self.hideSpinner, self), success: function(id, out) { row.remove(); }, failure: function(id, out) { Y.log(out); if (out.status === 404) { self.displayError("The key has already been deleted."); } else { self.displayError("Unable to delete the key."); } } } }; var request = module._io.send( MAAS_config.uris.account_handler, cfg); },
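/** * Show the busy spinner (and clear any previous error message) while * an API request is in flight. * * @method showSpinner */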
.addClass('bundle') .append(Y.Node.create('<a />') .set('href', '#') .addClass('delete-link') .addClass('right') .append(Y.Node.create('<img />') .set('title', 'Delete token') .set( 'src', MAAS_config.uris.statics + 'img/delete.png'))) .append(Y.Node.create('<input />') .set('type', 'text') .addClass('disabled') .set('id', token_key) .set('value', token)); list.append(row); this.bindDeleteRow(row); }, /** * Request a new OAuth key set from the API. * * @method requestKeys */ requestKeys: function() { var self = this; var cfg = { method: 'POST', data: 'op=create_authorisation_token', sync: false, on: { start: Y.bind(self.showSpinner, self), end: Y.bind(self.hideSpinner, self), success: function(id, out) { var keys; try { keys = JSON.parse(out.response); } catch(e) { // Parsing error: report it and bail out before using 'keys'. self.displayError('Unable to create a new token.'); return; } // Generate a token from the keys. var token = self.createTokenFromKeys( keys.consumer_key, keys.token_key, keys.token_secret); // Add the new token to the list of tokens. self.addToken(token, keys.token_key); }, failure: function(id, out) { self.displayError('Unable to create a new token.'); } } }; var request = module._io.send( MAAS_config.uris.account_handler, cfg); } }); module.TokenWidget = TokenWidget; }, '0.1', {'requires': ['widget', 'maas.io']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/reveal.js0000644000000000000000000001566613056115004021347 0ustar 00000000000000/* Copyright 2012-2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Widget to expand (make visible) or fold (make invisible) a content div, * in response to clicks on a button link. * * Write your initial HTML for the visible state. If the client does not * execute the script, the content div will be visible. Upon initialization, * the widget immediately goes into its "hidden" state. * * Once the widget is set up, its reveal() method will toggle it between its * visible and invisible states. The transition is animated with a sliding * effect. * * The widget fires a "hiding" event before hiding, and "hidden" after. * Similarly, it fires "revealing" before revealing and "revealed" after. * * Synonyms: expander, collapsible, foldable. * * @module Y.maas.reveal */ YUI.add('maas.reveal', function(Y) { Y.log('loading maas.reveal'); var module = Y.namespace('maas.reveal'); var Reveal; Reveal = function(config) { Reveal.superclass.constructor.apply(this, arguments); }; Reveal.NAME = 'reveal'; Reveal.ATTRS = { /** * DOM node for the content div. This is the div that will be hidden * or revealed. It must contain exactly one tag. * * The widget will add the "slider" class to this node, and the "content" * class to its child node. * * Hiding the content is done by setting the target node's height to zero; * its child node keeps its original size but becomes invisible. * * @attribute targetNode * @type node */ targetNode: { value: null }, /** * DOM node for the button link that triggers the reveal. * * @attribute linkNode * @type node */ linkNode: { value: null }, /** * The text the button link should contain when the content div is * visible. * * @attribute hideText * @type string */ hideText: { value: null }, /** * The text the button link should contain when the content div is hidden. * * @attribute showText * @type string */ showText: { value: null }, /** * Skip animations? * * Use this when testing, to avoid wasting time on delays.
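 *
 * Illustrative sketch (assumed selectors; not part of the original
 * file): tests can pass quick: true so the slide animation finishes
 * almost immediately:
 *
 *   var widget = new Y.maas.reveal.Reveal({
 *       targetNode: Y.one('#content'),
 *       linkNode: Y.one('#toggle'),
 *       showText: 'Show', hideText: 'Hide',
 *       quick: true});
 *   widget.render();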
* * @attribute quick * @type bool */ quick: { value: false } }; // Return a style attribute for a node, as an int. // Any suffix to the number, such as the typical "px," is ignored. function get_style_int(node, attribute) { return parseInt(node.getStyle(attribute), 10); } Y.extend(Reveal, Y.Widget, { /** * Standard YUI hook: prepare the DOM for the widget. * * @method renderUI */ renderUI: function() { var target = this.get('targetNode'); target.addClass('slider'); target.get('children').addClass('content'); }, /** * Standard YUI hook: install event listeners for the widget. * * @method bindUI */ bindUI: function() { var self = this; this.get('linkNode').on('click', function(e) { e.preventDefault(); self.reveal(); }); }, /** * Standard YUI hook: update UI to match the widget's state at the time * it is rendered. * * The HTML is written in an expanded state, but during rendering, the * widget immediately (and without animation) goes into its hidden state. * * @method syncUI */ syncUI: function() { this.fire("hiding"); this.get('targetNode').setStyle('height', 0); this.set_hidden_link(this.get('linkNode')); this.fire("hidden"); }, /** * Is this widget currently in its visible state? * * @method is_visible */ is_visible: function() { return get_style_int(this.get('targetNode'), 'height') > 0; }, /** * Set link to its "hidden" state. * * @method set_hidden_link */ set_hidden_link: function(link) { var new_text = this.get('showText'); if (new_text !== null && new_text !== undefined) { link.set('text', new_text); } }, /** * Set link to its "visible" state. * * @method set_visible_link */ set_visible_link: function(link) { var new_text = this.get('hideText'); if (new_text !== null && new_text !== undefined) { link.set('text', new_text); } }, /** * Get the desired duration for an animation. * * Returns the suggested duration, unless the "quick" attribute is set * in which case it returns a very brief duration. * * @method get_animation_duration */ get_animation_duration: function(suggested_duration) { if (this.get('quick')) { return 0.01; } else { return suggested_duration; } }, /** * Create the animation for sliding in the content div. * * @method create_slide_in */ create_slide_in: function(node, publisher) { var anim = new Y.Anim({ node: node, duration: this.get_animation_duration(0.3), to: {height: 0} }); anim.on('end', function() { publisher.fire('hidden'); }); return anim; }, /** * Create the animation for sliding out the content div. * * @method create_slide_out */ create_slide_out: function(node, publisher) { // The target node contains exactly one node of content. Its height // is constant. We calculate the appropriate expanded height for the // target node from the height of the content node, plus margins and // padding. var content_node = node.one('.content'); var new_height = ( get_style_int(content_node, 'height') + get_style_int(content_node, 'paddingTop') + get_style_int(content_node, 'paddingBottom') + get_style_int(content_node, 'marginTop') + get_style_int(content_node, 'marginBottom')); var anim = new Y.Anim({ node: node, duration: this.get_animation_duration(0.2), to: {height: new_height} }); anim.on('end', function() { publisher.fire('revealed'); }); return anim; }, /** * Toggle between the hidden and revealed states.
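 *
 * Illustrative wiring (assumed; not from the original file) for the
 * events this widget fires around the toggle:
 *
 *   widget.on('revealed', function() { Y.log('content now visible'); });
 *   widget.reveal();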
* * @method reveal */ reveal: function() { var target = this.get('targetNode'); var link = this.get('linkNode'); if (this.is_visible()) { this.fire('hiding'); this.create_slide_in(target, this).run(); this.set_hidden_link(link); } else { this.fire('revealing'); this.create_slide_out(target, this).run(); this.set_visible_link(link); } } }); module.Reveal = Reveal; }, '0.1', {'requires': ['widget', 'node', 'event', 'anim']}); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/shortpoll.js0000644000000000000000000001307613056115004022110 0ustar 00000000000000/* Copyright 2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * The shortpoll module provides the functionality to deal with updating * in-browser data by polling the server. * * @module shortpoll */ YUI.add('maas.shortpoll', function(Y) { var namespace = Y.namespace('maas.shortpoll'); // Event fired when the short-polling request starts. namespace.shortpoll_start_event = 'maas.shortpoll.start'; // Event fired each time the short-polling request fails (to connect or // to parse the returned result). namespace.shortpoll_fail_event = 'maas.shortpoll.failure'; // After MAX_SHORT_DELAY_FAILED_ATTEMPTS failed connections (real failed // connections or connection getting an invalid return) separated // by SHORT_DELAY (millisec), wait LONG_DELAY (millisec) between // each failed connection. namespace.MAX_SHORT_DELAY_FAILED_ATTEMPTS = 5; namespace.SHORT_DELAY = 15 * 1000; // 15 seconds. namespace.LONG_DELAY = 3 * 60 * 1000; // 3 minutes. // Ugly hack for tests, to prevent repolling. namespace._repoll = true; // Overridden by tests. namespace._io = new Y.maas.io.getIO(); /** * * A ShortPollManager creates and manages a polling connection to the server * to fetch objects. * * @class ShortPollManager */ function ShortPollManager(config) { ShortPollManager.superclass.constructor.apply(this, arguments); } ShortPollManager.NAME = "shortPollManager"; ShortPollManager.ATTRS = { /** * The URI to poll. * * @attribute uri * @type string */ uri: { value: "" }, /** * The key with which to publish polled responses. * * @attribute eventKey * @type string */ eventKey: { valueFn: function() { return Y.guid("shortpoll_"); } }, /** * The IO instance used. * * @attribute io * @type Y.IO */ io: { readOnly: true, getter: function() { return namespace._io; } } }; Y.extend(ShortPollManager, Y.Base, { initializer : function(cfg) { this._started = false; this._failed_attempts = 0; this._sequence = 0; }, successPoll : function (id, response) { try { var eventKey = this.get("eventKey"); var data = Y.JSON.parse(response.responseText); Y.fire(eventKey, data); return true; } catch (e) { Y.fire(namespace.shortpoll_fail_event, e); return false; } }, failurePoll : function () { Y.fire(namespace.shortpoll_fail_event); }, /** * Return the delay (milliseconds) to wait before trying to reconnect * again after a failed connection. * * The rationale here is that: * 1. We should not try to reconnect instantaneously after a failed * connection. * 2. After a certain number of failed connections, we should set the * delay between two failed connection to a bigger number because the * server may be having problems. * * @method _pollDelay */ _pollDelay : function() { if (this._failed_attempts >= namespace.MAX_SHORT_DELAY_FAILED_ATTEMPTS) { return namespace.LONG_DELAY; } else { return namespace.SHORT_DELAY; } }, /** * Relaunch a connection to the server after a successful or * a failed connection. 
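 *
 * Backoff sketch (illustrative, derived from the constants above): the
 * first four consecutive failures are each retried after SHORT_DELAY
 * (15 seconds); from the fifth failure onwards the wait becomes
 * LONG_DELAY (3 minutes), until a poll succeeds and the counter resets.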
* * @method repoll * @param {Boolean} failed: whether or not the previous connection * has failed. */ repoll : function(failed) { if (failed) { this._failed_attempts += 1; } else { this._failed_attempts = 0; } if (namespace._repoll) { var delay = this._pollDelay(); Y.later(delay, this, this.poll); return delay; } else { return this._pollDelay(); } }, poll : function() { var that = this; var config = { method: "GET", sync: false, on: { failure: function(id, response) { if (Y.Lang.isValue(response) && Y.Lang.isValue(response.status) && (response.status === 408 || response.status === 504)) { // If the error code is: // - 408 Request timeout // - 504 Gateway timeout // Then ignore the error and start // polling again. that.repoll(false); } else { that.failurePoll(); that.repoll(true); } }, success: function(id, response) { var success = that.successPoll(id, response); that.repoll(!success); } } }; this._sequence = this._sequence + 1; var poll_uri = this.get("uri"); if (poll_uri.indexOf("?") >= 0) { poll_uri += "&sequence=" + this._sequence; } else { poll_uri += "?sequence=" + this._sequence; } if (!this._started) { Y.fire(namespace.shortpoll_start_event); this._started = true; } this.get("io").send(poll_uri, config); } }); namespace.ShortPollManager = ShortPollManager; }, "0.1", {"requires":["base", "event", "json", "maas.io"]}); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/testing/0000755000000000000000000000000013056115004021172 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/0000755000000000000000000000000013056115004020657 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/user_panel.js0000644000000000000000000000251313056115004022211 0ustar 00000000000000/* Copyright 2012-2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Widget to show user options. * * @module Y.maas.user_panel */ YUI.add('maas.user_panel', function(Y) { Y.log('loading maas.user_panel'); var module = Y.namespace('maas.user_panel'); module._user_panel_singleton = null; /** * Initialise a widget to display user options. * * @method createUserPanelWidget */ module.createUserPanelWidget = function(event) { Y.Base.mix(Y.Overlay, [Y.WidgetAutohide]); var cfg = { srcNode: '#user-options', align: {node:'#user-link', points: [Y.WidgetPositionAlign.TR, Y.WidgetPositionAlign.BR]}, width: '150px', zIndex: 2, hideOn: [{eventName: 'clickoutside'}], visible: false, render: true }; module._user_panel_singleton = new Y.Overlay(cfg); Y.one(cfg.srcNode).removeClass('hidden'); }; /** * Show a widget to display user options. * * @method showUserPanelWidget */ module.showUserPanelWidget = function(event) { // Cope with manual calls as well as event calls. 
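// Illustrative page wiring (assumed; not part of the original file):
//   Y.on('domready', Y.maas.user_panel.createUserPanelWidget);
//   Y.one('#user-link').on(
//       'click', Y.maas.user_panel.showUserPanelWidget);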
if (Y.Lang.isValue(event)) { event.preventDefault(); } module._user_panel_singleton.show(); }; }, '0.1', {'requires': ['overlay', 'base-build', 'widget-autohide']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/3rdparty/0000755000000000000000000000000013056115004022716 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/0000755000000000000000000000000013056115004023514 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/0000755000000000000000000000000013056115004023307 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/0000755000000000000000000000000013056115004023125 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/0000755000000000000000000000000013056115004022616 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/maas.js0000644000000000000000000000530613056115004022431 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Module * * Initializes the MAAS module with its required dependencies and sets up * the interpolator to use '{$' and '$}' instead of '{{' and '}}' as this * conflicts with Django templates. */ angular.module('MAAS', ['ngRoute', 'ngCookies', 'ngTagsInput']).config( function($interpolateProvider, $routeProvider) { $interpolateProvider.startSymbol('{$'); $interpolateProvider.endSymbol('$}'); // Helper that wraps the templateUrl to append the files version // to the path. Used to override the client cache. function versionedPath(path) { return path + "?v=" + MAAS_config.files_version; } // Set up routes only for the index page; all remaining pages should // not use routes. Once all pages are converted to using Angular this // will go away, and the page will never have to reload. var href = angular.element("base").attr('href'); var path = document.location.pathname; if(path[path.length - 1] !== '/') { path += '/'; } if(path === href) { $routeProvider. when('/nodes', { templateUrl: versionedPath( 'static/partials/nodes-list.html'), controller: 'NodesListController' }). when('/node/:system_id', { templateUrl: versionedPath( 'static/partials/node-details.html'), controller: 'NodeDetailsController' }). when('/node/:system_id/result/:filename', { templateUrl: versionedPath( 'static/partials/node-result.html'), controller: 'NodeResultController' }). when('/node/:system_id/events', { templateUrl: versionedPath( 'static/partials/node-events.html'), controller: 'NodeEventsController' }). when('/subnets', { templateUrl: versionedPath( 'static/partials/subnets-list.html'), controller: 'SubnetsListController' }). when('/subnet/:subnet_id', { templateUrl: versionedPath( 'static/partials/subnet-details.html'), controller: 'SubnetDetailsController' }). otherwise({ redirectTo: '/nodes' }); } }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/0000755000000000000000000000000013056115004022771 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/testing/0000755000000000000000000000000013056115004022623 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/3rdparty/ng-tags-input.js0000644000000000000000000012704513056115004025762 0ustar 00000000000000/*!
* ngTagsInput v2.3.0 * http://mbenford.github.io/ngTagsInput * * Copyright (c) 2013-2015 Michael Benford * License: MIT * * Generated at 2015-03-24 00:49:44 -0300 */ (function() { 'use strict'; var KEYS = { backspace: 8, tab: 9, enter: 13, escape: 27, space: 32, up: 38, down: 40, left: 37, right: 39, delete: 46, comma: 188 }; var MAX_SAFE_INTEGER = 9007199254740991; var SUPPORTED_INPUT_TYPES = ['text', 'email', 'url']; var tagsInput = angular.module('ngTagsInput', []); /** * @ngdoc directive * @name tagsInput * @module ngTagsInput * * @description * Renders an input box with tag editing support. * * @param {string} ngModel Assignable angular expression to data-bind to. * @param {string=} [displayProperty=text] Property to be rendered as the tag label. * @param {string=} [keyProperty=text] Property to be used as a unique identifier for the tag. * @param {string=} [type=text] Type of the input element. Only 'text', 'email' and 'url' are supported values. * @param {number=} tabindex Tab order of the control. * @param {string=} [placeholder=Add a tag] Placeholder text for the control. * @param {number=} [minLength=3] Minimum length for a new tag. * @param {number=} [maxLength=MAX_SAFE_INTEGER] Maximum length allowed for a new tag. * @param {number=} [minTags=0] Sets minTags validation error key if the number of tags added is less than minTags. * @param {number=} [maxTags=MAX_SAFE_INTEGER] Sets maxTags validation error key if the number of tags added is greater than maxTags. * @param {boolean=} [allowLeftoverText=false] Sets leftoverText validation error key if there is any leftover text in * the input element when the directive loses focus. * @param {string=} [removeTagSymbol=×] Symbol character for the remove tag button. * @param {boolean=} [addOnEnter=true] Flag indicating that a new tag will be added on pressing the ENTER key. * @param {boolean=} [addOnSpace=false] Flag indicating that a new tag will be added on pressing the SPACE key. * @param {boolean=} [addOnComma=true] Flag indicating that a new tag will be added on pressing the COMMA key. * @param {boolean=} [addOnBlur=true] Flag indicating that a new tag will be added when the input field loses focus. * @param {boolean=} [addOnPaste=false] Flag indicating that the text pasted into the input field will be split into tags. * @param {string=} [pasteSplitPattern=,] Regular expression used to split the pasted text into tags. * @param {boolean=} [replaceSpacesWithDashes=true] Flag indicating that spaces will be replaced with dashes. * @param {string=} [allowedTagsPattern=.+] Regular expression that determines whether a new tag is valid. * @param {boolean=} [enableEditingLastTag=false] Flag indicating that the last tag will be moved back into * the new tag input box instead of being removed when the backspace key * is pressed and the input box is empty. * @param {boolean=} [addFromAutocompleteOnly=false] Flag indicating that only tags coming from the autocomplete list will be allowed. * When this flag is true, addOnEnter, addOnComma, addOnSpace, addOnBlur and * allowLeftoverText values are ignored. * @param {boolean=} [spellcheck=true] Flag indicating whether the browser's spellcheck is enabled for the input field or not. * @param {expression} onTagAdding Expression to evaluate that will be invoked before adding a new tag. The new tag is available as $tag. This method must return either true or false. If false, the tag will not be added. * @param {expression} onTagAdded Expression to evaluate upon adding a new tag. 
The new tag is available as $tag. * @param {expression} onInvalidTag Expression to evaluate when a tag is invalid. The invalid tag is available as $tag. * @param {expression} onTagRemoving Expression to evaluate that will be invoked before removing a tag. The tag is available as $tag. This method must return either true or false. If false, the tag will not be removed. * @param {expression} onTagRemoved Expression to evaluate upon removing an existing tag. The removed tag is available as $tag. */ tagsInput.directive('tagsInput', ["$timeout","$document","$window","tagsInputConfig","tiUtil", function($timeout, $document, $window, tagsInputConfig, tiUtil) { function TagList(options, events, onTagAdding, onTagRemoving) { var self = {}, getTagText, setTagText, tagIsValid; getTagText = function(tag) { return tiUtil.safeToString(tag[options.displayProperty]); }; setTagText = function(tag, text) { tag[options.displayProperty] = text; }; tagIsValid = function(tag) { var tagText = getTagText(tag); return tagText && tagText.length >= options.minLength && tagText.length <= options.maxLength && options.allowedTagsPattern.test(tagText) && !tiUtil.findInObjectArray(self.items, tag, options.keyProperty || options.displayProperty) && onTagAdding({ $tag: tag }); }; self.items = []; self.addText = function(text) { var tag = {}; setTagText(tag, text); return self.add(tag); }; self.add = function(tag) { var tagText = getTagText(tag); if (options.replaceSpacesWithDashes) { tagText = tiUtil.replaceSpacesWithDashes(tagText); } setTagText(tag, tagText); if (tagIsValid(tag)) { self.items.push(tag); events.trigger('tag-added', { $tag: tag }); } else if (tagText) { events.trigger('invalid-tag', { $tag: tag }); } return tag; }; self.remove = function(index) { var tag = self.items[index]; if (onTagRemoving({ $tag: tag })) { self.items.splice(index, 1); self.clearSelection(); events.trigger('tag-removed', { $tag: tag }); return tag; } }; self.select = function(index) { if (index < 0) { index = self.items.length - 1; } else if (index >= self.items.length) { index = 0; } self.index = index; self.selected = self.items[index]; }; self.selectPrior = function() { self.select(--self.index); }; self.selectNext = function() { self.select(++self.index); }; self.removeSelected = function() { return self.remove(self.index); }; self.clearSelection = function() { self.selected = null; self.index = -1; }; self.clearSelection(); return self; } function validateType(type) { return SUPPORTED_INPUT_TYPES.indexOf(type) !== -1; } return { restrict: 'E', require: 'ngModel', scope: { tags: '=ngModel', onTagAdding: '&', onTagAdded: '&', onInvalidTag: '&', onTagRemoving: '&', onTagRemoved: '&' }, replace: false, transclude: true, templateUrl: 'ngTagsInput/tags-input.html', controller: ["$scope","$attrs","$element", function($scope, $attrs, $element) { $scope.events = tiUtil.simplePubSub(); tagsInputConfig.load('tagsInput', $scope, $attrs, { template: [String, 'ngTagsInput/tag-item.html'], type: [String, 'text', validateType], placeholder: [String, 'Add a tag'], tabindex: [Number, null], removeTagSymbol: [String, String.fromCharCode(215)], replaceSpacesWithDashes: [Boolean, true], minLength: [Number, 3], maxLength: [Number, MAX_SAFE_INTEGER], addOnEnter: [Boolean, true], addOnSpace: [Boolean, false], addOnComma: [Boolean, true], addOnBlur: [Boolean, true], addOnPaste: [Boolean, false], pasteSplitPattern: [RegExp, /,/], allowedTagsPattern: [RegExp, /.+/], enableEditingLastTag: [Boolean, false], minTags: [Number, 0], maxTags: [Number, 
MAX_SAFE_INTEGER], displayProperty: [String, 'text'], keyProperty: [String, ''], allowLeftoverText: [Boolean, false], addFromAutocompleteOnly: [Boolean, false], spellcheck: [Boolean, true] }); $scope.tagList = new TagList($scope.options, $scope.events, tiUtil.handleUndefinedResult($scope.onTagAdding, true), tiUtil.handleUndefinedResult($scope.onTagRemoving, true)); this.registerAutocomplete = function() { var input = $element.find('input'); return { addTag: function(tag) { return $scope.tagList.add(tag); }, focusInput: function() { // blake_r - Stop the focus as this breaks on the // version of AngularJS that ships with MAAS. //input[0].focus(); }, getTags: function() { return $scope.tags; }, getCurrentTagText: function() { return $scope.newTag.text; }, getOptions: function() { return $scope.options; }, on: function(name, handler) { $scope.events.on(name, handler); return this; } }; }; this.registerTagItem = function() { return { getOptions: function() { return $scope.options; }, removeTag: function(index) { if ($scope.disabled) { return; } $scope.tagList.remove(index); } }; }; }], link: function(scope, element, attrs, ngModelCtrl) { var hotkeys = [KEYS.enter, KEYS.comma, KEYS.space, KEYS.backspace, KEYS.delete, KEYS.left, KEYS.right], tagList = scope.tagList, events = scope.events, options = scope.options, input = element.find('input'), validationOptions = ['minTags', 'maxTags', 'allowLeftoverText'], setElementValidity; setElementValidity = function() { ngModelCtrl.$setValidity('maxTags', scope.tags.length <= options.maxTags); ngModelCtrl.$setValidity('minTags', scope.tags.length >= options.minTags); ngModelCtrl.$setValidity('leftoverText', scope.hasFocus || options.allowLeftoverText ? true : !scope.newTag.text); }; ngModelCtrl.$isEmpty = function(value) { return !value || !value.length; }; scope.newTag = { text: '', invalid: null, setText: function(value) { this.text = value; events.trigger('input-change', value); } }; scope.track = function(tag) { return tag[options.keyProperty || options.displayProperty]; }; scope.$watch('tags', function(value) { scope.tags = tiUtil.makeObjectArray(value, options.displayProperty); tagList.items = scope.tags; }); scope.$watch('tags.length', function() { setElementValidity(); }); attrs.$observe('disabled', function(value) { scope.disabled = value; }); scope.eventHandlers = { input: { change: function(text) { events.trigger('input-change', text); }, keydown: function($event) { events.trigger('input-keydown', $event); }, focus: function() { if (scope.hasFocus) { return; } scope.hasFocus = true; events.trigger('input-focus'); }, blur: function() { $timeout(function() { var activeElement = $document.prop('activeElement'), lostFocusToBrowserWindow = activeElement === input[0], lostFocusToChildElement = element[0].contains(activeElement); if (lostFocusToBrowserWindow || !lostFocusToChildElement) { scope.hasFocus = false; events.trigger('input-blur'); } }); }, paste: function($event) { $event.getTextData = function() { var clipboardData = $event.clipboardData || ($event.originalEvent && $event.originalEvent.clipboardData); return clipboardData ? clipboardData.getData('text/plain') : $window.clipboardData.getData('Text'); }; events.trigger('input-paste', $event); } }, host: { click: function() { if (scope.disabled) { return; } // blake_r - Stop the focus as this breaks on the // version of AngularJS that ships with MAAS. 
//input[0].focus(); } } }; events .on('tag-added', scope.onTagAdded) .on('invalid-tag', scope.onInvalidTag) .on('tag-removed', scope.onTagRemoved) .on('tag-added', function() { scope.newTag.setText(''); }) .on('tag-added tag-removed', function() { // Sets the element to its dirty state // In Angular 1.3 this will be replaced with $setDirty. ngModelCtrl.$setViewValue(scope.tags); }) .on('invalid-tag', function() { scope.newTag.invalid = true; }) .on('option-change', function(e) { if (validationOptions.indexOf(e.name) !== -1) { setElementValidity(); } }) .on('input-change', function() { tagList.clearSelection(); scope.newTag.invalid = null; }) .on('input-focus', function() { element.triggerHandler('focus'); ngModelCtrl.$setValidity('leftoverText', true); }) .on('input-blur', function() { if (options.addOnBlur && !options.addFromAutocompleteOnly) { tagList.addText(scope.newTag.text); } element.triggerHandler('blur'); setElementValidity(); }) .on('input-keydown', function(event) { var key = event.keyCode, isModifier = event.shiftKey || event.altKey || event.ctrlKey || event.metaKey, addKeys = {}, shouldAdd, shouldRemove, shouldSelect, shouldEditLastTag; if (isModifier || hotkeys.indexOf(key) === -1) { return; } addKeys[KEYS.enter] = options.addOnEnter; addKeys[KEYS.comma] = options.addOnComma; addKeys[KEYS.space] = options.addOnSpace; shouldAdd = !options.addFromAutocompleteOnly && addKeys[key]; shouldRemove = (key === KEYS.backspace || key === KEYS.delete) && tagList.selected; shouldEditLastTag = key === KEYS.backspace && scope.newTag.text.length === 0 && options.enableEditingLastTag; shouldSelect = (key === KEYS.backspace || key === KEYS.left || key === KEYS.right) && scope.newTag.text.length === 0 && !options.enableEditingLastTag; if (shouldAdd) { tagList.addText(scope.newTag.text); } else if (shouldEditLastTag) { var tag; tagList.selectPrior(); tag = tagList.removeSelected(); if (tag) { scope.newTag.setText(tag[options.displayProperty]); } } else if (shouldRemove) { tagList.removeSelected(); } else if (shouldSelect) { if (key === KEYS.left || key === KEYS.backspace) { tagList.selectPrior(); } else if (key === KEYS.right) { tagList.selectNext(); } } if (shouldAdd || shouldSelect || shouldRemove || shouldEditLastTag) { event.preventDefault(); } }) .on('input-paste', function(event) { if (options.addOnPaste) { var data = event.getTextData(); var tags = data.split(options.pasteSplitPattern); if (tags.length > 1) { tags.forEach(function(tag) { tagList.addText(tag); }); event.preventDefault(); } } }); } }; }]); /** * @ngdoc directive * @name tiTagItem * @module ngTagsInput * * @description * Represents a tag item. Used internally by the tagsInput directive. */ tagsInput.directive('tiTagItem', ["tiUtil", function(tiUtil) { return { restrict: 'E', require: '^tagsInput', template: '', scope: { data: '=' }, link: function(scope, element, attrs, tagsInputCtrl) { var tagsInput = tagsInputCtrl.registerTagItem(), options = tagsInput.getOptions(); scope.$$template = options.template; scope.$$removeTagSymbol = options.removeTagSymbol; scope.$getDisplayText = function() { return tiUtil.safeToString(scope.data[options.displayProperty]); }; scope.$removeTag = function() { tagsInput.removeTag(scope.$index); }; scope.$watch('$parent.$index', function(value) { scope.$index = value; }); } }; }]); /** * @ngdoc directive * @name autoComplete * @module ngTagsInput * * @description * Provides autocomplete support for the tagsInput directive. 
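 *
 * Typical usage (illustrative; the handler name is an assumption):
 *
 *   <tags-input ng-model="tags">
 *     <auto-complete source="loadTags($query)"></auto-complete>
 *   </tags-input>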
* * @param {expression} source Expression to evaluate upon changing the input content. The input value is available as * $query. The result of the expression must be a promise that eventually resolves to an * array of strings. * @param {string=} [displayProperty=text] Property to be rendered as the autocomplete label. * @param {number=} [debounceDelay=100] Amount of time, in milliseconds, to wait before evaluating the expression in * the source option after the last keystroke. * @param {number=} [minLength=3] Minimum number of characters that must be entered before evaluating the expression * in the source option. * @param {boolean=} [highlightMatchedText=true] Flag indicating that the matched text will be highlighted in the * suggestions list. * @param {number=} [maxResultsToShow=10] Maximum number of results to be displayed at a time. * @param {boolean=} [loadOnDownArrow=false] Flag indicating that the source option will be evaluated when the down arrow * key is pressed and the suggestion list is closed. The current input value * is available as $query. * @param {boolean=} {loadOnEmpty=false} Flag indicating that the source option will be evaluated when the input content * becomes empty. The $query variable will be passed to the expression as an empty string. * @param {boolean=} {loadOnFocus=false} Flag indicating that the source option will be evaluated when the input element * gains focus. The current input value is available as $query. * @param {boolean=} [selectFirstMatch=true] Flag indicating that the first match will be automatically selected once * the suggestion list is shown. * @param {string=} [template=] URL or id of a custom template for rendering each element of the autocomplete list. */ tagsInput.directive('autoComplete', ["$document","$timeout","$sce","$q","tagsInputConfig","tiUtil", function($document, $timeout, $sce, $q, tagsInputConfig, tiUtil) { function SuggestionList(loadFn, options, events) { var self = {}, getDifference, lastPromise, getTagId; getTagId = function() { return options.tagsInput.keyProperty || options.tagsInput.displayProperty; }; getDifference = function(array1, array2) { return array1.filter(function(item) { return !tiUtil.findInObjectArray(array2, item, getTagId(), function(a, b) { if (options.tagsInput.replaceSpacesWithDashes) { a = tiUtil.replaceSpacesWithDashes(a); b = tiUtil.replaceSpacesWithDashes(b); } return tiUtil.defaultComparer(a, b); }); }); }; self.reset = function() { lastPromise = null; self.items = []; self.visible = false; self.index = -1; self.selected = null; self.query = null; }; self.show = function() { if (options.selectFirstMatch) { self.select(0); } else { self.selected = null; } self.visible = true; }; self.load = tiUtil.debounce(function(query, tags) { self.query = query; var promise = $q.when(loadFn({ $query: query })); lastPromise = promise; promise.then(function(items) { if (promise !== lastPromise) { return; } items = tiUtil.makeObjectArray(items.data || items, getTagId()); items = getDifference(items, tags); self.items = items.slice(0, options.maxResultsToShow); if (self.items.length > 0) { self.show(); } else { self.reset(); } }); }, options.debounceDelay); self.selectNext = function() { self.select(++self.index); }; self.selectPrior = function() { self.select(--self.index); }; self.select = function(index) { if (index < 0) { index = self.items.length - 1; } else if (index >= self.items.length) { index = 0; } self.index = index; self.selected = self.items[index]; events.trigger('suggestion-selected', index); }; 
self.reset(); return self; } function scrollToElement(root, index) { var element = root.find('li').eq(index), parent = element.parent(), elementTop = element.prop('offsetTop'), elementHeight = element.prop('offsetHeight'), parentHeight = parent.prop('clientHeight'), parentScrollTop = parent.prop('scrollTop'); if (elementTop < parentScrollTop) { parent.prop('scrollTop', elementTop); } else if (elementTop + elementHeight > parentHeight + parentScrollTop) { parent.prop('scrollTop', elementTop + elementHeight - parentHeight); } } return { restrict: 'E', require: '^tagsInput', scope: { source: '&' }, templateUrl: 'ngTagsInput/auto-complete.html', controller: ["$scope","$element","$attrs", function($scope, $element, $attrs) { $scope.events = tiUtil.simplePubSub(); tagsInputConfig.load('autoComplete', $scope, $attrs, { template: [String, 'ngTagsInput/auto-complete-match.html'], debounceDelay: [Number, 100], minLength: [Number, 3], highlightMatchedText: [Boolean, true], maxResultsToShow: [Number, 10], loadOnDownArrow: [Boolean, false], loadOnEmpty: [Boolean, false], loadOnFocus: [Boolean, false], selectFirstMatch: [Boolean, true], displayProperty: [String, ''] }); $scope.suggestionList = new SuggestionList($scope.source, $scope.options, $scope.events); this.registerAutocompleteMatch = function() { return { getOptions: function() { return $scope.options; }, getQuery: function() { return $scope.suggestionList.query; } }; }; }], link: function(scope, element, attrs, tagsInputCtrl) { var hotkeys = [KEYS.enter, KEYS.tab, KEYS.escape, KEYS.up, KEYS.down], suggestionList = scope.suggestionList, tagsInput = tagsInputCtrl.registerAutocomplete(), options = scope.options, events = scope.events, shouldLoadSuggestions; options.tagsInput = tagsInput.getOptions(); shouldLoadSuggestions = function(value) { return value && value.length >= options.minLength || !value && options.loadOnEmpty; }; scope.addSuggestionByIndex = function(index) { suggestionList.select(index); scope.addSuggestion(); }; scope.addSuggestion = function() { var added = false; if (suggestionList.selected) { tagsInput.addTag(angular.copy(suggestionList.selected)); suggestionList.reset(); tagsInput.focusInput(); added = true; } return added; }; scope.track = function(item) { return item[options.tagsInput.keyProperty || options.tagsInput.displayProperty]; }; tagsInput .on('tag-added invalid-tag input-blur', function() { suggestionList.reset(); }) .on('input-change', function(value) { if (shouldLoadSuggestions(value)) { suggestionList.load(value, tagsInput.getTags()); } else { suggestionList.reset(); } }) .on('input-focus', function() { var value = tagsInput.getCurrentTagText(); if (options.loadOnFocus && shouldLoadSuggestions(value)) { suggestionList.load(value, tagsInput.getTags()); } }) .on('input-keydown', function(event) { var key = event.keyCode, handled = false; if (hotkeys.indexOf(key) === -1) { return; } if (suggestionList.visible) { if (key === KEYS.down) { suggestionList.selectNext(); handled = true; } else if (key === KEYS.up) { suggestionList.selectPrior(); handled = true; } else if (key === KEYS.escape) { suggestionList.reset(); handled = true; } else if (key === KEYS.enter || key === KEYS.tab) { handled = scope.addSuggestion(); } } else { if (key === KEYS.down && scope.options.loadOnDownArrow) { suggestionList.load(tagsInput.getCurrentTagText(), tagsInput.getTags()); handled = true; } } if (handled) { event.preventDefault(); event.stopImmediatePropagation(); return false; } }); events.on('suggestion-selected', function(index) { 
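// Keep the newly selected suggestion visible by scrolling the
// dropdown list (see scrollToElement above).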
scrollToElement(element, index); }); } }; }]); /** * @ngdoc directive * @name tiAutocompleteMatch * @module ngTagsInput * * @description * Represents an autocomplete match. Used internally by the autoComplete directive. */ tagsInput.directive('tiAutocompleteMatch', ["$sce","tiUtil", function($sce, tiUtil) { return { restrict: 'E', require: '^autoComplete', template: '', scope: { data: '=' }, link: function(scope, element, attrs, autoCompleteCtrl) { var autoComplete = autoCompleteCtrl.registerAutocompleteMatch(), options = autoComplete.getOptions(); scope.$$template = options.template; scope.$index = scope.$parent.$index; scope.$highlight = function(text) { if (options.highlightMatchedText) { text = tiUtil.safeHighlight(text, autoComplete.getQuery()); } return $sce.trustAsHtml(text); }; scope.$getDisplayText = function() { return tiUtil.safeToString(scope.data[options.displayProperty || options.tagsInput.displayProperty]); }; } }; }]); /** * @ngdoc directive * @name tiTranscludeAppend * @module ngTagsInput * * @description * Re-creates the old behavior of ng-transclude. Used internally by tagsInput directive. */ tagsInput.directive('tiTranscludeAppend', function() { return function(scope, element, attrs, ctrl, transcludeFn) { transcludeFn(function(clone) { element.append(clone); }); }; }); /** * @ngdoc directive * @name tiAutosize * @module ngTagsInput * * @description * Automatically sets the input's width so its content is always visible. Used internally by tagsInput directive. */ tagsInput.directive('tiAutosize', ["tagsInputConfig", function(tagsInputConfig) { return { restrict: 'A', require: 'ngModel', link: function(scope, element, attrs, ctrl) { var threshold = tagsInputConfig.getTextAutosizeThreshold(), span, resize; span = angular.element(''); span.css('display', 'none') .css('visibility', 'hidden') .css('width', 'auto') .css('white-space', 'pre'); element.parent().append(span); resize = function(originalValue) { var value = originalValue, width; if (angular.isString(value) && value.length === 0) { value = attrs.placeholder; } if (value) { span.text(value); span.css('display', ''); width = span.prop('offsetWidth'); span.css('display', 'none'); } element.css('width', width ? width + threshold + 'px' : ''); return originalValue; }; ctrl.$parsers.unshift(resize); ctrl.$formatters.unshift(resize); attrs.$observe('placeholder', function(value) { if (!ctrl.$modelValue) { resize(value); } }); } }; }]); /** * @ngdoc directive * @name tiBindAttrs * @module ngTagsInput * * @description * Binds attributes to expressions. Used internally by tagsInput directive. */ tagsInput.directive('tiBindAttrs', function() { return function(scope, element, attrs) { scope.$watch(attrs.tiBindAttrs, function(value) { angular.forEach(value, function(value, key) { /** * blake_r - Added to work around the version of jQuery that * MAAS currently ships with. Once packaging for jQuery is * version >1.9 this can be removed. */ if(key === "type") { element[0].type = value; } else { attrs.$set(key, value); } }); }, true); }; }); /** * @ngdoc service * @name tagsInputConfig * @module ngTagsInput * * @description * Sets global configuration settings for both tagsInput and autoComplete directives. It's also used internally to parse and * initialize options from HTML attributes. */ tagsInput.provider('tagsInputConfig', function() { var globalDefaults = {}, interpolationStatus = {}, autosizeThreshold = 3; /** * @ngdoc method * @name setDefaults * @description Sets the default configuration option for a directive. 
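 *
 * Illustrative use from an application config block (assumed):
 *
 *   app.config(function(tagsInputConfigProvider) {
 *     tagsInputConfigProvider.setDefaults(
 *         'tagsInput', {placeholder: 'Add a tag'});
 *   });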
* @methodOf tagsInputConfig * * @param {string} directive Name of the directive to be configured. Must be either 'tagsInput' or 'autoComplete'. * @param {object} defaults Object containing options and their values. * * @returns {object} The service itself for chaining purposes. */ this.setDefaults = function(directive, defaults) { globalDefaults[directive] = defaults; return this; }; /*** * @ngdoc method * @name setActiveInterpolation * @description Sets active interpolation for a set of options. * @methodOf tagsInputConfig * * @param {string} directive Name of the directive to be configured. Must be either 'tagsInput' or 'autoComplete'. * @param {object} options Object containing which options should have interpolation turned on at all times. * * @returns {object} The service itself for chaining purposes. */ this.setActiveInterpolation = function(directive, options) { interpolationStatus[directive] = options; return this; }; /*** * @ngdoc method * @name setTextAutosizeThreshold * @description Sets the threshold used by the tagsInput directive to re-size the inner input field element based on its contents. * @methodOf tagsInputConfig * * @param {number} threshold Threshold value, in pixels. * * @returns {object} The service itself for chaining purposes. */ this.setTextAutosizeThreshold = function(threshold) { autosizeThreshold = threshold; return this; }; this.$get = ["$interpolate", function($interpolate) { var converters = {}; converters[String] = function(value) { return value; }; converters[Number] = function(value) { return parseInt(value, 10); }; converters[Boolean] = function(value) { return value.toLowerCase() === 'true'; }; converters[RegExp] = function(value) { return new RegExp(value); }; return { load: function(directive, scope, attrs, options) { var defaultValidator = function() { return true; }; scope.options = {}; angular.forEach(options, function(value, key) { var type, localDefault, validator, converter, getDefault, updateValue; type = value[0]; localDefault = value[1]; validator = value[2] || defaultValidator; converter = converters[type]; getDefault = function() { var globalValue = globalDefaults[directive] && globalDefaults[directive][key]; return angular.isDefined(globalValue) ? globalValue : localDefault; }; updateValue = function(value) { scope.options[key] = value && validator(value) ? converter(value) : getDefault(); }; if (interpolationStatus[directive] && interpolationStatus[directive][key]) { attrs.$observe(key, function(value) { updateValue(value); scope.events.trigger('option-change', { name: key, newValue: value }); }); } else { updateValue(attrs[key] && $interpolate(attrs[key])(scope.$parent)); } }); }, getTextAutosizeThreshold: function() { return autosizeThreshold; } }; }]; }); /*** * @ngdoc factory * @name tiUtil * @module ngTagsInput * * @description * Helper methods used internally by the directive. Should not be called directly from user code. 
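 *
 * For example (illustrative), tiUtil.debounce(fn, 100) returns a
 * wrapper that delays fn by 100ms and restarts that timer on every
 * call, so only the last call in a burst actually runs.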
*/ tagsInput.factory('tiUtil', ["$timeout", function($timeout) { var self = {}; self.debounce = function(fn, delay) { var timeoutId; return function() { var args = arguments; $timeout.cancel(timeoutId); timeoutId = $timeout(function() { fn.apply(null, args); }, delay); }; }; self.makeObjectArray = function(array, key) { array = array || []; if (array.length > 0 && !angular.isObject(array[0])) { array.forEach(function(item, index) { array[index] = {}; array[index][key] = item; }); } return array; }; self.findInObjectArray = function(array, obj, key, comparer) { var item = null; comparer = comparer || self.defaultComparer; array.some(function(element) { if (comparer(element[key], obj[key])) { item = element; return true; } }); return item; }; self.defaultComparer = function(a, b) { // I'm aware of the internationalization issues regarding toLowerCase() // but I couldn't come up with a better solution right now return self.safeToString(a).toLowerCase() === self.safeToString(b).toLowerCase(); }; self.safeHighlight = function(str, value) { if (!value) { return str; } function escapeRegexChars(str) { return str.replace(/([.?*+^$[\]\\(){}|-])/g, '\\$1'); } str = self.encodeHTML(str); value = self.encodeHTML(value); var expression = new RegExp('&[^;]+;|' + escapeRegexChars(value), 'gi'); return str.replace(expression, function(match) { return match.toLowerCase() === value.toLowerCase() ? '' + match + '' : match; }); }; self.safeToString = function(value) { return angular.isUndefined(value) || value == null ? '' : value.toString().trim(); }; self.encodeHTML = function(value) { return self.safeToString(value) .replace(/&/g, '&') .replace(//g, '>'); }; self.handleUndefinedResult = function(fn, valueIfUndefined) { return function() { var result = fn.apply(null, arguments); return angular.isUndefined(result) ? valueIfUndefined : result; }; }; self.replaceSpacesWithDashes = function(str) { return self.safeToString(str).replace(/\s/g, '-'); }; self.simplePubSub = function() { var events = {}; return { on: function(names, handler) { names.split(' ').forEach(function(name) { if (!events[name]) { events[name] = []; } events[name].push(handler); }); return this; }, trigger: function(name, args) { var handlers = events[name] || []; handlers.every(function(handler) { return self.handleUndefinedResult(handler, true)(args); }); return this; } }; }; return self; }]); /* HTML templates */ tagsInput.run(["$templateCache", function($templateCache) { $templateCache.put('ngTagsInput/tags-input.html', "
    " ); $templateCache.put('ngTagsInput/tag-item.html', "
    " ); $templateCache.put('ngTagsInput/auto-complete.html', "
    " ); $templateCache.put('ngTagsInput/auto-complete-match.html', "" ); }]); }()); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/add_device.js0000644000000000000000000002766713056115004026143 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Add Device Controller */ angular.module('MAAS').controller('AddDeviceController', [ '$scope', 'ClustersManager', 'DevicesManager', 'ManagerHelperService', 'ValidationService', function($scope, ClustersManager, DevicesManager, ManagerHelperService, ValidationService) { // Set the addDeviceScope in the parent, so it can call functions // in this controller. var parentScope = $scope.$parent; parentScope.addDeviceScope = $scope; // Set initial values. $scope.viewable = false; $scope.clusters = ClustersManager.getItems(); $scope.error = null; // Device ip assignment options. $scope.ipAssignments = [ { name: "external", title: "External" }, { name: "dynamic", title: "Dynamic" }, { name: "static", title: "Static" } ]; // Makes a new interface. function makeInterface() { return { mac: "", ipAssignment: null, clusterInterfaceId: null, ipAddress: "" }; } // Makes a new device. function makeDevice() { return { name: "", interfaces: [makeInterface()] }; } // Initial device. $scope.device = makeDevice(); // Converts the device information from how it is held in the UI to // how it is handled over the websocket. function convertDeviceToProtocol(device) { // Return the new object. var convertedDevice = { hostname: device.name, primary_mac: device.interfaces[0].mac, extra_macs: [], interfaces: [] }; var i; for(i = 1; i < device.interfaces.length; i++) { convertedDevice.extra_macs.push(device.interfaces[i].mac); } angular.forEach(device.interfaces, function(nic) { convertedDevice.interfaces.push({ mac: nic.mac, ip_assignment: nic.ipAssignment.name, ip_address: nic.ipAddress, "interface": nic.clusterInterfaceId }); }); return convertedDevice; } // Gets the cluster interface by id from the managed cluster // interfaces. function getInterfaceById(id) { var i, clusterInterfaces = $scope.getManagedInterfaces(); for(i = 0; i < clusterInterfaces.length; i++) { if(clusterInterfaces[i].id === id) { return clusterInterfaces[i]; } } return null; } // Called by the parent scope when this controller is viewable. $scope.show = function() { // Exit early if already viewable. if($scope.viewable) { return; } $scope.device = makeDevice(); $scope.viewable = true; }; // Called by the parent scope when this controller is hidden. $scope.hide = function() { $scope.viewable = false; // Emit the hidden event. $scope.$emit('addDeviceHidden'); }; // Return all of the managed interfaces from the clusters. $scope.getManagedInterfaces = function() { var nics = []; angular.forEach($scope.clusters, function(cluster) { angular.forEach(cluster.interfaces, function(cInterface) { if(cInterface.management > 0) { nics.push(cInterface); } }); }); return nics; }; // Return text to show an interfaces static range. $scope.getInterfaceStaticRange = function(cInterfaceId) { if(!angular.isNumber(cInterfaceId)) { return ""; } var clusterInterface = getInterfaceById(cInterfaceId); if(!angular.isObject(clusterInterface)) { return ""; } return clusterInterface.static_range.low + " - " + clusterInterface.static_range.high + " (Optional)"; }; // Returns true if the name is in error. $scope.nameHasError = function() { // If the name is empty don't show error. 
if($scope.device.name.length === 0) { return false; } return !ValidationService.validateHostname($scope.device.name); }; // Returns true if the MAC is in error. $scope.macHasError = function(deviceInterface) { // If the MAC is empty don't show error. if(deviceInterface.mac.length === 0) { return false; } // If the MAC is invalid show error. if(!ValidationService.validateMAC(deviceInterface.mac)) { return true; } // If the MAC is the same as another MAC show error. var i; for(i = 0; i < $scope.device.interfaces.length; i++) { var isSelf = $scope.device.interfaces[i] === deviceInterface; if(!isSelf && $scope.device.interfaces[i].mac === deviceInterface.mac) { return true; } } return false; }; // Returns true if the IP address is in error. $scope.ipHasError = function(deviceInterface) { // If the IP is empty don't show error. if(deviceInterface.ipAddress.length === 0) { return false; } // If ip address is invalid, then exit early. if(!ValidationService.validateIP(deviceInterface.ipAddress)) { return true; } var i, inNetwork, managedInterfaces = $scope.getManagedInterfaces(); if(angular.isObject(deviceInterface.ipAssignment)){ if(deviceInterface.ipAssignment.name === "external") { // External IP address cannot be within a managed interface // on one of the clusters. for(i = 0; i < managedInterfaces.length; i++) { inNetwork = ValidationService.validateIPInNetwork( deviceInterface.ipAddress, managedInterfaces[i].network); if(inNetwork) { return true; } } } else if(deviceInterface.ipAssignment.name === "static" && angular.isNumber(deviceInterface.clusterInterfaceId)) { // Static IP address must be within the static range // of the selected clusterInterface. var clusterInterface = getInterfaceById( deviceInterface.clusterInterfaceId); inNetwork = ValidationService.validateIPInNetwork( deviceInterface.ipAddress, clusterInterface.network); var inDynamicRange = ValidationService.validateIPInRange( deviceInterface.ipAddress, clusterInterface.network, clusterInterface.dynamic_range.low, clusterInterface.dynamic_range.high); if(!inNetwork || inDynamicRange) { return true; } } } return false; }; // Return true when the device is missing information or invalid // information. $scope.deviceHasError = function() { if($scope.device.name === '' || $scope.nameHasError()) { return true; } var i; for(i = 0; i < $scope.device.interfaces.length; i++) { var deviceInterface = $scope.device.interfaces[i]; if(deviceInterface.mac === '' || $scope.macHasError(deviceInterface) || !angular.isObject(deviceInterface.ipAssignment)) { return true; } var externalIpError = ( deviceInterface.ipAssignment.name === "external" && ( deviceInterface.ipAddress === '' || $scope.ipHasError(deviceInterface))); var staticIpError = ( deviceInterface.ipAssignment.name === "static" && ( !angular.isNumber(deviceInterface.clusterInterfaceId) || $scope.ipHasError(deviceInterface))); if(externalIpError || staticIpError) { return true; } } return false; }; // Adds new interface to device. $scope.addInterface = function() { $scope.device.interfaces.push(makeInterface()); }; // Returns true if the first interface in the device interfaces array. $scope.isPrimaryInterface = function(deviceInterface) { return $scope.device.interfaces.indexOf(deviceInterface) === 0; }; // Removes the interface from the devices interfaces array. $scope.deleteInterface = function(deviceInterface) { // Don't remove the primary. 
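// Example: with two interfaces, deleting the first (primary) one is a
// no-op, while deleting the second splices it out of the array.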
if($scope.isPrimaryInterface(deviceInterface)) { return; } $scope.device.interfaces.splice( $scope.device.interfaces.indexOf(deviceInterface), 1); }; // Called when cancel clicked. $scope.cancel = function() { $scope.error = null; $scope.device = makeDevice(); $scope.hide(); }; // Convert the Python dict error message to displayed message. $scope.convertPythonDictToErrorMsg = function(pythonError) { var elements = pythonError.match(/'([A-Za-z0-9 \.:_\-]+)'/g); var result = '', msg = ''; for (k=0; k < elements.length; ++k) { if (elements.hasOwnProperty(k)) { switch(elements[k]) { case "'hostname'": msg = elements[++k].replace(/'/g,''); result += msg.replace(/^Node/,'Device') + ' '; break; case "'mac_addresses'": msg = elements[++k].replace(/'/g,''); result += msg + ' '; break; default: result += elements[k].replace(/'/g,''); } } } return result; }; // Called when save is clicked. $scope.save = function(addAnother) { // Do nothing if device in error. if($scope.deviceHasError()) { return; } // Clear the error so it can be set again, if it fails to save // the device. $scope.error = null; // Create the device. var device = convertDeviceToProtocol($scope.device); DevicesManager.create(device).then(function(device) { $scope.device = makeDevice(); if(!addAnother) { // Hide the scope if not adding another. $scope.hide(); } }, function(error) { $scope.error = $scope.convertPythonDictToErrorMsg(error); }); }; // Load clusters to get the managed interfaces. ManagerHelperService.loadManager(ClustersManager); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/add_hardware.js0000644000000000000000000005000013056115004026452 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Add Hardware Controller */ angular.module('MAAS').controller('AddHardwareController', [ '$scope', '$http', '$cookies', 'ClustersManager', 'ZonesManager', 'NodesManager', 'GeneralManager', 'RegionConnection', 'ManagerHelperService', 'ValidationService', function( $scope, $http, $cookies, ClustersManager, ZonesManager, NodesManager, GeneralManager, RegionConnection, ManagerHelperService, ValidationService) { // Set the addHardwareScope in the parent, so it can call functions // in this controller. var parentScope = $scope.$parent; parentScope.addHardwareScope = $scope; // Set initial values. $scope.viewable = false; $scope.model = 'machine'; $scope.clusters = ClustersManager.getItems(); $scope.zones = ZonesManager.getItems(); $scope.architectures = GeneralManager.getData("architectures"); $scope.hwe_kernels = GeneralManager.getData("hwe_kernels"); $scope.default_min_hwe_kernel = GeneralManager.getData( "default_min_hwe_kernel"); $scope.error = null; // Input values. $scope.machine = null; $scope.chassis = null; // Hard coded chassis types. This is because there is no method in // MAAS to get a full list of supported chassis. This needs to be // fixed ASAP. var virshFields = [ { name: 'power_address', label: 'Address', field_type: 'string', "default": '', // Using "default" to make lint happy. 
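// ("default" must be quoted because it is a reserved word, which older
// JavaScript parsers reject as a bare property name.)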
choices: [], required: true }, { name: 'power_pass', label: 'Password', field_type: 'string', "default": '', choices: [], required: false }, { name: 'prefix_filter', label: 'Prefix filter', field_type: 'string', "default": '', choices: [], required: false } ]; $scope.chassisPowerTypes = [ { name: 'mscm', description: 'Moonshot Chassis Manager', fields: [ { name: 'host', label: 'Host', field_type: 'string', "default": '', choices: [], required: true }, { name: 'username', label: 'Username', field_type: 'string', "default": '', choices: [], required: true }, { name: 'password', label: 'Password', field_type: 'string', "default": '', choices: [], required: true } ] }, { name: 'powerkvm', description: 'PowerKVM', fields: virshFields }, { name: 'seamicro15k', description: 'SeaMicro 15000', fields: [ { name: 'mac', label: 'MAC', field_type: 'mac_address', "default": '', choices: [], required: true }, { name: 'username', label: 'Username', field_type: 'string', "default": '', choices: [], required: true }, { name: 'password', label: 'Password', field_type: 'string', "default": '', choices: [], required: true }, { name: 'power_control', label: 'Power Control', field_type: 'choice', "default": 'restapi2', choices: [ ['restapi2', 'REST API V2.0'], ['restapi', 'REST API V0.9'], ['ipmi', 'IPMI'] ], required: true } ] }, { name: 'ucsm', description: 'UCS Chassis Manager', fields: [ { name: 'url', label: 'URL', field_type: 'string', "default": '', choices: [], required: true }, { name: 'username', label: 'Username', field_type: 'string', "default": '', choices: [], required: true }, { name: 'password', label: 'Password', field_type: 'string', "default": '', choices: [], required: true } ] }, { name: 'virsh', description: 'Virsh (virtual systems)', fields: virshFields }, { name: 'vmware', description: 'VMWare', fields: [ { name: 'host', label: 'Host', field_type: 'string', "default": '', choices: [], required: true }, { name: 'username', label: 'Username', field_type: 'string', "default": '', choices: [], required: true }, { name: 'password', label: 'Password', field_type: 'string', "default": '', choices: [], required: true }, { name: 'prefix_filter', label: 'Prefix filter', field_type: 'string', "default": '', choices: [], required: false } ] } ]; // Get the master cluster from the loaded clusters. function masterCluster() { if($scope.clusters.length === 0) { return null; } else { return $scope.clusters[0]; } } // Get the default zone from the loaded zones. function defaultZone() { if($scope.zones.length === 0) { return null; } else { return $scope.zones[0]; } } // Get the default architecture from the loaded architectures. function defaultArchitecture() { if($scope.architectures.length === 0) { return ''; } else { // Return amd64/generic first if available. var i; for(i = 0; i < $scope.architectures.length; i++) { if($scope.architectures[i] === "amd64/generic") { return $scope.architectures[i]; } } return $scope.architectures[0]; } } // Return a new MAC address object. function newMAC() { return { mac: '', error: false }; } // Return a new machine object. function newMachine(cloneMachine) { // Clone the machine instead of just creating a new one. // This helps the user by already having the previous selected // items selected for the new machine. 
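// Example: cloning keeps cluster, zone, architecture, min_hwe_kernel
// and the power type, but resets the name, MAC list and power
// parameters.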
            if(angular.isObject(cloneMachine)) {
                return {
                    cluster: cloneMachine.cluster,
                    name: '',
                    macs: [newMAC()],
                    zone: cloneMachine.zone,
                    architecture: cloneMachine.architecture,
                    min_hwe_kernel: cloneMachine.min_hwe_kernel,
                    power: {
                        type: cloneMachine.power.type,
                        parameters: {}
                    }
                };
            }

            // No clone machine, so create a new blank machine.
            return {
                cluster: masterCluster(),
                name: '',
                macs: [newMAC()],
                zone: defaultZone(),
                architecture: defaultArchitecture(),
                min_hwe_kernel: $scope.default_min_hwe_kernel.text,
                power: {
                    type: null,
                    parameters: {}
                }
            };
        }

        // Return a new chassis object.
        function newChassis() {
            return {
                cluster: masterCluster(),
                power: {
                    type: null,
                    parameters: {}
                }
            };
        }

        // Converts the machine information from how it is held in the UI to
        // how it is handled over the websocket.
        function convertMachineToProtocol(machine) {
            // Convert the mac addresses.
            var macs = angular.copy(machine.macs);
            var pxe_mac = macs.shift().mac;
            var extra_macs = macs.map(function(mac) { return mac.mac; });

            // Return the new object.
            return {
                hostname: machine.name,
                architecture: machine.architecture,
                min_hwe_kernel: machine.min_hwe_kernel,
                pxe_mac: pxe_mac,
                extra_macs: extra_macs,
                power_type: machine.power.type.name,
                power_parameters: angular.copy(machine.power.parameters),
                zone: {
                    id: machine.zone.id,
                    name: machine.zone.name
                },
                nodegroup: {
                    id: machine.cluster.id,
                    uuid: machine.cluster.uuid,
                    cluster_name: machine.cluster.cluster_name
                }
            };
        }

        // Validate that all the parameters are there for the given power
        // type.
        function powerParametersHasError(power_type, parameters) {
            var i;
            for(i = 0; i < power_type.fields.length; i++) {
                var field = power_type.fields[i];
                var value = parameters[field.name];
                if(field.required) {
                    if(angular.isUndefined(value) || value === '') {
                        return true;
                    }
                }
            }
            return false;
        }

        // Called by the parent scope when this controller is viewable.
        $scope.show = function(mode) {
            // Change the mode.
            if($scope.mode !== mode) {
                if($scope.mode === "machine") {
                    $scope.machine = newMachine();
                } else if($scope.mode === "chassis") {
                    $scope.chassis = newChassis();
                }
                $scope.error = null;
                $scope.mode = mode;
            }

            // Exit early if already viewable.
            if($scope.viewable) {
                return;
            }
            $scope.viewable = true;

            // Start the polling of architectures.
            GeneralManager.startPolling("architectures");

            // Start the polling of hwe_kernels.
            GeneralManager.startPolling("hwe_kernels");
        };

        // Called by the parent scope when this controller is hidden.
        $scope.hide = function() {
            $scope.viewable = false;

            // Stop the polling of architectures.
            GeneralManager.stopPolling("architectures");

            // Stop the polling of hwe_kernels.
            GeneralManager.stopPolling("hwe_kernels");

            // Emit the hidden event.
            $scope.$emit('addHardwareHidden');
        };

        // Return True when architectures loaded and in machine mode.
        $scope.showMachine = function() {
            if($scope.architectures.length === 0) {
                return false;
            }
            return $scope.mode === "machine";
        };

        // Return True when architectures loaded and in chassis mode.
        $scope.showChassis = function() {
            if($scope.architectures.length === 0) {
                return false;
            }
            return $scope.mode === "chassis";
        };

        // Add a new MAC address to the machine.
        $scope.addMac = function() {
            $scope.machine.macs.push(newMAC());
        };

        // Remove a MAC address from the machine.
        $scope.removeMac = function(mac) {
            var idx = $scope.machine.macs.indexOf(mac);
            if(idx > -1) {
                $scope.machine.macs.splice(idx, 1);
            }
        };

        // Return true if the machine name is invalid.
        $scope.invalidName = function() {
            // Not invalid if empty.
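            // For example: '' gives false (no error while the field is
            // untouched), 'web-01' gives false, and a name such as
            // 'web_01!' gives true (assuming ValidationService rejects
            // characters that are illegal in hostnames).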
            if($scope.machine.name.length === 0) {
                return false;
            }
            return !ValidationService.validateHostname($scope.machine.name);
        };

        // Validate that the mac address is valid.
        $scope.validateMac = function(mac) {
            if(mac.mac === '') {
                mac.error = false;
            } else {
                mac.error = !ValidationService.validateMAC(mac.mac);
            }
        };

        // Return true when the machine is missing information or invalid
        // information.
        $scope.machineHasError = function() {
            // Early-out for errors.
            var in_error = (
                $scope.machine === null ||
                $scope.machine.cluster === null ||
                $scope.machine.zone === null ||
                $scope.machine.architecture === '' ||
                $scope.machine.power.type === null ||
                $scope.invalidName($scope.machine));
            if(in_error) {
                return in_error;
            }

            // Make sure none of the mac addresses are in error. The first
            // one cannot be blank; the remaining are allowed to be empty.
            if($scope.machine.macs[0].mac === '' ||
                $scope.machine.macs[0].error) {
                return true;
            }
            var i;
            for(i = 1; i < $scope.machine.macs.length; i++) {
                var mac = $scope.machine.macs[i];
                if(mac.mac !== '' && mac.error) {
                    return true;
                }
            }
            return false;
        };

        // Return true if the chassis has errors.
        $scope.chassisHasErrors = function() {
            // Early-out for errors.
            var in_error = (
                $scope.chassis === null ||
                $scope.chassis.cluster === null ||
                $scope.chassis.power.type === null);
            if(in_error) {
                return in_error;
            }
            return powerParametersHasError(
                $scope.chassis.power.type, $scope.chassis.power.parameters);
        };

        // Called when the cancel button is pressed.
        $scope.cancel = function() {
            $scope.error = null;
            $scope.machine = newMachine();
            $scope.chassis = newChassis();

            // Hide the controller.
            $scope.hide();
        };

        // Called to perform the saving of the machine.
        $scope.saveMachine = function(addAnother) {
            // Does nothing if machine has errors.
            if($scope.machineHasError()) {
                return;
            }

            // Clear the error so it can be set again, if it fails to save
            // the machine.
            $scope.error = null;

            // Add the machine.
            NodesManager.create(convertMachineToProtocol($scope.machine)).then(
                function() {
                    if(addAnother) {
                        $scope.machine = newMachine($scope.machine);
                    } else {
                        $scope.machine = newMachine();

                        // Hide the scope if not adding another.
                        $scope.hide();
                    }
                }, function(error) {
                    $scope.error = error;
                });
        };

        // Called to perform the saving of the chassis.
        $scope.saveChassis = function(addAnother) {
            // Does nothing if error exists.
            if($scope.chassisHasErrors()) {
                return;
            }

            // Clear the error so it can be set again, if it fails to save
            // the chassis.
            $scope.error = null;

            // Create the parameters.
            var params = angular.copy($scope.chassis.power.parameters);
            params.model = $scope.chassis.power.type.name;

            // Add the chassis. For now we use the API as the websocket
            // doesn't support probe and enlist.
            $http({
                method: 'POST',
                url: 'api/1.0/nodegroups/' + $scope.chassis.cluster.uuid +
                    '/?op=probe_and_enlist_hardware',
                data: $.param(params),
                headers: {
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'X-CSRFTOKEN': $cookies.csrftoken
                }
            }).then(function() {
                $scope.chassis = newChassis();
                if(!addAnother) {
                    // Hide the scope if not adding another.
                    $scope.hide();
                }
            }, function(error) {
                $scope.error = error;
            });
        };

        // Load clusters and zones. Once loaded create the first machine
        // and chassis.
        ManagerHelperService.loadManagers(
            [ClustersManager, ZonesManager]).then(function() {
                // Add the first machine and chassis.
                $scope.machine = newMachine();
                $scope.chassis = newChassis();
            });

        // Load the general manager.
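        // GeneralManager supplies the polled "architectures" and
        // "hwe_kernels" arrays bound in the initial values above; the
        // callback below backfills the default architecture once the
        // list arrives.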
ManagerHelperService.loadManager(GeneralManager).then(function() { if($scope.architectures.length > 0) { // If the machine doesn't have an architecture // set then it was created before all of the // architectures were loaded. Set the default // architecture for that machine. if(angular.isObject($scope.machine) && $scope.machine.architecture === '') { $scope.machine.architecture = defaultArchitecture(); } } }); // Stop polling when the scope is destroyed. $scope.$on("$destroy", function() { GeneralManager.stopPolling("architectures"); GeneralManager.stopPolling("hwe_kernels"); }); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/node_details.js0000644000000000000000000011774513056115004026523 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Node Details Controller */ angular.module('MAAS').controller('NodeDetailsController', [ '$scope', '$rootScope', '$routeParams', '$location', 'NodesManager', 'ClustersManager', 'ZonesManager', 'GeneralManager', 'UsersManager', 'TagsManager', 'ManagerHelperService', 'ErrorService', 'ValidationService', function( $scope, $rootScope, $routeParams, $location, NodesManager, ClustersManager, ZonesManager, GeneralManager, UsersManager, TagsManager, ManagerHelperService, ErrorService, ValidationService) { // Set title and page. $rootScope.title = "Loading..."; $rootScope.page = "nodes"; // Initial values. $scope.loaded = false; $scope.node = null; $scope.actionOption = null; $scope.allActionOptions = GeneralManager.getData("node_actions"); $scope.availableActionOptions = []; $scope.actionError = null; $scope.osinfo = GeneralManager.getData("osinfo"); $scope.osSelection = { osystem: null, release: null, hwe_kernel: null }; $scope.commissionOptions = { enableSSH: false, skipNetworking: false, skipStorage: false }; $scope.checkingPower = false; $scope.devices = []; // Holds errors that are displayed on the details page. $scope.errors = { cluster_disconnected: { viewable: false, message: "The cluster this node belongs to is disconnected. " + "Reconnect the cluster to the region controller to save " + "changes on this node." }, invalid_arch: { viewable: false, message: "This node has an invalid architecture. Update the " + "architecture for this node in the summary section below." }, missing_power: { viewable: false, message: "This node does not have a power type set and " + "MAAS will be unable to control it. Update the power " + "information in the power section below." } }; // Node name header section. $scope.nameHeader = { editing: false, value: "" }; // Summary section. $scope.summary = { editing: false, cluster: { selected: null, options: ClustersManager.getItems() }, architecture: { selected: null, options: GeneralManager.getData("architectures") }, min_hwe_kernel: { selected: null, options: GeneralManager.getData("hwe_kernels") }, zone: { selected: null, options: ZonesManager.getItems() }, tags: [] }; // Power section. $scope.power = { editing: false, type: null, parameters: {} }; // Events section. $scope.events = { limit: 10 }; // Machine output section. $scope.machine_output = { viewable: false, selectedView: null, views: [], showSummaryToggle: true, summaryType: 'yaml' }; // Show given error. function showError(name) { $scope.errors[name].viewable = true; } // Hide given error. function hideError(name) { $scope.errors[name].viewable = false; } // Return true if the error is viewable. 
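        // Sketch of how this trio is used within this controller:
        //   showError("missing_power");       // banner on
        //   isErrorViewable("missing_power"); // now true
        //   hideError("missing_power");       // banner off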
        function isErrorViewable(name) {
            return $scope.errors[name].viewable;
        }

        // Return true if the architecture for the given node is invalid.
        function hasInvalidArchitecture(node) {
            return (
                node.architecture === "" ||
                $scope.summary.architecture.options.indexOf(
                    node.architecture) === -1);
        }

        // Update the shown errors based on the status of the node.
        function updateErrors() {
            // Check if the node's power type is unset; if so, show the
            // missing_power error.
            if($scope.node.power_type === "") {
                showError("missing_power");
            } else {
                hideError("missing_power");
            }

            // Show architecture error if the node doesn't have an
            // architecture or if the current architecture is not in the
            // available architectures.
            if(hasInvalidArchitecture($scope.node)) {
                showError("invalid_arch");
            } else {
                hideError("invalid_arch");
            }

            // Show the cluster disconnected error if the cluster is not
            // connected.
            if(angular.isObject($scope.node.nodegroup)) {
                var cluster = ClustersManager.getItemFromList(
                    $scope.node.nodegroup.id);
                if(!cluster.connected) {
                    showError("cluster_disconnected");
                } else {
                    hideError("cluster_disconnected");
                }
            }
        }

        // Updates the page title.
        function updateTitle() {
            if($scope.node && $scope.node.fqdn) {
                $rootScope.title = $scope.node.fqdn;
            }
        }

        function updateName() {
            // Don't update the value if in editing mode, as this would
            // overwrite the user's changes.
            if($scope.nameHeader.editing) {
                return;
            }
            $scope.nameHeader.value = $scope.node.fqdn;
        }

        // Update the available action options for the node.
        function updateAvailableActionOptions() {
            $scope.availableActionOptions = [];
            if(!$scope.node) {
                return;
            }

            // Build the available action options control from the
            // allowed actions, except set-zone which does not make
            // sense in this view because the form has this
            // functionality.
            angular.forEach($scope.allActionOptions, function(option) {
                if($scope.node.actions.indexOf(option.name) >= 0 &&
                    option.name !== "set-zone") {
                    $scope.availableActionOptions.push(option);
                }
            });
        }

        // Updates the currently selected items in the power section.
        function updatePower() {
            // Update the viewable errors.
            updateErrors();

            // Always keep the available power types up-to-date even in
            // editing mode.
            var cluster;
            if(angular.isObject($scope.node.nodegroup)) {
                cluster = ClustersManager.getItemFromList(
                    $scope.node.nodegroup.id);
            }
            if(angular.isObject(cluster)) {
                $scope.power.types = cluster.power_types;
            } else {
                $scope.power.types = [];
            }

            // If there is no cluster or the cluster is disconnected, always
            // force editing to false. It's not possible to keep editing the
            // power section when the cluster is disconnected.
            if(!angular.isObject(cluster) || !cluster.connected) {
                $scope.power.editing = false;
            }

            // Do not update the selected items when editing, as this would
            // cause the user's selection to change.
            if($scope.power.editing) {
                return;
            }

            var i;
            $scope.power.type = null;
            for(i = 0; i < $scope.power.types.length; i++) {
                if($scope.node.power_type === $scope.power.types[i].name) {
                    $scope.power.type = $scope.power.types[i];
                    break;
                }
            }

            $scope.power.parameters = angular.copy(
                $scope.node.power_parameters);
            if(!angular.isObject($scope.power.parameters)) {
                $scope.power.parameters = {};
            }

            // Force editing mode on, if the power_type is missing. This is
            // placed at the bottom because we wanted the selected items to
            // be filled in at least once.
            if($scope.canEdit() && $scope.node.power_type === "") {
                $scope.power.editing = true;
            }
        }

        // Updates the currently selected items in the summary section.
function updateSummary() { // Update the viewable errors. updateErrors(); // Do not update the selected items, when editing this would // cause the users selection to change. if($scope.summary.editing) { return; } if(angular.isObject($scope.node.nodegroup)) { $scope.summary.cluster.selected = ClustersManager.getItemFromList($scope.node.nodegroup.id); } $scope.summary.zone.selected = ZonesManager.getItemFromList( $scope.node.zone.id); $scope.summary.architecture.selected = $scope.node.architecture; $scope.summary.min_hwe_kernel.selected = $scope.node.min_hwe_kernel; $scope.summary.tags = angular.copy($scope.node.tags); // Force editing mode on, if the architecture is invalid. This is // placed at the bottom because we wanted the selected items to // be filled in at least once. if($scope.canEdit() && hasInvalidArchitecture($scope.node)) { $scope.summary.editing = true; } // Since the summary contains the selected cluster and the // power type is derived for that selection. Update the power // section as well. updatePower(); } // Updates the machine output section. function updateMachineOutput() { // Set if it should even be viewable. $scope.machine_output.viewable = ( angular.isString($scope.node.summary_xml) || angular.isString($scope.node.summary_yaml) || (angular.isArray($scope.node.commissioning_results) && $scope.node.commissioning_results.length > 0) || (angular.isArray($scope.node.installation_results) && $scope.node.installation_results.length > 0)); // Grab the selected view name, so it can be kept the same if // possible. var viewName = null; if(angular.isObject($scope.machine_output.selectedView)) { viewName = $scope.machine_output.selectedView.name; } // If the viewName is empty, then a default one was not selected. // We want the installation output to be the default if possible. if(!angular.isString(viewName)) { viewName = "install"; } // Setup the views that are viewable. $scope.machine_output.views = []; if(angular.isString($scope.node.summary_xml) || angular.isString($scope.node.summary_yaml)) { $scope.machine_output.views.push({ name: "summary", title: "Commissioning Summary" }); } if(angular.isArray($scope.node.commissioning_results) && $scope.node.commissioning_results.length > 0) { $scope.machine_output.views.push({ name: "output", title: "Commissioning Output" }); } if(angular.isArray($scope.node.installation_results) && $scope.node.installation_results.length > 0) { $scope.machine_output.views.push({ name: "install", title: "Installation Output" }); } // Set the selected view to its previous value or to the first // entry in the views list. var selectedView = null; angular.forEach($scope.machine_output.views, function(view) { if(view.name === viewName) { selectedView = view; } }); if(angular.isObject(selectedView)) { $scope.machine_output.selectedView = selectedView; } else if ($scope.machine_output.views.length > 0) { $scope.machine_output.selectedView = $scope.machine_output.views[0]; } else { $scope.machine_output.selectedView = null; } // Show the summary toggle if in the summary view. $scope.machine_output.showSummaryToggle = false; if(angular.isObject($scope.machine_output.selectedView) && $scope.machine_output.selectedView.name === "summary") { $scope.machine_output.showSummaryToggle = true; } } // Update the devices array on the scope based on the device children // on the node. 
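        // For illustration (hypothetical values), one child device whose
        // single NIC carries two links flattens to two table rows, with the
        // repeated MAC blanked (a second NIC would get its name blanked
        // instead):
        //   { name: 'dev1.maas', mac_address: '00:16:3e:00:00:01',
        //     ip_address: '10.0.0.2' }
        //   { name: 'dev1.maas', mac_address: '', ip_address: '10.0.0.3' }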
        function updateDevices() {
            $scope.devices = [];
            angular.forEach($scope.node.devices, function(child) {
                var device = {
                    name: child.fqdn
                };

                // Add the interfaces to the device object if any exist.
                if(angular.isArray(child.interfaces) &&
                    child.interfaces.length > 0) {
                    angular.forEach(child.interfaces, function(nic, nicIdx) {
                        var deviceWithMAC = angular.copy(device);
                        deviceWithMAC.mac_address = nic.mac_address;

                        // Remove device name so it is not duplicated in the
                        // table since this is another MAC address on this
                        // device.
                        if(nicIdx > 0) {
                            deviceWithMAC.name = "";
                        }

                        // Add the links to the device object if any exist.
                        if(angular.isArray(nic.links) &&
                            nic.links.length > 0) {
                            angular.forEach(nic.links, function(link, lIdx) {
                                var deviceWithLink = angular.copy(
                                    deviceWithMAC);
                                deviceWithLink.ip_address = link.ip_address;

                                // Remove the MAC address so it is not
                                // duplicated in the table since this is
                                // another link on this interface.
                                if(lIdx > 0) {
                                    deviceWithLink.mac_address = "";
                                }

                                $scope.devices.push(deviceWithLink);
                            });
                        } else {
                            $scope.devices.push(deviceWithMAC);
                        }
                    });
                } else {
                    $scope.devices.push(device);
                }
            });
        }

        // Starts the watchers on the scope.
        function startWatching() {
            // Update the title and name when the node fqdn changes.
            $scope.$watch("node.fqdn", function() {
                updateTitle();
                updateName();
            });

            // Update the devices on the node.
            $scope.$watch("node.devices", updateDevices);

            // Update the availableActionOptions when the node actions
            // change.
            $scope.$watch("node.actions", updateAvailableActionOptions);

            // Update the summary when the node or clusters list is
            // updated.
            $scope.$watch("node.nodegroup.id", updateSummary);
            $scope.$watchCollection(
                $scope.summary.cluster.options, updateSummary);

            // Update the summary when the node or architectures list is
            // updated.
            $scope.$watch("node.architecture", updateSummary);
            $scope.$watchCollection(
                $scope.summary.architecture.options, updateSummary);

            // Update the summary when min_hwe_kernel is updated.
            $scope.$watch("node.min_hwe_kernel", updateSummary);
            $scope.$watchCollection(
                $scope.summary.min_hwe_kernel.options, updateSummary);

            // Update the summary when the node or zone list is
            // updated.
            $scope.$watch("node.zone.id", updateSummary);
            $scope.$watchCollection(
                $scope.summary.zone.options, updateSummary);

            // Update the power when the node power_type or power_parameters
            // are updated.
            $scope.$watch("node.power_type", updatePower);
            $scope.$watch("node.power_parameters", updatePower);

            // Update power section when the selected cluster becomes
            // connected or disconnected. Calling updatePower also
            // calls updateErrors.
            $scope.$watch("summary.cluster.selected.connected", updatePower);

            // Update the machine output view when summary, commissioning,
            // or installation results are updated on the node.
            $scope.$watch("node.summary_xml", updateMachineOutput);
            $scope.$watch("node.summary_yaml", updateMachineOutput);
            $scope.$watch("node.commissioning_results", updateMachineOutput);
            $scope.$watch("node.installation_results", updateMachineOutput);
        }

        // Return true if the given error is because of RPC.
        function isDisconnectedClusterError(error) {
            // The error indicates a disconnected cluster if it contains
            // this content.
            var errorString = "Unable to get RPC connection for cluster";
            return error.indexOf(errorString) >= 0;
        }

        // Process the given error when saving the node.
        function handleSaveError(error) {
            // If it errored because the cluster was disconnected, update
            // the cluster information, because this is not pushed over
            // the websocket. If it didn't error for that reason, then
            // the cluster is connected.
            var cluster = ClustersManager.getItemFromList(
                $scope.node.nodegroup.id);
            if(isDisconnectedClusterError(error)) {
                cluster.connected = false;
            } else {
                // Not a disconnection, so cluster.connected needs to be
                // true.
                cluster.connected = true;
                console.log(error);
            }
        }

        // Update the node with new data on the region.
        $scope.updateNode = function(node) {
            return NodesManager.updateItem(node).then(function(node) {
                // If it was able to save correctly then the cluster is
                // connected. An error would have been raised if it wasn't.
                var cluster = ClustersManager.getItemFromList(
                    node.nodegroup.id);
                if(angular.isObject(cluster)) {
                    cluster.connected = true;
                }
                updateName();
                updateSummary();
            }, function(error) {
                handleSaveError(error);
                updateName();
                updateSummary();
            });
        };

        // Called when the node has been loaded.
        function nodeLoaded(node) {
            $scope.node = node;
            $scope.loaded = true;
            updateTitle();
            updateSummary();
            updateMachineOutput();
            startWatching();

            // Tell the storageController and networkingController that the
            // node has been loaded.
            if(angular.isObject($scope.storageController)) {
                $scope.storageController.nodeLoaded();
            }
            if(angular.isObject($scope.networkingController)) {
                $scope.networkingController.nodeLoaded();
            }
        }

        // Return true if the user is a superuser.
        $scope.isSuperUser = function() {
            var authUser = UsersManager.getAuthUser();
            if(!angular.isObject(authUser)) {
                return false;
            }
            return authUser.is_superuser;
        };

        // Called for autocomplete when the user is typing a tag name.
        $scope.tagsAutocomplete = function(query) {
            return TagsManager.autocomplete(query);
        };

        $scope.getPowerStateClass = function() {
            // This will get called very early and node can be empty.
            // In that case just return an empty string. It will be
            // called again to show the correct information.
            if(!angular.isObject($scope.node)) {
                return "";
            }

            if($scope.checkingPower) {
                return "checking";
            } else {
                return $scope.node.power_state;
            }
        };

        // Get the power state text to show.
        $scope.getPowerStateText = function() {
            // This will get called very early and node can be empty.
            // In that case just return an empty string. It will be
            // called again to show the correct information.
            if(!angular.isObject($scope.node)) {
                return "";
            }

            if($scope.checkingPower) {
                return "Checking power";
            } else if($scope.node.power_state === "unknown") {
                return "";
            } else {
                return "Power " + $scope.node.power_state;
            }
        };

        // Returns true when the "check now" button for updating the power
        // state should be shown.
        $scope.canCheckPowerState = function() {
            // This will get called very early and node can be empty.
            // In that case just return false. It will be
            // called again to show the correct information.
            if(!angular.isObject($scope.node)) {
                return false;
            }
            return (
                $scope.node.power_state !== "unknown" &&
                !$scope.checkingPower);
        };

        // Check the power state of the node.
        $scope.checkPowerState = function() {
            $scope.checkingPower = true;
            NodesManager.checkPowerState($scope.node).then(function() {
                $scope.checkingPower = false;
            });
        };

        // Returns the nice name of the OS for the node.
        $scope.getOSText = function() {
            // This will get called very early and node can be empty.
            // In that case just return an empty string. It will be
            // called again to show the correct information.
            if(!angular.isObject($scope.node)) {
                return "";
            }

            var i;
            var os_release = $scope.node.osystem +
                "/" + $scope.node.distro_series;

            // Possible that osinfo has not been fully loaded. In that case
            // we just return the os_release identifier.
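            // osinfo.releases entries are [id, title] pairs, e.g.
            // (illustrative) ['ubuntu/trusty', 'Ubuntu 14.04 LTS'],
            // matched against os_release below.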
if(angular.isUndefined($scope.osinfo.releases)) { return os_release; } // Get the nice release name from osinfo. for(i = 0; i < $scope.osinfo.releases.length; i++) { var release = $scope.osinfo.releases[i]; if(release[0] === os_release) { return release[1]; } } return os_release; }; $scope.isUbuntuOS = function() { // This will get called very early and node can be empty. // In that case just return an empty string. It will be // called again to show the correct information. if(!angular.isObject($scope.node)) { return false; } if($scope.node.osystem === "ubuntu") { return true; } return false; }; // Return true if there is an action error. $scope.isActionError = function() { return $scope.actionError !== null; }; // Return True if in deploy action and the osinfo is missing. $scope.isDeployError = function() { // Never a deploy error when there is an action error. if($scope.isActionError()) { return false; } var missing_osinfo = ( angular.isUndefined($scope.osinfo.osystems) || $scope.osinfo.osystems.length === 0); if(angular.isObject($scope.actionOption) && $scope.actionOption.name === "deploy" && missing_osinfo) { return true; } return false; }; // Return True if unable to deploy because of missing ssh keys. $scope.isSSHKeyError = function() { // Never a deploy error when there is an action error. if($scope.isActionError()) { return false; } if(angular.isObject($scope.actionOption) && $scope.actionOption.name === "deploy" && UsersManager.getSSHKeyCount() === 0) { return true; } return false; }; // Called when the actionOption has changed. $scope.actionOptionChanged = function() { // Clear the action error. $scope.actionError = null; }; // Cancel the action. $scope.actionCancel = function() { $scope.actionOption = null; $scope.actionError = null; }; // Perform the action. $scope.actionGo = function() { var extra = {}; // Set deploy parameters if a deploy. if($scope.actionOption.name === "deploy" && angular.isString($scope.osSelection.osystem) && angular.isString($scope.osSelection.release)) { // Set extra. UI side the release is structured os/release, but // when it is sent over the websocket only the "release" is // sent. extra.osystem = $scope.osSelection.osystem; var release = $scope.osSelection.release; release = release.split("/"); release = release[release.length-1]; extra.distro_series = release; // hwe_kernel is optional so only include it if its specified if(angular.isString($scope.osSelection.hwe_kernel) && ($scope.osSelection.hwe_kernel.indexOf('hwe-') >= 0)) { extra.hwe_kernel = $scope.osSelection.hwe_kernel; } } else if($scope.actionOption.name === "commission") { extra.enable_ssh = $scope.commissionOptions.enableSSH; extra.skip_networking = ( $scope.commissionOptions.skipNetworking); extra.skip_storage = $scope.commissionOptions.skipStorage; } NodesManager.performAction( $scope.node, $scope.actionOption.name, extra).then(function() { // If the action was delete, then go back to listing. if($scope.actionOption.name === "delete") { $location.path("/nodes"); } $scope.actionOption = null; $scope.actionError = null; $scope.osSelection.$reset(); $scope.commissionOptions.enableSSH = false; $scope.commissionOptions.skipNetworking = false; $scope.commissionOptions.skipStorage = false; }, function(error) { $scope.actionError = error; }); }; // Return true if the authenticated user is super user. 
$scope.isSuperUser = function() { var authUser = UsersManager.getAuthUser(); if(!angular.isObject(authUser)) { return false; } return authUser.is_superuser; }; // Return true if the current architecture selection is invalid. $scope.invalidArchitecture = function() { return ( $scope.summary.architecture.selected === "" || $scope.summary.architecture.options.indexOf( $scope.summary.architecture.selected) === -1); }; // Return true when the edit buttons can be clicked. $scope.canEdit = function() { return ( $scope.isSuperUser() && !isErrorViewable("cluster_disconnected")); }; // Called to edit the node name. $scope.editName = function() { if(!$scope.canEdit()) { return; } // Do nothing if already editing because we don't want to reset // the current value. if($scope.nameHeader.editing) { return; } $scope.nameHeader.editing = true; // Set the value to the hostname, as that is what can be changed // not the fqdn. $scope.nameHeader.value = $scope.node.hostname; }; // Return true when the value in nameHeader is invalid. $scope.editNameInvalid = function() { // Not invalid unless editing. if(!$scope.nameHeader.editing) { return false; } // The value cannot be blank. var value = $scope.nameHeader.value; if(value.length === 0) { return true; } return !ValidationService.validateHostname(value); }; // Called to cancel editing of the node name. $scope.cancelEditName = function() { $scope.nameHeader.editing = false; updateName(); }; // Called to save editing of node name. $scope.saveEditName = function() { // Does nothing if invalid. if($scope.editNameInvalid()) { return; } $scope.nameHeader.editing = false; // Copy the node and make the changes. var node = angular.copy($scope.node); node.hostname = $scope.nameHeader.value; // Update the node. $scope.updateNode(node); }; // Called to enter edit mode in the summary section. $scope.editSummary = function() { if(!$scope.canEdit()) { return; } $scope.summary.editing = true; }; // Called to cancel editing in the summary section. $scope.cancelEditSummary = function() { // Leave edit mode only if node has valid architecture. if(!hasInvalidArchitecture($scope.node)) { $scope.summary.editing = false; } updateSummary(); }; // Called to save the changes made in the summary section. $scope.saveEditSummary = function() { // Do nothing if invalidArchitecture. if($scope.invalidArchitecture()) { return; } $scope.summary.editing = false; // Copy the node and make the changes. var node = angular.copy($scope.node); node.nodegroup = angular.copy($scope.summary.cluster.selected); node.zone = angular.copy($scope.summary.zone.selected); node.architecture = $scope.summary.architecture.selected; if($scope.summary.min_hwe_kernel.selected === null) { node.min_hwe_kernel = ""; }else{ node.min_hwe_kernel = $scope.summary.min_hwe_kernel.selected; } node.tags = []; angular.forEach($scope.summary.tags, function(tag) { node.tags.push(tag.text); }); // Update the node. $scope.updateNode(node); }; // Return true if the current power type selection is invalid. $scope.invalidPowerType = function() { return !angular.isObject($scope.power.type); }; // Called to enter edit mode in the power section. $scope.editPower = function() { if(!$scope.canEdit()) { return; } $scope.power.editing = true; }; // Called to cancel editing in the power section. $scope.cancelEditPower = function() { // Only leave edit mode if node has valid power type. if($scope.node.power_type !== "") { $scope.power.editing = false; } updatePower(); }; // Called to save the changes made in the power section. 
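        // Sketch with hypothetical IPMI values: with power.type.name equal
        // to 'ipmi' and power.parameters = {power_address: '10.0.0.9'},
        // the copy below lands on the node as power_type and
        // power_parameters before updateNode() pushes it to the region.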
$scope.saveEditPower = function() { // Does nothing if invalid power type. if($scope.invalidPowerType()) { return; } $scope.power.editing = false; // Copy the node and make the changes. var node = angular.copy($scope.node); node.power_type = $scope.power.type.name; node.power_parameters = angular.copy($scope.power.parameters); // Update the node. $scope.updateNode(node); }; // Return true if the "load more" events button should be available. $scope.allowShowMoreEvents = function() { if(!angular.isObject($scope.node)) { return false; } if(!angular.isArray($scope.node.events)) { return false; } return ( $scope.node.events.length > 0 && $scope.node.events.length > $scope.events.limit && $scope.events.limit < 50); }; // Show another 10 events. $scope.showMoreEvents = function() { $scope.events.limit += 10; }; // Return the nice text for the given event. $scope.getEventText = function(event) { var text = event.type.description; if(angular.isString(event.description) && event.description.length > 0) { text += " - " + event.description; } return text; }; // Called when the machine output view has changed. $scope.machineOutputViewChanged = function() { if(angular.isObject($scope.machine_output.selectedView) && $scope.machine_output.selectedView.name === "summary") { $scope.machine_output.showSummaryToggle = true; } else { $scope.machine_output.showSummaryToggle = false; } }; // Return the commissioning summary output data. $scope.getSummaryData = function() { // Can be called by angular before the node is set in the scope, // in that case return blank string. It will be called once the // node is set to get the correct information. if(!angular.isObject($scope.node)) { return ""; } // Prepend a newline before the summary output, because the code // tag requires that the content start on a newline. return "\n" + $scope.node["summary_" + $scope.machine_output.summaryType]; }; // Return the installation log data. $scope.getInstallationData = function() { // Can be called by angular before the node is set in the scope, // in that case return blank string. It will be called once the // node is set to get the correct information. if(!angular.isObject($scope.node)) { return ""; } // It is possible for the node to have multiple installation // results, but it is unused. Only one installation result will // exists for a node. Grab the first one in the array. var results = $scope.node.installation_results; if(!angular.isArray(results)) { return ""; } if(results.length === 0) { return ""; } else { // Prepend a newline before the data, because the code // tag requires that the content start on a newline. return "\n" + results[0].data; } }; // true if power error prevents the provided action $scope.hasActionPowerError = function(actionName) { if(!$scope.hasPowerError()) { return false; // no error, no need to check state } // these states attempt to manipulate power var powerChangingStates = [ 'commission', 'deploy', 'on', 'off', 'release' ]; if(actionName && powerChangingStates.indexOf(actionName) > -1) { return true; } return false; }; // Check to see if the power type has any missing system packages. $scope.hasPowerError = function() { if(angular.isObject($scope.power.type)) { return $scope.power.type.missing_packages.length > 0; } else { return false; } }; // Returns a formatted string of missing system packages. 
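        // For example (illustrative package names): ['ipmitool'] yields
        // 'ipmitool package'; ['a', 'b'] yields 'a and b packages';
        // ['a', 'b', 'c'] yields 'a, b and c packages'.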
$scope.getPowerErrors = function() { var i; var result = ""; if(angular.isObject($scope.power.type)) { var packages = $scope.power.type.missing_packages; packages.sort(); for(i = 0; i < packages.length; i++) { result += packages[i]; if(i+2 < packages.length) { result += ", "; } else if(i+1 < packages.length) { result += " and "; } } result += packages.length > 1 ? " packages" : " package"; } return result; }; // Load all the required managers. ManagerHelperService.loadManagers([ NodesManager, ClustersManager, ZonesManager, GeneralManager, UsersManager, TagsManager ]).then(function() { // Possibly redirected from another controller that already had // this node set to active. Only call setActiveItem if not already // the activeItem. var activeNode = NodesManager.getActiveItem(); if(angular.isObject(activeNode) && activeNode.system_id === $routeParams.system_id) { nodeLoaded(activeNode); } else { NodesManager.setActiveItem( $routeParams.system_id).then(function(node) { nodeLoaded(node); }, function(error) { ErrorService.raiseError(error); }); } // Poll for architectures, hwe_kernels, and osinfo the whole // time. This is because the user can always see the architecture // and operating system. Need to keep this information up-to-date // so the user is viewing current data. GeneralManager.startPolling("architectures"); GeneralManager.startPolling("hwe_kernels"); GeneralManager.startPolling("osinfo"); }); // Stop polling for architectures, hwe_kernels, and osinfo when the // scope is destroyed. $scope.$on("$destroy", function() { GeneralManager.stopPolling("architectures"); GeneralManager.stopPolling("hwe_kernels"); GeneralManager.stopPolling("osinfo"); }); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/node_details_networking.js0000644000000000000000000014172713056115004030767 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Node Networking Controller */ // Filter that is specific to the NodeNetworkingController. Filters the // list of VLANs to be only those that are unused by the interface. angular.module('MAAS').filter('filterByUnusedForInterface', function() { return function(vlans, nic, originalInterfaces) { var filtered = []; if(!angular.isObject(nic) || !angular.isObject(originalInterfaces)) { return filtered; } var usedVLANs = []; angular.forEach(originalInterfaces, function(inter) { if(inter.type === "vlan") { var parent = inter.parents[0]; if(parent === nic.id) { usedVLANs.push(inter.vlan_id); } } }); angular.forEach(vlans, function(vlan) { var idx = usedVLANs.indexOf(vlan.id); if(idx === -1) { filtered.push(vlan); } }); return filtered; }; }); // Filter that is specific to the NodeNetworkingController. Filters the // list of interfaces to not include the current parent interfaces being // bonded together. angular.module('MAAS').filter('removeBondParents', function() { return function(interfaces, bondInterface) { if(!angular.isObject(bondInterface) || !angular.isArray(bondInterface.parents)) { return interfaces; } var filtered = []; angular.forEach(interfaces, function(nic) { var i, parent, found = false; for(i = 0; i < bondInterface.parents.length; i++) { parent = bondInterface.parents[i]; if(parent.id === nic.id && parent.link_id === nic.link_id) { found = true; break; } } if(!found) { filtered.push(nic); } }); return filtered; }; }); // Filter that is specific to the NodeNetworkingController. 
Remove the default // VLAN if the interface is a VLAN interface. angular.module('MAAS').filter('removeDefaultVLANIfVLAN', function() { return function(vlans, interfaceType) { if(!angular.isString(interfaceType)) { return vlans; } var filtered = []; angular.forEach(vlans, function(vlan) { if(interfaceType !== "vlan") { filtered.push(vlan); } else if(vlan.vid !== 0) { filtered.push(vlan); } }); return filtered; }; }); // Filter that is specific to the NodeNetworkingController. Only provide the // available modes for that interface type. angular.module('MAAS').filter('filterLinkModes', function() { return function(modes, nic) { if(!angular.isObject(nic)) { return modes; } var filtered = []; if(!angular.isObject(nic.subnet)) { // No subnet is configure so the only allowed mode // is 'link_up'. angular.forEach(modes, function(mode) { if(mode.mode === "link_up") { filtered.push(mode); } }); } else { // Don't add LINK_UP if more than one link exists or // if the interface is an alias. var allowLinkUp = ( (angular.isObject(nic.links) && nic.links.length > 1) || (nic.type === "alias")); angular.forEach(modes, function(mode) { if(allowLinkUp && mode.mode === "link_up") { return; } // Can't run DHCP twice on one NIC. if(nic.type === "alias" && mode.mode === "dhcp") { return; } filtered.push(mode); }); } return filtered; }; }); angular.module('MAAS').controller('NodeNetworkingController', [ '$scope', '$filter', 'FabricsManager', 'VLANsManager', 'SubnetsManager', 'NodesManager', 'GeneralManager', 'UsersManager', 'ManagerHelperService', 'ValidationService', 'JSONService', function( $scope, $filter, FabricsManager, VLANsManager, SubnetsManager, NodesManager, GeneralManager, UsersManager, ManagerHelperService, ValidationService, JSONService) { // Different interface types. var INTERFACE_TYPE = { PHYSICAL: "physical", BOND: "bond", VLAN: "vlan", ALIAS: "alias" }; var INTERFACE_TYPE_TEXTS = { "physical": "Physical", "bond": "Bond", "vlan": "VLAN", "alias": "Alias" }; // Different link modes for an interface. var LINK_MODE = { AUTO: "auto", STATIC: "static", DHCP: "dhcp", LINK_UP: "link_up" }; var LINK_MODE_TEXTS = { "auto": "Auto assign", "static": "Static assign", "dhcp": "DHCP", "link_up": "Unconfigured" }; // Different selection modes. var SELECTION_MODE = { NONE: null, SINGLE: "single", MULTI: "multi", DELETE: "delete", ADD: "add", CREATE_BOND: "create-bond", CREATE_PHYSICAL: "create-physical" }; // Set the initial values for this scope. $scope.loaded = false; $scope.nodeHasLoaded = false; $scope.managersHaveLoaded = false; $scope.column = 'name'; $scope.fabrics = FabricsManager.getItems(); $scope.vlans = VLANsManager.getItems(); $scope.subnets = SubnetsManager.getItems(); $scope.interfaces = []; $scope.interfaceLinksMap = {}; $scope.interfaceErrorsByLinkId = {}; $scope.originalInterfaces = {}; $scope.showingMembers = []; $scope.focusInterface = null; $scope.selectedInterfaces = []; $scope.selectedMode = null; $scope.newInterface = {}; $scope.newBondInterface = {}; $scope.bondOptions = GeneralManager.getData("bond_options"); $scope.modes = [ { mode: LINK_MODE.AUTO, text: LINK_MODE_TEXTS[LINK_MODE.AUTO] }, { mode: LINK_MODE.STATIC, text: LINK_MODE_TEXTS[LINK_MODE.STATIC] }, { mode: LINK_MODE.DHCP, text: LINK_MODE_TEXTS[LINK_MODE.DHCP] }, { mode: LINK_MODE.LINK_UP, text: LINK_MODE_TEXTS[LINK_MODE.LINK_UP] } ]; // Give $parent which is the NodeDetailsController access to this scope // it will call `nodeLoaded` once the node has been fully loaded. 
        $scope.$parent.networkingController = $scope;

        // Sets loaded to true once both the node and the other required
        // managers for this scope have been loaded.
        function updateLoaded() {
            $scope.loaded = $scope.nodeHasLoaded && $scope.managersHaveLoaded;
            if($scope.loaded) {
                updateInterfaces();
            }
        }

        // Update the list of interfaces for the node. For each link on the
        // interface, the interface is duplicated in the list to make
        // rendering in a data-ng-repeat easier.
        function updateInterfaces() {
            $scope.originalInterfaces = {};
            angular.forEach($scope.node.interfaces, function(nic) {
                $scope.originalInterfaces[nic.id] = nic;
            });

            var interfaces = [];
            angular.forEach($scope.node.interfaces, function(nic) {
                // When an interface has a child that is a bond, that
                // interface is not included in the interface list. A parent
                // interface with a bond child can only have one child.
                if(nic.children.length === 1) {
                    var child = $scope.originalInterfaces[nic.children[0]];
                    if(child.type === INTERFACE_TYPE.BOND) {
                        // This parent now has a bond as a child; if this was
                        // the focusInterface then the focus needs to be
                        // removed. We only need to check the "id" not the
                        // "link_id", because if this interface did have
                        // aliases they have now been removed.
                        if(angular.isObject($scope.focusInterface) &&
                            $scope.focusInterface.id === nic.id) {
                            $scope.focusInterface = null;
                        }
                        return;
                    }
                }

                // When the interface is a bond, place the children as
                // members for that interface.
                if(nic.type === INTERFACE_TYPE.BOND) {
                    nic.members = [];
                    angular.forEach(nic.parents, function(parent) {
                        nic.members.push(
                            angular.copy($scope.originalInterfaces[parent]));
                    });
                }

                // Add the VLAN and fabric to the interface.
                nic.vlan = VLANsManager.getItemFromList(nic.vlan_id);
                if(angular.isObject(nic.vlan)) {
                    nic.fabric = FabricsManager.getItemFromList(
                        nic.vlan.fabric);
                }

                // Update the interface based on its links or duplicate the
                // interface if it has multiple links.
                if(nic.links.length === 0) {
                    // No links on this interface. The interface is either
                    // disabled or has no links (which means the interface
                    // is in LINK_UP mode).
                    nic.link_id = -1;
                    nic.subnet = null;
                    nic.mode = LINK_MODE.LINK_UP;
                    nic.ip_address = "";
                    interfaces.push(nic);
                } else {
                    var idx = 0;
                    angular.forEach(nic.links, function(link) {
                        var nic_copy = angular.copy(nic);
                        nic_copy.link_id = link.id;
                        nic_copy.subnet = SubnetsManager.getItemFromList(
                            link.subnet_id);
                        nic_copy.mode = link.mode;
                        nic_copy.ip_address = link.ip_address;
                        if(angular.isUndefined(nic_copy.ip_address)) {
                            nic_copy.ip_address = "";
                        }

                        // We don't want to deep copy the VLAN and fabric
                        // object so we set those back to the original.
                        nic_copy.vlan = nic.vlan;
                        nic_copy.fabric = nic.fabric;
                        if(idx > 0) {
                            // Each extra link is an alias on the original
                            // interface.
                            nic_copy.type = INTERFACE_TYPE.ALIAS;
                            nic_copy.name += ":" + idx;
                        }
                        idx++;
                        interfaces.push(nic_copy);
                    });
                }
            });

            // Update the scope's interfaces.
            $scope.interfaces = interfaces;

            // Update the scope interface links mapping.
            $scope.interfaceLinksMap = {};
            angular.forEach($scope.interfaces, function(nic) {
                var linkMaps = $scope.interfaceLinksMap[nic.id];
                if(angular.isUndefined(linkMaps)) {
                    linkMaps = {};
                    $scope.interfaceLinksMap[nic.id] = linkMaps;
                }
                linkMaps[nic.link_id] = nic;
            });

            // Clear the focusInterface if it no longer exists in the
            // interfaceLinksMap.
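            // interfaceLinksMap is keyed {nic.id: {link_id: nic}}; e.g.
            // (hypothetical) {42: {-1: eth0}, 43: {7: eth1, 8: eth1:1}},
            // where link_id -1 marks an interface without any links.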
if(angular.isObject($scope.focusInterface)) { var links = $scope.interfaceLinksMap[$scope.focusInterface.id]; if(angular.isUndefined(links)) { $scope.focusInterface = null; } else { var link = links[$scope.focusInterface.link_id]; if(angular.isUndefined(link)) { $scope.focusInterface = null; } } } // Update newInterface.parent if it has changed. updateNewInterface(); } // Return the original link object for the given interface. function mapNICToOriginalLink(nic) { var originalInteface = $scope.originalInterfaces[nic.id]; if(angular.isObject(originalInteface)) { var i, link = null; for(i = 0; i < originalInteface.links.length; i++) { link = originalInteface.links[i]; if(link.id === nic.link_id) { break; } } return link; } else { return null; } } // Leave single selection mode. function leaveSingleSelectionMode() { if($scope.selectedMode === SELECTION_MODE.SINGLE || $scope.selectedMode === SELECTION_MODE.ADD || $scope.selectedMode === SELECTION_MODE.DELETE) { $scope.selectedMode = SELECTION_MODE.NONE; } } // Update the new interface since the interfaces list has // been reloaded. function updateNewInterface() { if(angular.isObject($scope.newInterface.parent)) { var parentId = $scope.newInterface.parent.id; var linkId = $scope.newInterface.parent.link_id; var links = $scope.interfaceLinksMap[parentId]; if(angular.isObject(links)) { var newParent = links[linkId]; if(angular.isObject(newParent)) { $scope.newInterface.parent = newParent; var iType = $scope.newInterface.type; var isAlias = iType === INTERFACE_TYPE.ALIAS; var isVLAN = iType === INTERFACE_TYPE.VLAN; var canAddAlias = $scope.canAddAlias(newParent); var canAddVLAN = $scope.canAddVLAN(newParent); if(!canAddAlias && !canAddVLAN) { // Cannot do any adding now. $scope.newInterface = {}; leaveSingleSelectionMode(); } else { if(isAlias && !canAddAlias && canAddVLAN) { $scope.newInterface.type = "vlan"; $scope.addTypeChanged(); } else if(isVLAN && !canAddVLAN && canAddAlias) { $scope.newInterface.type = "alias"; $scope.addTypeChanged(); } } return; } } // Parent no longer exists. Exit the single selection modes. $scope.newInterface = {}; leaveSingleSelectionMode(); } } // Return the default VLAN for a fabric. function getDefaultVLAN(fabric) { return VLANsManager.getItemFromList(fabric.vlan_ids[0]); } // Return list of unused VLANs for an interface. Also remove the // ignoreVLANs from the returned list. function getUnusedVLANs(nic, ignoreVLANs) { var vlans = $filter('removeDefaultVLAN')($scope.vlans); vlans = $filter('filterByFabric')(vlans, nic.fabric); vlans = $filter('filterByUnusedForInterface')( vlans, nic, $scope.originalInterfaces); // Remove the VLAN's that should be ignored when getting the unused // VLANs. This is done to help the selection of the next default. if(angular.isUndefined(ignoreVLANs)) { ignoreVLANs = []; } angular.forEach(ignoreVLANs, function(vlan) { var i; for(i = 0; i < vlans.length; i++) { if(vlans[i].id === vlan.id) { vlans.splice(i, 1); break; } } }); return vlans; } // Return the currently selected interface objects. function getSelectedInterfaces() { var interfaces = []; angular.forEach($scope.selectedInterfaces, function(key) { var splitKey = key.split('/'); var links = $scope.interfaceLinksMap[splitKey[0]]; if(angular.isObject(links)) { var nic = links[splitKey[1]]; if(angular.isObject(nic)) { interfaces.push(nic); } } }); return interfaces; } // Get the next available name. 
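        // For example, with 'eth0' and 'eth1' already present,
        // getNextName('eth') returns 'eth2' (assuming the existing names
        // are encountered in ascending order).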
function getNextName(prefix) { var idx = 0; angular.forEach($scope.originalInterfaces, function(nic) { if(nic.name === prefix + idx) { idx++; } }); return prefix + idx; } // Called by $parent when the node has been loaded. $scope.nodeLoaded = function() { $scope.$watch("node.interfaces", updateInterfaces); $scope.nodeHasLoaded = true; updateLoaded(); }; // Return true if the networking information cannot be edited. // (it can't be changed when the node is in any state other // than Ready or Broken and the user is not a superuser) $scope.isAllNetworkingDisabled = function() { if (!$scope.isSuperUser()) { // If the user is not a superuser, disable the networking panel. return true; } else if (angular.isObject($scope.node) && ["Ready", "Broken"].indexOf($scope.node.status) === -1) { // If the node is not ready or broken, disable networking panel. return true; } else { // User must be a superuser and the node must be // either ready or broken. Enable it. return false; } }; // Return true if the interface is the boot interface or has a parent // that is the boot interface. $scope.isBootInterface = function(nic) { if(!angular.isObject(nic)) { return false; } if(nic.is_boot && nic.type !== INTERFACE_TYPE.ALIAS) { return true; } else if(nic.type === INTERFACE_TYPE.BOND) { var i; for(i = 0; i < nic.members.length; i++) { if(nic.members[i].is_boot) { return true; } } } return false; }; // Get the text for the type of the interface. $scope.getInterfaceTypeText = function(nic) { var text = INTERFACE_TYPE_TEXTS[nic.type]; if(angular.isDefined(text)) { return text; } else { return nic.type; } }; // Get the text for the link mode of the interface. $scope.getLinkModeText = function(nic) { var text = LINK_MODE_TEXTS[nic.mode]; if(angular.isDefined(text)) { return text; } else { return nic.mode; } }; // Get the text to display in the VLAN dropdown. $scope.getVLANText = function(vlan) { if(!angular.isObject(vlan)) { return ""; } if(vlan.vid === 0) { return "untagged"; } else if(angular.isString(vlan.name) && vlan.name.length > 0) { return vlan.vid + " (" + vlan.name + ")"; } else { return vlan.vid; } }; // Get the text to display in the subnet dropdown. $scope.getSubnetText = function(subnet) { if(!angular.isObject(subnet)) { return "Unconfigured"; } else if(angular.isString(subnet.name) && subnet.name.length > 0 && subnet.cidr !== subnet.name) { return subnet.cidr + " (" + subnet.name + ")"; } else { return subnet.cidr; } }; // Get the subnet from its ID. $scope.getSubnet = function(subnetId) { return SubnetsManager.getItemFromList(subnetId); }; // Toggle showing or hiding the members of the interface. $scope.toggleMembers = function(nic) { var idx = $scope.showingMembers.indexOf(nic.id); if(idx === -1) { $scope.showingMembers.push(nic.id); } else { $scope.showingMembers.splice(idx, 1); } }; // Return True when the interface is showing its members section. $scope.isShowingMembers = function(nic) { return $scope.showingMembers.indexOf(nic.id) > -1; }; // Save the following interface on the node. This will only save if // the interface has changed. 
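        // Sketch of the update it sends (values illustrative):
        //   NodesManager.updateInterface(node, nic.id,
        //       {name: 'eth0', vlan: 5001});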
$scope.saveInterface = function(nic) { var originalInteface = $scope.originalInterfaces[nic.id]; if($scope.isInterfaceNameInvalid(nic)) { nic.name = originalInteface.name; } else if(originalInteface.name !== nic.name || originalInteface.vlan_id !== nic.vlan.id) { var params = { "name": nic.name, "vlan": nic.vlan.id }; NodesManager.updateInterface($scope.node, nic.id, params).then( null, function(error) { // XXX blake_r: Just log the error in the console, but // we need to expose this as a better message to the // user. console.log(error); // Update the interfaces so it is back to the way it // was before the user changed it. updateInterfaces(); }); } }; // Set the focus to this interface. $scope.setFocusInterface = function(nic) { $scope.focusInterface = nic; }; // Clear the current focused interface. This will save the interface // if it has changed. $scope.clearFocusInterface = function(nic) { if(angular.isUndefined(nic)) { if($scope.focusInterface.type !== INTERFACE_TYPE.ALIAS) { $scope.saveInterface($scope.focusInterface); } $scope.saveInterfaceIPAddress($scope.focusInterface); $scope.focusInterface = null; } else if($scope.focusInterface === nic) { if($scope.focusInterface.type !== INTERFACE_TYPE.ALIAS) { $scope.saveInterface($scope.focusInterface); } $scope.saveInterfaceIPAddress($scope.focusInterface); $scope.focusInterface = null; } }; // Return True if the interface name that the user typed is invalid. $scope.isInterfaceNameInvalid = function(nic) { if(!angular.isString(nic.name) || nic.name.length === 0) { return true; } else if(angular.isArray($scope.node.interfaces)) { var i; for(i = 0; i < $scope.node.interfaces.length; i++) { var otherNic = $scope.node.interfaces[i]; if(otherNic.name === nic.name && otherNic.id !== nic.id) { return true; } } } return false; }; // Called when the fabric dropdown is changed. $scope.fabricChanged = function(nic) { // Update the VLAN on the node to be the default VLAN for that // fabric. The first VLAN for the fabric is the default. nic.vlan = getDefaultVLAN(nic.fabric); $scope.saveInterface(nic); }; // Return True if the link mode select should be disabled. $scope.isLinkModeDisabled = function(nic) { // This is only disabled when a subnet has not been selected. return !angular.isObject(nic.subnet); }; $scope.getInterfaceError = function(nic) { if(angular.isDefined(nic.link_id) && nic.link_id >= 0) { return $scope.interfaceErrorsByLinkId[nic.link_id]; } return null; }; // Called when the link mode for this interface and link has been // changed. $scope.saveInterfaceLink = function(nic) { var params = { "mode": nic.mode }; if(angular.isObject(nic.subnet)) { params.subnet = nic.subnet.id; } if(angular.isDefined(nic.link_id) && nic.link_id >= 0) { params.link_id = nic.link_id; delete $scope.interfaceErrorsByLinkId[nic.link_id]; } if(nic.mode === LINK_MODE.STATIC && nic.ip_address.length > 0) { params.ip_address = nic.ip_address; } return NodesManager.linkSubnet($scope.node, nic.id, params).then( null, function(error) { console.log(error); if(angular.isDefined(nic.link_id) && nic.link_id >= 0) { $scope.interfaceErrorsByLinkId[nic.link_id] = error; } // Update the interfaces so it is back to the way it // was before the user changed it. updateInterfaces(); }); }; // Called when the user changes the subnet. $scope.subnetChanged = function(nic) { if(!angular.isObject(nic.subnet)) { // Set to 'Unconfigured' so the link mode should be set to // 'link_up'. nic.mode = LINK_MODE.LINK_UP; } // Clear the IP address so a new one on the subnet is assigned. 
nic.ip_address = ""; $scope.saveInterfaceLink(nic); }; // Return True when the IP address input field should be shown. $scope.shouldShowIPAddress = function(nic) { if(nic.mode === LINK_MODE.STATIC) { // Check that the original has an IP address if it doesn't then // it should not be shown as the IP address still has not been // loaded over the websocket. If the subnets have been switched // then the IP address has been clear, don't show the IP // address until the original subnet and nic subnet match. var originalLink = mapNICToOriginalLink(nic); return ( angular.isObject(originalLink) && angular.isString(originalLink.ip_address) && originalLink.ip_address.length > 0 && angular.isObject(nic.subnet) && nic.subnet.id === originalLink.subnet_id); } else if(angular.isString(nic.ip_address) && nic.ip_address.length > 0) { return true; } else { return false; } }; // Return True if the interface IP address that the user typed is // invalid. $scope.isIPAddressInvalid = function(nic) { return (!angular.isString(nic.ip_address) || nic.ip_address.length === 0 || !ValidationService.validateIP(nic.ip_address) || !ValidationService.validateIPInNetwork( nic.ip_address, nic.subnet.cidr)); }; // Save the interface IP address. $scope.saveInterfaceIPAddress = function(nic) { var originalLink = mapNICToOriginalLink(nic); var prevIPAddress = originalLink.ip_address; if($scope.isIPAddressInvalid(nic)) { nic.ip_address = prevIPAddress; } else if(nic.ip_address !== prevIPAddress) { $scope.saveInterfaceLink(nic); } }; // Return unique key for the interface. $scope.getUniqueKey = function(nic) { return nic.id + "/" + nic.link_id; }; // Toggle selection of the interface. $scope.toggleInterfaceSelect = function(nic) { var key = $scope.getUniqueKey(nic); var idx = $scope.selectedInterfaces.indexOf(key); if(idx > -1) { $scope.selectedInterfaces.splice(idx, 1); } else { $scope.selectedInterfaces.push(key); } if($scope.selectedInterfaces.length > 1) { if($scope.selectedMode !== SELECTION_MODE.BOND) { $scope.selectedMode = SELECTION_MODE.MULTI; } } else if($scope.selectedInterfaces.length === 1) { $scope.selectedMode = SELECTION_MODE.SINGLE; } else { $scope.selectedMode = SELECTION_MODE.NONE; } }; // Return true when the interface is selected. $scope.isInterfaceSelected = function(nic) { return $scope.selectedInterfaces.indexOf( $scope.getUniqueKey(nic)) > -1; }; // Returns true if the interface is not selected $scope.cannotEditInterface = function(nic) { if ($scope.selectedMode === SELECTION_MODE.NONE) { return false; } else if ( $scope.selectedMode !== SELECTION_MODE.MULTI && $scope.isInterfaceSelected(nic)) { return false; } else { return true; } }; // Return true if the interface options is being shown. $scope.isShowingInterfaceOptions = function() { return $scope.selectedMode === SELECTION_MODE.SINGLE; }; // Return true if the interface delete confirm is being shown. $scope.isShowingDeleteComfirm = function() { return $scope.selectedMode === SELECTION_MODE.DELETE; }; // Return true if the interface add interface is being shown. $scope.isShowingAdd = function() { return $scope.selectedMode === SELECTION_MODE.ADD; }; // Return true if the alias can be added to interface. $scope.canAddAlias = function(nic) { if(!angular.isObject(nic)) { return false; } else if(nic.type === INTERFACE_TYPE.ALIAS) { return false; } else if(nic.links.length === 0 || nic.links[0].mode === LINK_MODE.LINK_UP) { return false; } else { return true; } }; // Return true if the VLAN can be added to interface. 
        $scope.canAddVLAN = function(nic) {
            if(!angular.isObject(nic)) {
                return false;
            } else if(nic.type === INTERFACE_TYPE.ALIAS ||
                nic.type === INTERFACE_TYPE.VLAN) {
                return false;
            }
            var unusedVLANs = getUnusedVLANs(nic);
            return unusedVLANs.length > 0;
        };

        // Return true if another VLAN can be added to this already being
        // added interface.
        $scope.canAddAnotherVLAN = function(nic) {
            if(!$scope.canAddVLAN(nic)) {
                return false;
            }
            var unusedVLANs = getUnusedVLANs(nic);
            return unusedVLANs.length > 1;
        };

        // Return the text to use for the remove link and message.
        $scope.getRemoveTypeText = function(nic) {
            if(nic.type === INTERFACE_TYPE.PHYSICAL) {
                return "interface";
            } else if(nic.type === INTERFACE_TYPE.VLAN) {
                return "VLAN";
            } else {
                return nic.type;
            }
        };

        // Enter remove mode.
        $scope.remove = function() {
            $scope.selectedMode = SELECTION_MODE.DELETE;
        };

        // Quickly enter remove mode by selecting the interface first.
        $scope.quickRemove = function(nic) {
            $scope.selectedInterfaces = [$scope.getUniqueKey(nic)];
            $scope.remove();
        };

        // Cancel the current mode and go back to single selection mode.
        $scope.cancel = function() {
            $scope.newInterface = {};
            $scope.newBondInterface = {};
            if($scope.selectedMode === SELECTION_MODE.CREATE_BOND) {
                $scope.selectedMode = SELECTION_MODE.MULTI;
            } else if($scope.selectedMode === SELECTION_MODE.CREATE_PHYSICAL) {
                $scope.selectedMode = SELECTION_MODE.NONE;
            } else {
                $scope.selectedMode = SELECTION_MODE.SINGLE;
            }
        };

        // Confirm the removal of interface.
        $scope.confirmRemove = function(nic) {
            $scope.selectedMode = SELECTION_MODE.NONE;
            $scope.selectedInterfaces = [];
            if(nic.type !== INTERFACE_TYPE.ALIAS) {
                NodesManager.deleteInterface($scope.node, nic.id);
            } else {
                NodesManager.unlinkSubnet($scope.node, nic.id, nic.link_id);
            }

            // Remove the interface from the list of available interfaces.
            var idx = $scope.interfaces.indexOf(nic);
            if(idx > -1) {
                $scope.interfaces.splice(idx, 1);
            }
        };

        // Enter add mode.
        $scope.add = function(type, nic) {
            // When this is called right after another VLAN was just added,
            // we remove its used VLAN from the available list.
            var ignoreVLANs = [];
            if(angular.isObject($scope.newInterface.vlan)) {
                ignoreVLANs.push($scope.newInterface.vlan);
            }

            // Get the default VLAN for the new interface.
            var vlans = getUnusedVLANs(nic, ignoreVLANs);
            var defaultVLAN = null;
            if(vlans.length > 0) {
                defaultVLAN = vlans[0];
            }
            var defaultSubnet = null;
            var defaultMode = LINK_MODE.LINK_UP;

            // An alias uses defaults based on its parent.
            if(type === INTERFACE_TYPE.ALIAS) {
                defaultVLAN = nic.vlan;
                defaultSubnet = VLANsManager.getSubnets(defaultVLAN)[0];
                defaultMode = LINK_MODE.AUTO;
            }

            // Setup the new interface and enter add mode.
            $scope.newInterface = {
                type: type,
                vlan: defaultVLAN,
                subnet: defaultSubnet,
                mode: defaultMode,
                parent: nic
            };
            $scope.selectedMode = SELECTION_MODE.ADD;
        };

        // Quickly enter add mode by selecting the interface first.
        $scope.quickAdd = function(nic) {
            $scope.selectedInterfaces = [$scope.getUniqueKey(nic)];
            var type = 'alias';
            if(!$scope.canAddAlias(nic)) {
                type = 'vlan';
            }
            $scope.add(type, nic);
        };

        // Return the name of the interface being added.
        $scope.getAddName = function() {
            if($scope.newInterface.type === INTERFACE_TYPE.ALIAS) {
                var aliasIdx = $scope.newInterface.parent.links.length;
                return $scope.newInterface.parent.name + ":" + aliasIdx;
            } else if ($scope.newInterface.type === INTERFACE_TYPE.VLAN) {
                return (
                    $scope.newInterface.parent.name + "." +
                    $scope.newInterface.vlan.vid);
            }
        };

        // Called when the type of interface is changed.
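        // Switching the type resets the defaults: an alias inherits the
        // parent's VLAN, first subnet and AUTO mode, while a VLAN starts on
        // the first unused VLAN with no subnet and LINK_UP mode.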
        $scope.addTypeChanged = function() {
            if($scope.newInterface.type === INTERFACE_TYPE.ALIAS) {
                $scope.newInterface.vlan = $scope.newInterface.parent.vlan;
                $scope.newInterface.subnet = VLANsManager.getSubnets(
                    $scope.newInterface.vlan)[0];
                $scope.newInterface.mode = LINK_MODE.AUTO;
            } else if($scope.newInterface.type === INTERFACE_TYPE.VLAN) {
                var vlans = getUnusedVLANs($scope.newInterface.parent);
                $scope.newInterface.vlan = null;
                if(vlans.length > 0) {
                    $scope.newInterface.vlan = vlans[0];
                }
                $scope.newInterface.subnet = null;
                $scope.newInterface.mode = LINK_MODE.LINK_UP;
            }
        };

        // Called when the VLAN is changed.
        $scope.addVLANChanged = function() {
            $scope.newInterface.subnet = null;
        };

        // Called when the subnet is changed.
        $scope.addSubnetChanged = function() {
            if(!angular.isObject($scope.newInterface.subnet)) {
                $scope.newInterface.mode = LINK_MODE.LINK_UP;
            }
        };

        // Perform the add action over the websocket.
        $scope.addInterface = function(type) {
            if($scope.newInterface.type === INTERFACE_TYPE.ALIAS) {
                // Add a link to the current interface.
                var nic = {
                    id: $scope.newInterface.parent.id,
                    mode: $scope.newInterface.mode,
                    subnet: $scope.newInterface.subnet,
                    ip_address: ""
                };
                $scope.saveInterfaceLink(nic);
            } else if($scope.newInterface.type === INTERFACE_TYPE.VLAN) {
                var params = {
                    parent: $scope.newInterface.parent.id,
                    vlan: $scope.newInterface.vlan.id,
                    mode: $scope.newInterface.mode
                };
                if(angular.isObject($scope.newInterface.subnet)) {
                    params.subnet = $scope.newInterface.subnet.id;
                }
                NodesManager.createVLANInterface($scope.node, params).then(
                    null, function(error) {
                        // Should do something better but for now just log
                        // the error.
                        console.log(error);
                    });
            }

            // Add again based on the clicked option.
            if(angular.isString(type)) {
                $scope.add(type, $scope.newInterface.parent);
            } else {
                $scope.selectedMode = SELECTION_MODE.NONE;
                $scope.selectedInterfaces = [];
                $scope.newInterface = {};
            }
        };

        // Return true if the networking information cannot be edited or if
        // this interface should be disabled in the list, i.e. when all
        // networking is disabled or a mode other than none, single or multi
        // (such as create bond) is active.
        $scope.isDisabled = function() {
            if ($scope.isAllNetworkingDisabled()) {
                return true;
            } else {
                return (
                    $scope.selectedMode !== SELECTION_MODE.NONE &&
                    $scope.selectedMode !== SELECTION_MODE.SINGLE &&
                    $scope.selectedMode !== SELECTION_MODE.MULTI);
            }
        };

        // Return true when a bond can be created based on the current
        // selection. A bond can only be created if no aliases or bonds are
        // selected and all selected interfaces are on the same VLAN.
        $scope.canCreateBond = function() {
            if($scope.selectedMode !== SELECTION_MODE.MULTI) {
                return false;
            }
            var interfaces = getSelectedInterfaces();
            var i, vlan;
            for(i = 0; i < interfaces.length; i++) {
                var nic = interfaces[i];
                if(nic.type === INTERFACE_TYPE.ALIAS ||
                    nic.type === INTERFACE_TYPE.BOND) {
                    return false;
                } else if(!angular.isObject(vlan)) {
                    vlan = nic.vlan;
                } else if(vlan !== nic.vlan) {
                    return false;
                }
            }
            return true;
        };

        // Return true when the create bond view is being shown.
        $scope.isShowingCreateBond = function() {
            return $scope.selectedMode === SELECTION_MODE.CREATE_BOND;
        };

        // Show the create bond view.
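        // The new bond is seeded with conservative defaults: the first
        // selected interface becomes the primary, and active-backup mode
        // with a slow LACP rate and layer2 hash policy is preselected.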
        $scope.showCreateBond = function() {
            if($scope.selectedMode === SELECTION_MODE.MULTI &&
                $scope.canCreateBond()) {
                $scope.selectedMode = SELECTION_MODE.CREATE_BOND;

                var parents = getSelectedInterfaces();
                $scope.newBondInterface = {
                    name: getNextName("bond"),
                    parents: parents,
                    primary: parents[0],
                    macAddress: "",
                    mode: "active-backup",
                    lacpRate: "slow",
                    xmitHashPolicy: "layer2"
                };
            }
        };

        // Return true if the new bond interface has a parent that is a boot
        // interface.
        $scope.getBondIsBootInterface = function() {
            if(!angular.isArray($scope.newBondInterface.parents)) {
                return false;
            }
            var i;
            for(i = 0; i < $scope.newBondInterface.parents.length; i++) {
                if($scope.newBondInterface.parents[i].is_boot) {
                    return true;
                }
            }
            return false;
        };

        // Return the MAC address that should be shown as a placeholder.
        // This is the MAC address of the primary interface.
        $scope.getBondPlaceholderMACAddress = function() {
            if(!angular.isObject($scope.newBondInterface.primary)) {
                return "";
            } else {
                return $scope.newBondInterface.primary.mac_address;
            }
        };

        // Return true if the user has entered a value in the MAC address
        // field but it is invalid.
        $scope.isMACAddressInvalid = function(macAddress, invalidEmpty) {
            if(angular.isUndefined(invalidEmpty)) {
                invalidEmpty = false;
            }
            if(!angular.isString(macAddress) || macAddress === "") {
                return invalidEmpty;
            }
            return !ValidationService.validateMAC(macAddress);
        };

        // Return true when the LACP rate selection should be shown.
        $scope.showLACPRate = function() {
            return $scope.newBondInterface.mode === "802.3ad";
        };

        // Return true when the XMIT hash policy should be shown.
        $scope.showXMITHashPolicy = function() {
            return (
                $scope.newBondInterface.mode === "balance-xor" ||
                $scope.newBondInterface.mode === "802.3ad" ||
                $scope.newBondInterface.mode === "balance-tlb");
        };

        // Return true if the bond cannot be added.
        $scope.cannotAddBond = function() {
            return (
                $scope.isInterfaceNameInvalid($scope.newBondInterface) ||
                $scope.isMACAddressInvalid(
                    $scope.newBondInterface.macAddress));
        };

        // Actually add the bond.
        $scope.addBond = function() {
            if($scope.cannotAddBond()) {
                return;
            }

            var parents = $scope.newBondInterface.parents.map(
                function(nic) { return nic.id; });
            var macAddress = $scope.newBondInterface.macAddress;
            if(macAddress === "") {
                macAddress = $scope.newBondInterface.primary.mac_address;
            }
            var params = {
                name: $scope.newBondInterface.name,
                mac_address: macAddress,
                parents: parents,
                vlan: $scope.newBondInterface.primary.vlan.id,
                bond_mode: $scope.newBondInterface.mode,
                bond_lacp_rate: $scope.newBondInterface.lacpRate,
                bond_xmit_hash_policy: $scope.newBondInterface.xmitHashPolicy
            };
            NodesManager.createBondInterface($scope.node, params).then(
                null, function(error) {
                    // Should do something better but for now just log
                    // the error.
                    console.log(error);
                });

            // Remove the parent interfaces so that they don't show up
            // in the listing until the new bond appears.
            angular.forEach($scope.newBondInterface.parents, function(parent) {
                var idx = $scope.interfaces.indexOf(parent);
                if(idx > -1) {
                    $scope.interfaces.splice(idx, 1);
                }
            });

            // Clear the bond interface and reset the mode.
            $scope.newBondInterface = {};
            $scope.selectedInterfaces = [];
            $scope.selectedMode = SELECTION_MODE.NONE;
        };

        // Return true when the create physical interface view is being
        // shown.
        $scope.isShowingCreatePhysical = function() {
            return $scope.selectedMode === SELECTION_MODE.CREATE_PHYSICAL;
        };

        // Show the create interface view.
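        // A new physical interface starts on the first fabric's default
        // VLAN with no subnet selected and LINK_UP mode.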
        $scope.showCreatePhysical = function() {
            if($scope.selectedMode === SELECTION_MODE.NONE) {
                $scope.selectedMode = SELECTION_MODE.CREATE_PHYSICAL;
                $scope.newInterface = {
                    name: getNextName("eth"),
                    macAddress: "",
                    macError: false,
                    errorMsg: null,
                    fabric: $scope.fabrics[0],
                    vlan: getDefaultVLAN($scope.fabrics[0]),
                    subnet: null,
                    mode: LINK_MODE.LINK_UP
                };
            }
        };

        // Called when the fabric changes on the new interface.
        $scope.newPhysicalFabricChanged = function() {
            $scope.newInterface.vlan = getDefaultVLAN(
                $scope.newInterface.fabric);
            $scope.newInterface.subnet = null;
            $scope.newInterface.mode = LINK_MODE.LINK_UP;
        };

        // Called when the subnet changes on the new interface.
        $scope.newPhysicalSubnetChanged = function() {
            if(!angular.isObject($scope.newInterface.subnet)) {
                $scope.newInterface.mode = LINK_MODE.LINK_UP;
            }
        };

        // Return true if the interface cannot be added.
        $scope.cannotAddPhysicalInterface = function() {
            return (
                $scope.isInterfaceNameInvalid($scope.newInterface) ||
                $scope.isMACAddressInvalid(
                    $scope.newInterface.macAddress, true));
        };

        // Actually add the new physical interface.
        $scope.addPhysicalInterface = function() {
            if($scope.cannotAddPhysicalInterface()) {
                return;
            }

            var params = {
                name: $scope.newInterface.name,
                mac_address: $scope.newInterface.macAddress,
                vlan: $scope.newInterface.vlan.id,
                mode: $scope.newInterface.mode
            };
            if(angular.isObject($scope.newInterface.subnet)) {
                params.subnet = $scope.newInterface.subnet.id;
            }
            $scope.newInterface.macError = false;
            $scope.newInterface.errorMsg = null;
            NodesManager.createPhysicalInterface($scope.node, params).then(
                function() {
                    // Clear the interface and reset the mode.
                    $scope.newInterface = {};
                    $scope.selectedMode = SELECTION_MODE.NONE;
                },
                function(errorStr) {
                    var error = JSONService.tryParse(errorStr);
                    if(!angular.isObject(error)) {
                        // Not a JSON error. That is unexpected here, so just
                        // log it to the console.
                        console.log(errorStr);
                    } else {
                        var macError = error.mac_address;
                        if(angular.isArray(macError)) {
                            $scope.newInterface.macError = true;
                            $scope.newInterface.errorMsg = macError[0];
                        }
                    }
                });
        };

        // Load all the required managers. NodesManager and GeneralManager
        // are loaded by the parent controller "NodeDetailsController".
        ManagerHelperService.loadManagers([
            FabricsManager,
            VLANsManager,
            SubnetsManager,
            UsersManager
        ]).then(function() {
            $scope.managersHaveLoaded = true;
            updateLoaded();
        });
    }]);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/node_details_storage.js0000644000000000000000000021700113056115004030231 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE).
 *
 * MAAS Node Storage Controller
 */

// Filter that is specific to the NodeStorageController. Removes disks from
// the available list when they are in use by the availableNew object.
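// Illustrative template usage (the exact markup lives in the HTML partials,
// not here): <tr ng-repeat="disk in available | removeAvailableByNew:availableNew">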
angular.module('MAAS').filter('removeAvailableByNew', function() { return function(disks, availableNew) { if(!angular.isObject(availableNew) || ( !angular.isObject(availableNew.device) && !angular.isArray(availableNew.devices))) { return disks; } var filtered = []; var single = true; if(angular.isArray(availableNew.devices)) { single = false; } angular.forEach(disks, function(disk) { if(single) { if(disk !== availableNew.device) { filtered.push(disk); } } else { var i, found = false; for(i = 0; i < availableNew.devices.length; i++) { if(disk === availableNew.devices[i]) { found = true; break; } } if(!found) { filtered.push(disk); } } }); return filtered; }; }); angular.module('MAAS').controller('NodeStorageController', [ '$scope', 'NodesManager', 'ConverterService', 'UsersManager', function($scope, NodesManager, ConverterService, UsersManager) { // From models/partitiontable.py - must be kept in sync. var INITIAL_PARTITION_OFFSET = 4 * 1024 * 1024; var END_OF_PARTITION_TABLE_SPACE = 1024 * 1024; var PARTITION_TABLE_EXTRA_SPACE = INITIAL_PARTITION_OFFSET + END_OF_PARTITION_TABLE_SPACE; var PREP_PARTITION_SIZE = 8 * 1024 * 1024; // From models/partition.py - must be kept in sync. var PARTITION_ALIGNMENT_SIZE = 4 * 1024 * 1024; var MIN_PARTITION_SIZE = PARTITION_ALIGNMENT_SIZE; // Different selection modes. var SELECTION_MODE = { NONE: null, SINGLE: "single", MUTLI: "multi", UNMOUNT: "unmount", UNFORMAT: "unformat", DELETE: "delete", FORMAT_AND_MOUNT: "format-mount", PARTITION: "partition", BCACHE: "bcache", RAID: "raid", VOLUME_GROUP: "volume-group", LOGICAL_VOLUME: "logical-volume" }; // Different available raid modes. var RAID_MODES = [ { level: "raid-0", title: "RAID 0", min_disks: 2, allows_spares: false, calculateSize: function(minSize, numDisks) { return minSize * numDisks; } }, { level: "raid-1", title: "RAID 1", min_disks: 2, allows_spares: true, calculateSize: function(minSize, numDisks) { return minSize; } }, { level: "raid-5", title: "RAID 5", min_disks: 3, allows_spares: true, calculateSize: function(minSize, numDisks) { return minSize * (numDisks - 1); } }, { level: "raid-6", title: "RAID 6", min_disks: 4, allows_spares: true, calculateSize: function(minSize, numDisks) { return minSize * (numDisks - 2); } }, { level: "raid-10", title: "RAID 10", min_disks: 3, allows_spares: true, calculateSize: function(minSize, numDisks) { return minSize * numDisks / 2; } } ]; $scope.column = 'name'; $scope.has_disks = false; $scope.filesystems = []; $scope.filesystemsMap = {}; $scope.filesystemMode = SELECTION_MODE.NONE; $scope.filesystemAllSelected = false; $scope.cachesets = []; $scope.cachesetsMap = {}; $scope.cachesetsMode = SELECTION_MODE.NONE; $scope.cachesetsAllSelected = false; $scope.available = []; $scope.availableMap = {}; $scope.availableMode = SELECTION_MODE.NONE; $scope.availableAllSelected = false; $scope.availableNew = {}; $scope.used = []; // Give $parent which is the NodeDetailsController access to this scope // it will call `nodeLoaded` once the node has been fully loaded. $scope.$parent.storageController = $scope; // Return True if the item has a filesystem and its mounted. function hasMountedFilesystem(item) { return angular.isObject(item.filesystem) && angular.isString(item.filesystem.mount_point) && item.filesystem.mount_point !== ""; } // Returns the fstype if the item has a filesystem and its unmounted. 
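        // For example (illustrative), a partition formatted as ext4 but not
        // mounted yields "ext4"; a mounted or unformatted one yields null.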
function hasFormattedUnmountedFilesystem(item) { if(angular.isObject(item.filesystem) && angular.isString(item.filesystem.fstype) && item.filesystem.fstype !== '' && (angular.isString(item.filesystem.mount_point) === false || item.filesystem.mount_point === '')) { return item.filesystem.fstype; }else{ return null; } } // Return True if the item is in use. function isInUse(item) { if(item.type === "cache-set") { return true; } else if(angular.isObject(item.filesystem)) { if(item.filesystem.is_format_fstype && angular.isString(item.filesystem.mount_point) && item.filesystem.mount_point !== "") { return true; } else if(!item.filesystem.is_format_fstype) { return true; } return false; } return item.available_size < MIN_PARTITION_SIZE; } // Return the tags formatted for ngTagInput. function getTags(disk) { var tags = []; angular.forEach(disk.tags, function(tag) { tags.push({ text: tag }); }); return tags; } // Return a unique key that will never change. function getUniqueKey(disk) { if(disk.type === "cache-set") { return "cache-set-" + disk.cache_set_id; } else { var key = disk.type + "-" + disk.block_id; if(angular.isNumber(disk.partition_id)) { key += "-" + disk.partition_id; } return key; } } // Update the list of filesystems. Only filesystems with a mount point // set go here. If no mount point is set, it goes in available. function updateFilesystems() { // Create the new list of filesystems. var filesystems = []; angular.forEach($scope.node.disks, function(disk) { if(hasMountedFilesystem(disk)) { var data = { "type": "filesystem", "name": disk.name, "size_human": disk.size_human, "fstype": disk.filesystem.fstype, "mount_point": disk.filesystem.mount_point, "block_id": disk.id, "partition_id": null, "original_type": disk.type, "original": disk }; if(disk.type === "virtual") { disk.parent_type = disk.parent.type; } filesystems.push(data); } angular.forEach(disk.partitions, function(partition) { if(hasMountedFilesystem(partition)) { filesystems.push({ "type": "filesystem", "name": partition.name, "size_human": partition.size_human, "fstype": partition.filesystem.fstype, "mount_point": partition.filesystem.mount_point, "block_id": disk.id, "partition_id": partition.id, "original_type": "partition", "original": partition }); } }); }); // Update the selected filesystems with the currently selected // filesystems. angular.forEach(filesystems, function(filesystem) { var key = getUniqueKey(filesystem); var oldFilesystem = $scope.filesystemsMap[key]; if(angular.isObject(oldFilesystem)) { filesystem.$selected = oldFilesystem.$selected; } else { filesystem.$selected = false; } }); // Update the filesystems and filesystemsMap on the scope. $scope.filesystems = filesystems; $scope.filesystemsMap = {}; angular.forEach(filesystems, function(filesystem) { $scope.filesystemsMap[getUniqueKey(filesystem)] = filesystem; }); // Update the selection mode. $scope.updateFilesystemSelection(false); } // Update the list of cache sets. function updateCacheSets() { // Create the new list of cache sets. var cachesets = []; angular.forEach($scope.node.disks, function(disk) { if(disk.type === "cache-set") { cachesets.push({ "type": "cache-set", "name": disk.name, "size_human": disk.size_human, "cache_set_id": disk.id, "used_by": disk.used_for }); } }); // Update the selected cache sets with the currently selected // cache sets. 
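        // (Selection state only exists client-side, so it has to be carried
        // over every time the websocket pushes a fresh list.)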
angular.forEach(cachesets, function(cacheset) { var key = getUniqueKey(cacheset); var oldCacheSet = $scope.cachesetsMap[key]; if(angular.isObject(oldCacheSet)) { cacheset.$selected = oldCacheSet.$selected; } else { cacheset.$selected = false; } }); // Update the cachesets and cachesetsMap on the scope. $scope.cachesets = cachesets; $scope.cachesetsMap = {}; angular.forEach(cachesets, function(cacheset) { $scope.cachesetsMap[getUniqueKey(cacheset)] = cacheset; }); // Update the selection mode. $scope.updateCacheSetsSelection(false); } // Update list of all available disks. function updateAvailable() { var available = []; angular.forEach($scope.node.disks, function(disk) { if(!isInUse(disk)) { var has_partitions = false; if(angular.isArray(disk.partitions) && disk.partitions.length > 0) { has_partitions = true; } var data = { "name": disk.name, "size_human": disk.size_human, "available_size_human": disk.available_size_human, "used_size_human": disk.used_size_human, "type": disk.type, "model": disk.model, "serial": disk.serial, "tags": getTags(disk), "fstype": hasFormattedUnmountedFilesystem(disk), "mount_point": null, "block_id": disk.id, "partition_id": null, "has_partitions": has_partitions, "is_boot": disk.is_boot, "original": disk }; if(disk.type === "virtual") { data.parent_type = disk.parent.type; } available.push(data); } angular.forEach(disk.partitions, function(partition) { if(!isInUse(partition)) { available.push({ "name": partition.name, "size_human": partition.size_human, "available_size_human": ( partition.available_size_human), "used_size_human": partition.used_size_human, "type": "partition", "model": "", "serial": "", "tags": [], "fstype": hasFormattedUnmountedFilesystem(partition), "mount_point": null, "block_id": disk.id, "partition_id": partition.id, "has_partitions": false, "is_boot": false, "original": partition }); } }); }); // Update the selected available disks with the currently selected // available disks. Also copy the $options so they are not lost // for the current action. angular.forEach(available, function(disk) { var key = getUniqueKey(disk); var oldDisk = $scope.availableMap[key]; if(angular.isObject(oldDisk)) { disk.$selected = oldDisk.$selected; disk.$options = oldDisk.$options; } else { disk.$selected = false; disk.$options = {}; } }); // Update available and availableMap on the scope. $scope.available = available; $scope.availableMap = {}; angular.forEach(available, function(disk) { $scope.availableMap[getUniqueKey(disk)] = disk; }); // Update device or devices on the availableNew object to be // there new objects. if(angular.isObject($scope.availableNew)) { // Update device. if(angular.isObject($scope.availableNew.device)) { var key = getUniqueKey($scope.availableNew.device); $scope.availableNew.device = $scope.availableMap[key]; // Update devices. } else if(angular.isArray($scope.availableNew.devices)) { var newDevices = []; angular.forEach( $scope.availableNew.devices, function(device) { var key = getUniqueKey(device); var newDevice = $scope.availableMap[key]; if(angular.isObject(newDevice)) { newDevices.push(newDevice); } }); $scope.availableNew.devices = newDevices; } } // Update the selection mode. $scope.updateAvailableSelection(false); } // Update list of all used disks. 
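        // Used rows are display-only, so only the identifying fields and
        // the used_for text are copied below.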
function updateUsed() { var used = []; angular.forEach($scope.node.disks, function(disk) { if(isInUse(disk) && disk.type !== "cache-set") { var data = { "name": disk.name, "type": disk.type, "model": disk.model, "serial": disk.serial, "tags": getTags(disk), "used_for": disk.used_for, "is_boot": disk.is_boot }; if(disk.type === "virtual") { data.parent_type = disk.parent.type; } used.push(data); } angular.forEach(disk.partitions, function(partition) { if(isInUse(partition) && partition.type !== "cache-set") { used.push({ "name": partition.name, "type": "partition", "model": "", "serial": "", "tags": [], "used_for": partition.used_for, "is_boot": false }); } }); }); $scope.used = used; } // Updates the filesystem, available, and used list. function updateDisks() { if(angular.isArray($scope.node.disks)) { $scope.has_disks = $scope.node.disks.length > 0; updateFilesystems(); updateCacheSets(); updateAvailable(); updateUsed(); } else { $scope.has_disks = false; $scope.filesystems = []; $scope.filesystemsMap = {}; $scope.filesystemMode = SELECTION_MODE.NONE; $scope.filesystemAllSelected = false; $scope.cachesets = []; $scope.cachesetsMap = {}; $scope.cachesetsMode = SELECTION_MODE.NONE; $scope.cachesetsAllSelected = false; $scope.available = []; $scope.availableMap = {}; $scope.availableMode = SELECTION_MODE.NONE; $scope.availableAllSelected = false; $scope.availableNew = {}; $scope.used = []; } } // Deselect all items in the array. function deselectAll(items) { angular.forEach(items, function(item) { item.$selected = false; }); } // Capitalize the first letter of the string. function capitalizeFirstLetter(string) { return string.charAt(0).toUpperCase() + string.slice(1); } // Return true if the string is a number. function isNumber(string) { var pattern = /^-?\d+\.?\d*$/; return pattern.test(string); } // Extract the index from the name based on prefix. function getIndexFromName(prefix, name) { var pattern = new RegExp("^" + prefix + "([0-9]+)$"); var match = pattern.exec(name); if(angular.isArray(match) && match.length === 2) { return parseInt(match[1], 10); } } // Get the next device name based on prefix. function getNextName(prefix) { var idx = -1; angular.forEach($scope.node.disks, function(disk) { var dIdx = getIndexFromName(prefix, disk.name); if(angular.isNumber(dIdx)) { idx = Math.max(idx, dIdx); } angular.forEach(disk.partitions, function(partition) { dIdx = getIndexFromName(prefix, partition.name); if(angular.isNumber(dIdx)) { idx = Math.max(idx, dIdx); } }); }); return prefix + (idx + 1); } // Return true if another disk exists with name. function isNameAlreadyInUse(name, exclude_disk) { if(!angular.isArray($scope.node.disks)) { return false; } var i, j; for(i = 0; i < $scope.node.disks.length; i++) { var disk = $scope.node.disks[i]; if(disk.name === name) { if(!angular.isObject(exclude_disk) || exclude_disk.type === "partition" || exclude_disk.block_id !== disk.id) { return true; } } if(angular.isArray(disk.partitions)) { for(j = 0; j < disk.partitions.length; j++) { var partition = disk.partitions[j]; if(partition.name === name) { if(!angular.isObject(exclude_disk) || exclude_disk.type !== "partition" || exclude_disk.partition_id !== partition.id) { return true; } } } } } return false; } // Return true if the disk is a logical volume. function isLogicalVolume(disk) { return disk.type === "virtual" && disk.parent_type === "lvm-vg"; } // Called by $parent when the node has been loaded. 
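        // The watch registered below re-derives the filesystem, cache set,
        // available and used lists whenever node.disks changes.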
$scope.nodeLoaded = function() { $scope.$watch("node.disks", updateDisks); }; // Return true if the item can be a boot disk. $scope.isBootDiskDisabled = function(item, section) { if(item.type !== "physical") { return true; } // If the disk is in the used section and does not have any // partitions then it cannot be a boot disk. Boot disk either // require that it be unused or that some partitions exists // on the disk. This is because the boot disk has to have a // partition table header. if(section === "used") { return !item.has_partitions; } return false; }; // Called to change the disk to a boot disk. $scope.setAsBootDisk = function(item) { // Do nothing if already the boot disk. if(item.is_boot) { return; } // Do nothing if disabled. if($scope.isBootDiskDisabled(item)) { return; } NodesManager.setBootDisk($scope.node, item.block_id); }; // Return array of selected filesystems. $scope.getSelectedFilesystems = function() { var filesystems = []; angular.forEach($scope.filesystems, function(filesystem) { if(filesystem.$selected) { filesystems.push(filesystem); } }); return filesystems; }; // Update the currect mode for the filesystem section and the all // selected value. $scope.updateFilesystemSelection = function(force) { if(angular.isUndefined(force)) { force = false; } var filesystems = $scope.getSelectedFilesystems(); if(filesystems.length === 0) { $scope.filesystemMode = SELECTION_MODE.NONE; } else if(filesystems.length === 1 && force) { $scope.filesystemMode = SELECTION_MODE.SINGLE; } else if(force) { $scope.filesystemMode = SELECTION_MODE.MUTLI; } if($scope.filesystems.length === 0) { $scope.filesystemAllSelected = false; } else if(filesystems.length === $scope.filesystems.length) { $scope.filesystemAllSelected = true; } else { $scope.filesystemAllSelected = false; } }; // Toggle the selection of the filesystem. $scope.toggleFilesystemSelect = function(filesystem) { filesystem.$selected = !filesystem.$selected; $scope.updateFilesystemSelection(true); }; // Toggle the selection of all filesystems. $scope.toggleFilesystemAllSelect = function() { angular.forEach($scope.filesystems, function(filesystem) { if($scope.filesystemAllSelected) { filesystem.$selected = false; } else { filesystem.$selected = true; } }); $scope.updateFilesystemSelection(true); }; // Return true if checkboxes in the filesystem section should be // disabled. $scope.isFilesystemsDisabled = function() { return (( $scope.filesystemMode !== SELECTION_MODE.NONE && $scope.filesystemMode !== SELECTION_MODE.SINGLE && $scope.filesystemMode !== SELECTION_MODE.MUTLI) || $scope.isAllStorageDisabled()); }; // Cancel the current filesystem operation. $scope.filesystemCancel = function() { $scope.updateFilesystemSelection(true); }; // Enter unmount mode. $scope.filesystemUnmount = function() { $scope.filesystemMode = SELECTION_MODE.UNMOUNT; }; // Quickly enter unmount by selecting the filesystem first. $scope.quickFilesystemUnmount = function(filesystem) { deselectAll($scope.filesystems); filesystem.$selected = true; $scope.updateFilesystemSelection(true); $scope.filesystemUnmount(); }; // Confirm the unmount action for filesystem. $scope.filesystemConfirmUnmount = function(filesystem) { NodesManager.updateFilesystem( $scope.node, filesystem.block_id, filesystem.partition_id, filesystem.fstype, null); var idx = $scope.filesystems.indexOf(filesystem); $scope.filesystems.splice(idx, 1); $scope.updateFilesystemSelection(); }; // Enter delete mode. 
        $scope.filesystemDelete = function() {
            $scope.filesystemMode = SELECTION_MODE.DELETE;
        };

        // Quickly enter delete by selecting the filesystem first.
        $scope.quickFilesystemDelete = function(filesystem) {
            deselectAll($scope.filesystems);
            filesystem.$selected = true;
            $scope.updateFilesystemSelection(true);
            $scope.filesystemDelete();
        };

        // Confirm the delete action for filesystem.
        $scope.filesystemConfirmDelete = function(filesystem) {
            if(filesystem.original_type === "partition") {
                // Delete the partition.
                NodesManager.deletePartition(
                    $scope.node, filesystem.original.id);
            } else {
                // Delete the disk.
                NodesManager.deleteDisk(
                    $scope.node, filesystem.original.id);
            }
            var idx = $scope.filesystems.indexOf(filesystem);
            $scope.filesystems.splice(idx, 1);
            $scope.updateFilesystemSelection();
        };

        // Return true if the disk has an unmounted filesystem.
        $scope.hasUnmountedFilesystem = function(disk) {
            if(angular.isString(disk.fstype) && disk.fstype !== "") {
                if(!angular.isString(disk.mount_point) ||
                    disk.mount_point === "") {
                    return true;
                }
            }
            return false;
        };

        // Return true if the free space label should be shown.
        $scope.showFreeSpace = function(disk) {
            if(disk.type === "lvm-vg") {
                return true;
            } else if(disk.type === "physical" || disk.type === "virtual") {
                return disk.has_partitions;
            } else {
                return false;
            }
        };

        // Return the device type for the disk.
        $scope.getDeviceType = function(disk) {
            if(angular.isUndefined(disk)) {
                return "";
            }

            if(disk.type === "virtual") {
                if(disk.parent_type === "lvm-vg") {
                    return "Logical volume";
                } else if(disk.parent_type.indexOf("raid-") === 0) {
                    return "RAID " + disk.parent_type.split("-")[1];
                } else {
                    return capitalizeFirstLetter(disk.parent_type);
                }
            } else if(disk.type === "lvm-vg") {
                return "Volume group";
            } else {
                return capitalizeFirstLetter(disk.type);
            }
        };

        // Return array of selected available disks.
        $scope.getSelectedAvailable = function() {
            var available = [];
            angular.forEach($scope.available, function(disk) {
                if(disk.$selected) {
                    available.push(disk);
                }
            });
            return available;
        };

        // Update the current mode for the available section and the all
        // selected value.
        $scope.updateAvailableSelection = function(force) {
            if(angular.isUndefined(force)) {
                force = false;
            }

            var available = $scope.getSelectedAvailable();
            if(available.length === 0) {
                $scope.availableMode = SELECTION_MODE.NONE;
            } else if(available.length === 1 && force) {
                $scope.availableMode = SELECTION_MODE.SINGLE;
            } else if(force) {
                $scope.availableMode = SELECTION_MODE.MUTLI;
            }

            if($scope.available.length === 0) {
                $scope.availableAllSelected = false;
            } else if(available.length === $scope.available.length) {
                $scope.availableAllSelected = true;
            } else {
                $scope.availableAllSelected = false;
            }
        };

        // Toggle the selection of the available disk.
        $scope.toggleAvailableSelect = function(disk) {
            disk.$selected = !disk.$selected;
            $scope.updateAvailableSelection(true);
        };

        // Toggle the selection of all available disks.
        $scope.toggleAvailableAllSelect = function() {
            angular.forEach($scope.available, function(disk) {
                if(!$scope.availableAllSelected) {
                    disk.$selected = true;
                } else {
                    disk.$selected = false;
                }
            });
            $scope.updateAvailableSelection(true);
        };

        // Return true if checkboxes in the available section should be
        // disabled.
        $scope.isAvailableDisabled = function() {
            return ((
                $scope.availableMode !== SELECTION_MODE.NONE &&
                $scope.availableMode !== SELECTION_MODE.SINGLE &&
                $scope.availableMode !== SELECTION_MODE.MUTLI) ||
                $scope.isAllStorageDisabled());
        };

        // Return true if the disk can be formatted and mounted.
$scope.canFormatAndMount = function(disk) { if($scope.isAllStorageDisabled()) { return false; } else if(disk.type === "lvm-vg" || disk.has_partitions) { return false; } else if(disk.type === "physical" && disk.original.is_boot) { return false; } else { return true; } }; // Return the text for the format and mount button. $scope.getFormatAndMountButtonText = function(disk) { if($scope.hasUnmountedFilesystem(disk)) { return "Mount"; } else { return "Format"; } }; // Return the text for the partition button. $scope.getPartitionButtonText = function(disk) { if(disk.has_partitions) { return "Add partition"; } else { return "Partition"; } }; $scope.availablePartitionSpace = function(disk) { var space_to_reserve = 0; if(!angular.isString(disk.original.partition_table_type) || disk.original.partition_table_type === "") { // Disk has no partition table, so reserve space for it. space_to_reserve = PARTITION_TABLE_EXTRA_SPACE; // ppc64el node requires that space be saved for the prep // partition. if($scope.node.architecture.indexOf("ppc64el") === 0) { space_to_reserve += PREP_PARTITION_SIZE; } } return ConverterService.roundByBlockSize( disk.original.available_size - space_to_reserve, PARTITION_ALIGNMENT_SIZE); }; // Return true if a partition can be added to disk. $scope.canAddPartition = function(disk) { if(!$scope.isSuperUser() || $scope.isAllStorageDisabled()) { return false; } else if(disk.type === "partition" || disk.type === "lvm-vg") { return false; } else if(disk.type === "virtual" && (disk.parent_type === "lvm-vg" || disk.parent_type === "bcache")) { return false; } else if(angular.isString(disk.fstype) && disk.fstype !== "") { return false; } // If we can fit a minimum partition, we're golden. return ($scope.availablePartitionSpace(disk) - MIN_PARTITION_SIZE) >= 0; }; // Return true if the name is invalid. $scope.isNameInvalid = function(disk) { if(disk.name === "") { return false; } else if(isNameAlreadyInUse(disk.name, disk)) { return true; } else { return false; } }; // Save the new name of the disk if it changed. $scope.saveAvailableName = function(disk) { if(disk.name === "") { disk.name = disk.original.name; } else if(disk.name !== disk.original.name) { var name = disk.name; if(isLogicalVolume(disk)){ var parentName = disk.original.name.split("-")[0] + "-"; name = name.slice(parentName.length); } NodesManager.updateDisk($scope.node, disk.block_id, { name: name }); } }; // Prevent logical volumes from changing the volume group prefix. $scope.nameHasChanged = function(disk) { if(isLogicalVolume(disk)) { var parentName = disk.original.name.split("-")[0] + "-"; var startsWith = disk.name.indexOf(parentName); if(startsWith !== 0) { disk.name = parentName; } } }; // Cancel the current available operation. $scope.availableCancel = function() { $scope.updateAvailableSelection(true); $scope.availableNew = {}; }; // Enter unformat mode. $scope.availableUnformat = function() { $scope.availableMode = SELECTION_MODE.UNFORMAT; }; // Confirm the unformat action. $scope.availableConfirmUnformat = function(disk) { NodesManager.updateFilesystem( $scope.node, disk.block_id, disk.partition_id, null, null); // Clear the fstype. disk.fstype = null; $scope.updateAvailableSelection(true); }; // Enter format and mount mode. $scope.availableFormatAndMount = function(disk) { disk.$options = { fstype: disk.fstype || "ext4", mountPoint: disk.mount_point || "" }; $scope.availableMode = SELECTION_MODE.FORMAT_AND_MOUNT; }; // Quickly enter the format and mount mode. 
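        // ("Quick" handlers deselect everything, select the clicked row and
        // then enter the matching mode in one step.)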
$scope.availableQuickFormatAndMount = function(disk) { deselectAll($scope.available); disk.$selected = true; $scope.updateAvailableSelection(true); $scope.availableFormatAndMount(disk); }; // Return the text for the submit button in the format and mount mode. $scope.getAvailableFormatSubmitText = function(disk) { if(angular.isString(disk.$options.mountPoint) && disk.$options.mountPoint !== "") { return "Mount"; } else { return "Format"; } }; // Confirm the format and mount action. $scope.availableConfirmFormatAndMount = function(disk) { // Do nothing if its invalid. if($scope.isMountPointInvalid(disk.$options.mountPoint)) { return; } // Update the filesystem. NodesManager.updateFilesystem( $scope.node, disk.block_id, disk.partition_id, disk.$options.fstype, disk.$options.mountPoint); // Set the options on the object so no flicker occurs while waiting // for the new object to be received. disk.fstype = disk.$options.fstype; disk.mount_point = disk.$options.mountPoint; $scope.updateAvailableSelection(true); // If the mount_point is set the we need to transition this to // the filesystem section. if(angular.isString(disk.mount_point) && disk.mount_point !== "") { $scope.filesystems.push({ "name": disk.name, "size_human": disk.size_human, "fstype": disk.fstype, "mount_point": disk.mount_point, "block_id": disk.block_id, "partition_id": disk.partition_id }); // Remove the selected disk from available. var idx = $scope.available.indexOf(disk); $scope.available.splice(idx, 1); $scope.updateAvailableSelection(true); } }; // Return true if the mount point is invalid. $scope.isMountPointInvalid = function(mountPoint) { if(angular.isUndefined(mountPoint) || mountPoint === "") { return false; } else if(mountPoint[0] !== "/") { return true; } else { return false; } }; // Return true if the disk can be deleted. $scope.canDelete = function(disk) { if(!$scope.isSuperUser() || $scope.isAllStorageDisabled()) { return false; } else if(disk.type === "lvm-vg") { return disk.original.used_size === 0; } else { return !disk.has_partitions; } }; // Enter unformat mode. $scope.availableUnformat = function() { $scope.availableMode = SELECTION_MODE.UNFORMAT; }; // Quickly enter unformat mode. $scope.availableQuickUnformat = function(disk) { deselectAll($scope.available); disk.$selected = true; $scope.updateAvailableSelection(true); $scope.availableUnformat(); }; // Enter delete mode. $scope.availableDelete = function() { $scope.availableMode = SELECTION_MODE.DELETE; }; // Quickly enter delete mode. $scope.availableQuickDelete = function(disk) { deselectAll($scope.available); disk.$selected = true; $scope.updateAvailableSelection(true); $scope.availableDelete(); }; // Return the text for remove confirmation message. $scope.getRemoveTypeText = function(disk) { if(disk.type === "filesystem") { disk = disk.original; } if(disk.type === "physical") { return "physical disk"; } else if(disk.type === "partition") { return "partition"; } else if(disk.type === "lvm-vg") { return "volume group"; } else if(disk.type === "virtual") { if(disk.parent_type === "lvm-vg") { return "logical volume"; } else if(disk.parent_type.indexOf("raid-") === 0) { return "RAID " + disk.parent_type.split("-")[1] + " disk"; } else { return disk.parent_type + " disk"; } } }; // Delete the disk, partition, or volume group. $scope.availableConfirmDelete = function(disk) { if(disk.type === "lvm-vg") { // Delete the volume group. NodesManager.deleteVolumeGroup( $scope.node, disk.block_id); } else if(disk.type === "partition") { // Delete the partition. 
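        // Deletion below is optimistic: the row is spliced out locally right
        // away and the next websocket update reconciles the real state.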
NodesManager.deletePartition( $scope.node, disk.partition_id); } else { // Delete the disk. NodesManager.deleteDisk( $scope.node, disk.block_id); } // Remove the selected disk from available. var idx = $scope.available.indexOf(disk); $scope.available.splice(idx, 1); $scope.updateAvailableSelection(true); }; // Enter partition mode. $scope.availablePartiton = function(disk) { $scope.availableMode = SELECTION_MODE.PARTITION; // Set starting size to the maximum available space. var size_and_units = disk.available_size_human.split(" "); disk.$options = { size: size_and_units[0], sizeUnits: size_and_units[1], fstype: null, mountPoint: "" }; }; // Quickly enter partition mode. $scope.availableQuickPartition = function(disk) { deselectAll($scope.available); disk.$selected = true; $scope.updateAvailableSelection(true); $scope.availablePartiton(disk); }; // Get the new name of the partition. $scope.getAddPartitionName = function(disk) { var length, partitions = disk.original.partitions; if(angular.isArray(partitions)) { length = partitions.length; } else { length = 0; } if(disk.original.partition_table_type === "mbr" && length > 2) { return disk.name + "-part" + (length + 2); } else if($scope.node.architecture.indexOf("ppc64el") === 0 && disk.original.is_boot) { // Boot disk on ppc64el machines skip the first partition as // its reserved for the prep partition. return disk.name + "-part" + (length + 2); } else { return disk.name + "-part" + (length + 1); } }; // Return true if the size is invalid. $scope.isAddPartitionSizeInvalid = function(disk) { if(disk.$options.size === "" || !isNumber(disk.$options.size)) { return true; } else { var bytes = ConverterService.unitsToBytes( disk.$options.size, disk.$options.sizeUnits); if(bytes < MIN_PARTITION_SIZE) { return true; } else if(bytes > disk.original.available_size) { // Round the size down to the lowest tolerance for that // to see if it now fits. var rounded = ConverterService.roundUnits( disk.$options.size, disk.$options.sizeUnits); if(rounded > disk.original.available_size) { return true; } else { return false; } } else { return false; } } }; // Confirm the partition creation. $scope.availableConfirmPartition = function(disk) { // Do nothing if not valid. if($scope.isAddPartitionSizeInvalid(disk) || $scope.isMountPointInvalid(disk.$options.mountPoint)) { return; } // Get the bytes to create the partition. var bytes = ConverterService.unitsToBytes( disk.$options.size, disk.$options.sizeUnits); // Accepting prefilled defaults means use whole disk (lp:1509535). var size_and_units = disk.original.available_size_human.split(" "); if(disk.$options.size === size_and_units[0] && disk.$options.sizeUnits === size_and_units[1]) { bytes = disk.original.available_size; } var removeDisk = false; var available_space = $scope.availablePartitionSpace(disk); if(bytes >= available_space) { // Clamp to available space. bytes = available_space; // Remove the disk if partition uses all the remaining space. removeDisk = true; } // Create the partition. var params = {}; if(angular.isString(disk.$options.fstype) && disk.$options.fstype !== "") { params.fstype = disk.$options.fstype; if(disk.$options.mountPoint !== "") { params.mount_point = disk.$options.mountPoint; } } NodesManager.createPartition( $scope.node, disk.block_id, bytes, params); // Remove the disk if needed. if(removeDisk) { var idx = $scope.available.indexOf(disk); $scope.available.splice(idx, 1); } $scope.updateAvailableSelection(true); }; // Return array of selected cache sets. 
$scope.getSelectedCacheSets = function() { var cachesets = []; angular.forEach($scope.cachesets, function(cacheset) { if(cacheset.$selected) { cachesets.push(cacheset); } }); return cachesets; }; // Update the currect mode for the cache sets section and the all // selected value. $scope.updateCacheSetsSelection = function(force) { if(angular.isUndefined(force)) { force = false; } var cachesets = $scope.getSelectedCacheSets(); if(cachesets.length === 0) { $scope.cachesetsMode = SELECTION_MODE.NONE; } else if(cachesets.length === 1 && force) { $scope.cachesetsMode = SELECTION_MODE.SINGLE; } else if(force) { $scope.cachesetsMode = SELECTION_MODE.MUTLI; } if($scope.cachesets.length === 0) { $scope.cachesetsAllSelected = false; } else if(cachesets.length === $scope.cachesets.length) { $scope.cachesetsAllSelected = true; } else { $scope.cachesetsAllSelected = false; } }; // Toggle the selection of the filesystem. $scope.toggleCacheSetSelect = function(cacheset) { cacheset.$selected = !cacheset.$selected; $scope.updateCacheSetsSelection(true); }; // Toggle the selection of all filesystems. $scope.toggleCacheSetAllSelect = function() { angular.forEach($scope.cachesets, function(cacheset) { if($scope.cachesetsAllSelected) { cacheset.$selected = false; } else { cacheset.$selected = true; } }); $scope.updateCacheSetsSelection(true); }; // Return true if checkboxes in the cache sets section should be // disabled. $scope.isCacheSetsDisabled = function() { return (( $scope.isAllStorageDisabled() && !$scope.isSuperUser()) || ( $scope.cachesetsMode !== SELECTION_MODE.NONE && $scope.cachesetsMode !== SELECTION_MODE.SINGLE && $scope.cachesetsMode !== SELECTION_MODE.MUTLI)); }; // Cancel the current cache set operation. $scope.cacheSetCancel = function() { $scope.updateCacheSetsSelection(true); }; // Can delete the cache set. $scope.canDeleteCacheSet = function(cacheset) { return (cacheset.used_by === "" && !$scope.isAllStorageDisabled() && $scope.isSuperUser()); }; // Enter delete mode. $scope.cacheSetDelete = function() { $scope.cachesetsMode = SELECTION_MODE.DELETE; }; // Quickly enter delete by selecting the cache set first. $scope.quickCacheSetDelete = function(cacheset) { deselectAll($scope.cachesets); cacheset.$selected = true; $scope.updateCacheSetsSelection(true); $scope.cacheSetDelete(); }; // Confirm the delete action for cache set. $scope.cacheSetConfirmDelete = function(cacheset) { NodesManager.deleteCacheSet( $scope.node, cacheset.cache_set_id); var idx = $scope.cachesets.indexOf(cacheset); $scope.cachesets.splice(idx, 1); $scope.updateCacheSetsSelection(); }; // Return true if a cache set can be created. $scope.canCreateCacheSet = function() { if($scope.isAvailableDisabled() || !$scope.isSuperUser()) { return false; } var selected = $scope.getSelectedAvailable(); if(selected.length === 1) { return ( !selected[0].has_partitions && !$scope.hasUnmountedFilesystem(selected[0]) && selected[0].type !== "lvm-vg"); } return false; }; // Called to create a cache set. $scope.createCacheSet = function() { if(!$scope.canCreateCacheSet()) { return; } // Create cache set. var disk = $scope.getSelectedAvailable()[0]; NodesManager.createCacheSet( $scope.node, disk.block_id, disk.partition_id); // Remove from available. var idx = $scope.available.indexOf(disk); $scope.available.splice(idx, 1); }; // Return true if a bcache can be created. $scope.canCreateBcache = function() { if($scope.isAvailableDisabled() || ! 
$scope.isSuperUser()) { return false; } var selected = $scope.getSelectedAvailable(); if(selected.length === 1) { var allowed = ( !$scope.hasUnmountedFilesystem(selected[0]) && selected[0].type !== "lvm-vg"); return allowed && $scope.cachesets.length > 0; } return false; }; // Enter bcache mode. $scope.createBcache = function() { if(!$scope.canCreateBcache()) { return; } $scope.availableMode = SELECTION_MODE.BCACHE; $scope.availableNew = { name: getNextName("bcache"), device: $scope.getSelectedAvailable()[0], cacheset: $scope.cachesets[0], cacheMode: "writeback", fstype: null, mountPoint: "" }; }; // Clear mount point when the fstype is changed. $scope.fstypeChanged = function(options) { if(options.fstype === null) { options.mountPoint = ""; } }; // Return true when the name of the new disk is invalid. $scope.isNewDiskNameInvalid = function() { if(!angular.isObject($scope.node) || !angular.isArray($scope.node.disks)) { return true; } if($scope.availableNew.name === "") { return true; } else { var i, j; for(i = 0; i < $scope.node.disks.length; i++) { var disk = $scope.node.disks[i]; if($scope.availableNew.name === disk.name) { return true; } if(angular.isArray(disk.partitions)) { for(j = 0; j < disk.partitions.length; j++) { var partition = disk.partitions[j]; if($scope.availableNew.name === partition.name) { return true; } } } } } return false; }; // Return true if bcache can be saved. $scope.createBcacheCanSave = function() { return ( !$scope.isNewDiskNameInvalid() && !$scope.isMountPointInvalid($scope.availableNew.mountPoint)); }; // Confirm and create the bcache device. $scope.availableConfirmCreateBcache = function() { if(!$scope.createBcacheCanSave()) { return; } // Create the bcache. var params = { name: $scope.availableNew.name, cache_set: $scope.availableNew.cacheset.cache_set_id, cache_mode: $scope.availableNew.cacheMode }; if($scope.availableNew.device.type === "partition") { params.partition_id = $scope.availableNew.device.partition_id; } else { params.block_id = $scope.availableNew.device.block_id; } if(angular.isString($scope.availableNew.fstype) && $scope.availableNew.fstype !== "") { params.fstype = $scope.availableNew.fstype; if($scope.availableNew.mountPoint !== "") { params.mount_point = $scope.availableNew.mountPoint; } } NodesManager.createBcache($scope.node, params); // Remove device from available. var idx = $scope.available.indexOf($scope.availableNew.device); $scope.available.splice(idx, 1); $scope.availableNew = {}; // Update the selection. $scope.updateAvailableSelection(true); }; // Return true if a RAID can be created. $scope.canCreateRAID = function() { if($scope.isAvailableDisabled() || !$scope.isSuperUser()) { return false; } var selected = $scope.getSelectedAvailable(); if(selected.length > 1) { var i; for(i = 0; i < selected.length; i++) { if($scope.hasUnmountedFilesystem(selected[i])) { return false; } else if(selected[i].type === "lvm-vg") { return false; } } return true; } return false; }; // Called to create a RAID. $scope.createRAID = function() { if(!$scope.canCreateRAID()) { return; } $scope.availableMode = SELECTION_MODE.RAID; $scope.availableNew = { name: getNextName("md"), devices: $scope.getSelectedAvailable(), mode: null, spares: [], fstype: null, mountPoint: "" }; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[0]; }; // Get the available RAID modes. 
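        // For example (illustrative), with three devices selected this
        // returns RAID 0, 1, 5 and 10 but not RAID 6, which needs four.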
$scope.getAvailableRAIDModes = function() { if(!angular.isObject($scope.availableNew) || !angular.isArray($scope.availableNew.devices)) { return []; } var modes = []; angular.forEach(RAID_MODES, function(mode) { if($scope.availableNew.devices.length >= mode.min_disks) { modes.push(mode); } }); return modes; }; // Return the total number of available spares for the current mode. $scope.getTotalNumberOfAvailableSpares = function() { var mode = $scope.availableNew.mode; if(angular.isUndefined(mode) || !mode.allows_spares) { return 0; } else { var diff = $scope.availableNew.devices.length - mode.min_disks; if(diff < 0) { diff = 0; } return diff; } }; // Return the number of remaining spares that can be selected. $scope.getNumberOfRemainingSpares = function() { var allowed = $scope.getTotalNumberOfAvailableSpares(); if(allowed <= 0) { return 0; } else { return allowed - $scope.availableNew.spares.length; } }; // Return true if the spares column should be shown. $scope.showSparesColumn = function() { return $scope.getTotalNumberOfAvailableSpares() > 0; }; // Called when the RAID mode is changed to reset the selected spares. $scope.RAIDModeChanged = function() { $scope.availableNew.spares = []; }; // Return true if the disk is an active RAID member. $scope.isActiveRAIDMember = function(disk) { if(!angular.isArray($scope.availableNew.spares)) { return true; } else { var idx = $scope.availableNew.spares.indexOf( getUniqueKey(disk)); return idx === -1; } }; // Return true if the disk is a spare RAID member. $scope.isSpareRAIDMember = function(disk) { return !$scope.isActiveRAIDMember(disk); }; // Set the disk as an active RAID member. $scope.setAsActiveRAIDMember = function(disk) { var idx = $scope.availableNew.spares.indexOf(getUniqueKey(disk)); if(idx > -1) { $scope.availableNew.spares.splice(idx, 1); } }; // Set the disk as a spare RAID member. $scope.setAsSpareRAIDMember = function(disk) { var key = getUniqueKey(disk); var idx = $scope.availableNew.spares.indexOf(key); if(idx === -1) { $scope.availableNew.spares.push(key); } }; // Return the size of the new RAID device. $scope.getNewRAIDSize = function() { if(angular.isUndefined($scope.availableNew.mode)) { return ""; } var calculateSize = $scope.availableNew.mode.calculateSize; if(!angular.isFunction(calculateSize)) { return ""; } // Get the number of disks and the minimum disk size in the RAID. var numDisks = ( $scope.availableNew.devices.length - $scope.availableNew.spares.length); var minSize = Number.MAX_VALUE; angular.forEach($scope.availableNew.devices, function(device) { // Get the size of the device. For a block device it will be // at available_size and for a partition it will be at size. var deviceSize = ( device.original.available_size || device.original.size); minSize = Math.min(minSize, deviceSize); }); // Calculate the new size. var size = calculateSize(minSize, numDisks); return ConverterService.bytesToUnits(size).string; }; // Return true if RAID can be saved. $scope.createRAIDCanSave = function() { return ( !$scope.isNewDiskNameInvalid() && !$scope.isMountPointInvalid($scope.availableNew.mountPoint)); }; // Confirm and create the RAID device. $scope.availableConfirmCreateRAID = function() { if(!$scope.createRAIDCanSave()) { return; } // Create the RAID. 
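            // The websocket call takes separate active and spare lists for
            // block devices and partitions, assembled below.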
            var params = {
                name: $scope.availableNew.name,
                level: $scope.availableNew.mode.level,
                block_devices: [],
                partitions: [],
                spare_devices: [],
                spare_partitions: []
            };
            angular.forEach($scope.availableNew.devices, function(device) {
                if($scope.isActiveRAIDMember(device)) {
                    if(device.type === "partition") {
                        params.partitions.push(device.partition_id);
                    } else {
                        params.block_devices.push(device.block_id);
                    }
                } else {
                    if(device.type === "partition") {
                        params.spare_partitions.push(device.partition_id);
                    } else {
                        params.spare_devices.push(device.block_id);
                    }
                }
            });
            if(angular.isString($scope.availableNew.fstype) &&
                $scope.availableNew.fstype !== "") {
                params.fstype = $scope.availableNew.fstype;
                if($scope.availableNew.mountPoint !== "") {
                    params.mount_point = $scope.availableNew.mountPoint;
                }
            }
            NodesManager.createRAID($scope.node, params);

            // Remove devices from available.
            angular.forEach($scope.availableNew.devices, function(device) {
                var idx = $scope.available.indexOf(device);
                $scope.available.splice(idx, 1);
            });
            $scope.availableNew = {};

            // Update the selection.
            $scope.updateAvailableSelection(true);
        };

        // Return true if a volume group can be created.
        $scope.canCreateVolumeGroup = function() {
            if($scope.isAvailableDisabled() || !$scope.isSuperUser()) {
                return false;
            }

            var selected = $scope.getSelectedAvailable();
            if(selected.length > 0) {
                var i;
                for(i = 0; i < selected.length; i++) {
                    if(selected[i].has_partitions) {
                        return false;
                    } else if($scope.hasUnmountedFilesystem(selected[i])) {
                        return false;
                    } else if(selected[i].type === "lvm-vg") {
                        return false;
                    }
                }
                return true;
            }
            return false;
        };

        // Called to create a volume group.
        $scope.createVolumeGroup = function() {
            if(!$scope.canCreateVolumeGroup()) {
                return;
            }
            $scope.availableMode = SELECTION_MODE.VOLUME_GROUP;
            $scope.availableNew = {
                name: getNextName("vg"),
                devices: $scope.getSelectedAvailable()
            };
        };

        // Return the size of the new volume group.
        $scope.getNewVolumeGroupSize = function() {
            var total = 0;
            angular.forEach($scope.availableNew.devices, function(device) {
                // Add available_size or size if available_size is not set.
                total += (
                    device.original.available_size ||
                    device.original.size);
            });
            return ConverterService.bytesToUnits(total).string;
        };

        // Return true if volume group can be saved.
        $scope.createVolumeGroupCanSave = function() {
            return !$scope.isNewDiskNameInvalid();
        };

        // Confirm and create the volume group device.
        $scope.availableConfirmCreateVolumeGroup = function() {
            if(!$scope.createVolumeGroupCanSave()) {
                return;
            }

            // Create the volume group.
            var params = {
                name: $scope.availableNew.name,
                block_devices: [],
                partitions: []
            };
            angular.forEach($scope.availableNew.devices, function(device) {
                if(device.type === "partition") {
                    params.partitions.push(device.partition_id);
                } else {
                    params.block_devices.push(device.block_id);
                }
            });
            NodesManager.createVolumeGroup($scope.node, params);

            // Remove devices from available.
            angular.forEach($scope.availableNew.devices, function(device) {
                var idx = $scope.available.indexOf(device);
                $scope.available.splice(idx, 1);
            });
            $scope.availableNew = {};

            // Update the selection.
            $scope.updateAvailableSelection(true);
        };

        // Return true if a logical volume can be added to disk.
        $scope.canAddLogicalVolume = function(disk) {
            if(disk.type !== "lvm-vg") {
                return false;
            } else if(disk.original.available_size < MIN_PARTITION_SIZE) {
                return false;
            } else {
                return true;
            }
        };

        // Enter logical volume mode.
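        // Logical volume names are always shown with the volume group name
        // as a prefix (e.g. "vg0-lv0"); the handlers below enforce it.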
$scope.availableLogicalVolume = function(disk) { $scope.availableMode = SELECTION_MODE.LOGICAL_VOLUME; // Set starting size to the maximum available space. var size_and_units = disk.available_size_human.split(" "); var namePrefix = disk.name + "-lv"; disk.$options = { name: getNextName(namePrefix), size: size_and_units[0], sizeUnits: size_and_units[1] }; }; // Return true if the name of the logical volume is invalid. $scope.isLogicalVolumeNameInvalid = function(disk) { if(!angular.isString(disk.$options.name)) { return false; } var startsWith = disk.$options.name.indexOf(disk.name + "-"); return ( startsWith !== 0 || disk.$options.name.length <= disk.name.length + 1 || isNameAlreadyInUse(disk.$options.name)); }; // Don't allow the name of the logical volume to remove the volume // group name. $scope.newLogicalVolumeNameChanged = function(disk) { if(!angular.isString(disk.$options.name)) { return; } var startsWith = disk.$options.name.indexOf(disk.name + "-"); if(startsWith !== 0) { disk.$options.name = disk.name + "-"; } }; // Return true if the logical volume size is invalid. $scope.isAddLogicalVolumeSizeInvalid = function(disk) { // Uses the same logic as the partition size checked. return $scope.isAddPartitionSizeInvalid(disk); }; // Confirm the logical volume creation. $scope.availableConfirmLogicalVolume = function(disk) { // Do nothing if not valid. if($scope.isLogicalVolumeNameInvalid(disk) || $scope.isAddLogicalVolumeSizeInvalid(disk) || $scope.isMountPointInvalid(disk.$options.mountPoint)) { return; } // Get the bytes to create the partition. var bytes = ConverterService.unitsToBytes( disk.$options.size, disk.$options.sizeUnits); // Accepting prefilled defaults means use whole disk (lp:1509535). var size_and_units = disk.original.available_size_human.split(" "); if(disk.$options.size === size_and_units[0] && disk.$options.sizeUnits === size_and_units[1]) { bytes = disk.original.available_size; } // Clamp to available space. if(bytes > disk.original.available_size) { bytes = disk.original.available_size; } // Remove the disk if it is going to use all the remaining space. var removeDisk = false; if(bytes === disk.original.available_size) { removeDisk = true; } // Remove the volume group name from the name. var name = disk.$options.name.slice(disk.name.length + 1); // Create the logical volume. var params = {}; if(angular.isString(disk.$options.fstype) && disk.$options.fstype !== "") { params.fstype = disk.$options.fstype; if(disk.$options.mountPoint !== "") { params.mount_point = disk.$options.mountPoint; } } NodesManager.createLogicalVolume( $scope.node, disk.block_id, name, bytes, params); // Remove the disk if needed. if(removeDisk) { var idx = $scope.available.indexOf(disk); $scope.available.splice(idx, 1); } $scope.updateAvailableSelection(true); }; // Return true when tags can be edited. $scope.canEditTags = function(disk) { return (disk.type !== "partition" && disk.type !== "lvm-vg" && !$scope.isAllStorageDisabled() && $scope.isSuperUser()); }; // Called to enter tag editing mode $scope.availableEditTags = function(disk) { disk.$options = { editingTags: true, tags: angular.copy(disk.tags) }; }; // Called to cancel editing tags. $scope.availableCancelTags = function(disk) { disk.$options = {}; }; // Called to save the tag changes. 
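// A note on the prefilled-size special case in
// availableConfirmLogicalVolume (above, lp:1509535): the displayed
// size is rounded for humans, so converting it back to bytes rarely
// reproduces the exact available_size. A self-contained sketch of the
// mismatch, assuming decimal units like the converter used here:
function exampleHumanSizeRoundTrip(bytes) {
    var shown = (bytes / 1e9).toFixed(1);   // e.g. "4.0" shown in the UI
    return parseFloat(shown) * 1e9;         // naive conversion back
}
// exampleHumanSizeRoundTrip(4002342912) returns 4000000000, not
// 4002342912, which is why the exact byte count is restored when the
// prefilled size is accepted unchanged.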
$scope.availableSaveTags = function(disk) {
    var tags = [];
    angular.forEach(disk.$options.tags, function(tag) {
        tags.push(tag.text);
    });
    NodesManager.updateDiskTags(
        $scope.node, disk.block_id, tags);
    disk.tags = disk.$options.tags;
    disk.$options = {};
};

// Returns true if storage cannot be edited.
// (it can't be changed when the node is in any state other
// than Ready or Allocated)
$scope.isAllStorageDisabled = function() {
    var authUser = UsersManager.getAuthUser();
    if(!angular.isObject(authUser) || !angular.isObject($scope.node) ||
        (!authUser.is_superuser &&
            authUser.username !== $scope.node.owner)) {
        return true;
    } else if(angular.isObject($scope.node) &&
        ["Ready", "Allocated"].indexOf(
            $scope.node.status) === -1) {
        // If the node is not ready or allocated, disable storage panel.
        return true;
    } else {
        // The node must be either Ready or Allocated. Enable it.
        return false;
    }
};

// Returns true if there are storage layout errors.
$scope.hasStorageLayoutIssues = function() {
    if(angular.isObject($scope.node)) {
        return $scope.node.storage_layout_issues.length > 0;
    }
    return false;
};
}]);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/node_events.js0000644000000000000000000000630313056115004026365 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE).
 *
 * MAAS Node Events Controller
 */

angular.module('MAAS').controller('NodeEventsController', [
    '$scope', '$rootScope', '$routeParams', 'NodesManager',
    'EventsManagerFactory', 'ManagerHelperService', 'ErrorService',
    function($scope, $rootScope, $routeParams, NodesManager,
        EventsManagerFactory, ManagerHelperService, ErrorService) {

        // Events manager that is loaded once the node is loaded.
        var eventsManager = null;

        // Set the title and page.
        $rootScope.title = "Loading...";
        $rootScope.page = "nodes";

        // Initial values.
        $scope.loaded = false;
        $scope.node = null;
        $scope.events = [];
        $scope.eventsLoaded = false;
        $scope.days = 30;

        // Called once the node is loaded.
        function nodeLoaded(node) {
            $scope.node = node;
            $scope.loaded = true;

            // Get the events manager and load it.
            eventsManager = EventsManagerFactory.getManager(node.id);
            $scope.events = eventsManager.getItems();
            $scope.days = eventsManager.getMaximumDays();
            eventsManager.loadItems().then(function() {
                $scope.eventsLoaded = true;
            });

            // Update the title when the fqdn of the node changes.
            $scope.$watch("node.fqdn", function() {
                $rootScope.title = $scope.node.fqdn + " - events";
            });
        }

        // Return the nice text for the given event.
        $scope.getEventText = function(event) {
            var text = event.type.description;
            if(angular.isString(event.description) &&
                event.description.length > 0) {
                text += " - " + event.description;
            }
            return text;
        };

        // Called to load more events.
        $scope.loadMore = function() {
            $scope.days += 30;
            eventsManager.loadMaximumDays($scope.days);
        };

        // Load nodes manager.
        ManagerHelperService.loadManager(NodesManager).then(function() {
            // If redirected from the NodeDetailsController then the node
            // will already be active. No need to set it active again.
            var activeNode = NodesManager.getActiveItem();
            if(angular.isObject(activeNode) &&
                activeNode.system_id === $routeParams.system_id) {
                nodeLoaded(activeNode);
            } else {
                NodesManager.setActiveItem(
                    $routeParams.system_id).then(function(node) {
                        nodeLoaded(node);
                    }, function(error) {
                        ErrorService.raiseError(error);
                    });
            }
        });

        // Destroy the events manager when the scope is destroyed.
// This is so the client will not receive any more notifications
// about events for this node.
$scope.$on("$destroy", function() {
    if(angular.isObject(eventsManager)) {
        eventsManager.destroy();
    }
});
}]);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/node_result.js0000644000000000000000000000555313056115004026405 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE).
 *
 * MAAS Commissioning Script Controller
 */

angular.module('MAAS').controller('NodeResultController', [
    '$scope', '$rootScope', '$routeParams', '$location', 'NodesManager',
    'ManagerHelperService', 'ErrorService', function(
        $scope, $rootScope, $routeParams, $location, NodesManager,
        ManagerHelperService, ErrorService) {

        // Set the title and page.
        $rootScope.title = "Loading...";
        $rootScope.page = "nodes";

        // Initial values.
        $scope.loaded = false;
        $scope.node = null;
        $scope.filename = $routeParams.filename;

        // Called once the node is loaded.
        function nodeLoaded(node) {
            $scope.node = node;
            $scope.loaded = true;

            // Update the title when the fqdn of the node changes.
            $scope.$watch("node.fqdn", function() {
                $rootScope.title = $scope.node.fqdn + " - " +
                    $scope.filename;
            });
        }

        // Returns the result data for the requested filename.
        $scope.getResultData = function() {
            if(!angular.isObject($scope.node)) {
                return "";
            }
            var i;
            for(i = 0; i < $scope.node.commissioning_results.length; i++) {
                var result = $scope.node.commissioning_results[i];
                if(result.name === $scope.filename) {
                    // <pre> tags require the content to start on a
                    // newline.
                    var data = result.data.trim();
                    if(data.length === 0) {
                        return "\nEmpty file";
                    } else {
                        return "\n" + result.data;
                    }
                }
            }
            // If we made it this far then the filename from the
            // routeParams was incorrect. Redirect the user back to the
            // node details page.
            $location.path('/node/' + $scope.node.system_id);
            return "";
        };

        // Load nodes manager.
        ManagerHelperService.loadManager(NodesManager).then(function() {
            // If redirected from the NodeDetailsController then the node
            // will already be active. No need to set it active again.
            var activeNode = NodesManager.getActiveItem();
            if(angular.isObject(activeNode) &&
                activeNode.system_id === $routeParams.system_id) {
                nodeLoaded(activeNode);
            } else {
                NodesManager.setActiveItem(
                    $routeParams.system_id).then(function(node) {
                        nodeLoaded(node);
                    }, function(error) {
                        ErrorService.raiseError(error);
                    });
            }
        });
    }]);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/nodes_list.js0000644000000000000000000005511313056115004026222 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE).
 *
 * MAAS Nodes List Controller
 */

angular.module('MAAS').controller('NodesListController', [
    '$scope', '$rootScope', '$routeParams', 'NodesManager',
    'DevicesManager', 'GeneralManager', 'ManagerHelperService',
    'SearchService', 'ZonesManager', 'UsersManager',
    function($scope, $rootScope, $routeParams, NodesManager,
        DevicesManager, GeneralManager, ManagerHelperService,
        SearchService, ZonesManager, UsersManager) {

        // Mapping of device.ip_assignment to viewable text.
        var DEVICE_IP_ASSIGNMENT = {
            external: "External",
            dynamic: "Dynamic",
            "static": "Static"
        };

        // Set title and page.
        $rootScope.title = "Nodes";
        $rootScope.page = "nodes";

        // Set initial values.
$scope.nodes = NodesManager.getItems(); $scope.zones = ZonesManager.getItems(); $scope.devices = DevicesManager.getItems(); $scope.currentpage = "nodes"; $scope.osinfo = GeneralManager.getData("osinfo"); $scope.loading = true; $scope.tabs = {}; // Nodes tab. $scope.tabs.nodes = {}; $scope.tabs.nodes.pagetitle = "Nodes"; $scope.tabs.nodes.currentpage = "nodes"; $scope.tabs.nodes.manager = NodesManager; $scope.tabs.nodes.previous_search = ""; $scope.tabs.nodes.search = ""; $scope.tabs.nodes.searchValid = true; $scope.tabs.nodes.selectedItems = NodesManager.getSelectedItems(); $scope.tabs.nodes.filtered_items = []; $scope.tabs.nodes.predicate = 'fqdn'; $scope.tabs.nodes.allViewableChecked = false; $scope.tabs.nodes.metadata = NodesManager.getMetadata(); $scope.tabs.nodes.filters = SearchService.getEmptyFilter(); $scope.tabs.nodes.column = 'fqdn'; $scope.tabs.nodes.actionOption = null; $scope.tabs.nodes.takeActionOptions = GeneralManager.getData( "node_actions"); $scope.tabs.nodes.actionErrorCount = 0; $scope.tabs.nodes.actionProgress = { total: 0, completed: 0, errors: {} }; $scope.tabs.nodes.osSelection = { osystem: null, release: null, hwe_kernel: null }; $scope.tabs.nodes.zoneSelection = null; $scope.tabs.nodes.commissionOptions = { enableSSH: false, skipNetworking: false, skipStorage: false }; // Device tab. $scope.tabs.devices = {}; $scope.tabs.devices.pagetitle = "Devices"; $scope.tabs.devices.currentpage = "devices"; $scope.tabs.devices.manager = DevicesManager; $scope.tabs.devices.previous_search = ""; $scope.tabs.devices.search = ""; $scope.tabs.devices.searchValid = true; $scope.tabs.devices.selectedItems = DevicesManager.getSelectedItems(); $scope.tabs.devices.filtered_items = []; $scope.tabs.devices.predicate = 'fqdn'; $scope.tabs.devices.allViewableChecked = false; $scope.tabs.devices.metadata = DevicesManager.getMetadata(); $scope.tabs.devices.filters = SearchService.getEmptyFilter(); $scope.tabs.devices.column = 'fqdn'; $scope.tabs.devices.actionOption = null; $scope.tabs.devices.takeActionOptions = GeneralManager.getData( "device_actions"); $scope.tabs.devices.actionErrorCount = 0; $scope.tabs.devices.actionProgress = { total: 0, completed: 0, errors: {} }; $scope.tabs.devices.zoneSelection = null; // Options for add hardware dropdown. $scope.addHardwareOption = null; $scope.addHardwareOptions = [ { name: "machine", title: "Machine" }, { name: "chassis", title: "Chassis" } ]; // This will hold the AddHardwareController once it is initialized. // The controller will set this variable as it's always a child of // this scope. $scope.addHardwareScope = null; // This will hold the AddDeviceController once it is initialized. // The controller will set this variable as it's always a child of // this scope. $scope.addDeviceScope = null; // When the addHardwareScope is hidden it will emit this event. We // clear the call to action button, so it can be used again. $scope.$on("addHardwareHidden", function() { $scope.addHardwareOption = null; }); // Return true if the tab is in viewing selected mode. function isViewingSelected(tab) { var search = $scope.tabs[tab].search.toLowerCase(); return search === "in:(selected)" || search === "in:selected"; } // Sets the search bar to only show selected. function enterViewSelected(tab) { $scope.tabs[tab].previous_search = $scope.tabs[tab].search; $scope.tabs[tab].search = "in:(Selected)"; } // Clear search bar from viewing selected. 
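// The "view selected" mode (enterViewSelected, above) is driven purely
// by the search string: the current query is stashed and replaced with
// the "in:(Selected)" filter, then restored on the way out. A sketch of
// the round trip (not invoked anywhere; tab is "nodes" or "devices"):
function exampleViewSelectedRoundTrip(tab) {
    enterViewSelected(tab);
    // The comparison in isViewingSelected is case-insensitive, so the
    // capitalised "in:(Selected)" set above still matches.
    var showing = isViewingSelected(tab);    // true
    leaveViewSelected(tab);                  // restores previous_search
    return showing;
}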
function leaveViewSelected(tab) { if(isViewingSelected(tab)) { $scope.tabs[tab].search = $scope.tabs[tab].previous_search; $scope.updateFilters(tab); } } // Called to update `allViewableChecked`. function updateAllViewableChecked(tab) { // Not checked when the filtered nodes are empty. if($scope.tabs[tab].filtered_items.length === 0) { $scope.tabs[tab].allViewableChecked = false; return; } // Loop through all filtered nodes and see if all are checked. var i; for(i = 0; i < $scope.tabs[tab].filtered_items.length; i++) { if(!$scope.tabs[tab].filtered_items[i].$selected) { $scope.tabs[tab].allViewableChecked = false; return; } } $scope.tabs[tab].allViewableChecked = true; } function clearAction(tab) { resetActionProgress(tab); leaveViewSelected(tab); $scope.tabs[tab].actionOption = null; $scope.tabs[tab].zoneSelection = null; if(tab === "nodes") { // Possible for this to be called before the osSelect // direction is initialized. In that case it has not // created the $reset function on the model object. if(angular.isFunction( $scope.tabs[tab].osSelection.$reset)) { $scope.tabs[tab].osSelection.$reset(); } $scope.tabs[tab].commissionOptions.enableSSH = false; $scope.tabs[tab].commissionOptions.skipNetworking = false; $scope.tabs[tab].commissionOptions.skipStorage = false; } } // Clear the action if required. function shouldClearAction(tab) { if($scope.tabs[tab].selectedItems.length === 0) { clearAction(tab); } if($scope.tabs[tab].actionOption && !isViewingSelected(tab)) { $scope.tabs[tab].actionOption = null; } } // Called when the filtered_items are updated. Checks if the // filtered_items are empty and if the search still matches the // previous search. This will reset the search when no nodes match // the current filter. function removeEmptyFilter(tab) { if($scope.tabs[tab].filtered_items.length === 0 && $scope.tabs[tab].search !== "" && $scope.tabs[tab].search === $scope.tabs[tab].previous_search) { $scope.tabs[tab].search = ""; $scope.updateFilters(tab); } } // Update the number of selected items which have an error based on the // current selected action. function updateActionErrorCount(tab) { var i; $scope.tabs[tab].actionErrorCount = 0; for(i = 0; i < $scope.tabs[tab].selectedItems.length; i++) { var supported = $scope.supportsAction( $scope.tabs[tab].selectedItems[i], tab); if(!supported) { $scope.tabs[tab].actionErrorCount += 1; } $scope.tabs[tab].selectedItems[i].action_failed = false; } } // Reset actionProgress on tab to zero. function resetActionProgress(tab) { var progress = $scope.tabs[tab].actionProgress; progress.completed = progress.total = 0; progress.errors = {}; } // Add error to action progress and group error messages by nodes. function addErrorToActionProgress(tab, error, node) { var progress = $scope.tabs[tab].actionProgress; progress.completed += 1; var nodes = progress.errors[error]; if(angular.isUndefined(nodes)) { progress.errors[error] = [node]; } else { nodes.push(node); } } // After an action has been performed check if we can leave all nodes // selected or if an error occured and we should only show the failed // nodes. function updateSelectedItems(tab) { if(!$scope.hasActionsFailed(tab)) { if(!$scope.hasActionsInProgress(tab)) { clearAction(tab); } return; } angular.forEach($scope.tabs[tab].manager.getItems(), function(node) { if(node.action_failed === false) { $scope.tabs[tab].manager.unselectItem(node.system_id); } }); } // Toggles between the current tab. 
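// addErrorToActionProgress (above) buckets failures by message so the
// UI can show each distinct error once, together with every node that
// hit it. A standalone sketch of the same accumulation over invented
// result records:
function exampleGroupErrors(results) {
    var errors = {};
    results.forEach(function(result) {
        if(angular.isUndefined(errors[result.error])) {
            errors[result.error] = [result.node];
        } else {
            errors[result.error].push(result.node);
        }
    });
    return errors;
}
// exampleGroupErrors([
//     {error: "timeout", node: "a"},
//     {error: "timeout", node: "b"}
// ]) yields {timeout: ["a", "b"]}.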
$scope.toggleTab = function(tab) { $rootScope.title = $scope.tabs[tab].pagetitle; $scope.currentpage = tab; }; // Clear the search bar. $scope.clearSearch = function(tab) { $scope.tabs[tab].search = ""; $scope.updateFilters(tab); }; // Mark a node as selected or unselected. $scope.toggleChecked = function(node, tab) { if($scope.tabs[tab].manager.isSelected(node.system_id)) { $scope.tabs[tab].manager.unselectItem(node.system_id); } else { $scope.tabs[tab].manager.selectItem(node.system_id); } updateAllViewableChecked(tab); updateActionErrorCount(tab); shouldClearAction(tab); }; // Select all viewable nodes or deselect all viewable nodes. $scope.toggleCheckAll = function(tab) { if($scope.tabs[tab].allViewableChecked) { angular.forEach( $scope.tabs[tab].filtered_items, function(node) { $scope.tabs[tab].manager.unselectItem(node.system_id); }); } else { angular.forEach( $scope.tabs[tab].filtered_items, function(node) { $scope.tabs[tab].manager.selectItem(node.system_id); }); } updateAllViewableChecked(tab); updateActionErrorCount(tab); shouldClearAction(tab); }; // When the filtered nodes change update if all check buttons // should be checked or not. $scope.$watchCollection("tabs.nodes.filtered_items", function() { updateAllViewableChecked("nodes"); removeEmptyFilter("nodes"); }); $scope.$watchCollection("tabs.devices.filtered_items", function() { updateAllViewableChecked("devices"); removeEmptyFilter("devices"); }); // Shows the current selection. $scope.showSelected = function(tab) { enterViewSelected(tab); $scope.updateFilters(tab); }; // Adds or removes a filter to the search. $scope.toggleFilter = function(type, value, tab) { // Don't allow a filter to be changed when an action is // in progress. if(angular.isObject($scope.tabs[tab].actionOption)) { return; } $scope.tabs[tab].filters = SearchService.toggleFilter( $scope.tabs[tab].filters, type, value, true); $scope.tabs[tab].search = SearchService.filtersToString( $scope.tabs[tab].filters); }; // Return True if the filter is active. $scope.isFilterActive = function(type, value, tab) { return SearchService.isFilterActive( $scope.tabs[tab].filters, type, value, true); }; // Update the filters object when the search bar is updated. $scope.updateFilters = function(tab) { var filters = SearchService.getCurrentFilters( $scope.tabs[tab].search); if(filters === null) { $scope.tabs[tab].filters = SearchService.getEmptyFilter(); $scope.tabs[tab].searchValid = false; } else { $scope.tabs[tab].filters = filters; $scope.tabs[tab].searchValid = true; } shouldClearAction(tab); }; // Sorts the table by predicate. $scope.sortTable = function(predicate, tab) { $scope.tabs[tab].predicate = predicate; $scope.tabs[tab].reverse = !$scope.tabs[tab].reverse; }; // Sets the viewable column or sorts. $scope.selectColumnOrSort = function(predicate, tab) { if($scope.tabs[tab].column !== predicate) { $scope.tabs[tab].column = predicate; } else { $scope.sortTable(predicate, tab); } }; // Return True if the node supports the action. $scope.supportsAction = function(node, tab) { if(!$scope.tabs[tab].actionOption) { return true; } return node.actions.indexOf( $scope.tabs[tab].actionOption.name) >= 0; }; // Called when the action option gets changed. 
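// The search bar and the filter chips stay in sync by round-tripping
// through SearchService (updateFilters/toggleFilter, above): the string
// is parsed into a filters object, mutated, then serialised back. A
// usage sketch; the example query assumes the filter grammar used
// elsewhere in this controller:
function exampleFilterRoundTrip() {
    var filters = SearchService.getCurrentFilters("status:(ready)");
    if(filters === null) {
        // null means the query failed to parse; the controller then
        // falls back to an empty filter and marks searchValid false.
        return SearchService.getEmptyFilter();
    }
    filters = SearchService.toggleFilter(filters, "zone", "default", true);
    return SearchService.filtersToString(filters);
}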
$scope.actionOptionSelected = function(tab) { updateActionErrorCount(tab); enterViewSelected(tab); var actionOption = $scope.tabs[tab].actionOption; if(angular.isObject(actionOption) && actionOption.name === "deploy") { GeneralManager.startPolling("osinfo"); } else { GeneralManager.stopPolling("osinfo"); } // Hide the add hardware/device section. if (tab === 'nodes') { if(angular.isObject($scope.addHardwareScope)) { $scope.addHardwareScope.hide(); } } else if(tab === 'devices') { if(angular.isObject($scope.addDeviceScope)) { $scope.addDeviceScope.hide(); } } }; // Return True if there is an action error. $scope.isActionError = function(tab) { if(angular.isObject($scope.tabs[tab].actionOption) && $scope.tabs[tab].actionOption.name === "deploy" && $scope.tabs[tab].actionErrorCount === 0 && ($scope.osinfo.osystems.length === 0 || UsersManager.getSSHKeyCount() === 0)) { return true; } return $scope.tabs[tab].actionErrorCount !== 0; }; // Return True if unable to deploy because of missing images. $scope.isDeployError = function(tab) { if($scope.tabs[tab].actionErrorCount !== 0) { return false; } if(angular.isObject($scope.tabs[tab].actionOption) && $scope.tabs[tab].actionOption.name === "deploy" && $scope.osinfo.osystems.length === 0) { return true; } return false; }; // Return True if unable to deploy because of missing ssh keys. $scope.isSSHKeyError = function(tab) { if($scope.tabs[tab].actionErrorCount !== 0) { return false; } if(angular.isObject($scope.tabs[tab].actionOption) && $scope.tabs[tab].actionOption.name === "deploy" && UsersManager.getSSHKeyCount() === 0) { return true; } return false; }; // Called when the current action is cancelled. $scope.actionCancel = function(tab) { resetActionProgress(tab); leaveViewSelected(tab); $scope.tabs[tab].actionOption = null; GeneralManager.stopPolling("osinfo"); }; // Perform the action on all nodes. $scope.actionGo = function(tab) { var extra = {}; // Set deploy parameters if a deploy or set zone action. if($scope.tabs[tab].actionOption.name === "deploy" && angular.isString($scope.tabs[tab].osSelection.osystem) && angular.isString($scope.tabs[tab].osSelection.release)) { // Set extra. UI side the release is structured os/release, but // when it is sent over the websocket only the "release" is // sent. extra.osystem = $scope.tabs[tab].osSelection.osystem; var release = $scope.tabs[tab].osSelection.release; release = release.split("/"); release = release[release.length-1]; extra.distro_series = release; // hwe_kernel is optional so only include it if its specified if(angular.isString($scope.tabs[tab].osSelection.hwe_kernel) && ($scope.tabs[tab].osSelection.hwe_kernel.indexOf('hwe-') >= 0)) { extra.hwe_kernel = $scope.tabs[tab].osSelection.hwe_kernel; } } else if($scope.tabs[tab].actionOption.name === "set-zone" && angular.isNumber($scope.tabs[tab].zoneSelection.id)) { // Set the zone parameter. extra.zone_id = $scope.tabs[tab].zoneSelection.id; } else if($scope.tabs[tab].actionOption.name === "commission") { // Set the commission options. extra.enable_ssh = ( $scope.tabs[tab].commissionOptions.enableSSH); extra.skip_networking = ( $scope.tabs[tab].commissionOptions.skipNetworking); extra.skip_storage = ( $scope.tabs[tab].commissionOptions.skipStorage); } // Setup actionProgress. resetActionProgress(tab); $scope.tabs[tab].actionProgress.total = $scope.tabs[tab].selectedItems.length; // Perform the action on all selected items. 
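// The release picker models its value as "osystem/series" while only
// the bare series is sent over the websocket, hence the split above.
// A self-contained sketch of that normalisation:
function exampleExtractSeries(release) {
    var parts = release.split("/");
    return parts[parts.length - 1];
}
// exampleExtractSeries("ubuntu/trusty") === "trusty", and a bare
// "trusty" passes through unchanged.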
angular.forEach($scope.tabs[tab].selectedItems, function(node) { $scope.tabs[tab].manager.performAction( node, $scope.tabs[tab].actionOption.name, extra).then(function() { $scope.tabs[tab].actionProgress.completed += 1; node.action_failed = false; updateSelectedItems(tab); }, function(error) { addErrorToActionProgress(tab, error, node); node.action_failed = true; updateSelectedItems(tab); }); }); }; // Returns true when actions are being performed. $scope.hasActionsInProgress = function(tab) { var progress = $scope.tabs[tab].actionProgress; return progress.total > 0 && progress.completed !== progress.total; }; // Returns true if any of the actions have failed. $scope.hasActionsFailed = function(tab) { return Object.keys( $scope.tabs[tab].actionProgress.errors).length > 0; }; // Called to when the addHardwareOption has changed. $scope.addHardwareOptionChanged = function() { if($scope.addHardwareOption) { $scope.addHardwareScope.show( $scope.addHardwareOption.name); } }; // Called when the add device button is pressed. $scope.addDevice = function() { $scope.addDeviceScope.show(); }; // Called when the cancel add device button is pressed. $scope.cancelAddDevice = function() { $scope.addDeviceScope.cancel(); }; // Get the display text for device ip assignment type. $scope.getDeviceIPAssignment = function(ipAssignment) { return DEVICE_IP_ASSIGNMENT[ipAssignment]; }; // Load NodesManager, DevicesManager, GeneralManager and ZonesManager. ManagerHelperService.loadManagers( [NodesManager, DevicesManager, GeneralManager, ZonesManager, UsersManager]).then( function() { $scope.loading = false; }); // Stop polling and save the current filter when the scope is destroyed. $scope.$on("$destroy", function() { GeneralManager.stopPolling("osinfo"); SearchService.storeFilters("nodes", $scope.tabs.nodes.filters); SearchService.storeFilters("devices", $scope.tabs.devices.filters); }); // Restore the filters if any saved. var nodesFilter = SearchService.retrieveFilters("nodes"); if(angular.isObject(nodesFilter)) { $scope.tabs.nodes.search = SearchService.filtersToString( nodesFilter); $scope.updateFilters("nodes"); } var devicesFilter = SearchService.retrieveFilters("devices"); if(angular.isObject(devicesFilter)) { $scope.tabs.devices.search = SearchService.filtersToString( devicesFilter); $scope.updateFilters("devices"); } // Switch to the specified tab, if specified. if($routeParams.tab === "nodes" || $routeParams.tab === "devices") { $scope.toggleTab($routeParams.tab); } // Set the query if the present in $routeParams. var query = $routeParams.query; if(angular.isString(query)) { $scope.tabs[$scope.currentpage].search = query; $scope.updateFilters($scope.currentpage); } }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/subnet_details.js0000644000000000000000000000460113056115004027060 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Subnet Details Controller */ angular.module('MAAS').controller('SubnetDetailsController', [ '$scope', '$rootScope', '$routeParams', '$location', 'SubnetsManager', 'ManagerHelperService', 'ErrorService', function( $scope, $rootScope, $routeParams, $location, SubnetsManager, ManagerHelperService, ErrorService) { // Set title and page. $rootScope.title = "Loading..."; // Note: this value must match the top-level tab, in order for // highlighting to occur properly. $rootScope.page = "subnets"; // Initial values. 
$scope.loaded = false;
$scope.subnet = null;

// Updates the page title.
function updateTitle() {
    var subnet = $scope.subnet;
    if(subnet && subnet.cidr) {
        $rootScope.title = subnet.cidr;
        if(subnet.name && subnet.cidr !== subnet.name) {
            $rootScope.title += " (" + subnet.name + ")";
        }
    }
}

// Called when the subnet has been loaded.
function subnetLoaded(subnet) {
    $scope.subnet = subnet;
    $scope.loaded = true;
    updateTitle();
}

// Load all the required managers.
ManagerHelperService.loadManagers([
    SubnetsManager
]).then(function() {
    // Possibly redirected from another controller that already had
    // this subnet set to active. Only call setActiveItem if not
    // already the activeItem.
    var activeSubnet = SubnetsManager.getActiveItem();
    var requestedSubnet = parseInt($routeParams.subnet_id, 10);
    if(isNaN(requestedSubnet)) {
        ErrorService.raiseError("Invalid subnet identifier.");
    } else if(angular.isObject(activeSubnet) &&
        activeSubnet.id === requestedSubnet) {
        subnetLoaded(activeSubnet);
    } else {
        SubnetsManager.setActiveItem(
            requestedSubnet).then(function(node) {
                subnetLoaded(node);
            }, function(error) {
                ErrorService.raiseError(error);
            });
    }
});
}]);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/subnets_list.js0000644000000000000000000002017113056115004026571 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE).
 *
 * MAAS Subnets List Controller
 */

angular.module('MAAS').controller('SubnetsListController', [
    '$scope', '$rootScope', '$routeParams', '$filter', 'SubnetsManager',
    'FabricsManager', 'SpacesManager', 'VLANsManager',
    'ManagerHelperService',
    function($scope, $rootScope, $routeParams, $filter, SubnetsManager,
        FabricsManager, SpacesManager, VLANsManager, ManagerHelperService) {

        // Load the filters that are used inside the controller.
        var filterByVLAN = $filter('filterByVLAN');
        var filterByFabric = $filter('filterByFabric');
        var filterBySpace = $filter('filterBySpace');

        // Set title and page.
        $rootScope.title = "Fabrics";
        $rootScope.page = "subnets";

        // Set initial values.
        $scope.subnets = SubnetsManager.getItems();
        $scope.fabrics = FabricsManager.getItems();
        $scope.spaces = SpacesManager.getItems();
        $scope.vlans = VLANsManager.getItems();
        $scope.currentpage = "fabrics";
        $scope.loading = true;

        $scope.tabs = {};
        // Fabrics tab.
        $scope.tabs.fabrics = {};
        $scope.tabs.fabrics.pagetitle = "Fabrics";
        $scope.tabs.fabrics.currentpage = "fabrics";
        $scope.tabs.fabrics.data = [];

        // Spaces tab.
        $scope.tabs.spaces = {};
        $scope.tabs.spaces.pagetitle = "Spaces";
        $scope.tabs.spaces.currentpage = "spaces";
        $scope.tabs.spaces.data = [];

        // Update the data that is displayed on the fabrics tab.
        function updateFabricsData() {
            var data = [];
            angular.forEach($scope.fabrics, function(fabric) {
                var rows = [];
                var vlans = filterByFabric($scope.vlans, fabric);
                angular.forEach(vlans, function(vlan) {
                    var subnets = filterByVLAN($scope.subnets, vlan);
                    if(subnets.length > 0) {
                        angular.forEach(subnets, function(subnet) {
                            var space = SpacesManager.getItemFromList(
                                subnet.space);
                            var row = {
                                vlan: vlan,
                                space: space,
                                subnet: subnet
                            };
                            rows.push(row);
                        });
                    } else {
                        rows.push({
                            vlan: vlan,
                            space: null,
                            subnet: null
                        });
                    }
                });
                data.push({
                    fabric: fabric,
                    rows: rows
                });
            });
            $scope.tabs.fabrics.data = data;
        }

        // Update the data that is displayed on the spaces tab.
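// updateFabricsData (above) flattens the fabric -> VLAN -> subnet
// hierarchy into one row per subnet, plus a placeholder row for a VLAN
// with no subnets, so the template can render a flat table. With a
// single fabric carrying one untagged VLAN and one subnet, the result
// is shaped like this (objects abbreviated, values invented):
var EXAMPLE_FABRICS_TAB_DATA = [{
    fabric: {name: "fabric-0"},
    rows: [{
        vlan: {vid: 0},
        space: {name: "space-0"},
        subnet: {cidr: "192.168.1.0/24"}
    }]
}];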
function updateSpacesData() {
    var data = [];
    angular.forEach($scope.spaces, function(space) {
        var rows = [];
        var subnets = filterBySpace($scope.subnets, space);
        angular.forEach(subnets, function(subnet) {
            var vlan = VLANsManager.getItemFromList(subnet.vlan);
            var fabric = FabricsManager.getItemFromList(vlan.fabric);
            var row = {
                fabric: fabric,
                vlan: vlan,
                subnet: subnet
            };
            rows.push(row);
        });
        data.push({
            space: space,
            rows: rows
        });
    });
    $scope.tabs.spaces.data = data;
}

// Return the display name for the VLAN.
function getVLANName(vlan) {
    var name = vlan.vid;
    if(vlan.vid === 0) {
        name = "untagged";
    } else if(angular.isString(vlan.name) && vlan.name !== "") {
        name += " (" + vlan.name + ")";
    }
    return name;
}

// Toggles between the current tab.
$scope.toggleTab = function(tab) {
    $rootScope.title = $scope.tabs[tab].pagetitle;
    $scope.currentpage = tab;
};

// Get the name of the fabric. Will return empty if the previous
// row already included the same fabric.
$scope.getFabricName = function(row, sortedData) {
    if(!angular.isObject(row.fabric)) {
        return "";
    }
    var idx = sortedData.indexOf(row);
    if(idx === 0) {
        return row.fabric.name;
    } else {
        var prevRow = sortedData[idx - 1];
        if(prevRow.fabric === row.fabric) {
            return "";
        } else {
            return row.fabric.name;
        }
    }
};

// Get the name of the VLAN. Will return empty if the previous
// row already included the same VLAN unless the fabric is different.
$scope.getVLANName = function(row, sortedData) {
    if(!angular.isObject(row.vlan)) {
        return "";
    }
    var idx = sortedData.indexOf(row);
    if(idx === 0) {
        return getVLANName(row.vlan);
    } else {
        var prevRow = sortedData[idx - 1];
        var differentFabric = false;
        if(angular.isObject(row.fabric) &&
            angular.isObject(prevRow.fabric)) {
            differentFabric = prevRow.fabric !== row.fabric;
        }
        if(prevRow.vlan === row.vlan && !differentFabric) {
            return "";
        } else {
            return getVLANName(row.vlan);
        }
    }
};

// Get the name of the space. Will return empty if the previous
// row already included the same space unless the vlan is different.
$scope.getSpaceName = function(row, sortedData) {
    if(!angular.isObject(row.space)) {
        return "";
    }
    var idx = sortedData.indexOf(row);
    if(idx === 0) {
        return row.space.name;
    } else {
        var prevRow = sortedData[idx - 1];
        if(prevRow.vlan === row.vlan && prevRow.space === row.space) {
            return "";
        } else {
            return row.space.name;
        }
    }
};

// Return the name of the subnet. Will include the name of the subnet
// in '(', ')' if it exists and is not the same as the cidr.
$scope.getSubnetName = function(subnet) {
    if(!angular.isObject(subnet)) {
        return "";
    }
    var name = subnet.cidr;
    if(angular.isString(subnet.name) &&
        subnet.name !== "" &&
        subnet.name !== subnet.cidr) {
        name += " (" + subnet.name + ")";
    }
    return name;
};

ManagerHelperService.loadManagers([
    SubnetsManager, FabricsManager, SpacesManager, VLANsManager]).then(
    function() {
        $scope.loading = false;

        // Fabrics
        $scope.$watchCollection("fabrics", updateFabricsData);
        $scope.$watchCollection("vlans", updateFabricsData);
        $scope.$watchCollection("subnets", updateFabricsData);
        $scope.$watchCollection("spaces", updateFabricsData);

        // Spaces
        $scope.$watchCollection("fabrics", updateSpacesData);
        $scope.$watchCollection("vlans", updateSpacesData);
        $scope.$watchCollection("subnets", updateSpacesData);
        $scope.$watchCollection("spaces", updateSpacesData);
    });

// Switch to the specified tab, if specified.
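// getVLANName (above) gives the untagged VLAN a special label and
// annotates named VLANs. A small self-contained check documenting the
// expected outputs:
function exampleVLANNames() {
    return [
        getVLANName({vid: 0}),               // "untagged"
        getVLANName({vid: 10, name: ""}),    // 10
        getVLANName({vid: 10, name: "dmz"})  // "10 (dmz)"
    ];
}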
if($routeParams.tab === "fabrics" || $routeParams.tab === "spaces") { $scope.toggleTab($routeParams.tab); } }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/0000755000000000000000000000000013056115004024656 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_add_device.js0000644000000000000000000010005413056115004030322 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for AddDeviceController. */ describe("AddDeviceController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $q = $injector.get("$q"); })); // Load the required dependencies for the AddDeviceController // and mock the websocket connection. var ClustersManager, DevicesManager, ManagerHelperService; var ValidationService, RegionConnection, webSocket; beforeEach(inject(function($injector) { ClustersManager = $injector.get("ClustersManager"); DevicesManager = $injector.get("DevicesManager"); ManagerHelperService = $injector.get("ManagerHelperService"); ValidationService = $injector.get("ValidationService"); RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Create the parent scope and the scope for the controller. var parentScope, $scope; beforeEach(function() { parentScope = $rootScope.$new(); parentScope.addDeviceScope = null; $scope = parentScope.$new(); }); // Makes the AddDeviceController function makeController() { // Start the connection so a valid websocket is created in the // RegionConnection. RegionConnection.connect(""); return $controller("AddDeviceController", { $scope: $scope, ClustersManager: ClustersManager, DevicesManager: DevicesManager, ValidationService: ValidationService, ManagerHelperService: ManagerHelperService }); } // Generating random networks is difficult, so we just use an array // of random networks and select one from it. var networks = [ { ip: "192.168.1.2", network: "192.168.1.0/24", subnet_mask: "255.255.255.0", broadcast_ip: "192.168.1.255", router_ip: "192.168.1.1", static_range: { low: "192.168.1.10", high: "192.168.1.149" }, dynamic_range: { low: "192.168.1.150", high: "192.168.1.254" } }, { ip: "192.168.2.2", network: "192.168.2.0/24", subnet_mask: "255.255.255.0", broadcast_ip: "192.168.2.255", router_ip: "192.168.2.1", static_range: { low: "192.168.2.10", high: "192.168.2.149" }, dynamic_range: { low: "192.168.2.150", high: "192.168.2.254" } }, { ip: "172.16.1.2", network: "172.16.0.0/16", subnet_mask: "255.255.0.0", broadcast_ip: "172.16.255.255", router_ip: "172.16.1.1", static_range: { low: "172.16.2.1", high: "172.16.3.254" }, dynamic_range: { low: "172.16.4.1", high: "172.16.6.254" } }, { ip: "172.17.1.2", network: "172.17.0.0/16", subnet_mask: "255.255.0.0", broadcast_ip: "172.17.255.255", router_ip: "172.17.1.1", static_range: { low: "172.17.2.1", high: "172.17.3.254" }, dynamic_range: { low: "172.17.4.1", high: "172.17.6.254" } } ]; var _nextNetwork = 0; beforeEach(function() { // Reset the next network before each test. _nextNetwork = 0; }); // Make an unmanaged cluster interface. 
var _nicId = 0; function makeClusterInterface() { if(_nextNetwork >= networks.length) { throw new Error("Out of fake networks."); } var nic = networks[_nextNetwork++]; nic.id = _nicId++; nic.management = 0; return nic; } // Make a managed cluster interface. function makeManagedClusterInterface() { var nic = makeClusterInterface(); nic.management = 2; return nic; } // Make a cluster with give interfaces. var _clusterId = 0; function makeCluster(interfaces) { if(!angular.isArray(interfaces)) { interfaces = [makeManagedClusterInterface()]; } return { id: _clusterId++, uuid: makeName("uuid"), interfaces: interfaces }; } // Make a interface function makeInterface(mac, ipAssignment, clusterInterfaceId, ipAddress) { if(angular.isUndefined(mac)) { mac = ""; } if(angular.isUndefined(ipAssignment)) { ipAssignment = null; } if(angular.isUndefined(clusterInterfaceId)) { clusterInterfaceId = null; } if(angular.isUndefined(ipAddress)) { ipAddress = ""; } return { mac: mac, ipAssignment: ipAssignment, clusterInterfaceId: clusterInterfaceId, ipAddress: ipAddress }; } it("sets addDeviceScope on $scope.$parent", function() { var controller = makeController(); expect(parentScope.addDeviceScope).toBe($scope); }); it("sets initial values on $scope", function() { var controller = makeController(); expect($scope.viewable).toBe(false); expect($scope.clusters).toBe(ClustersManager.getItems()); expect($scope.error).toBe(null); expect($scope.ipAssignments).toEqual([ { name: "external", title: "External" }, { name: "dynamic", title: "Dynamic" }, { name: "static", title: "Static" } ]); expect($scope.device).toEqual({ name: "", interfaces: [{ mac: "", ipAssignment: null, clusterInterfaceId: null, ipAddress: "" }] }); }); it("calls loadManager with ClustersManagers", function() { spyOn(ManagerHelperService, "loadManager"); var controller = makeController(); expect(ManagerHelperService.loadManager).toHaveBeenCalledWith( ClustersManager); }); describe("show", function() { it("does nothing if already viewable", function() { var controller = makeController(); $scope.viewable = true; var name = makeName("name"); $scope.device.name = name; $scope.show(); // The device name should have stayed the same, showing that // the call did nothing. 
expect($scope.device.name).toBe(name); }); it("clears device and sets viewable to true", function() { var controller = makeController(); $scope.device.name = makeName("name"); $scope.show(); expect($scope.device.name).toBe(""); expect($scope.viewable).toBe(true); }); }); describe("hide", function() { it("sets viewable to false", function() { var controller = makeController(); $scope.viewable = true; $scope.hide(); expect($scope.viewable).toBe(false); }); it("emits event addDeviceHidden", function(done) { var controller = makeController(); $scope.viewable = true; $scope.$on("addDeviceHidden", function() { done(); }); $scope.hide(); }); }); describe("getManagedInterfaces", function() { it("returns only managed interfaces", function() { var controller = makeController(); var managedInterfaces = [ makeManagedClusterInterface(), makeManagedClusterInterface(), makeManagedClusterInterface() ]; $scope.clusters = [ makeCluster([]), makeCluster([managedInterfaces[0]]), makeCluster([managedInterfaces[1], makeClusterInterface()]), makeCluster([managedInterfaces[2]]) ]; expect($scope.getManagedInterfaces()).toEqual(managedInterfaces); }); }); describe("getInterfaceStaticRange", function() { it("returns text including low and high of static range", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); $scope.clusters = [ makeCluster([nic]) ]; expect($scope.getInterfaceStaticRange(nic.id)).toEqual( nic.static_range.low + " - " + nic.static_range.high + " (Optional)"); }); }); describe("nameHasError", function() { it("returns false if name is empty", function() { var controller = makeController(); expect($scope.nameHasError()).toBe(false); }); it("returns false if valid name", function() { var controller = makeController(); $scope.device.name = "abc"; expect($scope.nameHasError()).toBe(false); }); it("returns true if invalid name", function() { var controller = makeController(); $scope.device.name = "a_bc.local"; expect($scope.nameHasError()).toBe(true); }); }); describe("macHasError", function() { it("returns false if mac is empty", function() { var controller = makeController(); var nic = makeInterface(); expect($scope.macHasError(nic)).toBe(false); }); it("returns false if valid mac", function() { var controller = makeController(); var nic = makeInterface("00:00:11:22:33:44"); expect($scope.macHasError(nic)).toBe(false); }); it("returns false if not repeat mac", function() { var controller = makeController(); var nic = makeInterface("00:00:11:22:33:44"); var nic2 = makeInterface("00:00:11:22:33:55"); $scope.device.interfaces = [ nic, nic2 ]; expect($scope.macHasError(nic)).toBe(false); expect($scope.macHasError(nic2)).toBe(false); }); it("returns true if invalid mac", function() { var controller = makeController(); var nic = makeInterface("00:00:11:22:33"); expect($scope.macHasError(nic)).toBe(true); }); it("returns true if repeat mac", function() { var controller = makeController(); var nic = makeInterface("00:00:11:22:33:44"); var nic2 = makeInterface("00:00:11:22:33:44"); $scope.device.interfaces = [ nic, nic2 ]; expect($scope.macHasError(nic)).toBe(true); expect($scope.macHasError(nic2)).toBe(true); }); }); describe("ipHasError", function() { it("returns false if ip is empty", function() { var controller = makeController(); var nic = makeInterface(); expect($scope.ipHasError(nic)).toBe(false); }); it("returns false if valid ipv4", function() { var controller = makeController(); var nic = makeInterface(); nic.ipAddress = "192.168.1.1"; 
expect($scope.ipHasError(nic)).toBe(false); }); it("returns false if valid ipv6", function() { var controller = makeController(); var nic = makeInterface(); nic.ipAddress = "2001:db8::1"; expect($scope.ipHasError(nic)).toBe(false); }); it("returns true if invalid ipv4", function() { var controller = makeController(); var nic = makeInterface(); nic.ipAddress = "192.168.1"; expect($scope.ipHasError(nic)).toBe(true); }); it("returns true if invalid ipv6", function() { var controller = makeController(); var nic = makeInterface(); nic.ipAddress = "2001::db8::1"; expect($scope.ipHasError(nic)).toBe(true); }); it("returns false if external ip out of managed network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; // No class A address is in the fake networks. var deviceInterface = makeInterface(); deviceInterface.ipAddress = "10.0.1.1"; deviceInterface.ipAssignment = { name: "external" }; expect($scope.ipHasError(deviceInterface)).toBe(false); }); it("returns true if external ip in managed network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; var deviceInterface = makeInterface(); deviceInterface.ipAddress = nic.static_range.low; deviceInterface.ipAssignment = { name: "external" }; expect($scope.ipHasError(deviceInterface)).toBe(true); }); it("returns false if static in managed network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; var deviceInterface = makeInterface(); deviceInterface.ipAddress = nic.static_range.low; deviceInterface.ipAssignment = { name: "static" }; expect($scope.ipHasError(deviceInterface)).toBe(false); }); it("returns false if static ip in select network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; var deviceInterface = makeInterface(); deviceInterface.ipAddress = nic.static_range.low; deviceInterface.clusterInterfaceId = nic.id; deviceInterface.ipAssignment = { name: "static" }; expect($scope.ipHasError(deviceInterface)).toBe(false); }); it("returns true if static ip out of select network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var otherNic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; var deviceInterface = makeInterface(); deviceInterface.ipAddress = otherNic.static_range.low; deviceInterface.clusterInterfaceId = nic.id; deviceInterface.ipAssignment = { name: "static" }; expect($scope.ipHasError(deviceInterface)).toBe(true); }); it("returns true if static ip in dynamic range of network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; var deviceInterface = makeInterface(); deviceInterface.ipAddress = nic.dynamic_range.low; deviceInterface.clusterInterfaceId = nic.id; deviceInterface.ipAssignment = { name: "static" }; expect($scope.ipHasError(deviceInterface)).toBe(true); }); }); describe("deviceHasError", function() { it("returns true if name empty", function() { var controller = makeController(); $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; 
expect($scope.deviceHasError()).toBe(true); }); it("returns true if mac empty", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; expect($scope.deviceHasError()).toBe(true); }); it("returns true if name invalid", function() { var controller = makeController(); $scope.device.name = "ab_c.local"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; expect($scope.deviceHasError()).toBe(true); }); it("returns true if mac invalid", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44'; $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; expect($scope.deviceHasError()).toBe(true); }); it("returns true if missing ip assignment selection", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; expect($scope.deviceHasError()).toBe(true); }); it("returns false if dynamic ip assignment selection", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; expect($scope.deviceHasError()).toBe(false); }); it("returns true if external ip assignment and ip empty", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "external" }; $scope.device.interfaces[0].ipAddress = ""; expect($scope.deviceHasError()).toBe(true); }); it("returns true if external ip assignment and ip invalid", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "external" }; $scope.device.interfaces[0].ipAddress = "192.168"; expect($scope.deviceHasError()).toBe(true); }); it("returns false if external ip assignment and ip valid", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "external" }; $scope.device.interfaces[0].ipAddress = "192.168.1.1"; expect($scope.deviceHasError()).toBe(false); }); it("returns true if static ip assignment and no cluster interface", function() { var controller = makeController(); $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "static" }; expect($scope.deviceHasError()).toBe(true); }); it("returns false if static ip assignment and cluster interface", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "static" }; $scope.device.interfaces[0].clusterInterfaceId = nic.id; expect($scope.deviceHasError()).toBe(false); }); it("returns true if static ip assignment, cluster interface, and " + "invalid ip address", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; 
$scope.device.interfaces[0].ipAssignment = { name: "static" }; $scope.device.interfaces[0].clusterInterfaceId = nic.id; $scope.device.interfaces[0].ipAddress = "192.168"; expect($scope.deviceHasError()).toBe(true); }); it("returns true if static ip assignment, cluster interface, and " + "ip address out of network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var otherNic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "static" }; $scope.device.interfaces[0].clusterInterfaceId = nic.id; $scope.device.interfaces[0].ipAddress = otherNic.static_range.low; expect($scope.deviceHasError()).toBe(true); }); it("returns false if static ip assignment, cluster interface, and " + "ip address in network", function() { var controller = makeController(); var nic = makeManagedClusterInterface(); var cluster = makeCluster([nic]); $scope.clusters = [cluster]; $scope.device.name = "abc"; $scope.device.interfaces[0].mac = '00:11:22:33:44:55'; $scope.device.interfaces[0].ipAssignment = { name: "static" }; $scope.device.interfaces[0].clusterInterfaceId = nic.id; $scope.device.interfaces[0].ipAddress = nic.static_range.low; expect($scope.deviceHasError()).toBe(false); }); }); describe("addInterface", function() { it("adds another interface", function() { var controller = makeController(); $scope.addInterface(); expect($scope.device.interfaces.length).toBe(2); }); }); describe("isPrimaryInterface", function() { it("returns true for first interface", function() { var controller = makeController(); $scope.addInterface(); expect( $scope.isPrimaryInterface( $scope.device.interfaces[0])).toBe(true); }); it("returns false for second interface", function() { var controller = makeController(); $scope.addInterface(); expect( $scope.isPrimaryInterface( $scope.device.interfaces[1])).toBe(false); }); }); describe("deleteInterface", function() { it("doesnt remove primary interface", function() { var controller = makeController(); var nic = $scope.device.interfaces[0]; $scope.deleteInterface(nic); expect($scope.device.interfaces[0]).toBe(nic); }); it("removes interface", function() { var controller = makeController(); $scope.addInterface(); var nic = $scope.device.interfaces[1]; $scope.deleteInterface(nic); expect($scope.device.interfaces.indexOf(nic)).toBe(-1); }); }); describe("cancel", function() { it("clears error", function() { var controller = makeController(); $scope.error = makeName("error"); $scope.cancel(); expect($scope.error).toBeNull(); }); it("clears device", function() { var controller = makeController(); $scope.device.name = makeName("name"); $scope.cancel(); expect($scope.device.name).toBe(""); }); it("calls hide", function() { var controller = makeController(); spyOn($scope, "hide"); $scope.cancel(); expect($scope.hide).toHaveBeenCalled(); }); }); describe("save", function() { it("doest nothing if device in error", function() { var controller = makeController(); var error = makeName("error"); $scope.error = error; spyOn($scope, "deviceHasError").and.returnValue(true); $scope.save(); // Error would have been cleared if save did anything. 
expect($scope.error).toBe(error); }); it("clears error before calling create", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); spyOn(DevicesManager, "create").and.returnValue( $q.defer().promise); $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; $scope.save(); expect($scope.error).toBeNull(); }); it("calls create with converted device", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); spyOn(DevicesManager, "create").and.returnValue( $q.defer().promise); var name = makeName("name"); var mac = makeName("mac"); var assignment = "static"; var nicId = makeInteger(); var ipAddress = makeName("ip"); $scope.device = { name: name, interfaces: [{ mac: mac, ipAssignment: { name: assignment }, clusterInterfaceId: nicId, ipAddress: ipAddress }] }; $scope.save(); expect(DevicesManager.create).toHaveBeenCalledWith({ hostname: name, primary_mac: mac, extra_macs: [], interfaces: [{ mac: mac, ip_assignment: assignment, ip_address: ipAddress, "interface": nicId }] }); }); it("on create resolve device is cleared", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); var defer = $q.defer(); spyOn(DevicesManager, "create").and.returnValue(defer.promise); $scope.device.name = makeName("name"); $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; $scope.save(); defer.resolve(); $rootScope.$digest(); expect($scope.device.name).toBe(""); }); it("on create resolve hide is called when addAnother is false", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); var defer = $q.defer(); spyOn(DevicesManager, "create").and.returnValue(defer.promise); $scope.device.name = makeName("name"); $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; spyOn($scope, "hide"); $scope.save(false); defer.resolve(); $rootScope.$digest(); expect($scope.hide).toHaveBeenCalled(); }); it("on create resolve hide is not called when addAnother is true", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); var defer = $q.defer(); spyOn(DevicesManager, "create").and.returnValue(defer.promise); $scope.device.name = makeName("name"); $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; spyOn($scope, "hide"); $scope.save(true); defer.resolve(); $rootScope.$digest(); expect($scope.hide).not.toHaveBeenCalled(); }); it("on create reject error is set", function() { var controller = makeController(); $scope.error = makeName("error"); spyOn($scope, "deviceHasError").and.returnValue(false); var defer = $q.defer(); spyOn(DevicesManager, "create").and.returnValue(defer.promise); $scope.device.name = makeName("name"); $scope.device.interfaces[0].ipAssignment = { name: "dynamic" }; $scope.save(); var errorMsg = makeName("error"); var error = "{'hostname': ['" + errorMsg + "']}"; defer.reject(error); $rootScope.$digest(); expect($scope.error).toBe(errorMsg + " "); }); }); describe("convertPythonDictToErrorMsg", function() { it("converts hostname error for display", function() { var controller = makeController(); var errorMsg = makeName("error"); var error = "{'hostname': ['Node " + errorMsg + "']}"; var expected = "Device " + errorMsg + " "; expect($scope.convertPythonDictToErrorMsg( 
error)).toBe(expected); }); it("converts mac_addresses error for display", function() { var controller = makeController(); var errorMsg = makeName("error"); var error = "{'mac_addresses': ['" + errorMsg + "']}"; var expected = errorMsg + " "; expect($scope.convertPythonDictToErrorMsg( error)).toBe(expected); }); it("converts unknown segments by default", function() { var controller = makeController(); var errorSegment1 = makeName("error"); var errorSegment2 = makeName("error"); var error = "{'" + errorSegment1 + "': ['" + errorSegment2 + "']}"; var expected = errorSegment1 + errorSegment2; expect($scope.convertPythonDictToErrorMsg( error)).toBe(expected); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_add_hardware.js0000644000000000000000000007016613056115004030672 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for AddHardwareController. */ describe("AddHardwareController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $timeout, $http, $cookies, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $timeout = $injector.get("$timeout"); $http = $injector.get("$http"); $cookies = $injector.get("$cookies"); $q = $injector.get("$q"); })); // Load the ClustersManager, ZonesManager, NodesManager, RegionConnection, // and mock the websocket connection. var ClustersManager, ZonesManager, NodesManager, GeneralManager; var RegionConnection, ManagerHelperService, webSocket; beforeEach(inject(function($injector) { ClustersManager = $injector.get("ClustersManager"); ZonesManager = $injector.get("ZonesManager"); NodesManager = $injector.get("NodesManager"); GeneralManager = $injector.get("GeneralManager"); RegionConnection = $injector.get("RegionConnection"); ManagerHelperService = $injector.get("ManagerHelperService"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Create the parent scope and the scope for the controller. var parentScope, $scope; beforeEach(function() { parentScope = $rootScope.$new(); parentScope.addHardwareScope = null; $scope = parentScope.$new(); }); // Makes the AddHardwareController function makeController(loadManagersDefer, loadManagerDefer) { var loadManagers = spyOn(ManagerHelperService, "loadManagers"); if(angular.isObject(loadManagersDefer)) { loadManagers.and.returnValue(loadManagersDefer.promise); } else { loadManagers.and.returnValue($q.defer().promise); } var loadManager = spyOn(ManagerHelperService, "loadManager"); if(angular.isObject(loadManagerDefer)) { loadManager.and.returnValue(loadManagerDefer.promise); } else { loadManager.and.returnValue($q.defer().promise); } // Start the connection so a valid websocket is created in the // RegionConnection. RegionConnection.connect(""); return $controller("AddHardwareController", { $scope: $scope, $timeout: $timeout, $http: $http, $cookies: $cookies, ZonesManager: ZonesManager, NodesManager: NodesManager, GeneralManager: GeneralManager, RegionConnection: RegionConnection, ManagerHelperService: ManagerHelperService }); } // Makes the AddHardwareController with the $scope.machine already // initialized. 
function makeControllerWithMachine() {
    var defer = $q.defer();
    var controller = makeController(defer);
    defer.resolve();
    $rootScope.$digest();
    return controller;
}

it("sets addHardwareScope on $scope.$parent", function() {
    var controller = makeController();
    expect(parentScope.addHardwareScope).toBe($scope);
});

it("sets initial values on $scope", function() {
    var controller = makeController();
    expect($scope.viewable).toBe(false);
    expect($scope.clusters).toBe(ClustersManager.getItems());
    expect($scope.zones).toBe(ZonesManager.getItems());
    expect($scope.architectures).toEqual([]);
    expect($scope.hwe_kernels).toEqual([]);
    expect($scope.error).toBeNull();
    expect($scope.machine).toBeNull();
    expect($scope.chassis).toBeNull();
});

it("calls loadManagers with ClustersManager and ZonesManager", function() {
    var controller = makeController();
    expect(ManagerHelperService.loadManagers).toHaveBeenCalledWith(
        [ClustersManager, ZonesManager]);
});

it("calls loadManager with GeneralManager", function() {
    var controller = makeController();
    expect(ManagerHelperService.loadManager).toHaveBeenCalledWith(
        GeneralManager);
});

it("initializes machine once ClustersManager and ZonesManager loaded",
    function() {
        var defer = $q.defer();
        var controller = makeController(defer);
        defer.resolve();
        $scope.$digest();
        expect($scope.machine).not.toBeNull();
});

it("initializes chassis once ClustersManager and ZonesManager loaded",
    function() {
        var defer = $q.defer();
        var controller = makeController(defer);
        defer.resolve();
        $scope.$digest();
        expect($scope.chassis).not.toBeNull();
});

it("initializes machine architecture with first arch", function() {
    var defer = $q.defer();
    var controller = makeController(null, defer);
    var arch = makeName("arch");
    $scope.architectures = [arch];
    $scope.machine = {
        architecture: ''
    };
    defer.resolve();
    $scope.$digest();
    expect($scope.machine.architecture).toEqual(arch);
});

it("initializes machine architecture with amd64 arch", function() {
    var defer = $q.defer();
    var controller = makeController(null, defer);
    var arch = makeName("arch");
    $scope.architectures = [arch, "amd64/generic"];
    $scope.machine = {
        architecture: ''
    };
    defer.resolve();
    $scope.$digest();
    expect($scope.machine.architecture).toEqual("amd64/generic");
});

it("doesnt initialize machine architecture if set", function() {
    var defer = $q.defer();
    var controller = makeController(null, defer);
    var arch = makeName("arch");
    var newArch = makeName("arch");
    $scope.architectures = [newArch];
    $scope.machine = {
        architecture: arch
    };
    defer.resolve();
    $scope.$digest();
    expect($scope.machine.architecture).toEqual(arch);
});

it("initializes machine min_hwe_kernel with hwe-t", function() {
    var defer = $q.defer();
    var controller = makeController(null, defer);
    var arch = makeName("arch");
    $scope.architectures = [arch];
    $scope.machine = {
        architecture: '',
        min_hwe_kernel: 'hwe-t'
    };
    defer.resolve();
    $scope.$digest();
    expect($scope.machine.min_hwe_kernel).toEqual("hwe-t");
});

it("calls stopPolling when scope destroyed", function() {
    var controller = makeController();
    spyOn(GeneralManager, "stopPolling");
    $scope.$destroy();
    expect(GeneralManager.stopPolling).toHaveBeenCalledWith(
        "architectures");
    expect(GeneralManager.stopPolling).toHaveBeenCalledWith(
        "hwe_kernels");
});

describe("show", function() {
    it("sets viewable to true", function() {
        var controller = makeController();
        $scope.show();
        expect($scope.viewable).toBe(true);
    });

    it("calls startPolling for architectures", function() {
        var controller = makeController();
        spyOn(GeneralManager, "startPolling");
        $scope.show();
        expect(GeneralManager.startPolling).toHaveBeenCalledWith(
            "architectures");
    });

    it("calls startPolling for hwe_kernels", function() {
        var controller = makeController();
        spyOn(GeneralManager, "startPolling");
        $scope.show();
        expect(GeneralManager.startPolling).toHaveBeenCalledWith(
            "hwe_kernels");
    });
});

describe("hide", function() {
    it("sets viewable to false", function() {
        var controller = makeController();
        $scope.viewable = true;
        $scope.hide();
        expect($scope.viewable).toBe(false);
    });

    it("calls stopPolling for architectures", function() {
        var controller = makeController();
        spyOn(GeneralManager, "stopPolling");
        $scope.hide();
        expect(GeneralManager.stopPolling).toHaveBeenCalledWith(
            "architectures");
    });

    it("calls stopPolling for hwe_kernels", function() {
        var controller = makeController();
        spyOn(GeneralManager, "stopPolling");
        $scope.hide();
        expect(GeneralManager.stopPolling).toHaveBeenCalledWith(
            "hwe_kernels");
    });

    it("emits addHardwareHidden event", function(done) {
        var controller = makeController();
        $scope.$on("addHardwareHidden", function() {
            done();
        });
        $scope.hide();
    });
});

describe("addMac", function() {
    it("adds mac address object to machine", function() {
        var controller = makeControllerWithMachine();
        $scope.addMac();
        expect($scope.machine.macs.length).toBe(2);
    });
});

describe("removeMac", function() {
    it("removes mac address object from machine", function() {
        var controller = makeControllerWithMachine();
        $scope.addMac();
        var mac = $scope.machine.macs[1];
        $scope.removeMac(mac);
        expect($scope.machine.macs.length).toBe(1);
    });

    it("ignores second remove if mac object removed again", function() {
        var controller = makeControllerWithMachine();
        $scope.addMac();
        var mac = $scope.machine.macs[1];
        $scope.removeMac(mac);
        $scope.removeMac(mac);
        expect($scope.machine.macs.length).toBe(1);
    });
});

describe("invalidName", function() {
    it("returns false if machine name empty", function() {
        var controller = makeControllerWithMachine();
        expect($scope.invalidName($scope.machine)).toBe(false);
    });

    it("returns false if machine name valid", function() {
        var controller = makeControllerWithMachine();
        $scope.machine.name = "abc";
        expect($scope.invalidName($scope.machine)).toBe(false);
    });

    it("returns true if machine name invalid", function() {
        var controller = makeControllerWithMachine();
        $scope.machine.name = "ab_c.local";
        expect($scope.invalidName($scope.machine)).toBe(true);
    });
});

describe("validateMac", function() {
    it("sets error to false if blank", function() {
        var controller = makeController();
        var mac = {
            mac: '',
            error: true
        };
        $scope.validateMac(mac);
        expect(mac.error).toBe(false);
    });

    it("sets error to true if invalid", function() {
        var controller = makeController();
        var mac = {
            mac: '00:11:22',
            error: false
        };
        $scope.validateMac(mac);
        expect(mac.error).toBe(true);
    });

    it("sets error to false if valid", function() {
        var controller = makeController();
        var mac = {
            mac: '00:11:22:33:44:55',
            error: true
        };
        $scope.validateMac(mac);
        expect(mac.error).toBe(false);
    });
});

describe("machineHasError", function() {
    it("returns true if machine is null", function() {
        var controller = makeControllerWithMachine();
        $scope.machine = null;
        expect($scope.machineHasError()).toBe(true);
    });

    it("returns true if cluster is null", function() {
        var controller = makeControllerWithMachine();
        $scope.machine.cluster = null;
        $scope.machine.zone = {};
        $scope.machine.architecture = makeName("arch");
        $scope.machine.power.type = {};
        $scope.machine.macs[0].mac =
'00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if zone is null", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = null; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if architecture is empty", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = ''; $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if power.type is null", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = null; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if machine.name invalid", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.name = "ab_c.local"; $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if mac[0] is empty", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = ''; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(true); }); it("returns true if mac[0] is in error", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44'; $scope.machine.macs[0].error = true; expect($scope.machineHasError()).toBe(true); }); it("returns true if mac[1] is in error", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; $scope.machine.macs.push({ mac: '00:11:22:33:55', error: true }); expect($scope.machineHasError()).toBe(true); }); it("returns false if all is correct", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; expect($scope.machineHasError()).toBe(false); }); it("returns false if all is correct and mac[1] is blank", function() { var controller = makeControllerWithMachine(); $scope.machine.cluster = {}; $scope.machine.zone = {}; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = {}; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; $scope.machine.macs.push({ mac: '', error: false }); 
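        // (A blank extra MAC mirrors the untouched placeholder row in
        // the form; validation is expected to skip it rather than
        // flag the machine.)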
expect($scope.machineHasError()).toBe(false); }); }); describe("chassisHasErrors", function() { it("returns true if chassis is null", function() { var controller = makeController(); $scope.chassis = null; expect($scope.chassisHasErrors()).toBe(true); }); it("returns true if cluster is null", function() { var controller = makeController(); $scope.chassis = { cluster: null, power: { type: {}, parameters: {} } }; expect($scope.chassisHasErrors()).toBe(true); }); it("returns true if power.type is null", function() { var controller = makeController(); $scope.chassis = { cluster: {}, power: { type: null, parameters: {} } }; expect($scope.chassisHasErrors()).toBe(true); }); it("returns true if power.parameters is invalid", function() { var controller = makeController(); $scope.chassis = { cluster: {}, power: { type: { fields: [ { name: "test", required: true } ] }, parameters: { test: "" } } }; expect($scope.chassisHasErrors()).toBe(true); }); it("returns false if all valid", function() { var controller = makeController(); $scope.chassis = { cluster: {}, power: { type: { fields: [ { name: "test", required: true } ] }, parameters: { test: "data" } } }; expect($scope.chassisHasErrors()).toBe(false); }); }); describe("cancel", function() { it("clears error", function() { var controller = makeControllerWithMachine(); $scope.error = makeName("error"); $scope.cancel(); expect($scope.error).toBeNull(); }); it("clears machine and adds a new one", function() { var controller = makeControllerWithMachine(); $scope.machine.name = makeName("name"); $scope.cancel(); expect($scope.machine.name).toBe(""); }); it("clears chassis and adds a new one", function() { var controller = makeControllerWithMachine(); $scope.chassis.power.type = makeName("type"); $scope.cancel(); expect($scope.chassis.power.type).toBeNull(); }); it("calls hide", function() { var controller = makeControllerWithMachine(); spyOn($scope, "hide"); $scope.cancel(); expect($scope.hide).toHaveBeenCalled(); }); }); describe("saveMachine", function() { // Setup a valid machine before each test. beforeEach(function() { var controller = makeControllerWithMachine(); $scope.addMac(); $scope.machine.name = makeName("name").replace("_", ""); $scope.machine.cluster = { id: 1, uuid: makeName("uuid"), cluster_name: makeName("cluster_name") }; $scope.machine.zone = { id: 1, name: makeName("zone") }; $scope.machine.architecture = makeName("arch"); $scope.machine.power.type = { name: "ether_wake" }; $scope.machine.power.parameters = { mac_address: "00:11:22:33:44:55" }; $scope.machine.macs[0].mac = '00:11:22:33:44:55'; $scope.machine.macs[0].error = false; $scope.machine.macs[1].mac = '00:11:22:33:44:66'; $scope.machine.macs[1].error = false; }); it("does nothing if errors", function() { var error = makeName("error"); $scope.error = error; // Force the machine to be invalid. 
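        // (Stubbing machineHasError avoids constructing a genuinely
        // broken machine; only the early-return guard in saveMachine
        // is under test here.)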
spyOn($scope, "machineHasError").and.returnValue(true); $scope.saveMachine(false); expect($scope.error).toBe(error); }); it("clears error", function() { $scope.error = makeName("error"); $scope.saveMachine(false); expect($scope.error).toBeNull(); }); it("calls NodesManager.create with converted machine", function() { spyOn(NodesManager, "create").and.returnValue($q.defer().promise); $scope.saveMachine(false); expect(NodesManager.create).toHaveBeenCalledWith({ hostname: $scope.machine.name, architecture: $scope.machine.architecture, min_hwe_kernel: $scope.machine.min_hwe_kernel, pxe_mac: $scope.machine.macs[0].mac, extra_macs: [$scope.machine.macs[1].mac], power_type: $scope.machine.power.type.name, power_parameters: $scope.machine.power.parameters, zone: { id: $scope.machine.zone.id, name: $scope.machine.zone.name }, nodegroup: { id: $scope.machine.cluster.id, uuid: $scope.machine.cluster.uuid, cluster_name: $scope.machine.cluster.cluster_name } }); }); it("calls hide once NodesManager.create is resolved", function() { var defer = $q.defer(); spyOn(NodesManager, "create").and.returnValue(defer.promise); spyOn($scope, "hide"); $scope.saveMachine(false); defer.resolve(); $rootScope.$digest(); expect($scope.hide).toHaveBeenCalled(); }); it("resets machine once NodesManager.create is resolved", function() { var defer = $q.defer(); spyOn(NodesManager, "create").and.returnValue(defer.promise); $scope.saveMachine(false); defer.resolve(); $rootScope.$digest(); expect($scope.machine.name).toBe(""); }); it("clones machine once NodesManager.create is resolved", function() { var defer = $q.defer(); spyOn(NodesManager, "create").and.returnValue(defer.promise); var cluster_name = $scope.machine.cluster.cluster_name; $scope.saveMachine(true); defer.resolve(); $rootScope.$digest(); expect($scope.machine.name).toBe(""); expect($scope.machine.cluster.cluster_name).toBe(cluster_name); }); it("deosnt call hide if addAnother is true", function() { var defer = $q.defer(); spyOn(NodesManager, "create").and.returnValue(defer.promise); spyOn($scope, "hide"); $scope.saveMachine(true); defer.resolve(); $rootScope.$digest(); expect($scope.hide).not.toHaveBeenCalled(); }); it("sets error when NodesManager.create is rejected", function() { var defer = $q.defer(); spyOn(NodesManager, "create").and.returnValue(defer.promise); spyOn($scope, "hide"); var error = makeName("error"); $scope.saveMachine(false); defer.reject(error); $rootScope.$digest(); expect($scope.error).toBe(error); expect($scope.hide).not.toHaveBeenCalled(); }); }); describe("saveChassis", function() { // Setup a valid chassis before each test. var httpDefer; beforeEach(function() { httpDefer = $q.defer(); // Mock $http. $http = jasmine.createSpy("$http"); $http.and.returnValue(httpDefer.promise); // Create the controller and the valid chassis. 
var controller = makeController(); $scope.chassis = { cluster: { uuid: makeName("uuid") }, power: { type: { name: makeName("model"), fields: [ { name: "one", required: true }, { name: "one", required: true } ] }, parameters: { "one": makeName("one"), "two": makeName("two") } } }; }); it("does nothing if errors", function() { var error = makeName("error"); $scope.error = error; spyOn($scope, "chassisHasErrors").and.returnValue(true); $scope.saveChassis(false); expect($scope.error).toBe(error); }); it("calls $http with correct parameters", function() { $cookies.csrftoken = makeName("csrf"); $scope.saveChassis(false); var parameters = $scope.chassis.power.parameters; parameters.model = $scope.chassis.power.type.name; expect($http).toHaveBeenCalledWith({ method: 'POST', url: 'api/1.0/nodegroups/' + $scope.chassis.cluster.uuid + '/?op=probe_and_enlist_hardware', data: $.param(parameters), headers: { 'Content-Type': 'application/x-www-form-urlencoded', 'X-CSRFTOKEN': $cookies.csrftoken } }); }); it("creates new chassis when $http resolves", function() { $scope.saveChassis(false); httpDefer.resolve(); $rootScope.$digest(); expect($scope.chassis.power.type).toBeNull(); }); it("calls hide if addAnother false when $http resolves", function() { spyOn($scope, "hide"); $scope.saveChassis(false); httpDefer.resolve(); $rootScope.$digest(); expect($scope.hide).toHaveBeenCalled(); }); it("doesnt call hide if addAnother true when $http resolves", function() { spyOn($scope, "hide"); $scope.saveChassis(true); httpDefer.resolve(); $rootScope.$digest(); expect($scope.hide).not.toHaveBeenCalled(); }); it("sets error when $http rejects", function() { $scope.saveChassis(false); var error = makeName("error"); httpDefer.reject(error); $rootScope.$digest(); expect($scope.error).toBe(error); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_details.js0000644000000000000000000023111113056115004030704 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodeDetailsController. */ // Make a fake user. var userId = 0; function makeUser() { return { id: userId++, username: makeName("username"), first_name: makeName("first_name"), last_name: makeName("last_name"), email: makeName("email"), is_superuser: false, sshkeys_count: 0 }; } describe("NodeDetailsController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $location, $scope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $location = $injector.get("$location"); $scope = $rootScope.$new(); $q = $injector.get("$q"); })); // Load the required dependencies for the NodeDetails controller and // mock the websocket connection. 
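    // (RegionConnection.buildSocket is stubbed with a MockWebSocket
    // below, so no real region connection is attempted by these tests.)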
    var NodesManager, ClustersManager, ZonesManager, DevicesManager,
        GeneralManager, UsersManager, TagsManager;
    var RegionConnection, ManagerHelperService, ErrorService, webSocket;
    beforeEach(inject(function($injector) {
        NodesManager = $injector.get("NodesManager");
        ClustersManager = $injector.get("ClustersManager");
        ZonesManager = $injector.get("ZonesManager");
        GeneralManager = $injector.get("GeneralManager");
        UsersManager = $injector.get("UsersManager");
        TagsManager = $injector.get("TagsManager");
        RegionConnection = $injector.get("RegionConnection");
        ManagerHelperService = $injector.get("ManagerHelperService");
        ErrorService = $injector.get("ErrorService");

        // Mock buildSocket so an actual connection is not made.
        webSocket = new MockWebSocket();
        spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket);
    }));

    // Make a fake cluster.
    function makeCluster() {
        var cluster = {
            id: makeInteger(0, 10000),
            name: makeName("cluster"),
            uuid: makeName("uuid"),
            power_types: [],
            connected: true
        };
        ClustersManager._items.push(cluster);
        return cluster;
    }

    // Make a fake zone.
    function makeZone() {
        var zone = {
            id: makeInteger(0, 10000),
            name: makeName("zone")
        };
        ZonesManager._items.push(zone);
        return zone;
    }

    // Make a fake node.
    function makeNode() {
        var cluster = makeCluster();
        var zone = makeZone();
        var node = {
            system_id: makeName("system_id"),
            hostname: makeName("hostname"),
            fqdn: makeName("fqdn"),
            actions: [],
            architecture: "amd64/generic",
            nodegroup: angular.copy(cluster),
            zone: angular.copy(zone),
            power_type: "",
            power_parameters: null,
            summary_xml: null,
            summary_yaml: null,
            commissioning_results: [],
            installation_results: [],
            events: [],
            interfaces: [],
            extra_macs: []
        };
        NodesManager._items.push(node);
        return node;
    }

    // Make a fake event.
    function makeEvent() {
        return {
            type: {
                description: makeName("type")
            },
            description: makeName("description")
        };
    }

    // Create the node that will be used and set the routeParams.
    var node, $routeParams;
    beforeEach(function() {
        node = makeNode();
        $routeParams = {
            system_id: node.system_id
        };
    });

    // Makes the NodeDetailsController.
    function makeController(loadManagersDefer) {
        var loadManagers = spyOn(ManagerHelperService, "loadManagers");
        if(angular.isObject(loadManagersDefer)) {
            loadManagers.and.returnValue(loadManagersDefer.promise);
        } else {
            loadManagers.and.returnValue($q.defer().promise);
        }

        // Start the connection so a valid websocket is created in the
        // RegionConnection.
        RegionConnection.connect("");

        // Set the authenticated user, and by default make them superuser.
        UsersManager._authUser = {
            is_superuser: true
        };

        // Create the controller.
        var controller = $controller("NodeDetailsController", {
            $scope: $scope,
            $rootScope: $rootScope,
            $routeParams: $routeParams,
            $location: $location,
            NodesManager: NodesManager,
            ClustersManager: ClustersManager,
            ZonesManager: ZonesManager,
            GeneralManager: GeneralManager,
            UsersManager: UsersManager,
            TagsManager: TagsManager,
            ManagerHelperService: ManagerHelperService,
            ErrorService: ErrorService
        });

        // Since the osSelection directive is not used in this test the
        // osSelection item on the model needs to have $reset function added
        // because it will be called throughout many of the tests.
        $scope.osSelection.$reset = jasmine.createSpy("$reset");

        return controller;
    }

    // Make the controller and resolve the setActiveItem call.
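    // (Two promises gate start-up: the loadManagers defer passed to
    // makeController, then NodesManager.setActiveItem; each resolution
    // needs a $rootScope.$digest() before its effects become visible.)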
function makeControllerResolveSetActiveItem() { var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); var defer = $q.defer(); var controller = makeController(defer); defer.resolve(); $rootScope.$digest(); setActiveDefer.resolve(node); $rootScope.$digest(); return controller; } it("sets title to loading and page to nodes", function() { var controller = makeController(); expect($rootScope.title).toBe("Loading..."); expect($rootScope.page).toBe("nodes"); }); it("sets the initial $scope values", function() { var controller = makeController(); expect($scope.loaded).toBe(false); expect($scope.node).toBeNull(); expect($scope.actionOption).toBeNull(); expect($scope.allActionOptions).toBe( GeneralManager.getData("node_actions")); expect($scope.availableActionOptions).toEqual([]); expect($scope.actionError).toBeNull(); expect($scope.osinfo).toBe(GeneralManager.getData("osinfo")); expect($scope.osSelection.osystem).toBeNull(); expect($scope.osSelection.release).toBeNull(); expect($scope.commissionOptions).toEqual({ enableSSH: false, skipNetworking: false, skipStorage: false }); expect($scope.checkingPower).toBe(false); expect($scope.devices).toEqual([]); }); it("sets initial values for summary section", function() { var controller = makeController(); expect($scope.summary).toEqual({ editing: false, cluster: { selected: null, options: ClustersManager.getItems() }, architecture: { selected: null, options: GeneralManager.getData("architectures") }, min_hwe_kernel: { selected: null, options: GeneralManager.getData("hwe_kernels") }, zone: { selected: null, options: ZonesManager.getItems() }, tags: [] }); expect($scope.summary.cluster.options).toBe( ClustersManager.getItems()); expect($scope.summary.architecture.options).toBe( GeneralManager.getData("architectures")); expect($scope.summary.min_hwe_kernel.options).toBe( GeneralManager.getData("hwe_kernels")); expect($scope.summary.zone.options).toBe( ZonesManager.getItems()); }); it("sets initial values for power section", function() { var controller = makeController(); expect($scope.power).toEqual({ editing: false, type: null, parameters: {} }); }); it("sets initial values for events section", function() { var controller = makeController(); expect($scope.events).toEqual({ limit: 10 }); }); it("sets initial values for machine output section", function() { var controller = makeController(); expect($scope.machine_output).toEqual({ viewable: false, selectedView: null, views: [], showSummaryToggle: true, summaryType: 'yaml' }); }); it("calls loadManagers with all needed managers", function() { var controller = makeController(); expect(ManagerHelperService.loadManagers).toHaveBeenCalledWith([ NodesManager, ClustersManager, ZonesManager, GeneralManager, UsersManager, TagsManager]); }); it("doesnt call setActiveItem if node is loaded", function() { spyOn(NodesManager, "setActiveItem").and.returnValue( $q.defer().promise); var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; defer.resolve(); $rootScope.$digest(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); expect(NodesManager.setActiveItem).not.toHaveBeenCalled(); }); it("calls setActiveItem if node is not active", function() { spyOn(NodesManager, "setActiveItem").and.returnValue( $q.defer().promise); var defer = $q.defer(); var controller = makeController(defer); defer.resolve(); $rootScope.$digest(); expect(NodesManager.setActiveItem).toHaveBeenCalledWith( node.system_id); }); it("sets node 
and loaded once setActiveItem resolves", function() { var controller = makeControllerResolveSetActiveItem(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); }); it("title is updated once setActiveItem resolves", function() { var controller = makeControllerResolveSetActiveItem(); expect($rootScope.title).toBe(node.fqdn); }); it("invalid_arch error visible if node architecture empty", function() { node.architecture = ""; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.invalid_arch.viewable).toBe(true); }); it("invalid_arch error visible if node architecture not present", function() { GeneralManager._data.architectures.data = [makeName("arch")]; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.invalid_arch.viewable).toBe(true); }); it("invalid_arch error not visible if node architecture present", function() { GeneralManager._data.architectures.data = [node.architecture]; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.invalid_arch.viewable).toBe(false); }); it("summary section placed in edit mode if architecture blank", function() { node.architecture = ""; var controller = makeControllerResolveSetActiveItem(); expect($scope.summary.editing).toBe(true); }); it("summary section not placed in edit mode if architecture present", function() { GeneralManager._data.architectures.data = [node.architecture]; var controller = makeControllerResolveSetActiveItem(); expect($scope.summary.editing).toBe(false); }); it("skips cluster_disconnected error if the nodegroup on node is invalid", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = false; node.nodegroup = undefined; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.cluster_disconnected.viewable).toBe(false); }); it("cluster_disconnected error visible if cluster disconnected", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = false; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.cluster_disconnected.viewable).toBe(true); }); it("cluster_disconnected error not visible if cluster connected", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = true; var controller = makeControllerResolveSetActiveItem(); expect($scope.errors.cluster_disconnected.viewable).toBe(false); }); it("power section is disabled when the cluster disconnects", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = false; var controller = makeControllerResolveSetActiveItem(); expect($scope.power.editing).toBe(false); }); it("power section is editable when the cluster connects", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = true; var controller = makeControllerResolveSetActiveItem(); expect($scope.power.editing).toBe(true); }); it("power section editability transitions according to cluster connection", function() { var cluster = ClustersManager.getItemFromList(node.nodegroup.id); cluster.connected = true; var controller = makeControllerResolveSetActiveItem(); // Should begin as true, ... expect($scope.power.editing).toBe(true); // turn false when the cluster disconnects... cluster.connected = false; $rootScope.$digest(); expect($scope.power.editing).toBe(false); // ...and back on again, when it reconnects. 
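        // (Each toggle below only takes effect once $rootScope.$digest()
        // runs the watcher on "summary.cluster.selected.connected".)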
        cluster.connected = true;
        $rootScope.$digest();
        expect($scope.power.editing).toBe(true);
    });

    it("summary section is updated once setActiveItem resolves", function() {
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.summary.cluster.selected).toBe(
            ClustersManager.getItemFromList(node.nodegroup.id));
        expect($scope.summary.zone.selected).toBe(
            ZonesManager.getItemFromList(node.zone.id));
        expect($scope.summary.architecture.selected).toBe(node.architecture);
        expect($scope.summary.tags).toEqual(node.tags);
    });

    it("missing_power error visible if node power_type empty", function() {
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.errors.missing_power.viewable).toBe(true);
    });

    it("missing_power error not visible if node power_type set", function() {
        node.power_type = makeName("power");
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.errors.missing_power.viewable).toBe(false);
    });

    it("power section is updated once setActiveItem resolves", function() {
        var power_types = [
            {
                name: makeName("power")
            },
            {
                name: makeName("power")
            },
            {
                name: makeName("power")
            }
        ];
        var cluster = ClustersManager.getItemFromList(node.nodegroup.id);
        cluster.power_types = power_types;
        node.power_type = power_types[0].name;
        node.power_parameters = {
            data: makeName("data")
        };
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.power.types).toBe(power_types);
        expect($scope.power.type).toBe(power_types[0]);
        expect($scope.power.parameters).toEqual(node.power_parameters);
        expect($scope.power.parameters).not.toBe(node.power_parameters);
    });

    it("power section placed in edit mode if power_type blank", function() {
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.power.editing).toBe(true);
    });

    it("power section not placed in edit mode if power_type", function() {
        node.power_type = makeName("power");
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.power.editing).toBe(false);
    });

    it("machine output not visible if all required data missing", function() {
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.machine_output.viewable).toBe(false);
    });

    it("machine output visible if summary_xml and summary_yaml", function() {
        node.summary_xml = node.summary_yaml = "summary";
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.machine_output.viewable).toBe(true);
    });

    it("machine output visible if commissioning_results", function() {
        node.commissioning_results.push({});
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.machine_output.viewable).toBe(true);
    });

    it("machine output not visible if commissioning_results not an array",
        function() {
            node.commissioning_results = undefined;
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.viewable).toBe(false);
    });

    it("machine output visible if installation_results", function() {
        node.installation_results.push({});
        var controller = makeControllerResolveSetActiveItem();
        expect($scope.machine_output.viewable).toBe(true);
    });

    it("machine output not visible if installation_results not an array",
        function() {
            node.installation_results = undefined;
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.viewable).toBe(false);
    });

    it("machine output summary view available if summary_xml and summary_yaml",
        function() {
            node.summary_xml = node.summary_yaml = "summary";
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.views).toEqual([{
                name: "summary",
                title: "Commissioning Summary"
            }]);
    });

    it("machine output output view available if commissioning_results",
        function() {
            node.commissioning_results.push({});
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.views).toEqual([{
                name: "output",
                title: "Commissioning Output"
            }]);
    });

    it("machine output install view available if installation_results",
        function() {
            node.installation_results.push({});
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.views).toEqual([{
                name: "install",
                title: "Installation Output"
            }]);
    });

    it("machine output first available view is set as selectedView",
        function() {
            node.commissioning_results.push({});
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.selectedView).toEqual({
                name: "output",
                title: "Commissioning Output"
            });
    });

    it("machine output previous selected view is still selected", function() {
        node.commissioning_results.push({});
        var controller = makeControllerResolveSetActiveItem();

        // Add summary output and make updateMachineOutput be called
        // again, but forcing a digest cycle.
        node.summary_xml = node.summary_yaml = "summary";
        $rootScope.$digest();

        // Output view should still be selected as it was initially
        // selected.
        expect($scope.machine_output.selectedView).toEqual({
            name: "output",
            title: "Commissioning Output"
        });
    });

    it("machine output install view is always selected first if possible",
        function() {
            node.commissioning_results.push({});
            node.installation_results.push({});
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.selectedView).toEqual({
                name: "install",
                title: "Installation Output"
            });
    });

    it("machine output summary toggle is viewable when summary view selected",
        function() {
            node.summary_xml = node.summary_yaml = "summary";
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.showSummaryToggle).toBe(true);
    });

    it("machine output summary toggle is not viewable when not summary view",
        function() {
            node.commissioning_results.push({});
            var controller = makeControllerResolveSetActiveItem();
            expect($scope.machine_output.showSummaryToggle).toBe(false);
    });

    it("starts watching once setActiveItem resolves", function() {
        var setActiveDefer = $q.defer();
        spyOn(NodesManager, "setActiveItem").and.returnValue(
            setActiveDefer.promise);
        var defer = $q.defer();
        var controller = makeController(defer);

        spyOn($scope, "$watch");
        spyOn($scope, "$watchCollection");

        defer.resolve();
        $rootScope.$digest();
        setActiveDefer.resolve(node);
        $rootScope.$digest();

        var watches = [];
        var i, calls = $scope.$watch.calls.allArgs();
        for(i = 0; i < calls.length; i++) {
            watches.push(calls[i][0]);
        }

        var watchCollections = [];
        calls = $scope.$watchCollection.calls.allArgs();
        for(i = 0; i < calls.length; i++) {
            watchCollections.push(calls[i][0]);
        }

        expect(watches).toEqual([
            "node.fqdn",
            "node.devices",
            "node.actions",
            "node.nodegroup.id",
            "node.architecture",
            "node.min_hwe_kernel",
            "node.zone.id",
            "node.power_type",
            "node.power_parameters",
            "summary.cluster.selected.connected",
            "node.summary_xml",
            "node.summary_yaml",
            "node.commissioning_results",
            "node.installation_results"
        ]);
        expect(watchCollections).toEqual([
            $scope.summary.cluster.options,
            $scope.summary.architecture.options,
            $scope.summary.min_hwe_kernel.options,
            $scope.summary.zone.options
        ]);
    });

    it("calls startPolling once managers loaded", function() {
        spyOn(NodesManager, "setActiveItem").and.returnValue(
            $q.defer().promise);
spyOn(GeneralManager, "startPolling"); var defer = $q.defer(); var controller = makeController(defer); defer.resolve(); $rootScope.$digest(); expect(GeneralManager.startPolling.calls.allArgs()).toEqual( [["architectures"], ["hwe_kernels"], ["osinfo"]]); }); it("calls stopPolling when the $scope is destroyed", function() { spyOn(GeneralManager, "stopPolling"); var controller = makeController(); $scope.$destroy(); expect(GeneralManager.stopPolling.calls.allArgs()).toEqual( [["architectures"], ["hwe_kernels"], ["osinfo"]]); }); it("updates $scope.devices", function() { var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); var defer = $q.defer(); var controller = makeController(defer); node.devices = [ { fqdn: "device1.maas", interfaces: [] }, { fqdn: "device2.maas", interfaces: [ { mac_address: "00:11:22:33:44:55", links: [] } ] }, { fqdn: "device3.maas", interfaces: [ { mac_address: "00:11:22:33:44:66", links: [] }, { mac_address: "00:11:22:33:44:77", links: [ { ip_address: "192.168.122.1" }, { ip_address: "192.168.122.2" }, { ip_address: "192.168.122.3" } ] } ] } ]; defer.resolve(); $rootScope.$digest(); setActiveDefer.resolve(node); $rootScope.$digest(); expect($scope.devices).toEqual([ { name: "device1.maas" }, { name: "device2.maas", mac_address: "00:11:22:33:44:55" }, { name: "device3.maas", mac_address: "00:11:22:33:44:66" }, { name: "", mac_address: "00:11:22:33:44:77", ip_address: "192.168.122.1" }, { name: "", mac_address: "", ip_address: "192.168.122.2" }, { name: "", mac_address: "", ip_address: "192.168.122.3" } ]); }); describe("tagsAutocomplete", function() { it("calls TagsManager.autocomplete with query", function() { var controller = makeController(); spyOn(TagsManager, "autocomplete"); var query = makeName("query"); $scope.tagsAutocomplete(query); expect(TagsManager.autocomplete).toHaveBeenCalledWith(query); }); }); describe("isSuperUser", function() { it("returns true if the user is a superuser", function() { var controller = makeController(); spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: true }); expect($scope.isSuperUser()).toBe(true); }); it("returns false if the user is not a superuser", function() { var controller = makeController(); spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: false }); expect($scope.isSuperUser()).toBe(false); }); }); describe("getPowerStateClass", function() { it("returns blank if no node", function() { var controller = makeController(); expect($scope.getPowerStateClass()).toBe(""); }); it("returns check if checkingPower is true", function() { var controller = makeController(); $scope.node = node; $scope.checkingPower = true; expect($scope.getPowerStateClass()).toBe("checking"); }); it("returns power_state from node ", function() { var controller = makeController(); var state = makeName("state"); $scope.node = node; node.power_state = state; expect($scope.getPowerStateClass()).toBe(state); }); }); describe("getPowerStateText", function() { it("returns blank if no node", function() { var controller = makeController(); expect($scope.getPowerStateText()).toBe(""); }); it("returns 'Checking' if checkingPower is true", function() { var controller = makeController(); $scope.node = node; $scope.checkingPower = true; node.power_state = "unknown"; expect($scope.getPowerStateText()).toBe("Checking power"); }); it("returns blank if power_state is unknown", function() { var controller = makeController(); $scope.node = node; node.power_state = "unknown"; 
expect($scope.getPowerStateText()).toBe(""); }); it("returns power_state prefixed with Power ", function() { var controller = makeController(); var state = makeName("state"); $scope.node = node; node.power_state = state; expect($scope.getPowerStateText()).toBe("Power " + state); }); }); describe("canCheckPowerState", function() { it("returns false if no node", function() { var controller = makeController(); expect($scope.canCheckPowerState()).toBe(false); }); it("returns false if power_state is unknown", function() { var controller = makeController(); $scope.node = node; node.power_state = "unknown"; expect($scope.canCheckPowerState()).toBe(false); }); it("returns false if checkingPower is true", function() { var controller = makeController(); $scope.node = node; $scope.checkingPower = true; expect($scope.canCheckPowerState()).toBe(false); }); it("returns true if not checkingPower and power_state not unknown", function() { var controller = makeController(); $scope.node = node; expect($scope.canCheckPowerState()).toBe(true); }); }); describe("checkPowerState", function() { it("sets checkingPower to true", function() { var controller = makeController(); spyOn(NodesManager, "checkPowerState").and.returnValue( $q.defer().promise); $scope.checkPowerState(); expect($scope.checkingPower).toBe(true); }); it("sets checkingPower to false once checkPowerState resolves", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "checkPowerState").and.returnValue( defer.promise); $scope.checkPowerState(); defer.resolve(); $rootScope.$digest(); expect($scope.checkingPower).toBe(false); }); }); describe("getOSText", function() { it("returns blank if no node", function() { var controller = makeController(); expect($scope.getOSText()).toBe(""); }); it("returns osystem/series if no osinfo", function() { var controller = makeController(); var osystem = makeName("osystem"); var series = makeName("distro_series"); var os_series = osystem + "/" + series; $scope.node = node; node.osystem = osystem; node.distro_series = series; expect($scope.getOSText()).toBe(os_series); }); it("returns release title if osinfo", function() { var controller = makeController(); var osystem = makeName("osystem"); var series = makeName("distro_series"); var os_series = osystem + "/" + series; var title = makeName("title"); $scope.node = node; $scope.osinfo = { releases: [ [os_series, title] ] }; node.osystem = osystem; node.distro_series = series; expect($scope.getOSText()).toBe(title); }); it("returns osystem/series not in osinfo", function() { var controller = makeController(); var osystem = makeName("osystem"); var series = makeName("distro_series"); var os_series = osystem + "/" + series; $scope.node = node; $scope.osinfo = { releases: [ [makeName("release"), makeName("title")] ] }; node.osystem = osystem; node.distro_series = series; expect($scope.getOSText()).toBe(os_series); }); }); describe("isUbuntuOS", function() { it("returns true when ubuntu", function() { var controller = makeController(); $scope.node = node; node.osystem = 'ubuntu'; node.distro_series = makeName("distro_series"); expect($scope.isUbuntuOS()).toBe(true); }); it("returns false when otheros", function() { var controller = makeController(); $scope.node = node; node.osystem = makeName("osystem"); node.distro_series = makeName("distro_series"); expect($scope.isUbuntuOS()).toBe(false); }); }); describe("isActionError", function() { it("returns true if actionError", function() { var controller = makeController(); 
$scope.actionError = makeName("error"); expect($scope.isActionError()).toBe(true); }); it("returns false if not actionError", function() { var controller = makeController(); $scope.actionError = null; expect($scope.isActionError()).toBe(false); }); }); describe("isDeployError", function() { it("returns false if already actionError", function() { var controller = makeController(); $scope.actionError = makeName("error"); expect($scope.isDeployError()).toBe(false); }); it("returns true if deploy action and missing osinfo", function() { var controller = makeController(); $scope.actionOption = { name: "deploy" }; expect($scope.isDeployError()).toBe(true); }); it("returns true if deploy action and no osystems", function() { var controller = makeController(); $scope.actionOption = { name: "deploy" }; $scope.osinfo = { osystems: [] }; expect($scope.isDeployError()).toBe(true); }); it("returns false if actionOption null", function() { var controller = makeController(); expect($scope.isDeployError()).toBe(false); }); it("returns false if not deploy action", function() { var controller = makeController(); $scope.actionOption = { name: "release" }; expect($scope.isDeployError()).toBe(false); }); it("returns false if osystems present", function() { var controller = makeController(); $scope.actionOption = { name: "deploy" }; $scope.osinfo = { osystems: [makeName("os")] }; expect($scope.isDeployError()).toBe(false); }); }); describe("isSSHKeyError", function() { it("returns true if deploy action and missing ssh keys", function() { var controller = makeController(); $scope.actionOption = { name: "deploy" }; var firstUser = makeUser(); firstUser.sshkeys_count = 0; UsersManager._authUser = firstUser; expect($scope.isSSHKeyError()).toBe(true); }); it("returns false if actionOption null", function() { var controller = makeController(); var firstUser = makeUser(); firstUser.sshkeys_count = 1; UsersManager._authUser = firstUser; expect($scope.isSSHKeyError()).toBe(false); }); it("returns false if not deploy action", function() { var controller = makeController(); $scope.actionOption = { name: "release" }; var firstUser = makeUser(); firstUser.sshkeys_count = 1; UsersManager._authUser = firstUser; expect($scope.isSSHKeyError()).toBe(false); }); it("returns false if ssh keys present", function() { var controller = makeController(); $scope.actionOption = { name: "deploy" }; var firstUser = makeUser(); firstUser.sshkeys_count = 1; UsersManager._authUser = firstUser; expect($scope.isSSHKeyError()).toBe(false); }); }); describe("actionOptionChanged", function() { it("clears actionError", function() { var controller = makeController(); $scope.actionError = makeName("error"); $scope.actionOptionChanged(); expect($scope.actionError).toBeNull(); }); }); describe("actionCancel", function() { it("sets actionOption to null", function() { var controller = makeController(); $scope.actionOption = {}; $scope.actionCancel(); expect($scope.actionOption).toBeNull(); }); it("clears actionError", function() { var controller = makeController(); $scope.actionError = makeName("error"); $scope.actionCancel(); expect($scope.actionError).toBeNull(); }); }); describe("actionGo", function() { it("calls performAction with node and actionOption name", function() { var controller = makeController(); spyOn(NodesManager, "performAction").and.returnValue( $q.defer().promise); $scope.node = node; $scope.actionOption = { name: "release" }; $scope.actionGo(); expect(NodesManager.performAction).toHaveBeenCalledWith( node, "release", {}); }); 
it("calls performAction with osystem and distro_series", function() { var controller = makeController(); spyOn(NodesManager, "performAction").and.returnValue( $q.defer().promise); $scope.node = node; $scope.actionOption = { name: "deploy" }; $scope.osSelection.osystem = "ubuntu"; $scope.osSelection.release = "ubuntu/trusty"; $scope.actionGo(); expect(NodesManager.performAction).toHaveBeenCalledWith( node, "deploy", { osystem: "ubuntu", distro_series: "trusty" }); }); it("calls performAction with commissionOptions", function() { var controller = makeController(); spyOn(NodesManager, "performAction").and.returnValue( $q.defer().promise); $scope.node = node; $scope.actionOption = { name: "commission" }; $scope.commissionOptions.enableSSH = true; $scope.commissionOptions.skipNetworking = false; $scope.commissionOptions.skipStorage = false; $scope.actionGo(); expect(NodesManager.performAction).toHaveBeenCalledWith( node, "commission", { enable_ssh: true, skip_networking: false, skip_storage: false }); }); it("clears actionOption on resolve", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); $scope.node = node; $scope.actionOption = { name: "deploy" }; $scope.actionGo(); defer.resolve(); $rootScope.$digest(); expect($scope.actionOption).toBeNull(); }); it("clears osSelection on resolve", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); $scope.node = node; $scope.actionOption = { name: "deploy" }; $scope.osSelection.osystem = "ubuntu"; $scope.osSelection.release = "ubuntu/trusty"; $scope.actionGo(); defer.resolve(); $rootScope.$digest(); expect($scope.osSelection.$reset).toHaveBeenCalled(); }); it("clears commissionOptions on resolve", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); $scope.node = node; $scope.actionOption = { name: "commission" }; $scope.commissionOptions.enableSSH = true; $scope.commissionOptions.skipNetworking = true; $scope.commissionOptions.skipStorage = true; $scope.actionGo(); defer.resolve(); $rootScope.$digest(); expect($scope.commissionOptions).toEqual({ enableSSH: false, skipNetworking: false, skipStorage: false }); }); it("clears actionError on resolve", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); $scope.node = node; $scope.actionOption = { name: "deploy" }; $scope.actionError = makeName("error"); $scope.actionGo(); defer.resolve(); $rootScope.$digest(); expect($scope.actionError).toBeNull(); }); it("changes path to node listing on delete", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); spyOn($location, "path"); $scope.node = node; $scope.actionOption = { name: "delete" }; $scope.actionGo(); defer.resolve(); $rootScope.$digest(); expect($location.path).toHaveBeenCalledWith("/nodes"); }); it("sets actionError when rejected", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "performAction").and.returnValue( defer.promise); $scope.node = node; $scope.actionOption = { name: "deploy" }; var error = makeName("error"); $scope.actionGo(); defer.reject(error); $rootScope.$digest(); expect($scope.actionError).toBe(error); }); }); describe("isSuperUser", function() { 
it("returns false if no authUser", function() { var controller = makeController(); UsersManager._authUser = null; expect($scope.isSuperUser()).toBe(false); }); it("returns false if authUser.is_superuser is false", function() { var controller = makeController(); UsersManager._authUser.is_superuser = false; expect($scope.isSuperUser()).toBe(false); }); it("returns true if authUser.is_superuser is true", function() { var controller = makeController(); UsersManager._authUser.is_superuser = true; expect($scope.isSuperUser()).toBe(true); }); }); describe("invalidArchitecture", function() { it("returns true if selected architecture empty", function() { var controller = makeController(); $scope.summary.architecture.selected = ""; expect($scope.invalidArchitecture()).toBe(true); }); it("returns true if selected architecture not in options", function() { var controller = makeController(); $scope.summary.architecture.options = [makeName("arch")]; $scope.summary.architecture.selected = makeName("arch"); expect($scope.invalidArchitecture()).toBe(true); }); it("returns false if selected architecture in options", function() { var controller = makeController(); var arch = makeName("arch"); $scope.summary.architecture.options = [arch]; $scope.summary.architecture.selected = arch; expect($scope.invalidArchitecture()).toBe(false); }); }); describe("canEdit", function() { it("returns false if not super user", function() { var controller = makeController(); spyOn($scope, "isSuperUser").and.returnValue(false); expect($scope.canEdit()).toBe(false); }); it("returns false if cluster_disconnected error viewable", function() { var controller = makeController(); $scope.errors.cluster_disconnected.viewable = true; expect($scope.canEdit()).toBe(false); }); it("returns true if super user and not cluster_disconnected error", function() { var controller = makeController(); $scope.errors.cluster_disconnected.viewable = false; expect($scope.canEdit()).toBe(true); }); }); describe("editName", function() { it("doesnt sets editing to true if cannot edit", function() { var controller = makeController(); spyOn($scope, "canEdit").and.returnValue(false); $scope.nameHeader.editing = false; $scope.editName(); expect($scope.nameHeader.editing).toBe(false); }); it("sets editing to true for nameHeader section", function() { var controller = makeController(); $scope.node = node; spyOn($scope, "canEdit").and.returnValue(true); $scope.nameHeader.editing = false; $scope.editName(); expect($scope.nameHeader.editing).toBe(true); }); it("sets nameHeader.value to node hostname", function() { var controller = makeController(); $scope.node = node; spyOn($scope, "canEdit").and.returnValue(true); $scope.editName(); expect($scope.nameHeader.value).toBe(node.hostname); }); it("doesnt reset nameHeader.value on multiple calls", function() { var controller = makeController(); $scope.node = node; spyOn($scope, "canEdit").and.returnValue(true); $scope.editName(); var updatedName = makeName("name"); $scope.nameHeader.value = updatedName; $scope.editName(); expect($scope.nameHeader.value).toBe(updatedName); }); }); describe("editNameInvalid", function() { it("returns false if not editing", function() { var controller = makeController(); $scope.nameHeader.editing = false; $scope.nameHeader.value = "abc_invalid.local"; expect($scope.editNameInvalid()).toBe(false); }); it("returns true for bad values", function() { var controller = makeController(); $scope.nameHeader.editing = true; var values = [ { input: "aB0-z", output: false }, { input: "abc_alpha", 
output: true }, { input: "ab^&c", output: true }, { input: "abc.local", output: true } ]; angular.forEach(values, function(value) { $scope.nameHeader.value = value.input; expect($scope.editNameInvalid()).toBe(value.output); }); }); }); describe("cancelEditName", function() { it("sets editing to false for nameHeader section", function() { var controller = makeController(); $scope.node = node; $scope.nameHeader.editing = true; $scope.cancelEditName(); expect($scope.nameHeader.editing).toBe(false); }); it("sets nameHeader.value back to fqdn", function() { var controller = makeController(); $scope.node = node; $scope.nameHeader.editing = true; $scope.nameHeader.value = makeName("name"); $scope.cancelEditName(); expect($scope.nameHeader.value).toBe(node.fqdn); }); }); describe("saveEditName", function() { it("does nothing if value is invalid", function() { var controller = makeController(); $scope.node = node; spyOn($scope, "editNameInvalid").and.returnValue(true); var sentinel = {}; $scope.nameHeader.editing = sentinel; $scope.saveEditName(); expect($scope.nameHeader.editing).toBe(sentinel); }); it("sets editing to false", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); spyOn($scope, "editNameInvalid").and.returnValue(false); $scope.node = node; $scope.nameHeader.editing = true; $scope.nameHeader.value = makeName("name"); $scope.saveEditName(); expect($scope.nameHeader.editing).toBe(false); }); it("calls updateItem with copy of node", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); spyOn($scope, "editNameInvalid").and.returnValue(false); $scope.node = node; $scope.nameHeader.editing = true; $scope.nameHeader.value = makeName("name"); $scope.saveEditName(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode).not.toBe(node); }); it("calls updateItem with new hostname on node", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); spyOn($scope, "editNameInvalid").and.returnValue(false); var newName = makeName("name"); $scope.node = node; $scope.nameHeader.editing = true; $scope.nameHeader.value = newName; $scope.saveEditName(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode.hostname).toBe(newName); }); it("calls updateName once updateItem resolves", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); spyOn($scope, "editNameInvalid").and.returnValue(false); $scope.node = node; $scope.nameHeader.editing = true; $scope.nameHeader.value = makeName("name"); $scope.saveEditName(); defer.resolve(node); $rootScope.$digest(); // Since updateName is private in the controller, check // that the nameHeader.value is set to the node's fqdn.
expect($scope.nameHeader.value).toBe(node.fqdn); }); }); describe("editSummary", function() { it("doesnt set editing to true if cannot edit", function() { var controller = makeController(); spyOn($scope, "canEdit").and.returnValue(false); $scope.summary.editing = false; $scope.editSummary(); expect($scope.summary.editing).toBe(false); }); it("sets editing to true for summary section", function() { var controller = makeController(); spyOn($scope, "canEdit").and.returnValue(true); $scope.summary.editing = false; $scope.editSummary(); expect($scope.summary.editing).toBe(true); }); }); describe("cancelEditSummary", function() { it("sets editing to false for summary section", function() { var controller = makeController(); $scope.node = node; $scope.summary.architecture.options = [node.architecture]; $scope.summary.editing = true; $scope.cancelEditSummary(); expect($scope.summary.editing).toBe(false); }); it("doesnt set editing to false if invalid architecture", function() { var controller = makeController(); $scope.node = node; $scope.summary.editing = true; $scope.cancelEditSummary(); expect($scope.summary.editing).toBe(true); }); it("calls updateSummary", function() { var controller = makeController(); $scope.node = node; $scope.summary.architecture.options = [node.architecture]; $scope.summary.editing = true; $scope.cancelEditSummary(); // Since updateSummary is private in the controller, check // that the selected cluster is set, this will prove that // the method was called. expect($scope.summary.cluster.selected).toBe( ClustersManager.getItemFromList(node.nodegroup.id)); }); }); describe("saveEditSummary", function() { // Configures the summary area in the scope to have a new cluster, // zone, and architecture. function configureSummary() { $scope.summary.editing = true; $scope.summary.cluster.selected = makeCluster(); $scope.summary.zone.selected = makeZone(); $scope.summary.architecture.selected = makeName("architecture"); $scope.summary.tags = [ { text: makeName("tag") }, { text: makeName("tag") } ]; } it("does nothing if invalidArchitecture", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(true); $scope.node = node; var editing = {}; $scope.summary.editing = editing; $scope.saveEditSummary(); // Editing remains the same because the method exited early.
expect($scope.summary.editing).toBe(editing); }); it("sets editing to false", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); $scope.node = node; $scope.summary.editing = true; $scope.saveEditSummary(); expect($scope.summary.editing).toBe(false); }); it("calls updateItem with copy of node", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); $scope.node = node; $scope.summary.editing = true; $scope.saveEditSummary(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode).not.toBe(node); }); it("calls updateItem with new copied values on node", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); $scope.node = node; configureSummary(); var newCluster = $scope.summary.cluster.selected; var newZone = $scope.summary.zone.selected; var newArchitecture = $scope.summary.architecture.selected; var newTags = []; angular.forEach($scope.summary.tags, function(tag) { newTags.push(tag.text); }); $scope.saveEditSummary(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode.nodegroup).toEqual(newCluster); expect(calledWithNode.nodegroup).not.toBe(newCluster); expect(calledWithNode.zone).toEqual(newZone); expect(calledWithNode.zone).not.toBe(newZone); expect(calledWithNode.architecture).toBe(newArchitecture); expect(calledWithNode.tags).toEqual(newTags); }); it("calls updateSummary once updateItem resolves", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; configureSummary(); $scope.saveEditSummary(); defer.resolve(node); $rootScope.$digest(); // Since updateSummary is private in the controller, check // that the selected cluster is set, this will prove that // the method was called. expect($scope.summary.cluster.selected).toBe( ClustersManager.getItemFromList(node.nodegroup.id)); }); it("sets cluster connected once updateItem resolves", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); var cluster = ClustersManager.getItemFromList( node.nodegroup.id); cluster.connected = false; $scope.node = node; configureSummary(); $scope.summary.cluster.selected = node.nodegroup; $scope.saveEditSummary(); defer.resolve(node); $rootScope.$digest(); expect(cluster.connected).toBe(true); }); it("calls updateSummary once updateItem is rejected", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; configureSummary(); $scope.saveEditSummary(); spyOn(console, "log"); defer.reject(makeName("error")); $rootScope.$digest(); // Since updateSummary is private in the controller, check // that the selected cluster is set, this will prove that // the method was called. 
expect($scope.summary.cluster.selected).toBe( ClustersManager.getItemFromList(node.nodegroup.id)); }); it("logs error if not disconnected error", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; configureSummary(); $scope.saveEditSummary(); spyOn(console, "log"); var error = makeName("error"); defer.reject(error); $rootScope.$digest(); expect(console.log).toHaveBeenCalledWith(error); }); it("doesnt log error if disconnected error", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; configureSummary(); $scope.saveEditSummary(); spyOn(console, "log"); defer.reject("Unable to get RPC connection for cluster"); $rootScope.$digest(); expect(console.log).not.toHaveBeenCalled(); }); it("sets cluster disconnected if disconnected error", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); var cluster = ClustersManager.getItemFromList( node.nodegroup.id); cluster.connected = true; $scope.node = node; configureSummary(); $scope.saveEditSummary(); defer.reject("Unable to get RPC connection for cluster"); $rootScope.$digest(); expect(cluster.connected).toBe(false); }); it("sets cluster connected if not disconnected error", function() { var controller = makeController(); spyOn($scope, "invalidArchitecture").and.returnValue(false); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); var cluster = ClustersManager.getItemFromList( node.nodegroup.id); cluster.connected = false; $scope.node = node; configureSummary(); $scope.summary.cluster.selected = node.nodegroup; $scope.saveEditSummary(); spyOn(console, "log"); defer.reject(makeName("error")); $rootScope.$digest(); expect(cluster.connected).toBe(true); }); }); describe("invalidPowerType", function() { it("returns true if missing power type", function() { var controller = makeController(); $scope.power.type = null; expect($scope.invalidPowerType()).toBe(true); }); it("returns false if selected power type", function() { var controller = makeController(); $scope.power.type = { name: makeName("power") }; expect($scope.invalidPowerType()).toBe(false); }); }); describe("editPower", function() { it("doesnt set editing to true if cannot edit", function() { var controller = makeController(); spyOn($scope, "canEdit").and.returnValue(false); $scope.power.editing = false; $scope.editPower(); expect($scope.power.editing).toBe(false); }); it("sets editing to true for power section", function() { var controller = makeController(); spyOn($scope, "canEdit").and.returnValue(true); $scope.power.editing = false; $scope.editPower(); expect($scope.power.editing).toBe(true); }); }); describe("cancelEditPower", function() { it("sets editing to false for power section", function() { var controller = makeController(); node.power_type = makeName("power"); $scope.node = node; $scope.power.editing = true; $scope.cancelEditPower(); expect($scope.power.editing).toBe(false); }); it("doesnt set editing to false when no power_type", function() { var controller = makeController(); $scope.node = node; $scope.power.editing = true; $scope.cancelEditPower();
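// With no power_type set on the node, cancelEditPower is expected to leave editing enabled rather than close the section.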
expect($scope.power.editing).toBe(true); }); it("calls updatePower", function() { var controller = makeController(); $scope.node = node; $scope.power.editing = true; // Set power_types so we can check that updatePower is called. var cluster = ClustersManager.getItemFromList( node.nodegroup.id); cluster.power_types = [ { type: makeName("power") } ]; $scope.cancelEditPower(); // Since updatePower is private in the controller, check // that the power types are set from the cluster, this will // prove that the method was called. expect($scope.power.types).toEqual(cluster.power_types); }); }); describe("saveEditPower", function() { it("does nothing if no selected power_type", function() { var controller = makeController(); $scope.node = node; var editing = {}; $scope.power.editing = editing; $scope.power.type = null; $scope.saveEditPower(); // Editing should be unchanged, because the function exited // early. expect($scope.power.editing).toBe(editing); }); it("sets editing to false", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.saveEditPower(); expect($scope.power.editing).toBe(false); }); it("calls updateItem with copy of node", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.saveEditPower(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode).not.toBe(node); }); it("calls updateItem with new copied values on node", function() { var controller = makeController(); spyOn(NodesManager, "updateItem").and.returnValue( $q.defer().promise); var newPowerType = { name: makeName("power") }; var newPowerParameters = { foo: makeName("bar") }; $scope.node = node; $scope.power.editing = true; $scope.power.type = newPowerType; $scope.power.parameters = newPowerParameters; $scope.saveEditPower(); var calledWithNode = NodesManager.updateItem.calls.argsFor(0)[0]; expect(calledWithNode.power_type).toBe(newPowerType.name); expect(calledWithNode.power_parameters).toEqual( newPowerParameters); expect(calledWithNode.power_parameters).not.toBe( newPowerParameters); }); it("calls updateSummary once updateItem resolves", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.power.parameters = { foo: makeName("bar") }; $scope.saveEditPower(); defer.resolve(node); $rootScope.$digest(); // Since updateSummary is private in the controller, check // that the selected cluster is set, this will prove that // the method was called.
expect($scope.summary.cluster.selected).toBe( ClustersManager.getItemFromList(node.nodegroup.id)); }); it("sets cluster connected once updateItem resolves", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); var cluster = ClustersManager.getItemFromList( node.nodegroup.id); cluster.connected = false; $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.power.parameters = { foo: makeName("bar") }; $scope.saveEditPower(); defer.resolve(node); $rootScope.$digest(); expect(cluster.connected).toBe(true); }); it("calls updateSummary once updateItem is rejected", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.power.parameters = { foo: makeName("bar") }; $scope.saveEditPower(); spyOn(console, "log"); defer.reject(makeName("error")); $rootScope.$digest(); // Since updateSummary is private in the controller, check // that the selected cluster is set, this will prove that // the method was called. expect($scope.summary.cluster.selected).toBe( ClustersManager.getItemFromList(node.nodegroup.id)); }); it("calls handleSaveError once updateItem is rejected", function() { var controller = makeController(); var defer = $q.defer(); spyOn(NodesManager, "updateItem").and.returnValue( defer.promise); $scope.node = node; $scope.power.editing = true; $scope.power.type = { name: makeName("power") }; $scope.power.parameters = { foo: makeName("bar") }; $scope.saveEditPower(); spyOn(console, "log"); var error = makeName("error"); defer.reject(error); $rootScope.$digest(); // If the error message was logged to the console then // handleSaveError was called. 
expect(console.log).toHaveBeenCalledWith(error); }); }); describe("allowShowMoreEvents", function() { it("returns false if node is null", function() { var controller = makeController(); $scope.node = null; expect($scope.allowShowMoreEvents()).toBe(false); }); it("returns false if node.events is not array", function() { var controller = makeController(); $scope.node = node; $scope.node.events = undefined; expect($scope.allowShowMoreEvents()).toBe(false); }); it("returns false if node has no events", function() { var controller = makeController(); $scope.node = node; expect($scope.allowShowMoreEvents()).toBe(false); }); it("returns false if node events less than the limit", function() { var controller = makeController(); $scope.node = node; $scope.node.events = [ makeEvent(), makeEvent() ]; $scope.events.limit = 10; expect($scope.allowShowMoreEvents()).toBe(false); }); it("returns false if events limit is already 50", function() { var controller = makeController(); $scope.node = node; var i; for(i = 0; i < 50; i++) { $scope.node.events.push(makeEvent()); } $scope.events.limit = 50; expect($scope.allowShowMoreEvents()).toBe(false); }); it("returns true if more events than limit", function() { var controller = makeController(); $scope.node = node; var i; for(i = 0; i < 20; i++) { $scope.node.events.push(makeEvent()); } $scope.events.limit = 10; expect($scope.allowShowMoreEvents()).toBe(true); }); }); describe("showMoreEvents", function() { it("increments events limit by 10", function() { var controller = makeController(); $scope.showMoreEvents(); expect($scope.events.limit).toBe(20); $scope.showMoreEvents(); expect($scope.events.limit).toBe(30); }); }); describe("getEventText", function() { it("returns just event type description without dash", function() { var controller = makeController(); var evt = makeEvent(); delete evt.description; expect($scope.getEventText(evt)).toBe(evt.type.description); }); it("returns event type description with event description", function() { var controller = makeController(); var evt = makeEvent(); expect($scope.getEventText(evt)).toBe( evt.type.description + " - " + evt.description); }); }); describe("machineOutputViewChanged", function() { it("sets showSummaryToggle to false if no selectedView", function() { var controller = makeController(); $scope.machine_output.selectedView = null; $scope.machineOutputViewChanged(); expect($scope.machine_output.showSummaryToggle).toBe(false); }); it("sets showSummaryToggle to false if not summary view", function() { var controller = makeController(); $scope.machine_output.selectedView = { name: "output" }; $scope.machineOutputViewChanged(); expect($scope.machine_output.showSummaryToggle).toBe(false); }); it("sets showSummaryToggle to true if summary view", function() { var controller = makeController(); $scope.machine_output.selectedView = { name: "summary" }; $scope.machineOutputViewChanged(); expect($scope.machine_output.showSummaryToggle).toBe(true); }); }); describe("getSummaryData", function() { it("returns blank string if node is null", function() { var controller = makeController(); expect($scope.getSummaryData()).toBe(""); }); it("returns summary_xml when summaryType equals xml", function() { var controller = makeController(); $scope.node = makeNode(); var summary_xml = {}; $scope.node.summary_xml = summary_xml; $scope.machine_output.summaryType = "xml"; expect($scope.getSummaryData()).toBe("\n" + summary_xml); }); it("returns summary_yaml when summaryType equals yaml", function() { var controller =
makeController(); $scope.node = makeNode(); var summary_yaml = {}; $scope.node.summary_yaml = summary_yaml; $scope.machine_output.summaryType = "yaml"; expect($scope.getSummaryData()).toBe("\n" + summary_yaml); }); }); describe("getInstallationData", function() { it("returns blank string if node is null", function() { var controller = makeController(); expect($scope.getInstallationData()).toBe(""); }); it("returns blank string if installation results not an array", function() { var controller = makeController(); $scope.node = makeNode(); $scope.node.installation_results = undefined; expect($scope.getInstallationData()).toBe(""); }); it("returns blank string if no installation results", function() { var controller = makeController(); $scope.node = makeNode(); expect($scope.getInstallationData()).toBe(""); }); it("returns first installation result data", function() { var controller = makeController(); $scope.node = makeNode(); var install_result = {}; $scope.node.installation_results.push({ data: install_result }); $scope.node.installation_results.push({ data: {} }); expect($scope.getInstallationData()).toBe("\n" + install_result); }); }); }); ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_details_networking.jsmaas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_details_network0000644000000000000000000034166313056115004032060 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodeNetworkingController. */ describe("filterByUnusedForInterface", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the filterByUnusedForInterface. var filterByUnusedForInterface; beforeEach(inject(function($filter) { filterByUnusedForInterface = $filter("filterByUnusedForInterface"); })); it("returns empty if undefined nic", function() { var i, vlan, vlans = []; for(i = 0; i < 3; i++) { vlan = { fabric: 0 }; vlans.push(vlan); } expect(filterByUnusedForInterface(vlans)).toEqual([]); }); it("returns only free vlans", function() { var i, vlan, used_vlans = [], free_vlans = [], all_vlans = []; for(i = 0; i < 3; i++) { vlan = { id: i, fabric: 0 }; used_vlans.push(vlan); all_vlans.push(vlan); } for(i = 3; i < 6; i++) { vlan = { id: i, fabric: 0 }; free_vlans.push(vlan); all_vlans.push(vlan); } var nic = { id: 0 }; var originalInterfaces = { 0: { type: "vlan", parents: [0], vlan_id: used_vlans[0].id }, 1: { type: "vlan", parents: [0], vlan_id: used_vlans[1].id }, 2: { type: "vlan", parents: [0], vlan_id: used_vlans[2].id }, 3: { type: "physical", vlan_id: free_vlans[0].id } }; expect( filterByUnusedForInterface( all_vlans, nic, originalInterfaces)).toEqual(free_vlans); }); }); describe("removeBondParents", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the removeBondParents. 
var removeBondParents; beforeEach(inject(function($filter) { removeBondParents = $filter("removeBondParents"); })); it("returns interfaces unchanged if undefined bondInterface", function() { var i, nic, interfaces = []; for(i = 0; i < 3; i++) { nic = { id: i, link_id: i }; interfaces.push(nic); } expect(removeBondParents(interfaces)).toEqual(interfaces); }); it("removes parents from interfaces", function() { var vlan = { id: makeInteger(0, 100) }; var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var nic2 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var interfaces = [nic1, nic2]; var bondInterface = { parents: interfaces }; expect(removeBondParents(interfaces, bondInterface)).toEqual([]); }); }); describe("removeDefaultVLANIfVLAN", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the removeDefaultVLANIfVLAN. var removeDefaultVLANIfVLAN; beforeEach(inject(function($filter) { removeDefaultVLANIfVLAN = $filter("removeDefaultVLANIfVLAN"); })); it("returns vlans if undefined type", function() { var i, vlan, vlans = []; for(i = 0; i < 3; i++) { vlan = { id: i, vid: i, fabric: 0 }; vlans.push(vlan); } expect(removeDefaultVLANIfVLAN(vlans)).toEqual(vlans); }); it("removes default vlans from vlans", function() { var i, vlan, vlans = []; for(i = 0; i < 3; i++) { vlan = { id: i, vid: i, fabric: 0 }; vlans.push(vlan); } expect( removeDefaultVLANIfVLAN( vlans, "vlan")).toEqual([vlans[1], vlans[2]]); }); }); describe("filterLinkModes", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the filterLinkModes. var filterLinkModes; beforeEach(inject(function($filter) { filterLinkModes = $filter("filterLinkModes"); })); // Load the modes before each test. var modes; beforeEach(function() { modes = [ { mode: "auto", text: "Auto assign" }, { mode: "static", text: "Static assign" }, { mode: "dhcp", text: "DHCP" }, { mode: "link_up", text: "Unconfigured" } ]; }); it("only link_up when no subnet", function() { var nic = { subnet : null }; expect(filterLinkModes(modes, nic)).toEqual([ { "mode": "link_up", "text": "Unconfigured" } ]); }); it("all modes if only one link", function() { var nic = { subnet : {}, links: [{}] }; expect(filterLinkModes(modes, nic)).toEqual([ { "mode": "auto", "text": "Auto assign" }, { "mode": "static", "text": "Static assign" }, { "mode": "dhcp", "text": "DHCP" }, { "mode": "link_up", "text": "Unconfigured" } ]); }); it("auto, static, and dhcp modes if more than one link", function() { var nic = { subnet : {}, links: [{}, {}] }; expect(filterLinkModes(modes, nic)).toEqual([ { "mode": "auto", "text": "Auto assign" }, { "mode": "static", "text": "Static assign" }, { "mode": "dhcp", "text": "DHCP" } ]); }); it("auto and static modes if interface is alias", function() { var nic = { type: "alias", subnet : {} }; expect(filterLinkModes(modes, nic)).toEqual([ { "mode": "auto", "text": "Auto assign" }, { "mode": "static", "text": "Static assign" } ]); }); }); describe("NodeNetworkingController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $parentScope, $scope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $parentScope = $rootScope.$new(); $scope = $parentScope.$new(); $q = $injector.get("$q"); })); // Load the required dependencies for the NodeNetworkingController.
var FabricsManager, VLANsManager, SubnetsManager, UsersManager; var NodesManager, GeneralManager, ManagerHelperService; beforeEach(inject(function($injector) { FabricsManager = $injector.get("FabricsManager"); VLANsManager = $injector.get("VLANsManager"); SubnetsManager = $injector.get("SubnetsManager"); NodesManager = $injector.get("NodesManager"); GeneralManager = $injector.get("GeneralManager"); UsersManager = $injector.get("UsersManager"); ManagerHelperService = $injector.get("ManagerHelperService"); })); var node; beforeEach(function() { node = { interfaces: [] }; $parentScope.node = node; }); // Makes the NodeNetworkingController. function makeController(loadManagersDefer) { var loadManagers = spyOn(ManagerHelperService, "loadManagers"); if(angular.isObject(loadManagersDefer)) { loadManagers.and.returnValue(loadManagersDefer.promise); } else { loadManagers.and.returnValue($q.defer().promise); } // Create the controller. var controller = $controller("NodeNetworkingController", { $scope: $scope, FabricsManager: FabricsManager, VLANsManager: VLANsManager, SubnetsManager: SubnetsManager, NodesManager: NodesManager, GeneralManager: GeneralManager, ManagerHelperService: ManagerHelperService }); return controller; } it("sets initial values", function() { var controller = makeController(); expect($scope.loaded).toBe(false); expect($scope.nodeHasLoaded).toBe(false); expect($scope.managersHaveLoaded).toBe(false); expect($scope.column).toBe('name'); expect($scope.fabrics).toBe(FabricsManager.getItems()); expect($scope.vlans).toBe(VLANsManager.getItems()); expect($scope.subnets).toBe(SubnetsManager.getItems()); expect($scope.interfaces).toEqual([]); expect($scope.interfaceLinksMap).toEqual({}); expect($scope.originalInterfaces).toEqual({}); expect($scope.showingMembers).toEqual([]); expect($scope.focusInterface).toBeNull(); expect($scope.selectedInterfaces).toEqual([]); expect($scope.selectedMode).toBeNull(); expect($scope.newInterface).toEqual({}); expect($scope.newBondInterface).toEqual({}); expect($scope.bondOptions).toBe( GeneralManager.getData("bond_options")); }); it("sets loaded once node loaded then managers loaded", function() { var defer = $q.defer(); var controller = makeController(defer); // All should be false. expect($scope.loaded).toBe(false); expect($scope.nodeHasLoaded).toBe(false); expect($scope.managersHaveLoaded).toBe(false); // Only nodeHasLoaded should be true. $scope.nodeLoaded(); expect($scope.loaded).toBe(false); expect($scope.nodeHasLoaded).toBe(true); expect($scope.managersHaveLoaded).toBe(false); // All three should be true. defer.resolve(); $rootScope.$digest(); expect($scope.loaded).toBe(true); expect($scope.nodeHasLoaded).toBe(true); expect($scope.managersHaveLoaded).toBe(true); }); it("sets loaded once managers loaded then node loaded", function() { var defer = $q.defer(); var controller = makeController(defer); // All should be false. expect($scope.loaded).toBe(false); expect($scope.nodeHasLoaded).toBe(false); expect($scope.managersHaveLoaded).toBe(false); // Only managersHaveLoaded should be true. defer.resolve(); $rootScope.$digest(); expect($scope.loaded).toBe(false); expect($scope.nodeHasLoaded).toBe(false); expect($scope.managersHaveLoaded).toBe(true); // All three should be true.
$scope.nodeLoaded(); expect($scope.loaded).toBe(true); expect($scope.nodeHasLoaded).toBe(true); expect($scope.managersHaveLoaded).toBe(true); }); it("starts watching interfaces once nodeLoaded called", function() { var controller = makeController(); spyOn($scope, "$watch"); $scope.nodeLoaded(); var watches = []; var i, calls = $scope.$watch.calls.allArgs(); for(i = 0; i < calls.length; i++) { watches.push(calls[i][0]); } expect(watches).toEqual(["node.interfaces"]); }); describe("updateInterfaces", function() { // updateInterfaces is a private method in the controller, but we test // it by calling nodeLoaded, which sets up the watcher that calls // updateInterfaces and sets $scope.interfaces. function updateInterfaces(controller) { if(!angular.isObject(controller)) { controller = makeController(); } $scope.nodeLoaded(); $scope.$digest(); } it("returns empty list when node.interfaces empty", function() { node.interfaces = []; updateInterfaces(); expect($scope.interfaces).toEqual([]); }); it("adds interfaces to originalInterfaces map", function() { var nic1 = { id: 1, name: "eth0", type: "physical", parents: [], children: [], links: [] }; var nic2 = { id: 2, name: "eth1", type: "physical", parents: [], children: [], links: [] }; node.interfaces = [nic1, nic2]; updateInterfaces(); expect($scope.originalInterfaces).toEqual({ 1: nic1, 2: nic2 }); }); it("removes bond parents and places them as members", function() { var parent1 = { id: 0, name: "eth0", type: "physical", parents: [], children: [2], links: [] }; var parent2 = { id: 1, name: "eth1", type: "physical", parents: [], children: [2], links: [] }; var bond = { id: 2, name: "bond0", type: "bond", parents: [0, 1], children: [], links: [] }; node.interfaces = [parent1, parent2, bond]; updateInterfaces(); expect($scope.interfaces).toEqual([{ id: 2, name: "bond0", type: "bond", parents: [0, 1], children: [], links: [], members: [parent1, parent2], vlan: null, link_id: -1, subnet: null, mode: "link_up", ip_address: "" }]); }); it("clears focusInterface if parent is now in a bond", function() { var parent1 = { id: 0, name: "eth0", type: "physical", parents: [], children: [2], links: [] }; var parent2 = { id: 1, name: "eth1", type: "physical", parents: [], children: [2], links: [] }; var bond = { id: 2, name: "bond0", type: "bond", parents: [0, 1], children: [], links: [] }; node.interfaces = [parent1, parent2, bond]; $scope.focusInterface = { id: 0, link_id: -1 }; updateInterfaces(); expect($scope.focusInterface).toBeNull(); }); it("sets vlan and fabric on interface", function() { var fabric = { id: 0 }; var vlan = { id: 0, fabric: 0 }; var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: [], vlan_id: 0 }; FabricsManager._items = [fabric]; VLANsManager._items = [vlan]; node.interfaces = [nic]; updateInterfaces(); expect($scope.interfaces[0].vlan).toBe(vlan); expect($scope.interfaces[0].fabric).toBe(fabric); }); it("sets default to link_up if no links", function() { var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: [] }; node.interfaces = [nic]; updateInterfaces(); expect($scope.interfaces).toEqual([{ id: 0, name: "eth0", type: "physical", parents: [], children: [], links: [], vlan: null, link_id: -1, subnet: null, mode: "link_up", ip_address: "" }]); }); it("duplicates links as alias interfaces", function() { var subnet0 = { id: 0 }, subnet1 = { id: 1 }, subnet2 = { id: 2 }; SubnetsManager._items = [subnet0, subnet1, subnet2]; var links = [ { id: 0, subnet_id: 0, mode: "dhcp",
ip_address: "" }, { id: 1, subnet_id: 1, mode: "auto", ip_address: "" }, { id: 2, subnet_id: 2, mode: "static", ip_address: "192.168.122.10" } ]; var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: links }; node.interfaces = [nic]; updateInterfaces(); expect($scope.interfaces).toEqual([ { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: links, vlan: null, fabric: undefined, link_id: 0, subnet: subnet0, mode: "dhcp", ip_address: "" }, { id: 0, name: "eth0:1", type: "alias", parents: [], children: [], links: links, vlan: null, fabric: undefined, link_id: 1, subnet: subnet1, mode: "auto", ip_address: "" }, { id: 0, name: "eth0:2", type: "alias", parents: [], children: [], links: links, vlan: null, fabric: undefined, link_id: 2, subnet: subnet2, mode: "static", ip_address: "192.168.122.10" } ]); }); it("creates interfaceLinksMap", function() { var links = [ { id: 0, subnet_id: 0, mode: "dhcp", ip_address: "" }, { id: 1, subnet_id: 1, mode: "auto", ip_address: "" }, { id: 2, subnet_id: 2, mode: "static", ip_address: "192.168.122.10" } ]; var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: links }; node.interfaces = [nic]; updateInterfaces(); expect($scope.interfaceLinksMap[0][0].link_id).toBe(0); expect($scope.interfaceLinksMap[0][1].link_id).toBe(1); expect($scope.interfaceLinksMap[0][2].link_id).toBe(2); }); it("clears focusInterface if interface no longer exists", function() { node.interfaces = []; $scope.focusInterface = { id: 0, link_id: -1 }; updateInterfaces(); expect($scope.focusInterface).toBeNull(); }); it("clears focusInterface if link no longer exists", function() { var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: [] }; node.interfaces = [nic]; $scope.focusInterface = { id: 0, link_id: 0 }; updateInterfaces(); expect($scope.focusInterface).toBeNull(); }); describe("newInterface", function() { // Setup the initial data for newInterface to be set. function setupNewInterface(controller, newInterface) { var links = [ { id: 0, subnet_id: 0, mode: "dhcp", ip_address: "" }, { id: 1, subnet_id: 1, mode: "auto", ip_address: "" }, { id: 2, subnet_id: 2, mode: "static", ip_address: "192.168.122.10" } ]; var nic = { id: 0, name: "eth0", type: "physical", parents: [], children: [], links: links }; node.interfaces = [nic]; updateInterfaces(controller); var parent = $scope.interfaceLinksMap[0][0]; newInterface.parent = parent; $scope.newInterface = newInterface; } // Cause the updateInterfaces to be called again to perform // the logic on newInterface. function reloadNewInterface(controller) { // Add another nic to interfaces so that updateInterfaces // really performs an action. node.interfaces.push({ id: 1, name: "eth1", type: "physical", parents: [], children: [], links: [] }); updateInterfaces(controller); } it("updates newInterface.parent object", function() { var controller = makeController(); var newInterface = { type: "alias" }; setupNewInterface(controller, newInterface); var parent = newInterface.parent; reloadNewInterface(controller); // Should be the same value but a different object. 
expect(newInterface.parent).toEqual(parent); expect(newInterface.parent).not.toBe(parent); }); it("changes newInterface.type from alias to VLAN", function() { var controller = makeController(); var newInterface = { type: "alias" }; setupNewInterface(controller, newInterface); spyOn($scope, "canAddAlias").and.returnValue(false); spyOn($scope, "canAddVLAN").and.returnValue(true); spyOn($scope, "addTypeChanged"); reloadNewInterface(controller); expect(newInterface.type).toBe("vlan"); expect($scope.addTypeChanged).toHaveBeenCalled(); }); it("changes newInterface.type from VLAN to alias", function() { var controller = makeController(); var newInterface = { type: "vlan" }; setupNewInterface(controller, newInterface); spyOn($scope, "canAddAlias").and.returnValue(true); spyOn($scope, "canAddVLAN").and.returnValue(false); spyOn($scope, "addTypeChanged"); reloadNewInterface(controller); expect(newInterface.type).toBe("alias"); expect($scope.addTypeChanged).toHaveBeenCalled(); }); it("clears newInterface if cannot add VLAN or alias", function() { var controller = makeController(); var newInterface = { type: "vlan" }; setupNewInterface(controller, newInterface); spyOn($scope, "canAddAlias").and.returnValue(false); spyOn($scope, "canAddVLAN").and.returnValue(false); reloadNewInterface(controller); expect($scope.newInterface).toEqual({}); }); it("clears newInterface if parent removed", function() { var controller = makeController(); var newInterface = { type: "vlan" }; setupNewInterface(controller, newInterface); spyOn($scope, "canAddAlias").and.returnValue(false); spyOn($scope, "canAddVLAN").and.returnValue(false); $scope.selectedMode = "add"; reloadNewInterface(controller); expect($scope.selectedMode).toBeNull(); }); it("leaves single selection mode if newInterface is cleared", function() { var controller = makeController(); var newInterface = { type: "vlan" }; setupNewInterface(controller, newInterface); spyOn($scope, "canAddAlias").and.returnValue(false); spyOn($scope, "canAddVLAN").and.returnValue(false); $scope.selectedMode = "add"; node.interfaces = []; updateInterfaces(controller); expect($scope.newInterface).toEqual({}); expect($scope.selectedMode).toBeNull(); }); }); }); describe("isBootInterface", function() { it("returns true if is_boot is true", function() { var controller = makeController(); var nic = { type: "physical", is_boot: true }; expect($scope.isBootInterface(nic)).toBe(true); }); it("returns false if is_boot is true and type is alias", function() { var controller = makeController(); var nic = { type: "alias", is_boot: true }; expect($scope.isBootInterface(nic)).toBe(false); }); it("returns false if is_boot is false", function() { var controller = makeController(); var nic = { type: "physical", is_boot: false }; expect($scope.isBootInterface(nic)).toBe(false); }); it("returns false if bond has no members with is_boot", function() { var controller = makeController(); var nic = { type: "bond", is_boot: false, members: [ { is_boot: false }, { is_boot: false } ] }; expect($scope.isBootInterface(nic)).toBe(false); }); it("returns true if bond has member with is_boot", function() { var controller = makeController(); var nic = { type: "bond", is_boot: false, members: [ { is_boot: false }, { is_boot: true } ] }; expect($scope.isBootInterface(nic)).toBe(true); }); }); describe("getInterfaceTypeText", function() { var INTERFACE_TYPE_TEXTS = { "physical": "Physical", "bond": "Bond", "vlan": "VLAN", "alias": "Alias", "missing_type": "missing_type" }; angular.forEach(INTERFACE_TYPE_TEXTS,
function(value, type) { it("returns correct value for '" + type + "'", function() { var controller = makeController(); var nic = { type: type }; expect($scope.getInterfaceTypeText(nic)).toBe(value); }); }); }); describe("getLinkModeText", function() { var LINK_MODE_TEXTS = { "auto": "Auto assign", "static": "Static assign", "dhcp": "DHCP", "link_up": "Unconfigured", "missing_type": "missing_type" }; angular.forEach(LINK_MODE_TEXTS, function(value, mode) { it("returns correct value for '" + mode + "'", function() { var controller = makeController(); var nic = { mode: mode }; expect($scope.getLinkModeText(nic)).toBe(value); }); }); }); describe("getVLANText", function() { it("returns empty if vlan undefined", function() { var controller = makeController(); expect($scope.getVLANText()).toBe(""); }); it("returns just vid", function() { var controller = makeController(); var vlan = { vid: 5 }; expect($scope.getVLANText(vlan)).toBe(5); }); it("returns vid + name", function() { var controller = makeController(); var name = makeName("vlan"); var vlan = { vid: 5, name: name }; expect($scope.getVLANText(vlan)).toBe("5 (" + name + ")"); }); }); describe("getSubnetText", function() { it("returns 'Unconfigured' for null", function() { var controller = makeController(); expect($scope.getSubnetText(null)).toBe("Unconfigured"); }); it("returns just cidr if no name", function() { var controller = makeController(); var cidr = makeName("cidr"); var subnet = { cidr: cidr }; expect($scope.getSubnetText(subnet)).toBe(cidr); }); it("returns just cidr if name same as cidr", function() { var controller = makeController(); var cidr = makeName("cidr"); var subnet = { cidr: cidr, name: cidr }; expect($scope.getSubnetText(subnet)).toBe(cidr); }); it("returns cidr + name", function() { var controller = makeController(); var cidr = makeName("cidr"); var name = makeName("name"); var subnet = { cidr: cidr, name: name }; expect($scope.getSubnetText(subnet)).toBe( cidr + " (" + name + ")"); }); }); describe("getSubnet", function() { it("calls SubnetsManager.getItemFromList", function() { var controller = makeController(); var subnetId = makeInteger(0, 100); var subnet = {}; spyOn(SubnetsManager, "getItemFromList").and.returnValue(subnet); expect($scope.getSubnet(subnetId)).toBe(subnet); expect(SubnetsManager.getItemFromList).toHaveBeenCalledWith( subnetId); }); }); describe("toggleMembers", function() { it("adds interface id to showingMembers", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100) }; $scope.toggleMembers(nic); expect($scope.showingMembers).toEqual([nic.id]); }); it("removes interface id from showingMembers", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100) }; $scope.showingMembers = [nic.id]; $scope.toggleMembers(nic); expect($scope.showingMembers).toEqual([]); }); }); describe("isShowingMembers", function() { it("returns true if interface id in showingMembers", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100) }; $scope.showingMembers = [nic.id]; expect($scope.isShowingMembers(nic)).toBe(true); }); it("returns false if interface id not in showingMembers", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100) }; $scope.showingMembers = []; expect($scope.isShowingMembers(nic)).toBe(false); }); }); describe("saveInterface", function() { it("does nothing if nothing changed", function() { var controller = makeController(); var id = makeInteger(0, 100); var name = makeName("nic"); var vlan
= { id: makeInteger(0, 100) }; var original_nic = { id: id, name: name, vlan_id: vlan.id }; var nic = { id: id, name: name, vlan: vlan }; $scope.originalInterfaces[id] = original_nic; $scope.interfaces = [nic]; spyOn(NodesManager, "updateInterface").and.returnValue( $q.defer().promise); $scope.saveInterface(nic); expect(NodesManager.updateInterface).not.toHaveBeenCalled(); }); it("resets name if it's invalid and doesn't call update", function() { var controller = makeController(); var id = makeInteger(0, 100); var name = makeName("nic"); var vlan = { id: makeInteger(0, 100) }; var original_nic = { id: id, name: name, vlan_id: vlan.id }; var nic = { id: id, name: "", vlan: vlan }; $scope.originalInterfaces[id] = original_nic; $scope.interfaces = [nic]; spyOn(NodesManager, "updateInterface").and.returnValue( $q.defer().promise); $scope.saveInterface(nic); expect(nic.name).toBe(name); expect(NodesManager.updateInterface).not.toHaveBeenCalled(); }); it("calls NodesManager.updateInterface if name changed", function() { var controller = makeController(); var id = makeInteger(0, 100); var name = makeName("nic"); var vlan = { id: makeInteger(0, 100) }; var original_nic = { id: id, name: name, vlan_id: vlan.id }; var nic = { id: id, name: makeName("newName"), vlan: vlan }; $scope.originalInterfaces[id] = original_nic; $scope.interfaces = [nic]; spyOn(NodesManager, "updateInterface").and.returnValue( $q.defer().promise); $scope.saveInterface(nic); expect(NodesManager.updateInterface).toHaveBeenCalledWith( node, id, { "name": nic.name, "vlan": vlan.id }); }); it("calls NodesManager.updateInterface if vlan changed", function() { var controller = makeController(); var id = makeInteger(0, 100); var name = makeName("nic"); var vlan = { id: makeInteger(0, 100) }; var original_nic = { id: id, name: name, vlan_id: makeInteger(200, 300) }; var nic = { id: id, name: name, vlan: vlan }; $scope.originalInterfaces[id] = original_nic; $scope.interfaces = [nic]; spyOn(NodesManager, "updateInterface").and.returnValue( $q.defer().promise); $scope.saveInterface(nic); expect(NodesManager.updateInterface).toHaveBeenCalledWith( node, id, { "name": name, "vlan": vlan.id }); }); }); describe("setFocusInterface", function() { it("sets focusInterface", function() { var controller = makeController(); var nic = {}; $scope.setFocusInterface(nic); expect($scope.focusInterface).toBe(nic); }); }); describe("clearFocusInterface", function() { it("clears focusInterface no arguments", function() { var controller = makeController(); var nic = { type: "physical" }; $scope.focusInterface = nic; spyOn($scope, "saveInterface"); spyOn($scope, "saveInterfaceIPAddress"); $scope.clearFocusInterface(); expect($scope.focusInterface).toBeNull(); expect($scope.saveInterface).toHaveBeenCalledWith(nic); expect($scope.saveInterfaceIPAddress).toHaveBeenCalledWith(nic); }); it("clears focusInterface if same interface", function() { var controller = makeController(); var nic = { type: "physical" }; $scope.focusInterface = nic; spyOn($scope, "saveInterface"); spyOn($scope, "saveInterfaceIPAddress"); $scope.clearFocusInterface(nic); expect($scope.focusInterface).toBeNull(); expect($scope.saveInterface).toHaveBeenCalledWith(nic); expect($scope.saveInterfaceIPAddress).toHaveBeenCalledWith(nic); }); it("doesnt clear focusInterface if different interface", function() { var controller = makeController(); var nic = { type: "physical" }; $scope.focusInterface = nic; spyOn($scope, "saveInterface"); spyOn($scope, "saveInterfaceIPAddress");
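// Pass a different interface object: focus should be kept and neither save helper should run.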
$scope.clearFocusInterface({}); expect($scope.focusInterface).toBe(nic); expect($scope.saveInterface).not.toHaveBeenCalled(); expect($scope.saveInterfaceIPAddress).not.toHaveBeenCalled(); }); it("doesnt call save with focusInterface no arguments", function() { var controller = makeController(); var nic = { type: "alias" }; $scope.focusInterface = nic; spyOn($scope, "saveInterface"); spyOn($scope, "saveInterfaceIPAddress"); $scope.clearFocusInterface(); expect($scope.focusInterface).toBeNull(); expect($scope.saveInterface).not.toHaveBeenCalled(); expect($scope.saveInterfaceIPAddress).toHaveBeenCalledWith(nic); }); it("doesnt call save with focusInterface if same nic", function() { var controller = makeController(); var nic = { type: "alias" }; $scope.focusInterface = nic; spyOn($scope, "saveInterface"); spyOn($scope, "saveInterfaceIPAddress"); $scope.clearFocusInterface(nic); expect($scope.focusInterface).toBeNull(); expect($scope.saveInterface).not.toHaveBeenCalled(); expect($scope.saveInterfaceIPAddress).toHaveBeenCalledWith(nic); }); }); describe("isInterfaceNameInvalid", function() { it("returns true if name is empty", function() { var controller = makeController(); var nic = { name: "" }; expect($scope.isInterfaceNameInvalid(nic)).toBe(true); }); it("returns true if name is same as another interface", function() { var controller = makeController(); var name = makeName("nic"); var nic = { id: 0, name: name }; var otherNic = { id: 1, name: name }; $scope.node.interfaces = [nic, otherNic]; expect($scope.isInterfaceNameInvalid(nic)).toBe(true); }); it("returns false if name is same name as self", function() { var controller = makeController(); var name = makeName("nic"); var nic = { id: 0, name: name }; $scope.node.interfaces = [nic]; expect($scope.isInterfaceNameInvalid(nic)).toBe(false); }); it("returns false if name is different", function() { var controller = makeController(); var name = makeName("nic"); var newName = makeName("newNic"); var nic = { id: 0, name: newName }; var otherNic = { id: 1, name: name }; $scope.node.interfaces = [otherNic]; expect($scope.isInterfaceNameInvalid(nic)).toBe(false); }); }); describe("fabricChanged", function() { it("sets vlan on interface", function() { var controller = makeController(); var fabric = { id: 0, vlan_ids: [0] }; var vlan = { id: 0, fabric: fabric.id }; FabricsManager._items = [fabric]; VLANsManager._items = [vlan]; var nic = { vlan: null, fabric: fabric }; spyOn($scope, "saveInterface"); $scope.fabricChanged(nic); expect(nic.vlan).toBe(vlan); }); it("calls saveInterface", function() { var controller = makeController(); var fabric = { id: 0, vlan_ids: [0] }; var vlan = { id: 0, fabric: fabric.id }; FabricsManager._items = [fabric]; VLANsManager._items = [vlan]; var nic = { vlan: null, fabric: fabric }; spyOn($scope, "saveInterface"); $scope.fabricChanged(nic); expect($scope.saveInterface).toHaveBeenCalledWith(nic); }); }); describe("isLinkModeDisabled", function() { it("enabled when subnet", function() { var controller = makeController(); var nic = { subnet : {} }; expect($scope.isLinkModeDisabled(nic)).toBe(false); }); it("disabled when not subnet", function() { var controller = makeController(); var nic = { subnet : null }; expect($scope.isLinkModeDisabled(nic)).toBe(true); }); }); describe("saveInterfaceLink", function() { it("calls NodesManager.linkSubnet with params", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), mode: "static", subnet: { id: makeInteger(0, 100) }, link_id: makeInteger(0, 100), 
ip_address: "192.168.122.1" }; spyOn(NodesManager, "linkSubnet").and.returnValue( $q.defer().promise); $scope.saveInterfaceLink(nic); expect(NodesManager.linkSubnet).toHaveBeenCalledWith( node, nic.id, { "mode": "static", "subnet": nic.subnet.id, "link_id": nic.link_id, "ip_address": nic.ip_address }); }); it("handles errors", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), mode: "static", subnet: { id: makeInteger(0, 100) }, link_id: makeInteger(0, 100), ip_address: "192.168.122.1" }; var defer = $q.defer(); spyOn(console, "log"); spyOn(NodesManager, "linkSubnet").and.returnValue( defer.promise); $scope.saveInterfaceLink(nic); defer.reject("error"); $scope.$digest(); // Make sure error is set for UI. expect($scope.interfaceErrorsByLinkId[nic.link_id]).toBe("error"); expect(console.log).toHaveBeenCalledWith("error"); // Make sure error is cleared on success. $scope.saveInterfaceLink(nic); expect($scope.interfaceErrorsByLinkId[nic.link_id]).toBeUndefined(); }); }); describe("subnetChanged", function() { it("sets mode to link_up if set to no subnet", function() { var controller = makeController(); var nic = { subnet: null }; spyOn($scope, "saveInterfaceLink"); $scope.subnetChanged(nic); expect(nic.mode).toBe("link_up"); expect($scope.saveInterfaceLink).toHaveBeenCalledWith(nic); }); it("doesnt set mode to link_up if set if subnet", function() { var controller = makeController(); var nic = { mode: "static", subnet: {} }; spyOn($scope, "saveInterfaceLink"); $scope.subnetChanged(nic); expect(nic.mode).toBe("static"); expect($scope.saveInterfaceLink).toHaveBeenCalledWith(nic); }); it("clears ip_address", function() { var controller = makeController(); var nic = { subnet: null, ip_address: makeName("ip") }; spyOn($scope, "saveInterfaceLink"); $scope.subnetChanged(nic); expect(nic.ip_address).toBe(""); }); }); describe("shouldShowIPAddress", function() { it("true if not static and has ip address", function() { var controller = makeController(); var nic = { mode: "auto", ip_address: "192.168.122.1" }; expect($scope.shouldShowIPAddress(nic)).toBe(true); }); it("false if not static and doesn't have ip address", function() { var controller = makeController(); var nic = { mode: "dhcp", ip_address: "" }; expect($scope.shouldShowIPAddress(nic)).toBe(false); }); describe("static", function() { it("false if no orginial link", function() { var controller = makeController(); var nic = { id: 0, mode: "static", link_id: -1, ip_address: "" }; expect($scope.shouldShowIPAddress(nic)).toBe(false); }); it("false if orginial link has no IP address", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static" } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "" }; expect($scope.shouldShowIPAddress(nic)).toBe(false); }); it("false if orginial link has empty IP address", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "" } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "" }; expect($scope.shouldShowIPAddress(nic)).toBe(false); }); it("false if no subnet on nic", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "192.168.122.2" } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "" }; 
expect($scope.shouldShowIPAddress(nic)).toBe(false); }); it("false if the subnets don't match", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "192.168.122.2", subnet_id: 0 } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "", subnet: { id: 1 } }; expect($scope.shouldShowIPAddress(nic)).toBe(false); }); it("true if all conditions match", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "192.168.122.2", subnet_id: 0 } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "", subnet: { id: 0 } }; expect($scope.shouldShowIPAddress(nic)).toBe(true); }); }); }); describe("isIPAddressInvalid", function() { it("true if empty IP address", function() { var controller = makeController(); var nic = { ip_address: "" }; expect($scope.isIPAddressInvalid(nic)).toBe(true); }); it("true if not valid IP address", function() { var controller = makeController(); var nic = { ip_address: "192.168.260.5" }; expect($scope.isIPAddressInvalid(nic)).toBe(true); }); it("true if IP address not in subnet", function() { var controller = makeController(); var nic = { ip_address: "192.168.123.10", subnet: { cidr: "192.168.122.0/24" } }; expect($scope.isIPAddressInvalid(nic)).toBe(true); }); it("false if IP address in subnet", function() { var controller = makeController(); var nic = { ip_address: "192.168.122.10", subnet: { cidr: "192.168.122.0/24" } }; expect($scope.isIPAddressInvalid(nic)).toBe(false); }); }); describe("saveInterfaceIPAddress", function() { it("resets IP address and doesn't save if invalid", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "192.168.122.10", subnet_id: 0 } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "192.168.123.10", subnet: { id: 0, cidr: "192.168.122.0/24" } }; spyOn($scope, "saveInterfaceLink"); $scope.saveInterfaceIPAddress(nic); expect(nic.ip_address).toBe("192.168.122.10"); expect($scope.saveInterfaceLink).not.toHaveBeenCalled(); }); it("saves the link if valid", function() { var controller = makeController(); var originalInterface = { id: 0, links: [ { id: 0, mode: "static", ip_address: "192.168.122.10", subnet_id: 0 } ] }; $scope.originalInterfaces = [originalInterface]; var nic = { id: 0, mode: "static", link_id: 0, ip_address: "192.168.122.11", subnet: { id: 0, cidr: "192.168.122.0/24" } }; spyOn($scope, "saveInterfaceLink"); $scope.saveInterfaceIPAddress(nic); expect(nic.ip_address).toBe("192.168.122.11"); expect($scope.saveInterfaceLink).toHaveBeenCalledWith(nic); }); }); describe("getUniqueKey", function() { it("returns id + / + link_id", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; expect($scope.getUniqueKey(nic)).toBe(nic.id + "/" + nic.link_id); }); }); describe("toggleInterfaceSelect", function() { it("selects interface and enters single mode", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var key = $scope.getUniqueKey(nic); $scope.toggleInterfaceSelect(nic); expect($scope.selectedInterfaces).toEqual([key]); expect($scope.selectedMode).toBe("single"); }); it("deselects interface and enters none mode", function()
{ var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var key = $scope.getUniqueKey(nic); $scope.toggleInterfaceSelect(nic); $scope.toggleInterfaceSelect(nic); expect($scope.selectedInterfaces).toEqual([]); expect($scope.selectedMode).toBeNull(); }); it("selecting multiple enters multi mode", function() { var controller = makeController(); var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var nic2 = { id: makeInteger(100, 200), link_id: makeInteger(0, 100) }; var key1 = $scope.getUniqueKey(nic1); var key2 = $scope.getUniqueKey(nic2); $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); expect($scope.selectedInterfaces).toEqual([key1, key2]); expect($scope.selectedMode).toBe("multi"); }); }); describe("isInterfaceSelected", function() { it("returns true when selected", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var key = $scope.getUniqueKey(nic); $scope.selectedInterfaces = [key]; expect($scope.isInterfaceSelected(nic)).toBe(true); }); it("returns false when not selected", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; $scope.selectedInterfaces = []; expect($scope.isInterfaceSelected(nic)).toBe(false); }); }); describe("cannotEditInterface", function() { it("returns false when only one selected", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var key = $scope.getUniqueKey(nic); $scope.selectedInterfaces = [key]; expect($scope.cannotEditInterface(nic)).toBe(false); }); it("returns false when multiple selected", function() { var controller = makeController(); var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; var nic2 = { id: makeInteger(100, 200), link_id: makeInteger(0, 100) }; var key1 = $scope.getUniqueKey(nic1); var key2 = $scope.getUniqueKey(nic2); $scope.selectedInterfaces = [key1, key2]; expect($scope.cannotEditInterface(nic1)).toBe(false); }); it("returns false when not selected", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; $scope.selectedInterfaces = []; expect($scope.cannotEditInterface(nic)).toBe(false); }); }); describe("isShowingInterfaceOptions", function() { it("returns true in single mode", function() { var controller = makeController(); $scope.selectedMode = "single"; expect($scope.isShowingInterfaceOptions()).toBe(true); }); it("returns false not in single mode", function() { var controller = makeController(); $scope.selectedMode = "multi"; expect($scope.isShowingInterfaceOptions()).toBe(false); }); }); describe("isShowingDeleteComfirm", function() { it("returns true in delete mode", function() { var controller = makeController(); $scope.selectedMode = "delete"; expect($scope.isShowingDeleteComfirm()).toBe(true); }); it("returns false not in delete mode", function() { var controller = makeController(); $scope.selectedMode = "single"; expect($scope.isShowingDeleteComfirm()).toBe(false); }); }); describe("isShowingAdd", function() { it("returns true in add mode", function() { var controller = makeController(); $scope.selectedMode = "add"; expect($scope.isShowingAdd()).toBe(true); }); it("returns false not in add mode", function() { var controller = makeController(); $scope.selectedMode = "delete"; expect($scope.isShowingAdd()).toBe(false); }); }); describe("canAddAlias", function() {
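// An alias can only be added on top of a non-alias interface whose links include at least one configured mode (dhcp, static, or auto); an interface with no links, or only link_up links, does not qualify, as the tests below verify.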
it("returns false if nic undefined", function() { var controller = makeController(); expect($scope.canAddAlias()).toBe(false); }); it("returns false if nic type is alias", function() { var controller = makeController(); var nic = { type: "alias" }; expect($scope.canAddAlias(nic)).toBe(false); }); it("returns false if nic has no links", function() { var controller = makeController(); var nic = { type: "physical", links: [] }; expect($scope.canAddAlias(nic)).toBe(false); }); it("returns false if nic has link_up", function() { var controller = makeController(); var nic = { type: "physical", links: [{ mode: "link_up" }] }; expect($scope.canAddAlias(nic)).toBe(false); }); it("returns true if nic has dhcp", function() { var controller = makeController(); var nic = { type: "physical", links: [{ mode: "dhcp" }] }; expect($scope.canAddAlias(nic)).toBe(true); }); it("returns true if nic has static", function() { var controller = makeController(); var nic = { type: "physical", links: [{ mode: "static" }] }; expect($scope.canAddAlias(nic)).toBe(true); }); it("returns true if nic has auto", function() { var controller = makeController(); var nic = { type: "physical", links: [{ mode: "auto" }] }; expect($scope.canAddAlias(nic)).toBe(true); }); }); describe("canAddVLAN", function() { it("returns false if nic undefined", function() { var controller = makeController(); expect($scope.canAddVLAN()).toBe(false); }); it("returns false if nic type is alias", function() { var controller = makeController(); var nic = { type: "alias" }; expect($scope.canAddVLAN(nic)).toBe(false); }); it("returns false if nic type is vlan", function() { var controller = makeController(); var nic = { type: "vlan" }; expect($scope.canAddVLAN(nic)).toBe(false); }); it("returns false if no unused vlans", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1, 2, 3], vlan_id: 0 }, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 }, { id: 2, type: "vlan", parents: [0], children: [], vlan_id: 1 }, { id: 3, type: "vlan", parents: [0], children: [], vlan_id: 2 } ]; var nic = { id: 0, type: "physical", fabric: fabric }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; expect($scope.canAddVLAN(nic)).toBe(false); }); it("returns true if unused vlans", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1, 2, 3], vlan_id: 0 }, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 }, { id: 2, type: "vlan", parents: [0], children: [], vlan_id: 1 } ]; var nic = { id: 0, type: "physical", fabric: fabric }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; expect($scope.canAddVLAN(nic)).toBe(true); }); }); describe("canAddAnotherVLAN", function() { it("returns false if canAddVLAN returns false", function() { var controller = makeController(); spyOn($scope, "canAddVLAN").and.returnValue(false); expect($scope.canAddAnotherVLAN()).toBe(false); }); it("returns false if only 1 unused vlans", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1, 2, 3], vlan_id: 0 
}, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 }, { id: 2, type: "vlan", parents: [0], children: [], vlan_id: 1 } ]; var nic = { id: 0, type: "physical", fabric: fabric }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; expect($scope.canAddAnotherVLAN(nic)).toBe(false); }); it("returns true if more than 1 unused vlan", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1, 2, 3], vlan_id: 0 }, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 } ]; var nic = { id: 0, type: "physical", fabric: fabric }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; expect($scope.canAddAnotherVLAN(nic)).toBe(true); }); }); describe("getRemoveTypeText", function() { it("returns interface for physical interface", function() { var controller = makeController(); var nic = { type: "physical" }; expect($scope.getRemoveTypeText(nic)).toBe("interface"); }); it("returns VLAN for VLAN interface", function() { var controller = makeController(); var nic = { type: "vlan" }; expect($scope.getRemoveTypeText(nic)).toBe("VLAN"); }); it("returns type for other types", function() { var controller = makeController(); var type = makeName("type"); var nic = { type: type }; expect($scope.getRemoveTypeText(nic)).toBe(type); }); }); describe("remove", function() { it("sets selectedMode to delete", function() { var controller = makeController(); $scope.remove(); expect($scope.selectedMode).toBe("delete"); }); }); describe("quickRemove", function() { it("selects interface and sets selectedMode to delete", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; $scope.quickRemove(nic); expect($scope.isInterfaceSelected(nic)).toBe(true); expect($scope.selectedMode).toBe("delete"); }); }); describe("cancel", function() { it("clears newInterface and sets selectedMode to single", function() { var controller = makeController(); var newInterface = {}; $scope.newInterface = newInterface; $scope.selectedMode = "delete"; $scope.cancel(); expect($scope.newInterface).not.toBe(newInterface); expect($scope.selectedMode).toBe("single"); }); it("clears newInterface and resets mode to none from create-physical", function() { var controller = makeController(); var newInterface = {}; $scope.newInterface = newInterface; $scope.selectedMode = "create-physical"; $scope.cancel(); expect($scope.newInterface).not.toBe(newInterface); expect($scope.selectedMode).toBeNull(); }); it("clears newBondInterface and sets selectedMode to multi", function() { var controller = makeController(); var newBondInterface = {}; $scope.newBondInterface = newBondInterface; $scope.selectedMode = "create-bond"; $scope.cancel(); expect($scope.newBondInterface).not.toBe(newBondInterface); expect($scope.selectedMode).toBe("multi"); }); }); describe("confirmRemove", function() { it("sets selectedMode to none", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), type: "physical", link_id: makeInteger(0, 100) }; $scope.toggleInterfaceSelect(nic); $scope.selectedMode = "delete"; spyOn(NodesManager, "deleteInterface"); $scope.confirmRemove(nic); expect($scope.selectedMode).toBeNull(); expect($scope.selectedInterfaces).toEqual([]); }); it("calls NodesManager.deleteInterface", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), type:
"physical", link_id: makeInteger(0, 100) }; $scope.toggleInterfaceSelect(nic); $scope.selectedMode = "delete"; spyOn(NodesManager, "deleteInterface"); $scope.confirmRemove(nic); expect(NodesManager.deleteInterface).toHaveBeenCalledWith( node, nic.id); }); it("calls NodesManager.unlinkSubnet", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), type: "alias", link_id: makeInteger(0, 100) }; $scope.toggleInterfaceSelect(nic); $scope.selectedMode = "delete"; spyOn(NodesManager, "unlinkSubnet"); $scope.confirmRemove(nic); expect(NodesManager.unlinkSubnet).toHaveBeenCalledWith( node, nic.id, nic.link_id); }); it("removes nic from interfaces", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), type: "alias", link_id: makeInteger(0, 100) }; $scope.interfaces = [nic]; $scope.toggleInterfaceSelect(nic); $scope.selectedMode = "delete"; spyOn(NodesManager, "unlinkSubnet"); $scope.confirmRemove(nic); expect($scope.interfaces).toEqual([]); }); }); describe("add", function() { it("sets up newInterface for alias", function() { var controller = makeController(); var vlan = {}; var nic = { id: makeInteger(0, 100), type: "physical", link_id: makeInteger(0, 100), vlan: vlan }; var subnet = {}; spyOn(VLANsManager, "getSubnets").and.returnValue([subnet]); $scope.add('alias', nic); expect($scope.newInterface).toEqual({ type: "alias", vlan: vlan, subnet: subnet, mode: "auto", parent: nic }); expect($scope.newInterface.vlan).toBe(vlan); expect($scope.newInterface.subnet).toBe(subnet); expect($scope.newInterface.parent).toBe(nic); expect($scope.selectedMode).toBe("add"); }); it("sets up newInterface for vlan", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1], vlan_id: 0 }, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 } ]; var nic = { id: 0, type: "physical", link_id: -1, fabric: fabric, vlan: vlans[0] }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; $scope.newInterface = { vlan: vlans[1] }; $scope.add('vlan', nic); expect($scope.newInterface).toEqual({ type: "vlan", vlan: vlans[2], subnet: null, mode: "link_up", parent: nic }); expect($scope.newInterface.vlan).toBe(vlans[2]); expect($scope.newInterface.parent).toBe(nic); expect($scope.selectedMode).toBe("add"); }); }); describe("quickAdd", function() { it("selects nic and calls add with alias", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; $scope.selectedInterfaces = [{}, {}, {}]; spyOn($scope, "canAddAlias").and.returnValue(true); spyOn($scope, "add"); $scope.quickAdd(nic); expect($scope.selectedInterfaces).toEqual( [$scope.getUniqueKey(nic)]); expect($scope.add).toHaveBeenCalledWith('alias', nic); }); it("selects nic and calls add with vlan", function() { var controller = makeController(); var nic = { id: makeInteger(0, 100), link_id: makeInteger(0, 100) }; $scope.selectedInterfaces = [{}, {}, {}]; spyOn($scope, "canAddAlias").and.returnValue(false); spyOn($scope, "add"); $scope.quickAdd(nic); expect($scope.selectedInterfaces).toEqual( [$scope.getUniqueKey(nic)]); expect($scope.add).toHaveBeenCalledWith('vlan', nic); }); }); describe("getAddName", function() { it("returns alias name based on links length", function() { var controller = makeController(); var name = makeName("eth"); var parent = { id: 
makeInteger(0, 100), name: name, link_id: makeInteger(0, 100), links: [{}, {}, {}] }; $scope.newInterface = { type: "alias", parent: parent }; expect($scope.getAddName()).toBe(name + ":3"); }); it("returns VLAN name based on VLAN vid", function() { var controller = makeController(); var name = makeName("eth"); var vid = makeInteger(0, 100); var parent = { id: makeInteger(0, 100), name: name, link_id: makeInteger(0, 100) }; $scope.newInterface = { type: "vlan", parent: parent, vlan: { vid: vid } }; expect($scope.getAddName()).toBe(name + "." + vid); }); }); describe("addTypeChanged", function() { it("resets properties based on the new type alias", function() { var controller = makeController(); var vlan = {}; var subnet = {}; var parent = { id: makeInteger(0, 100), name: name, link_id: makeInteger(0, 100), vlan: vlan }; spyOn(VLANsManager, "getSubnets").and.returnValue([subnet]); $scope.newInterface = { type: "alias", parent: parent }; $scope.addTypeChanged(); expect($scope.newInterface.vlan).toBe(vlan); expect($scope.newInterface.subnet).toBe(subnet); expect($scope.newInterface.mode).toBe("auto"); }); it("resets properties based on the new type VLAN", function() { var controller = makeController(); var fabric = { id: 0 }; var vlans = [ { id: 0, fabric: 0 }, { id: 1, fabric: 0 }, { id: 2, fabric: 0 } ]; var originalInterfaces = [ { id: 0, type: "physical", parents: [], children: [1], vlan_id: 0 }, { id: 1, type: "vlan", parents: [0], children: [], vlan_id: 0 } ]; var parent = { id: 0, type: "physical", link_id: -1, fabric: fabric, vlan: vlans[0] }; $scope.originalInterfaces = originalInterfaces; $scope.vlans = vlans; $scope.newInterface = { type: "vlan", parent: parent }; $scope.addTypeChanged(); expect($scope.newInterface.vlan).toBe(vlans[1]); expect($scope.newInterface.subnet).toBeNull(); expect($scope.newInterface.mode).toBe("link_up"); }); }); describe("addVLANChanged", function() { it("clears subnets on newInterface", function() { var controller = makeController(); $scope.newInterface = { subnet: {} }; $scope.addVLANChanged(); expect($scope.newInterface.subnet).toBeNull(); }); }); describe("addSubnetChanged", function() { it("sets mode to link_up if no subnet", function() { var controller = makeController(); $scope.newInterface = { mode: "auto" }; $scope.addSubnetChanged(); expect($scope.newInterface.mode).toBe("link_up"); }); it("leaves mode alone when subnet", function() { var controller = makeController(); $scope.newInterface = { mode: "auto", subnet: {} }; $scope.addSubnetChanged(); expect($scope.newInterface.mode).toBe("auto"); }); }); describe("addInterface", function() { it("calls saveInterfaceLink with correct params", function() { var controller = makeController(); var parent = { id: makeInteger(0, 100) }; var subnet = {}; $scope.newInterface = { type: "alias", mode: "auto", subnet: subnet, parent: parent }; $scope.selectedInterfaces = [{}]; $scope.selectedMode = "add"; spyOn($scope, "saveInterfaceLink"); $scope.addInterface(); expect($scope.saveInterfaceLink).toHaveBeenCalledWith({ id: parent.id, mode: "auto", subnet: subnet, ip_address: "" }); expect($scope.selectedMode).toBeNull(); expect($scope.selectedInterfaces).toEqual([]); expect($scope.newInterface).toEqual({}); }); it("calls createVLANInterface with correct params", function() { var controller = makeController(); var parent = { id: makeInteger(0, 100) }; var vlan = { id: makeInteger(0, 100) }; var subnet = { id: makeInteger(0, 100) }; $scope.newInterface = { type: "vlan", mode: "auto", parent: parent, vlan: vlan,
subnet: subnet }; $scope.selectedInterfaces = [{}]; $scope.selectedMode = "add"; spyOn(NodesManager, "createVLANInterface").and.returnValue( $q.defer().promise); $scope.addInterface(); expect(NodesManager.createVLANInterface).toHaveBeenCalledWith( node, { parent: parent.id, vlan: vlan.id, mode: "auto", subnet: subnet.id }); expect($scope.selectedMode).toBeNull(); expect($scope.selectedInterfaces).toEqual([]); expect($scope.newInterface).toEqual({}); }); it("calls add again with type", function() { var controller = makeController(); var parent = { id: makeInteger(0, 100) }; $scope.newInterface = { type: "alias", mode: "auto", subnet: {}, parent: parent }; var selection = [{}]; $scope.selectedInterfaces = selection; $scope.selectedMode = "add"; spyOn($scope, "saveInterfaceLink"); spyOn($scope, "add"); $scope.addInterface("alias"); expect($scope.add).toHaveBeenCalledWith("alias", parent); expect($scope.selectedMode).toBe("add"); expect($scope.selectedInterfaces).toBe(selection); }); }); describe("isDisabled", function() { it("returns false when in none, single, or multi mode", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; // Node needs to be Ready or Broken for the mode to be considered. $scope.node = {status: "Ready"}; $scope.selectedMode = null; expect($scope.isDisabled()).toBe(false); $scope.selectedMode = "single"; expect($scope.isDisabled()).toBe(false); $scope.selectedMode = "multi"; expect($scope.isDisabled()).toBe(false); }); it("returns true when in delete, add, or create modes", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; // Node needs to be Ready or Broken for the mode to be considered. $scope.node = {status: "Ready"}; $scope.selectedMode = "create-bond"; expect($scope.isDisabled()).toBe(true); $scope.selectedMode = "add"; expect($scope.isDisabled()).toBe(true); $scope.selectedMode = "delete"; expect($scope.isDisabled()).toBe(true); }); it("returns true when the node status is not 'Ready' or 'Broken'", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; $scope.node = {status: "Ready"}; expect($scope.isDisabled()).toBe(false); $scope.node = {status: "Broken"}; expect($scope.isDisabled()).toBe(false); ["New", "Commissioning", "Failed commissioning", "Missing", "Reserved", "Allocated", "Deploying", "Deployed", "Retired", "Failed deployment", "Releasing", "Releasing failed", "Disk erasing", "Failed disk erasing"].forEach(function (s) { $scope.node = {status: s}; expect($scope.isDisabled()).toBe(true); }); }); it("returns true if the user is not a superuser", function() { var controller = makeController(); $scope.isSuperUser = function() { return false; }; $scope.node = {status: "Ready"}; expect($scope.isDisabled()).toBe(true); $scope.node = {status: "Broken"}; expect($scope.isDisabled()).toBe(true); }); }); describe("isAllNetworkingDisabled", function() { it("returns true if the user is not a superuser and the node is ready", function() { var controller = makeController(); $scope.isSuperUser = function() { return false; }; expect($scope.isAllNetworkingDisabled()).toBe(true); }); it("returns false if the node is Ready and we are a superuser", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; $scope.node.status = "Ready"; expect($scope.isAllNetworkingDisabled()).toBe(false); }); it("returns false if the node is broken and we are a superuser", function() { var controller = makeController();
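// Broken nodes, like Ready ones, should remain editable for superusers.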
$scope.isSuperUser = function() { return true; }; $scope.node.status = "Broken"; expect($scope.isAllNetworkingDisabled()).toBe(false); }); it("returns true if the node is deploying and we are a superuser", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; $scope.node.status = "Deploying"; expect($scope.isAllNetworkingDisabled()).toBe(true); }); }); describe("canCreateBond", function() { it("returns false if not in multi mode", function() { var controller = makeController(); var modes = [null, "add", "delete", "single", "delete"]; angular.forEach(modes, function(mode) { $scope.selectedMode = mode; expect($scope.canCreateBond()).toBe(false); }); }); it("returns false if selected interface is bond", function() { var controller = makeController(); var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "bond" }; var nic2 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "bond" }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); expect($scope.canCreateBond()).toBe(false); }); it("returns false if selected interface is alias", function() { var controller = makeController(); var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "alias" }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "alias" }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); expect($scope.canCreateBond()).toBe(false); }); it("returns false if not same selected vlan", function() { var controller = makeController(); var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: {} }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "physical", vlan: {} }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); expect($scope.canCreateBond()).toBe(false); }); it("returns true if same selected vlan", function() { var controller = makeController(); var vlan = {}; var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); expect($scope.canCreateBond()).toBe(true); }); }); describe("isShowingCreateBond", function() { it("returns true in create-bond mode", function() { var controller = makeController(); $scope.selectedMode = "create-bond";
expect($scope.isShowingCreateBond()).toBe(true); }); it("returns false in multi mode", function() { var controller = makeController(); $scope.selectedMode = "multi"; expect($scope.isShowingCreateBond()).toBe(false); }); }); describe("showCreateBond", function() { it("sets mode to create-bond", function() { var controller = makeController(); $scope.selectedMode = "multi"; spyOn($scope, "canCreateBond").and.returnValue(true); $scope.showCreateBond(); expect($scope.selectedMode).toBe("create-bond"); }); it("creates the newBondInterface", function() { var controller = makeController(); var vlan = {}; var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); $scope.showCreateBond(); expect($scope.newBondInterface).toEqual({ name: "bond0", parents: [nic1, nic2], primary: nic1, macAddress: "", mode: "active-backup", lacpRate: "slow", xmitHashPolicy: "layer2" }); }); }); describe("getBondIsBootInterface", function() { it("returns false if bond has no members with is_boot", function() { var controller = makeController(); $scope.newBondInterface = { parents: [ { is_boot: false }, { is_boot: false } ] }; expect($scope.getBondIsBootInterface()).toBe(false); }); it("returns true if bond has member with is_boot", function() { var controller = makeController(); $scope.newBondInterface = { parents: [ { is_boot: false }, { is_boot: true } ] }; expect($scope.getBondIsBootInterface()).toBe(true); }); }); describe("getBondPlaceholderMACAddress", function() { it("returns empty string if primary not set", function() { var controller = makeController(); expect($scope.getBondPlaceholderMACAddress()).toBe(""); }); it("returns the MAC address of the primary interface", function() { var controller = makeController(); var macAddress = makeName("mac"); $scope.newBondInterface.primary = { mac_address: macAddress }; expect($scope.getBondPlaceholderMACAddress()).toBe(macAddress); }); }); describe("isMACAddressInvalid", function() { it("returns false when the macAddress is blank and not invalidEmpty", function() { var controller = makeController(); expect($scope.isMACAddressInvalid("")).toBe(false); }); it("returns true when the macAddress is blank and invalidEmpty", function() { var controller = makeController(); expect($scope.isMACAddressInvalid("", true)).toBe(true); }); it("returns false if valid macAddress", function() { var controller = makeController(); expect($scope.isMACAddressInvalid("00:11:22:33:44:55")).toBe(false); }); it("returns true if invalid macAddress", function() { var controller = makeController(); expect($scope.isMACAddressInvalid("00:11:22:33:44")).toBe(true); }); }); describe("showLACPRate", function() { it("returns true if in 802.3ad mode", function() { var controller = makeController(); $scope.newBondInterface.mode = "802.3ad"; expect($scope.showLACPRate()).toBe(true); }); it("returns false if not in 802.3ad mode", function() { var controller = makeController(); $scope.newBondInterface.mode = makeName("otherMode"); expect($scope.showLACPRate()).toBe(false); }); }); describe("showXMITHashPolicy", function() { it("returns true if in
balance-xor mode", function() { var controller = makeController(); $scope.newBondInterface.mode = "balance-xor"; expect($scope.showXMITHashPolicy()).toBe(true); }); it("returns true if in 802.3ad mode", function() { var controller = makeController(); $scope.newBondInterface.mode = "802.3ad"; expect($scope.showXMITHashPolicy()).toBe(true); }); it("returns true if in balance-tlb mode", function() { var controller = makeController(); $scope.newBondInterface.mode = "balance-tlb"; expect($scope.showXMITHashPolicy()).toBe(true); }); it("returns false if not in other modes", function() { var controller = makeController(); $scope.newBondInterface.mode = makeName("otherMode"); expect($scope.showXMITHashPolicy()).toBe(false); }); }); describe("cannotAddBond", function() { it("returns true when isInterfaceNameInvalid is true", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(true); expect($scope.cannotAddBond()).toBe(true); }); it("returns true when isMACAddressInvalid is true", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(false); spyOn($scope, "isMACAddressInvalid").and.returnValue(true); expect($scope.cannotAddBond()).toBe(true); }); it("returns false when both are false", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(false); spyOn($scope, "isMACAddressInvalid").and.returnValue(false); expect($scope.cannotAddBond()).toBe(false); }); }); describe("addBond", function() { it("does nothing if cannotAddBond returns true", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); $scope.showCreateBond(); spyOn(NodesManager, "createBondInterface").and.returnValue( $q.defer().promise); spyOn($scope, "cannotAddBond").and.returnValue(true); $scope.newBondInterface.name = "bond0"; $scope.newBondInterface.macAddress = "00:11:22:33:44:55"; $scope.addBond(); expect(NodesManager.createBondInterface).not.toHaveBeenCalled(); }); it("calls createBondInterface and removes selection", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var nic1 = { id: makeInteger(0, 100), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; var nic2 = { id: makeInteger(101, 200), link_id: makeInteger(0, 100), type: "physical", vlan: vlan }; $scope.interfaces = [nic1, nic2]; $scope.interfaceLinksMap = {}; $scope.interfaceLinksMap[nic1.id] = {}; $scope.interfaceLinksMap[nic1.id][nic1.link_id] = nic1; $scope.interfaceLinksMap[nic2.id] = {}; $scope.interfaceLinksMap[nic2.id][nic2.link_id] = nic2; $scope.toggleInterfaceSelect(nic1); $scope.toggleInterfaceSelect(nic2); $scope.showCreateBond(); spyOn(NodesManager, "createBondInterface").and.returnValue( $q.defer().promise); spyOn($scope, "cannotAddBond").and.returnValue(false); $scope.newBondInterface.name = "bond0"; $scope.newBondInterface.macAddress = "00:11:22:33:44:55"; $scope.addBond();
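// addBond should translate the UI bond model into the websocket call parameters below and clear the current selection.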
expect(NodesManager.createBondInterface).toHaveBeenCalledWith( node, { name: "bond0", mac_address: "00:11:22:33:44:55", parents: [nic1.id, nic2.id], vlan: vlan.id, bond_mode: "active-backup", bond_lacp_rate: "slow", bond_xmit_hash_policy: "layer2" }); expect($scope.interfaces).toEqual([]); expect($scope.newBondInterface).toEqual({}); expect($scope.selectedInterfaces).toEqual([]); expect($scope.selectedMode).toBeNull(); }); }); describe("isShowingCreatePhysical", function() { it("returns true in create-physical mode", function() { var controller = makeController(); $scope.selectedMode = "create-physical"; expect($scope.isShowingCreatePhysical()).toBe(true); }); it("returns false in single mode", function() { var controller = makeController(); $scope.selectedMode = "single"; expect($scope.isShowingCreatePhysical()).toBe(false); }); }); describe("showCreatePhysical", function() { it("sets mode to create-physical", function() { var controller = makeController(); var vlan = { id: 0, fabric: 0 }; var fabric = { id: 0, name: makeName("fabric"), vlan_ids: [0] }; VLANsManager._items = [vlan]; $scope.fabrics = [fabric]; $scope.selectedMode = null; $scope.showCreatePhysical(); expect($scope.selectedMode).toBe("create-physical"); }); it("creates the newInterface", function() { var controller = makeController(); var vlan = { id: 0, fabric: 0 }; var fabric = { id: 0, name: makeName("fabric"), vlan_ids: [0] }; VLANsManager._items = [vlan]; $scope.fabrics = [fabric]; $scope.selectedMode = null; $scope.showCreatePhysical(); expect($scope.newInterface).toEqual({ name: "eth0", macAddress: "", macError: false, errorMsg: null, fabric: fabric, vlan: vlan, subnet: null, mode: "link_up" }); }); }); describe("newPhysicalFabricChanged", function() { it("sets newInterface.vlan with new fabric", function() { var controller = makeController(); var vlan = { id: 0, fabric: 0 }; var fabric = { id: 0, name: makeName("fabric"), vlan_ids: [0] }; VLANsManager._items = [vlan]; $scope.newInterface.fabric = fabric; $scope.newInterface.subnet = {}; $scope.newInterface.mode = "auto"; $scope.newPhysicalFabricChanged(); expect($scope.newInterface.vlan).toBe(vlan); expect($scope.newInterface.subnet).toBeNull(); expect($scope.newInterface.mode).toBe("link_up"); }); }); describe("newPhysicalSubnetChanged", function() { it("sets mode to link_up when no subnet", function() { var controller = makeController(); $scope.newInterface.subnet = null; $scope.newInterface.mode = "auto"; $scope.newPhysicalSubnetChanged(); expect($scope.newInterface.mode).toBe("link_up"); }); it("leaves mode unchanged when subnet", function() { var controller = makeController(); $scope.newInterface.subnet = {}; $scope.newInterface.mode = "auto"; $scope.newPhysicalSubnetChanged(); expect($scope.newInterface.mode).toBe("auto"); }); }); describe("cannotAddPhysicalInterface", function() { it("returns true when isInterfaceNameInvalid is true", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(true); expect($scope.cannotAddPhysicalInterface()).toBe(true); }); it("returns true when isMACAddressInvalid is true", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(false); spyOn($scope, "isMACAddressInvalid").and.returnValue(true); expect($scope.cannotAddPhysicalInterface()).toBe(true); }); it("returns false when both are false", function() { var controller = makeController(); spyOn($scope, "isInterfaceNameInvalid").and.returnValue(false); spyOn($scope,
"isMACAddressInvalid").and.returnValue(false); expect($scope.cannotAddPhysicalInterface()).toBe(false); }); }); describe("addPhysicalInterface", function() { it("deos nothing if cannotAddInterface returns true", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var subnet = { id: makeInteger(0, 100) }; $scope.newInterface = { name: "eth0", macAddress: "00:11:22:33:44:55", vlan: vlan, subnet: subnet, mode: "auto" }; spyOn(NodesManager, "createPhysicalInterface").and.returnValue( $q.defer().promise); spyOn($scope, "cannotAddPhysicalInterface").and.returnValue(true); $scope.addPhysicalInterface(); expect(NodesManager.createPhysicalInterface).not.toHaveBeenCalled(); }); it("calls createPhysicalInterface and removes selection", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var subnet = { id: makeInteger(0, 100) }; $scope.newInterface = { name: "eth0", macAddress: "00:11:22:33:44:55", vlan: vlan, subnet: subnet, mode: "auto" }; $scope.selectedMode = "create-physical"; var defer = $q.defer(); spyOn(NodesManager, "createPhysicalInterface").and.returnValue( defer.promise); spyOn($scope, "cannotAddPhysicalInterface").and.returnValue(false); $scope.addPhysicalInterface(); defer.resolve(); $scope.$digest(); expect(NodesManager.createPhysicalInterface).toHaveBeenCalledWith( node, { name: "eth0", mac_address: "00:11:22:33:44:55", vlan: vlan.id, subnet: subnet.id, mode: "auto" }); expect($scope.newInterface).toEqual({}); expect($scope.selectedMode).toBeNull(); }); it("clears error on call", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var subnet = { id: makeInteger(0, 100) }; $scope.newInterface = { name: "eth0", macAddress: "00:11:22:33:44:55", vlan: vlan, subnet: subnet, mode: "auto", macError: true, errorMsg: "error" }; var defer = $q.defer(); spyOn(NodesManager, "createPhysicalInterface").and.returnValue( defer.promise); spyOn($scope, "cannotAddPhysicalInterface").and.returnValue(false); $scope.addPhysicalInterface(); expect($scope.newInterface.macError).toBe(false); expect($scope.newInterface.errorMsg).toBeNull(); }); it("handles macAddress error", function() { var controller = makeController(); var vlan = { id: makeInteger(0, 100) }; var subnet = { id: makeInteger(0, 100) }; $scope.newInterface = { name: "eth0", macAddress: "00:11:22:33:44:55", vlan: vlan, subnet: subnet, mode: "auto" }; var defer = $q.defer(); spyOn(NodesManager, "createPhysicalInterface").and.returnValue( defer.promise); spyOn($scope, "cannotAddPhysicalInterface").and.returnValue(false); $scope.addPhysicalInterface(); var error = { "mac_address": ["MACAddress is already in use"] }; defer.reject(angular.toJson(error)); $scope.$digest(); expect($scope.newInterface.macError).toBe(true); expect($scope.newInterface.errorMsg).toBe( "MACAddress is already in use"); }); }); }); ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_details_storage.jsmaas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_details_storage0000644000000000000000000051160413056115004032025 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodeStorageController. */ describe("removeAvailableByNew", function() { // Load the MAAS module. 
beforeEach(module("MAAS")); // Load the removeAvailableByNew. var removeAvailableByNew; beforeEach(inject(function($filter) { removeAvailableByNew = $filter("removeAvailableByNew"); })); it("returns disks if undefined availableNew", function() { var i, disk, disks = []; for(i = 0; i < 3; i++) { disk = { id: i }; disks.push(disk); } expect(removeAvailableByNew(disks)).toBe(disks); }); it("returns disks if undefined device(s) in availableNew", function() { var i, disk, disks = []; for(i = 0; i < 3; i++) { disk = { id: i }; disks.push(disk); } var availableNew = {}; expect(removeAvailableByNew(disks, availableNew)).toBe(disks); }); it("removes availableNew.device from disks", function() { var i, disk, disks = []; for(i = 0; i < 3; i++) { disk = { id: i }; disks.push(disk); } var availableNew = { device: disks[0] }; var expectedDisks = angular.copy(disks); expectedDisks.splice(0, 1); expect(removeAvailableByNew(disks, availableNew)).toEqual( expectedDisks); }); it("removes availableNew.devices from disks", function() { var i, disk, disks = []; for(i = 0; i < 6; i++) { disk = { id: i }; disks.push(disk); } var availableNew = { devices: [disks[0], disks[1]] }; var expectedDisks = angular.copy(disks); expectedDisks.splice(0, 2); expect(removeAvailableByNew(disks, availableNew)).toEqual( expectedDisks); }); }); describe("NodeStorageController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $parentScope, $scope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $parentScope = $rootScope.$new(); $scope = $parentScope.$new(); $q = $injector.get("$q"); })); // Load the required dependencies for the NodeStorageController. var NodesManager; beforeEach(inject(function($injector) { NodesManager = $injector.get("NodesManager"); })); // Create the node and functions that will be called on the parent. var node, updateNodeSpy, canEditSpy; beforeEach(function() { node = { architecture: "amd64/generic", disks: [] }; updateNodeSpy = jasmine.createSpy("updateNode"); canEditSpy = jasmine.createSpy("canEdit"); $parentScope.node = node; $parentScope.updateNode = updateNodeSpy; $parentScope.canEdit = canEditSpy; }); // Makes the NodeStorageController function makeController() { // Create the controller. var controller = $controller("NodeStorageController", { $scope: $scope, NodesManager: NodesManager }); return controller; } // Return a known set of disks for testing the loading of disks // into the controller. 
function makeDisks() { return [ { // Blank disk id: 0, is_boot: true, name: makeName("name"), model: makeName("model"), serial: makeName("serial"), tags: [], type: makeName("type"), size: Math.pow(1024, 4), size_human: "1024 GB", available_size: Math.pow(1024, 4), available_size_human: "1024 GB", used_size: 0, used_size_human: "0.0 Bytes", partition_table_type: makeName("partition_table_type"), used_for: "Unused", filesystem: null, partitions: null }, { // Disk with filesystem, no mount point id: 1, is_boot: false, name: makeName("name"), model: makeName("model"), serial: makeName("serial"), tags: [], type: makeName("type"), size: Math.pow(1024, 4), size_human: "1024 GB", available_size: 0, available_size_human: "0 GB", used_size: Math.pow(1024, 4), used_size_human: "1024 GB", partition_table_type: makeName("partition_table_type"), used_for: "Unmounted ext4 formatted filesystem.", filesystem: { is_format_fstype: true, fstype: "ext4", mount_point: null }, partitions: null }, { // Disk with mounted filesystem id: 2, is_boot: false, name: makeName("name"), model: makeName("model"), serial: makeName("serial"), tags: [], type: makeName("type"), size: Math.pow(1024, 4), size_human: "1024 GB", available_size: 0, available_size_human: "0 GB", used_size: Math.pow(1024, 4), used_size_human: "1024 GB", partition_table_type: makeName("partition_table_type"), used_for: "ext4 formatted filesystem mounted at /.", filesystem: { is_format_fstype: true, fstype: "ext4", mount_point: "/" }, partitions: null }, { // Partitioned disk, one partition free one used id: 3, is_boot: false, name: makeName("name"), model: makeName("model"), serial: makeName("serial"), tags: [], type: makeName("type"), size: Math.pow(1024, 4), size_human: "1024 GB", available_size: 0, available_size_human: "0 GB", used_size: Math.pow(1024, 4), used_size_human: "1024 GB", partition_table_type: "GPT", filesystem: null, partitions: [ { id: 0, name: makeName("partition_name"), size_human: "512 GB", type: "partition", filesystem: null, used_for: "Unused" }, { id: 1, name: makeName("partition_name"), size_human: "512 GB", type: "partition", filesystem: { is_format_fstype: true, fstype: "ext4", mount_point: "/mnt" }, used_for: "ext4 formatted filesystem mounted at /mnt." } ] }, { // Disk that is a cache set. 
id: 4, is_boot: false, name: "cache0", model: "", serial: "", tags: [], type: "cache-set", size: Math.pow(1024, 4), size_human: "1024 GB", available_size: 0, available_size_human: "0 GB", used_size: Math.pow(1024, 4), used_size_human: "1024 GB", partition_table_type: null, used_for: "", filesystem: null, partitions: null } ]; } it("sets initial values", function() { var controller = makeController(); expect($scope.column).toBe('name'); expect($scope.has_disks).toBe(false); expect($scope.filesystems).toEqual([]); expect($scope.filesystemsMap).toEqual({}); expect($scope.filesystemMode).toBeNull(); expect($scope.filesystemAllSelected).toBe(false); expect($scope.available).toEqual([]); expect($scope.availableMap).toEqual({}); expect($scope.availableMode).toBeNull(); expect($scope.availableAllSelected).toBe(false); expect($scope.cachesets).toEqual([]); expect($scope.cachesetsMap).toEqual({}); expect($scope.cachesetsMode).toBeNull(); expect($scope.cachesetsAllSelected).toBe(false); expect($scope.used).toEqual([]); }); it("starts watching disks once nodeLoaded called", function() { var controller = makeController(); spyOn($scope, "$watch"); $scope.nodeLoaded(); var watches = []; var i, calls = $scope.$watch.calls.allArgs(); for(i = 0; i < calls.length; i++) { watches.push(calls[i][0]); } expect(watches).toEqual(["node.disks"]); }); it("disks updated once nodeLoaded called", function() { var disks = makeDisks(); node.disks = disks; var filesystems = [ { type: "filesystem", name: disks[2].name, size_human: disks[2].size_human, fstype: disks[2].filesystem.fstype, mount_point: disks[2].filesystem.mount_point, block_id: disks[2].id, partition_id: null, original_type: disks[2].type, original: disks[2], $selected: false }, { type: "filesystem", name: disks[3].partitions[1].name, size_human: disks[3].partitions[1].size_human, fstype: disks[3].partitions[1].filesystem.fstype, mount_point: disks[3].partitions[1].filesystem.mount_point, block_id: disks[3].id, partition_id: disks[3].partitions[1].id, original_type: "partition", original: disks[3].partitions[1], $selected: false } ]; var cachesets = [ { type: "cache-set", name: disks[4].name, size_human: disks[4].size_human, cache_set_id: disks[4].id, used_by: disks[4].used_for, $selected: false } ]; var available = [ { name: disks[0].name, is_boot: disks[0].is_boot, size_human: disks[0].size_human, available_size_human: disks[0].available_size_human, used_size_human: disks[0].used_size_human, type: disks[0].type, model: disks[0].model, serial: disks[0].serial, tags: disks[0].tags, fstype: null, mount_point: null, block_id: 0, partition_id: null, has_partitions: false, original: disks[0], $selected: false, $options: {} }, { name: disks[1].name, is_boot: disks[1].is_boot, size_human: disks[1].size_human, available_size_human: disks[1].available_size_human, used_size_human: disks[1].used_size_human, type: disks[1].type, model: disks[1].model, serial: disks[1].serial, tags: disks[1].tags, fstype: "ext4", mount_point: null, block_id: 1, partition_id: null, has_partitions: false, original: disks[1], $selected: false, $options: {} }, { name: disks[3].partitions[0].name, is_boot: false, size_human: disks[3].partitions[0].size_human, available_size_human: ( disks[3].partitions[0].available_size_human), used_size_human: disks[3].partitions[0].used_size_human, type: disks[3].partitions[0].type, model: "", serial: "", tags: [], fstype: null, mount_point: null, block_id: 3, partition_id: 0, has_partitions: false, original: disks[3].partitions[0], $selected: false, 
$options: {} } ]; var used = [ { name: disks[2].name, is_boot: disks[2].is_boot, type: disks[2].type, model: disks[2].model, serial: disks[2].serial, tags: disks[2].tags, used_for: disks[2].used_for }, { name: disks[3].name, is_boot: disks[3].is_boot, type: disks[3].type, model: disks[3].model, serial: disks[3].serial, tags: disks[3].tags, used_for: disks[3].used_for }, { name: disks[3].partitions[1].name, is_boot: false, type: "partition", model: "", serial: "", tags: [], used_for: disks[3].partitions[1].used_for } ]; var controller = makeController(); $scope.nodeLoaded(); $rootScope.$digest(); expect($scope.has_disks).toEqual(true); expect($scope.filesystems).toEqual(filesystems); expect($scope.cachesets).toEqual(cachesets); expect($scope.available).toEqual(available); expect($scope.used).toEqual(used); }); it("disks $selected and $options not lost on update", function() { var controller = makeController(); var disks = makeDisks(); node.disks = disks; // Load the filesystems, cachesets, available, and used once. $scope.nodeLoaded(); $rootScope.$digest(); // Set all filesystems, cachesets, and available to selected. angular.forEach($scope.filesystems, function(filesystem) { filesystem.$selected = true; }); angular.forEach($scope.cachesets, function(cacheset) { cacheset.$selected = true; }); angular.forEach($scope.available, function(disk) { disk.$selected = true; }); // Get all the options for available. var options = []; angular.forEach($scope.available, function(disk) { options.push(disk.$options); }); // Force the disks to change so the filesystems, cachesets, available, // and used are reloaded. var firstFilesystem = $scope.filesystems[0]; node.disks = angular.copy(node.disks); $rootScope.$digest(); expect($scope.filesystems[0]).not.toBe(firstFilesystem); expect($scope.filesystems[0]).toEqual(firstFilesystem); // All filesystems, cachesets and available should be selected. angular.forEach($scope.filesystems, function(filesystem) { expect(filesystem.$selected).toBe(true); }); angular.forEach($scope.cachesets, function(cacheset) { expect(cacheset.$selected).toBe(true); }); angular.forEach($scope.available, function(disk) { expect(disk.$selected).toBe(true); }); // All available should have the same options. angular.forEach($scope.available, function(disk, idx) { expect(disk.$options).toBe(options[idx]); }); }); it("availableNew.device object is updated", function() { var controller = makeController(); var disks = makeDisks(); node.disks = disks; // Load the filesystems, cachesets, available, and used once. $scope.nodeLoaded(); $rootScope.$digest(); // Set availableNew.device to a disk from available. var disk = $scope.available[0]; $scope.availableNew.device = disk; // Force the update. The device should be the same value but // a new object. node.disks = angular.copy(node.disks); $rootScope.$digest(); expect($scope.availableNew.device).toEqual(disk); expect($scope.availableNew.device).not.toBe(disk); }); it("availableNew.devices array is updated", function() { var controller = makeController(); var disks = makeDisks(); node.disks = disks; // Load the filesystems, cachesets, available, and used once. $scope.nodeLoaded(); $rootScope.$digest(); // Set availableNew.device to a disk from available. var disk0 = $scope.available[0]; var disk1 = $scope.available[1]; $scope.availableNew.devices = [disk0, disk1]; // Force the update. The devices should be the same values but // new objects.
node.disks = angular.copy(node.disks); $rootScope.$digest(); expect($scope.availableNew.devices[0]).toEqual(disk0); expect($scope.availableNew.devices[0]).not.toBe(disk0); expect($scope.availableNew.devices[1]).toEqual(disk1); expect($scope.availableNew.devices[1]).not.toBe(disk1); }); describe("isBootDiskDisabled", function() { it("returns true if not physical", function() { var controller = makeController(); var disk = { type: "virtual" }; expect($scope.isBootDiskDisabled(disk, "available")).toBe(true); }); it("returns false if in available", function() { var controller = makeController(); var disk = { type: "physical" }; expect($scope.isBootDiskDisabled(disk, "available")).toBe(false); }); it("returns true when used and no partitions", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: false }; expect($scope.isBootDiskDisabled(disk, "used")).toBe(true); }); it("returns false when used and partitions", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: true }; expect($scope.isBootDiskDisabled(disk, "used")).toBe(false); }); }); describe("setAsBootDisk", function() { it("does nothing if already boot disk", function() { var controller = makeController(); var disk = { is_boot: true }; spyOn(NodesManager, "setBootDisk"); spyOn($scope, "isBootDiskDisabled").and.returnValue(false); $scope.setAsBootDisk(disk); expect(NodesManager.setBootDisk).not.toHaveBeenCalled(); }); it("does nothing if set boot disk disabled", function() { var controller = makeController(); var disk = { is_boot: false }; spyOn(NodesManager, "setBootDisk"); spyOn($scope, "isBootDiskDisabled").and.returnValue(true); $scope.setAsBootDisk(disk); expect(NodesManager.setBootDisk).not.toHaveBeenCalled(); }); it("calls NodesManager.setBootDisk", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), is_boot: false }; spyOn(NodesManager, "setBootDisk"); spyOn($scope, "isBootDiskDisabled").and.returnValue(false); $scope.setAsBootDisk(disk); expect(NodesManager.setBootDisk).toHaveBeenCalledWith( node, disk.block_id); }); }); describe("getSelectedFilesystems", function() { it("returns selected filesystems", function() { var controller = makeController(); var filesystems = [ { $selected: true }, { $selected: true }, { $selected: false }, { $selected: false } ]; $scope.filesystems = filesystems; expect($scope.getSelectedFilesystems()).toEqual( [filesystems[0], filesystems[1]]); }); }); describe("updateFilesystemSelection", function() { it("sets filesystemMode to NONE when none selected", function() { var controller = makeController(); spyOn($scope, "getSelectedFilesystems").and.returnValue([]); $scope.filesystemMode = "other"; $scope.updateFilesystemSelection(); expect($scope.filesystemMode).toBeNull(); }); it("doesn't set filesystemMode to SINGLE when not force", function() { var controller = makeController(); spyOn($scope, "getSelectedFilesystems").and.returnValue([{}]); $scope.filesystemMode = "other"; $scope.updateFilesystemSelection(); expect($scope.filesystemMode).toBe("other"); }); it("sets filesystemMode to SINGLE when force", function() { var controller = makeController(); spyOn($scope, "getSelectedFilesystems").and.returnValue([{}]); $scope.filesystemMode = "other"; $scope.updateFilesystemSelection(true); expect($scope.filesystemMode).toBe("single"); }); it("doesn't set filesystemMode to MULTI when not force", function() { var controller = makeController(); spyOn($scope,
"getSelectedFilesystems").and.returnValue([{}, {}]); $scope.filesystemMode = "other"; $scope.updateFilesystemSelection(); expect($scope.filesystemMode).toBe("other"); }); it("sets filesystemMode to MULTI when force", function() { var controller = makeController(); spyOn($scope, "getSelectedFilesystems").and.returnValue([{}, {}]); $scope.filesystemMode = "other"; $scope.updateFilesystemSelection(true); expect($scope.filesystemMode).toBe("multi"); }); it("sets filesystemAllSelected to false when none selected", function() { var controller = makeController(); spyOn($scope, "getSelectedFilesystems").and.returnValue([]); $scope.filesystemAllSelected = true; $scope.updateFilesystemSelection(); expect($scope.filesystemAllSelected).toBe(false); }); it("sets filesystemAllSelected to false when not all selected", function() { var controller = makeController(); $scope.filesystems = [{}, {}]; spyOn($scope, "getSelectedFilesystems").and.returnValue([{}]); $scope.filesystemAllSelected = true; $scope.updateFilesystemSelection(); expect($scope.filesystemAllSelected).toBe(false); }); it("sets filesystemAllSelected to true when all selected", function() { var controller = makeController(); $scope.filesystems = [{}, {}]; spyOn($scope, "getSelectedFilesystems").and.returnValue( [{}, {}]); $scope.filesystemAllSelected = false; $scope.updateFilesystemSelection(); expect($scope.filesystemAllSelected).toBe(true); }); }); describe("toggleFilesystemSelect", function() { it("inverts $selected", function() { var controller = makeController(); var filesystem = { $selected: true }; spyOn($scope, "updateFilesystemSelection"); $scope.toggleFilesystemSelect(filesystem); expect(filesystem.$selected).toBe(false); $scope.toggleFilesystemSelect(filesystem); expect(filesystem.$selected).toBe(true); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); }); }); describe("toggleFilesystemAllSelect", function() { it("sets all to true if not all selected", function() { var controller = makeController(); var filesystems = [{ $selected: true }, { $selected: false }]; $scope.filesystems = filesystems; $scope.filesystemAllSelected = false; spyOn($scope, "updateFilesystemSelection"); $scope.toggleFilesystemAllSelect(); expect(filesystems[0].$selected).toBe(true); expect(filesystems[1].$selected).toBe(true); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); }); it("sets all to false if all selected", function() { var controller = makeController(); var filesystems = [{ $selected: true }, { $selected: true }]; $scope.filesystems = filesystems; $scope.filesystemAllSelected = true; spyOn($scope, "updateFilesystemSelection"); $scope.toggleFilesystemAllSelect(); expect(filesystems[0].$selected).toBe(false); expect(filesystems[1].$selected).toBe(false); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); }); }); describe("isFilesystemsDisabled", function() { it("returns false for NONE", function() { var controller = makeController(); $scope.filesystemMode = null; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isFilesystemsDisabled()).toBe(false); }); it("returns false for SINGLE", function() { var controller = makeController(); $scope.filesystemMode = "single"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isFilesystemsDisabled()).toBe(false); }); it("returns false for MULTI", function() { var controller = makeController(); $scope.filesystemMode = "multi"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); 
expect($scope.isFilesystemsDisabled()).toBe(false); }); it("returns true for UNMOUNT", function() { var controller = makeController(); $scope.filesystemMode = "unmount"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isFilesystemsDisabled()).toBe(true); }); it("returns true when isAllStorageDisabled", function() { var controller = makeController(); $scope.filesystemMode = "multi"; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); expect($scope.isFilesystemsDisabled()).toBe(true); }); }); describe("filesystemCancel", function() { it("calls updateFilesystemSelection with force true", function() { var controller = makeController(); spyOn($scope, "updateFilesystemSelection"); $scope.filesystemCancel(); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); }); }); describe("filesystemUnmount", function() { it("sets filesystemMode to UNMOUNT", function() { var controller = makeController(); $scope.filesystemMode = "other"; $scope.filesystemUnmount(); expect($scope.filesystemMode).toBe("unmount"); }); }); describe("quickFilesystemUnmount", function() { it("selects filesystem and calls filesystemUnmount", function() { var controller = makeController(); var filesystems = [{ $selected: true }, { $selected: false }]; $scope.filesystems = filesystems; spyOn($scope, "updateFilesystemSelection"); spyOn($scope, "filesystemUnmount"); $scope.quickFilesystemUnmount(filesystems[1]); expect(filesystems[0].$selected).toBe(false); expect(filesystems[1].$selected).toBe(true); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); expect($scope.filesystemUnmount).toHaveBeenCalled(); }); }); describe("filesystemConfirmUnmount", function() { it("calls NodesManager.updateFilesystem", function() { var controller = makeController(); var filesystem = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), fstype: makeName("fs") }; $scope.filesystems = [filesystem]; spyOn(NodesManager, "updateFilesystem"); spyOn($scope, "updateFilesystemSelection"); $scope.filesystemConfirmUnmount(filesystem); expect(NodesManager.updateFilesystem).toHaveBeenCalledWith( node, filesystem.block_id, filesystem.partition_id, filesystem.fstype, null); }); it("removes filesystem from filesystems", function() { var controller = makeController(); var filesystem = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), fstype: makeName("fs") }; $scope.filesystems = [filesystem]; spyOn(NodesManager, "updateFilesystem"); spyOn($scope, "updateFilesystemSelection"); $scope.filesystemConfirmUnmount(filesystem); expect($scope.filesystems).toEqual([]); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith(); }); }); describe("filesystemDelete", function() { it("sets filesystemMode to DELETE", function() { var controller = makeController(); $scope.filesystemMode = "other"; $scope.filesystemDelete(); expect($scope.filesystemMode).toBe("delete"); }); }); describe("quickFilesystemDelete", function() { it("selects filesystem and calls filesystemDelete", function() { var controller = makeController(); var filesystems = [{ $selected: true }, { $selected: false }]; $scope.filesystems = filesystems; spyOn($scope, "updateFilesystemSelection"); spyOn($scope, "filesystemDelete"); $scope.quickFilesystemDelete(filesystems[1]); expect(filesystems[0].$selected).toBe(false); expect(filesystems[1].$selected).toBe(true); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith( true); expect($scope.filesystemDelete).toHaveBeenCalled(); }); }); 
describe("filesystemConfirmDelete", function() { it("calls NodesManager.deletePartition for partition", function() { var controller = makeController(); var filesystem = { original_type: "partition", original: { id: makeInteger(0, 100) } }; $scope.filesystems = [filesystem]; spyOn(NodesManager, "deletePartition"); spyOn($scope, "updateFilesystemSelection"); $scope.filesystemConfirmDelete(filesystem); expect(NodesManager.deletePartition).toHaveBeenCalledWith( node, filesystem.original.id); expect($scope.filesystems).toEqual([]); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith(); }); it("calls NodesManager.deleteDisk for disk", function() { var controller = makeController(); var filesystem = { original_type: "physical", original: { id: makeInteger(0, 100) } }; $scope.filesystems = [filesystem]; spyOn(NodesManager, "deleteDisk"); spyOn($scope, "updateFilesystemSelection"); $scope.filesystemConfirmDelete(filesystem); expect(NodesManager.deleteDisk).toHaveBeenCalledWith( node, filesystem.original.id); expect($scope.filesystems).toEqual([]); expect($scope.updateFilesystemSelection).toHaveBeenCalledWith(); }); }); describe("hasUnmountedFilesystem", function() { it("returns false if no fstype", function() { var controller = makeController(); var disk = { fstype: null }; expect($scope.hasUnmountedFilesystem(disk)).toBe(false); }); it("returns false if empty fstype", function() { var controller = makeController(); var disk = { fstype: "" }; expect($scope.hasUnmountedFilesystem(disk)).toBe(false); }); it("returns true if no mount_point", function() { var controller = makeController(); var disk = { fstype: "ext4", mount_point: null }; expect($scope.hasUnmountedFilesystem(disk)).toBe(true); }); it("returns true if empty mount_point", function() { var controller = makeController(); var disk = { fstype: "ext4", mount_point: "" }; expect($scope.hasUnmountedFilesystem(disk)).toBe(true); }); it("returns false if has mount_point", function() { var controller = makeController(); var disk = { fstype: "ext4", mount_point: "/" }; expect($scope.hasUnmountedFilesystem(disk)).toBe(false); }); }); describe("showFreeSpace", function() { it("returns true if volume group", function() { var controller = makeController(); var disk = { type: "lvm-vg" }; expect($scope.showFreeSpace(disk)).toBe(true); }); it("returns true if physical with partitions", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: true }; expect($scope.showFreeSpace(disk)).toBe(true); }); it("returns false if physical without partitions", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: false }; expect($scope.showFreeSpace(disk)).toBe(false); }); it("returns true if virtual with partitions", function() { var controller = makeController(); var disk = { type: "virtual", has_partitions: true }; expect($scope.showFreeSpace(disk)).toBe(true); }); it("returns false if virtual without partitions", function() { var controller = makeController(); var disk = { type: "virtual", has_partitions: false }; expect($scope.showFreeSpace(disk)).toBe(false); }); it("returns false otherwise", function() { var controller = makeController(); var disk = { type: "other" }; expect($scope.showFreeSpace(disk)).toBe(false); }); }); describe("getDeviceType", function() { it("returns logical volume", function() { var controller = makeController(); var disk = { type: "virtual", parent_type: "lvm-vg" }; expect($scope.getDeviceType(disk)).toBe("Logical volume"); }); it("returns raid", 
function() {
        var controller = makeController();
        var disk = { type: "virtual", parent_type: "raid-5" };
        expect($scope.getDeviceType(disk)).toBe("RAID 5");
    });

    it("returns parent_type", function() {
        var controller = makeController();
        var disk = { type: "virtual", parent_type: "other" };
        expect($scope.getDeviceType(disk)).toBe("Other");
    });

    it("returns volume group", function() {
        var controller = makeController();
        var disk = { type: "lvm-vg" };
        expect($scope.getDeviceType(disk)).toBe("Volume group");
    });

    it("returns type", function() {
        var controller = makeController();
        var disk = { type: "physical" };
        expect($scope.getDeviceType(disk)).toBe("Physical");
    });
});

describe("getSelectedAvailable", function() {

    it("returns selected available", function() {
        var controller = makeController();
        var available = [
            { $selected: true },
            { $selected: true },
            { $selected: false },
            { $selected: false }
        ];
        $scope.available = available;
        expect($scope.getSelectedAvailable()).toEqual(
            [available[0], available[1]]);
    });
});

describe("updateAvailableSelection", function() {

    it("sets availableMode to NONE when none selected", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([]);
        $scope.availableMode = "other";
        $scope.updateAvailableSelection();
        expect($scope.availableMode).toBeNull();
    });

    it("doesn't set availableMode to SINGLE when not force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([{}]);
        $scope.availableMode = "other";
        $scope.updateAvailableSelection();
        expect($scope.availableMode).toBe("other");
    });

    it("sets availableMode to SINGLE when force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([{}]);
        $scope.availableMode = "other";
        $scope.updateAvailableSelection(true);
        expect($scope.availableMode).toBe("single");
    });

    it("doesn't set availableMode to MULTI when not force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([{}, {}]);
        $scope.availableMode = "other";
        $scope.updateAvailableSelection();
        expect($scope.availableMode).toBe("other");
    });

    it("sets availableMode to MULTI when force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([{}, {}]);
        $scope.availableMode = "other";
        $scope.updateAvailableSelection(true);
        expect($scope.availableMode).toBe("multi");
    });

    it("sets availableAllSelected to false when none selected", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedAvailable").and.returnValue([]);
        $scope.availableAllSelected = true;
        $scope.updateAvailableSelection();
        expect($scope.availableAllSelected).toBe(false);
    });

    it("sets availableAllSelected to false when not all selected",
        function() {
        var controller = makeController();
        $scope.available = [{}, {}];
        spyOn($scope, "getSelectedAvailable").and.returnValue([{}]);
        $scope.availableAllSelected = true;
        $scope.updateAvailableSelection();
        expect($scope.availableAllSelected).toBe(false);
    });

    it("sets availableAllSelected to true when all selected", function() {
        var controller = makeController();
        $scope.available = [{}, {}];
        spyOn($scope, "getSelectedAvailable").and.returnValue(
            [{}, {}]);
        $scope.availableAllSelected = false;
        $scope.updateAvailableSelection();
        expect($scope.availableAllSelected).toBe(true);
    });
});

describe("toggleAvailableSelect", function() {

    it("inverts $selected", function() {
        var controller = makeController();
        var disk = { $selected: true };
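// All of the toggle*Select helpers exercised in this file share one
// shape; a minimal sketch of it, inferred from these tests alone (an
// illustration, not the controller source):
//
//     $scope.toggleAvailableSelect = function(disk) {
//         disk.$selected = !disk.$selected;
//         $scope.updateAvailableSelection(true);
//     };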
spyOn($scope, "updateAvailableSelection"); $scope.toggleAvailableSelect(disk); expect(disk.$selected).toBe(false); $scope.toggleAvailableSelect(disk); expect(disk.$selected).toBe(true); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("toggleAvailableAllSelect", function() { it("sets all to true if not all selected", function() { var controller = makeController(); var available = [{ $selected: true }, { $selected: false }]; $scope.available = available; $scope.availableAllSelected = false; spyOn($scope, "updateAvailableSelection"); $scope.toggleAvailableAllSelect(); expect(available[0].$selected).toBe(true); expect(available[1].$selected).toBe(true); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("sets all to false if all selected", function() { var controller = makeController(); var available = [{ $selected: true }, { $selected: true }]; $scope.available = available; $scope.availableAllSelected = true; spyOn($scope, "updateAvailableSelection"); $scope.toggleAvailableAllSelect(); expect(available[0].$selected).toBe(false); expect(available[1].$selected).toBe(false); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("isAvailableDisabled", function() { it("returns false for NONE", function() { var controller = makeController(); $scope.availableMode = null; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isAvailableDisabled()).toBe(false); }); it("returns false for SINGLE", function() { var controller = makeController(); $scope.availableMode = "single"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isAvailableDisabled()).toBe(false); }); it("returns false for MULTI", function() { var controller = makeController(); $scope.availableMode = "multi"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isAvailableDisabled()).toBe(false); }); it("returns true for UNMOUNT", function() { var controller = makeController(); $scope.availableMode = "unmount"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isAvailableDisabled()).toBe(true); }); }); describe("canFormatAndMount", function() { it("returns false if lvm-vg", function() { var controller = makeController(); var disk = { type: "lvm-vg" }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canFormatAndMount(disk)).toBe(false); }); it("returns false if has_partitions", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: true }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canFormatAndMount(disk)).toBe(false); }); it("returns false if physical and is boot disk", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: false, original: { is_boot: true } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canFormatAndMount(disk)).toBe(false); }); it("returns true otherwise", function() { var controller = makeController(); var disk = { type: "physical", has_partitions: false, original: { is_boot: false } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canFormatAndMount(disk)).toBe(true); }); }); describe("getFormatAndMountButtonText", function() { it("returns Mount if umounted filesystem", function() { var controller = makeController(); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(true); 
expect($scope.getFormatAndMountButtonText({})).toBe("Mount"); }); it("returns Format if not formatted filesystem", function() { var controller = makeController(); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); expect($scope.getFormatAndMountButtonText({})).toBe("Format"); }); }); describe("getPartitionButtonText", function() { it("returns Add Partition if already has partitions", function() { var controller = makeController(); expect($scope.getPartitionButtonText({ has_partitions: true })).toBe("Add partition"); }); it("returns Partition if no partitions", function() { var controller = makeController(); expect($scope.getPartitionButtonText({ has_partitions: false })).toBe("Partition"); }); }); describe("canAddPartition", function() { it("returns false if partition", function() { var controller = makeController(); spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition({ type: "partition" })).toBe(false); }); it("returns false if lvm-vg", function() { var controller = makeController(); spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition({ type: "lvm-vg" })).toBe(false); }); it("returns false if logical volume", function() { var controller = makeController(); spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition({ type: "virtual", parent_type: "lvm-vg" })).toBe(false); }); it("returns false if bcache", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition({ type: "virtual", parent_type: "bcache" })).toBe(false); }); it("returns false if formatted", function() { var controller = makeController(); spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition({ type: "physical", fstype: "ext4" })).toBe(false); }); it("returns false if available_size is less than partition size " + "and partition table extra space", function() { var controller = makeController(); var disk = { type: "physical", fstype: "", original: { partition_table_type: null, available_size: 2.5 * 1024 * 1024, block_size: 1024 } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition(disk)).toBe(false); }); it("returns false if available_size is less than partition size ", function() { var controller = makeController(); var disk = { type: "physical", fstype: "", original: { partition_table_type: "mbr", available_size: 1024 * 1024, block_size: 1024 } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition(disk)).toBe(false); }); it("returns false if available_size is less than partition size " + "when node is ppc64el architecture", function() { var controller = makeController(); var disk = { type: "physical", fstype: "", original: { partition_table_type: null, available_size: (2.5 * 1024 * 1024) + (8 * 1024 * 1024), block_size: 1024 } }; node.architecture = "ppc64el/generic"; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition(disk)).toBe(false); }); it("returns false if not super user", function() { var controller = makeController(); var 
disk = { type: "physical", fstype: "", original: { partition_table_type: null, available_size: 10 * 1024 * 1024, block_size: 1024 } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return false; }; expect($scope.canAddPartition(disk)).toBe(false); }); it("returns false if isAllStorageDisabled", function() { var controller = makeController(); var disk = { type: "physical", fstype: "", original: { partition_table_type: null, available_size: 10 * 1024 * 1024, block_size: 1024 } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition(disk)).toBe(false); }); it("returns true otherwise", function() { var controller = makeController(); var disk = { type: "physical", fstype: "", original: { partition_table_type: null, available_size: 10 * 1024 * 1024, block_size: 1024 } }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canAddPartition(disk)).toBe(true); }); }); describe("isNameInvalid", function() { it("returns false if name is blank", function() { var controller = makeController(); var disk = { name: "" }; expect($scope.isNameInvalid(disk)).toBe(false); }); it("returns true if name is already used by another disk", function() { var controller = makeController(); var otherId = makeInteger(0, 100); var id = makeInteger(100, 200); var name = makeName("name"); var otherDisk = { id: otherId, type: "physical", name: name }; var thisDisk = { id: id, type: "physical", name: name }; $scope.node.disks = [otherDisk, thisDisk]; var disk = { name: name, block_id: id }; expect($scope.isNameInvalid(disk)).toBe(true); }); it("returns false if name is the same as self", function() { var controller = makeController(); var id = makeInteger(100, 200); var name = makeName("name"); var thisDisk = { id: id, type: "physical", name: name }; $scope.node.disks = [thisDisk]; var disk = { name: name, type: "physical", block_id: id }; expect($scope.isNameInvalid(disk)).toBe(false); }); }); describe("saveAvailableName", function() { it("resets name to original if empty", function() { var controller = makeController(); var name = makeName("name"); var disk = { name: "", original: { name: name } }; spyOn(NodesManager, "updateDisk"); $scope.saveAvailableName(disk); expect(disk.name).toBe(name); expect(NodesManager.updateDisk).not.toHaveBeenCalled(); }); it("does nothing if name is the same", function() { var controller = makeController(); var name = makeName("name"); var disk = { name: name, original: { name: name } }; spyOn(NodesManager, "updateDisk"); $scope.saveAvailableName(disk); expect(NodesManager.updateDisk).not.toHaveBeenCalled(); }); it("calls updateDisks with new name", function() { var controller = makeController(); var name = makeName("name"); var newName = makeName("newName"); var id = makeInteger(0, 100); var disk = { name: newName, type: "physical", block_id: id, original: { name: name } }; spyOn(NodesManager, "updateDisk"); $scope.saveAvailableName(disk); expect(NodesManager.updateDisk).toHaveBeenCalledWith( node, id, { name: newName }); }); it("calls updateDisks with new name for logical volume", function() { var controller = makeController(); var id = makeInteger(0, 100); var disk = { name: "vg0-lvnew", type: "virtual", parent_type: "lvm-vg", block_id: id, original: { name: "vg0-lvold" } }; spyOn(NodesManager, "updateDisk"); $scope.saveAvailableName(disk); 
expect(NodesManager.updateDisk).toHaveBeenCalledWith( node, id, { name: "lvnew" }); }); }); describe("nameHasChanged", function() { it("logical volume resets name to include parents name", function() { var controller = makeController(); var disk = { name: "", type: "virtual", parent_type: "lvm-vg", original: { name: "vg0-lvname" } }; $scope.nameHasChanged(disk); expect(disk.name).toBe("vg0-"); }); }); describe("availableCancel", function() { it("calls updateAvailableSelection with force true", function() { var controller = makeController(); spyOn($scope, "updateAvailableSelection"); $scope.availableCancel(); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("availableUnformat", function() { it("sets filesystemMode to UNFORMAT", function() { var controller = makeController(); $scope.availableMode = "other"; $scope.availableUnformat(); expect($scope.availableMode).toBe("unformat"); }); }); describe("availableConfirmUnformat", function() { it("calls NodesManager.updateFilesystem", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100) }; spyOn(NodesManager, "updateFilesystem"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmUnformat(disk); expect(NodesManager.updateFilesystem).toHaveBeenCalledWith( node, disk.block_id, disk.partition_id, null, null); }); it("clears fstype", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), fstype: "ext4" }; spyOn(NodesManager, "updateFilesystem"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmUnformat(disk); expect(disk.fstype).toBeNull(); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("availableFormatAndMount", function() { it("sets default $options", function() { var controller = makeController(); var disk = {}; $scope.availableFormatAndMount(disk); expect(disk.$options).toEqual({ fstype: "ext4", mountPoint: "" }); }); it("sets $options with disk values", function() { var controller = makeController(); var disk = { fstype: makeName("fs"), mount_point: makeName("mount") }; $scope.availableFormatAndMount(disk); expect(disk.$options).toEqual({ fstype: disk.fstype, mountPoint: disk.mount_point }); }); it("sets availableMode to FORMAT_AND_MOUNT", function() { var controller = makeController(); var disk = {}; $scope.availableFormatAndMount(disk); expect($scope.availableMode).toBe("format-mount"); }); }); describe("availableQuickFormatAndMount", function() { it("selects disks and deselects others", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; $scope.available = available; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableFormatAndMount"); $scope.availableQuickFormatAndMount(available[0]); expect(available[0].$selected).toBe(true); expect(available[1].$selected).toBe(false); }); it("calls updateAvailableSelection with force true", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableFormatAndMount"); $scope.availableQuickFormatAndMount(available[0]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls availableFormatAndMount with disk", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, 
"updateAvailableSelection"); spyOn($scope, "availableFormatAndMount"); $scope.availableQuickFormatAndMount(available[0]); expect($scope.availableFormatAndMount).toHaveBeenCalledWith( available[0]); }); }); describe("getAvailableFormatSubmitText", function() { it("returns 'Mount' when mount_point set", function() { var controller = makeController(); var disk = { $options: { mountPoint: "/" } }; expect($scope.getAvailableFormatSubmitText(disk)).toBe( "Mount"); }); it("returns 'Format' when mount_point is null", function() { var controller = makeController(); var disk = { $options: { mount_point: null } }; expect($scope.getAvailableFormatSubmitText(disk)).toBe( "Format"); }); it("returns 'Format' when mount_point is empty", function() { var controller = makeController(); var disk = { $options: { mount_point: "" } }; expect($scope.getAvailableFormatSubmitText(disk)).toBe( "Format"); }); }); describe("availableConfirmFormatAndMount", function() { it("does nothing when isMountPointInvalid returns true", function() { var controller = makeController(); var disk = { $options: { mount_point: "invalid" } }; spyOn($scope, "isMountPointInvalid").and.returnValue(true); spyOn(NodesManager, "updateFilesystem"); $scope.availableConfirmFormatAndMount(disk); expect(NodesManager.updateFilesystem).not.toHaveBeenCalled(); }); it("calls NodesManager.updateFilesystem with fstype and mount_point", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), $options: { fstype: makeName("fs"), mountPoint: makeName("/path") } }; spyOn(NodesManager, "updateFilesystem"); $scope.availableConfirmFormatAndMount(disk); expect(NodesManager.updateFilesystem).toHaveBeenCalledWith( node, disk.block_id, disk.partition_id, disk.$options.fstype, disk.$options.mountPoint); }); it("sets new values on disk", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), $options: { fstype: makeName("fs"), mountPoint: makeName("/path") } }; spyOn(NodesManager, "updateFilesystem"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmFormatAndMount(disk); expect(disk.fstype).toBe(disk.$options.fstype); expect(disk.mount_point).toBe(disk.$options.mountPoint); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("moves disks to filesystems list", function() { var controller = makeController(); var disk = { name: makeName("name"), block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), size_human: makeName("size"), used_size_human: makeName("used_size"), $options: { fstype: makeName("fs"), mountPoint: makeName("/path") } }; spyOn(NodesManager, "updateFilesystem"); $scope.available = [disk]; $scope.availableConfirmFormatAndMount(disk); expect($scope.filesystems).toEqual([{ "name": disk.name, "size_human": disk.size_human, "fstype": disk.fstype, "mount_point": disk.mount_point, "block_id": disk.block_id, "partition_id": disk.partition_id }]); expect($scope.available).toEqual([]); }); }); describe("isMountPointInvalid", function() { it("returns false if mount_point is undefined", function() { var controller = makeController(); expect($scope.isMountPointInvalid()).toBe(false); }); it("returns false if mount_point is empty", function() { var controller = makeController(); expect($scope.isMountPointInvalid("")).toBe(false); }); it("returns true if mount_point doesn't start with '/'", function() { var controller = makeController(); 
expect($scope.isMountPointInvalid("a")).toBe(true); }); it("returns false if mount_point start with '/'", function() { var controller = makeController(); expect($scope.isMountPointInvalid("/")).toBe(false); }); }); describe("canDelete", function() { it("returns true if volume group not used", function() { var controller = makeController(); var disk = { type: "lvm-vg", fstype: null, has_partitions: false, original: { used_size: 0 } }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(true); }); it("returns false if not super user", function() { var controller = makeController(); var disk = { type: "lvm-vg", fstype: null, has_partitions: false, original: { used_size: 0 } }; $scope.isSuperUser = function() { return false; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(false); }); it("returns false if isAllStorageDisabled", function() { var controller = makeController(); var disk = { type: "lvm-vg", fstype: null, has_partitions: false, original: { used_size: 0 } }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); expect($scope.canDelete(disk)).toBe(false); }); it("returns false if volume group used", function() { var controller = makeController(); var disk = { type: "lvm-vg", fstype: null, has_partitions: false, original: { used_size: makeInteger(100, 10000) } }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(false); }); it("returns true if fstype is null", function() { var controller = makeController(); var disk = { fstype: null, has_partitions: false }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(true); }); it("returns true if fstype is empty", function() { var controller = makeController(); var disk = { fstype: "", has_partitions: false }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(true); }); it("returns true if fstype is not empty", function() { var controller = makeController(); var disk = { fstype: "ext4" }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(true); }); it("returns false if has_partitions is true", function() { var controller = makeController(); var disk = { fstype: "", has_partitions: true }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDelete(disk)).toBe(false); }); }); describe("availableUnformat", function() { it("sets availableMode to UNFORMAT", function() { var controller = makeController(); $scope.availableMode = "other"; $scope.availableUnformat(); expect($scope.availableMode).toBe("unformat"); }); }); describe("availableQuickUnformat", function() { it("selects disks and deselects others", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; $scope.available = available; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableUnformat"); $scope.availableQuickUnformat(available[0]); expect(available[0].$selected).toBe(true); expect(available[1].$selected).toBe(false); }); it("calls updateAvailableSelection 
with force true", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableUnformat"); $scope.availableQuickUnformat(available[0]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls availableUnformat", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableUnformat"); $scope.availableQuickUnformat(available[0]); expect($scope.availableUnformat).toHaveBeenCalledWith(); }); }); describe("availableDelete", function() { it("sets availableMode to DELETE", function() { var controller = makeController(); $scope.availableMode = "other"; $scope.availableDelete(); expect($scope.availableMode).toBe("delete"); }); }); describe("availableQuickDelete", function() { it("selects disks and deselects others", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; $scope.available = available; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableDelete"); $scope.availableQuickDelete(available[0]); expect(available[0].$selected).toBe(true); expect(available[1].$selected).toBe(false); }); it("calls updateAvailableSelection with force true", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableDelete"); $scope.availableQuickDelete(available[0]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls availableDelete", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availableDelete"); $scope.availableQuickDelete(available[0]); expect($scope.availableDelete).toHaveBeenCalledWith(); }); }); describe("getRemoveTypeText", function() { it("returns 'physical disk' for physical on filesystem", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "filesystem", original: { type: "physical" } })).toBe("physical disk"); }); it("returns 'physical disk' for physical", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "physical" })).toBe("physical disk"); }); it("returns 'partition' for partition", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "partition" })).toBe("partition"); }); it("returns 'volume group' for lvm-vg", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "lvm-vg" })).toBe("volume group"); }); it("returns 'logical volume' for virtual on lvm-vg", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "virtual", parent_type: "lvm-vg" })).toBe("logical volume"); }); it("returns 'RAID %d' for virtual on raid", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "virtual", parent_type: "raid-1" })).toBe("RAID 1 disk"); }); it("returns parent_type + 'disk' for other virtual", function() { var controller = makeController(); expect($scope.getRemoveTypeText({ type: "virtual", parent_type: "raid0" })).toBe("raid0 disk"); }); }); describe("availableConfirmDelete", function() { it("calls NodesManager.deleteVolumeGroup for lvm-vg", function() { var controller = makeController(); var disk = { type: "lvm-vg", 
block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100) }; $scope.available = [disk]; spyOn(NodesManager, "deleteVolumeGroup"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmDelete(disk); expect(NodesManager.deleteVolumeGroup).toHaveBeenCalledWith( node, disk.block_id); expect($scope.available).toEqual([]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls NodesManager.deletePartition for partition", function() { var controller = makeController(); var disk = { type: "partition", block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100) }; $scope.available = [disk]; spyOn(NodesManager, "deletePartition"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmDelete(disk); expect(NodesManager.deletePartition).toHaveBeenCalledWith( node, disk.partition_id); expect($scope.available).toEqual([]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls NodesManager.deleteDisk for disk", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100) }; $scope.available = [disk]; spyOn(NodesManager, "deleteDisk"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmDelete(disk); expect(NodesManager.deleteDisk).toHaveBeenCalledWith( node, disk.block_id); expect($scope.available).toEqual([]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("availablePartiton", function() { it("sets availableMode to 'partition'", function() { var controller = makeController(); var disk = { available_size_human: "10 GB" }; $scope.availableMode = "other"; $scope.availablePartiton(disk); expect($scope.availableMode).toBe("partition"); }); it("sets $options to values from available_size_human", function() { var controller = makeController(); var disk = { available_size_human: "10 GB" }; $scope.availablePartiton(disk); expect(disk.$options).toEqual({ size: "10", sizeUnits: "GB", fstype: null, mountPoint: "" }); }); }); describe("availableQuickPartition", function() { it("selects disks and deselects others", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; $scope.available = available; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availablePartiton"); $scope.availableQuickPartition(available[0]); expect(available[0].$selected).toBe(true); expect(available[1].$selected).toBe(false); }); it("calls updateAvailableSelection with force true", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availablePartiton"); $scope.availableQuickPartition(available[0]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls availablePartiton", function() { var controller = makeController(); var available = [{ $selected: false }, { $selected: true }]; spyOn($scope, "updateAvailableSelection"); spyOn($scope, "availablePartiton"); $scope.availableQuickPartition(available[0]); expect($scope.availablePartiton).toHaveBeenCalledWith( available[0]); }); }); describe("getAddPartitionName", function() { it("returns disk.name with -part#", function() { var controller = makeController(); var name = makeName("sda"); var disk = { name: name, original: { partition_table_type: "gpt", partitions: [{}, {}] } }; expect($scope.getAddPartitionName(disk)).toBe(name + "-part3"); }); it("returns disk.name with 
-part2 for ppc64el", function() { node.architecture = "ppc64el/generic"; var controller = makeController(); var name = makeName("sda"); var disk = { name: name, original: { is_boot: true, partition_table_type: "gpt" } }; expect($scope.getAddPartitionName(disk)).toBe(name + "-part2"); }); it("returns disk.name with -part4 for ppc64el", function() { node.architecture = "ppc64el/generic"; var controller = makeController(); var name = makeName("sda"); var disk = { name: name, original: { is_boot: true, partition_table_type: "gpt", partitions: [{}, {}] } }; expect($scope.getAddPartitionName(disk)).toBe(name + "-part4"); }); it("returns disk.name with -part3 for MBR", function() { var controller = makeController(); var name = makeName("sda"); var disk = { name: name, original: { partition_table_type: "mbr", partitions: [{}, {}] } }; expect($scope.getAddPartitionName(disk)).toBe(name + "-part3"); }); it("returns disk.name with -part5 for MBR", function() { var controller = makeController(); var name = makeName("sda"); var disk = { name: name, original: { partition_table_type: "mbr", partitions: [{}, {}, {}] } }; expect($scope.getAddPartitionName(disk)).toBe(name + "-part5"); }); }); describe("isAddPartitionSizeInvalid", function() { it("returns true if blank", function() { var controller = makeController(); var disk = { $options: { size: "", sizeUnits: "GB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(true); }); it("returns true if not numbers", function() { var controller = makeController(); var disk = { $options: { size: makeName("invalid"), sizeUnits: "GB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(true); }); it("returns true if smaller than MIN_PARTITION_SIZE", function() { var controller = makeController(); var disk = { $options: { size: "1", sizeUnits: "MB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(true); }); it("returns true if larger than available_size more than tolerance", function() { var controller = makeController(); var disk = { original: { available_size: 2 * 1000 * 1000 * 1000 }, $options: { size: "4", sizeUnits: "GB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(true); }); it("returns false if larger than available_size in tolerance", function() { var controller = makeController(); var disk = { original: { available_size: 2.6 * 1000 * 1000 * 1000 }, $options: { size: "2.62", sizeUnits: "GB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(false); }); it("returns false if less than available_size", function() { var controller = makeController(); var disk = { original: { available_size: 2.6 * 1000 * 1000 * 1000 }, $options: { size: "1.6", sizeUnits: "GB" } }; expect($scope.isAddPartitionSizeInvalid(disk)).toBe(false); }); }); describe("availableConfirmPartition", function() { it("does nothing if invalid", function() { var controller = makeController(); var disk = { $options: { size: "", sizeUnits: "GB" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); expect(NodesManager.createPartition).not.toHaveBeenCalled(); }); it("calls createPartition with bytes", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), original: { partition_table_type: "mbr", available_size: 4 * 1000 * 1000 * 1000, available_size_human: "4.0 GB", block_size: 512 }, $options: { size: "2", sizeUnits: "GB" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); expect(NodesManager.createPartition).toHaveBeenCalledWith( node, disk.block_id, 2 * 1000 * 1000 
* 1000, {}); }); it("calls createPartition with fstype and mountPoint", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), original: { partition_table_type: "mbr", available_size: 4 * 1000 * 1000 * 1000, available_size_human: "4.0 GB", block_size: 512 }, $options: { size: "2", sizeUnits: "GB", fstype: "ext4", mountPoint: "/" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); expect(NodesManager.createPartition).toHaveBeenCalledWith( node, disk.block_id, 2 * 1000 * 1000 * 1000, { fstype: "ext4", mount_point: "/" }); }); it("calls createPartition with available_size bytes", function() { var controller = makeController(); var available_size = 2.6 * 1000 * 1000 * 1000; var disk = { block_id: makeInteger(0, 100), original: { partition_table_type: "mbr", available_size: available_size, available_size_human: "2.6 GB", block_size: 512 }, $options: { size: "2.62", sizeUnits: "GB" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); // Align to 4MiB. var align_size = (4 * 1024 * 1024); var expected = align_size * Math.floor(available_size / align_size); expect(NodesManager.createPartition).toHaveBeenCalledWith( node, disk.block_id, expected, {}); }); // regression test for https://bugs.launchpad.net/maas/+bug/1509535 it("calls createPartition with available_size bytes" + " even when human size gets rounded down", function() { var controller = makeController(); var available_size = 2.035 * 1000 * 1000 * 1000; var disk = { block_id: makeInteger(0, 100), original: { partition_table_type: "mbr", available_size: available_size, available_size_human: "2.0 GB", block_size: 512 }, $options: { size: "2.0", sizeUnits: "GB" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); // Align to 4MiB. var align_size = (4 * 1024 * 1024); var expected = align_size * Math.floor(available_size / align_size); expect(NodesManager.createPartition).toHaveBeenCalledWith( node, disk.block_id, expected, {}); }); it("calls createPartition with bytes minus partition table extra", function() { var controller = makeController(); var available_size = 2.6 * 1000 * 1000 * 1000; var disk = { block_id: makeInteger(0, 100), original: { partition_table_type: "", available_size: available_size, available_size_human: "2.6 GB", block_size: 512 }, $options: { size: "2.62", sizeUnits: "GB" } }; spyOn(NodesManager, "createPartition"); $scope.availableConfirmPartition(disk); // Remove partition extra space and align to 4MiB. 
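// Worked through with this test's numbers: available_size is 2.6 GB
// (2,600,000,000 bytes); subtracting the 5 MiB (5,242,880 bytes)
// reserved for the yet-to-be-created partition table leaves
// 2,594,757,120 bytes, and rounding down to whole 4 MiB
// (4,194,304-byte) extents gives 618 extents, i.e. 2,592,079,872 bytes
// passed to createPartition.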
var align_size = (4 * 1024 * 1024);
var expected = align_size * Math.floor(
    (available_size - (5 * 1024 * 1024)) / align_size);
expect(NodesManager.createPartition).toHaveBeenCalledWith(
    node, disk.block_id, expected, {});
});
});

describe("getSelectedCacheSets", function() {

    it("returns selected cachesets", function() {
        var controller = makeController();
        var cachesets = [
            { $selected: true },
            { $selected: true },
            { $selected: false },
            { $selected: false }
        ];
        $scope.cachesets = cachesets;
        expect($scope.getSelectedCacheSets()).toEqual(
            [cachesets[0], cachesets[1]]);
    });
});

describe("updateCacheSetsSelection", function() {

    it("sets cachesetsMode to NONE when none selected", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([]);
        $scope.cachesetsMode = "other";
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsMode).toBeNull();
    });

    it("doesn't set cachesetsMode to SINGLE when not force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([{}]);
        $scope.cachesetsMode = "other";
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsMode).toBe("other");
    });

    it("sets cachesetsMode to SINGLE when force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([{}]);
        $scope.cachesetsMode = "other";
        $scope.updateCacheSetsSelection(true);
        expect($scope.cachesetsMode).toBe("single");
    });

    it("doesn't set cachesetsMode to MULTI when not force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([{}, {}]);
        $scope.cachesetsMode = "other";
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsMode).toBe("other");
    });

    it("sets cachesetsMode to MULTI when force", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([{}, {}]);
        $scope.cachesetsMode = "other";
        $scope.updateCacheSetsSelection(true);
        expect($scope.cachesetsMode).toBe("multi");
    });

    it("sets cachesetsAllSelected to false when none selected", function() {
        var controller = makeController();
        spyOn($scope, "getSelectedCacheSets").and.returnValue([]);
        $scope.cachesetsAllSelected = true;
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsAllSelected).toBe(false);
    });

    it("sets cachesetsAllSelected to false when not all selected",
        function() {
        var controller = makeController();
        $scope.cachesets = [{}, {}];
        spyOn($scope, "getSelectedCacheSets").and.returnValue([{}]);
        $scope.cachesetsAllSelected = true;
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsAllSelected).toBe(false);
    });

    it("sets cachesetsAllSelected to true when all selected", function() {
        var controller = makeController();
        $scope.cachesets = [{}, {}];
        spyOn($scope, "getSelectedCacheSets").and.returnValue(
            [{}, {}]);
        $scope.cachesetsAllSelected = false;
        $scope.updateCacheSetsSelection();
        expect($scope.cachesetsAllSelected).toBe(true);
    });
});

describe("toggleCacheSetSelect", function() {

    it("inverts $selected", function() {
        var controller = makeController();
        var cacheset = { $selected: true };
        spyOn($scope, "updateCacheSetsSelection");
        $scope.toggleCacheSetSelect(cacheset);
        expect(cacheset.$selected).toBe(false);
        $scope.toggleCacheSetSelect(cacheset);
        expect(cacheset.$selected).toBe(true);
        expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith(
            true);
    });
});

describe("toggleCacheSetAllSelect", function() {

    it("sets all to true if not all selected", function() {
        var controller = makeController();
        var cachesets = [{
$selected: true }, { $selected: false }]; $scope.cachesets = cachesets; $scope.cachesetsAllSelected = false; spyOn($scope, "updateCacheSetsSelection"); $scope.toggleCacheSetAllSelect(); expect(cachesets[0].$selected).toBe(true); expect(cachesets[1].$selected).toBe(true); expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith( true); }); it("sets all to false if all selected", function() { var controller = makeController(); var cachesets = [{ $selected: true }, { $selected: true }]; $scope.cachesets = cachesets; $scope.cachesetsAllSelected = true; spyOn($scope, "updateCacheSetsSelection"); $scope.toggleCacheSetAllSelect(); expect(cachesets[0].$selected).toBe(false); expect(cachesets[1].$selected).toBe(false); expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith( true); }); }); describe("isCacheSetsDisabled", function() { it("returns false for NONE", function() { var controller = makeController(); $scope.cachesetsMode = null; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isCacheSetsDisabled()).toBe(false); }); it("returns false for SINGLE", function() { var controller = makeController(); $scope.cachesetsMode = "single"; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isCacheSetsDisabled()).toBe(false); }); it("returns false for MULTI", function() { var controller = makeController(); $scope.cachesetsMode = "multi"; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isCacheSetsDisabled()).toBe(false); }); it("returns true for when not super user", function() { var controller = makeController(); $scope.cachesetsMode = "delete"; $scope.isSuperUser = function() { return false; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isCacheSetsDisabled()).toBe(true); }); it("returns true for when isAllStorageDisabled", function() { var controller = makeController(); $scope.cachesetsMode = "delete"; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); expect($scope.isCacheSetsDisabled()).toBe(true); }); it("returns true for DELETE", function() { var controller = makeController(); $scope.cachesetsMode = "delete"; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.isCacheSetsDisabled()).toBe(true); }); }); describe("cacheSetCancel", function() { it("calls updateCacheSetsSelection with force true", function() { var controller = makeController(); spyOn($scope, "updateCacheSetsSelection"); $scope.cacheSetCancel(); expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith( true); }); }); describe("canDeleteCacheSet", function() { it("returns true when not being used", function() { var controller = makeController(); var cacheset = { used_by: "" }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDeleteCacheSet(cacheset)).toBe(true); }); it("returns false when being used", function() { var controller = makeController(); var cacheset = { used_by: "bcache0" }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDeleteCacheSet(cacheset)).toBe(false); }); it("returns false when not super user", function() { var controller = makeController(); var cacheset = { 
used_by: "" }; $scope.isSuperUser = function() { return false; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canDeleteCacheSet(cacheset)).toBe(false); }); it("returns false when isAllStorageDisabled", function() { var controller = makeController(); var cacheset = { used_by: "" }; $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); expect($scope.canDeleteCacheSet(cacheset)).toBe(false); }); }); describe("cacheSetDelete", function() { it("sets cachesetsMode to DELETE", function() { var controller = makeController(); $scope.cachesetsMode = "other"; $scope.cacheSetDelete(); expect($scope.cachesetsMode).toBe("delete"); }); }); describe("quickCacheSetDelete", function() { it("selects cacheset and calls cacheSetDelete", function() { var controller = makeController(); var cachesets = [{ $selected: true }, { $selected: false }]; $scope.cachesets = cachesets; spyOn($scope, "updateCacheSetsSelection"); spyOn($scope, "cacheSetDelete"); $scope.quickCacheSetDelete(cachesets[1]); expect(cachesets[0].$selected).toBe(false); expect(cachesets[1].$selected).toBe(true); expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith( true); expect($scope.cacheSetDelete).toHaveBeenCalled(); }); }); describe("cacheSetConfirmDelete", function() { it("calls NodesManager.deleteCacheSet and removes from list", function() { var controller = makeController(); var cacheset = { cache_set_id: makeInteger(0, 100) }; $scope.cachesets = [cacheset]; spyOn(NodesManager, "deleteCacheSet"); spyOn($scope, "updateCacheSetsSelection"); $scope.cacheSetConfirmDelete(cacheset); expect(NodesManager.deleteCacheSet).toHaveBeenCalledWith( node, cacheset.cache_set_id); expect($scope.cachesets).toEqual([]); expect($scope.updateCacheSetsSelection).toHaveBeenCalledWith(); }); }); describe("canCreateCacheSet", function() { it("returns false if isAvailableDisabled returns true", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateCacheSet()).toBe(false); }); it("returns false if two selected", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { $selected: true }, { $selected: true }]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateCacheSet()).toBe(false); }); it("returns false if selected has fstype", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: "ext4", $selected: true } ]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateCacheSet()).toBe(false); }); it("returns false if selected is volume group", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { type: "lvm-vg", fstype: null, $selected: true } ]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateCacheSet()).toBe(false); }); it("returns false if not super user", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: null, $selected: true } ]; $scope.isSuperUser = function() { return false; }; expect($scope.canCreateCacheSet()).toBe(false); }); it("returns true if selected has no fstype", function() { var controller = makeController(); spyOn($scope, 
"isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: null, $selected: true } ]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateCacheSet()).toBe(true); }); }); describe("createCacheSet", function() { it("does nothing if canCreateCacheSet returns false", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), $selected: true }; $scope.available = [disk]; spyOn($scope, "canCreateCacheSet").and.returnValue(false); spyOn(NodesManager, "createCacheSet"); $scope.createCacheSet(); expect(NodesManager.createCacheSet).not.toHaveBeenCalled(); }); it("calls NodesManager.createCacheSet and removes from available", function() { var controller = makeController(); var disk = { block_id: makeInteger(0, 100), partition_id: makeInteger(0, 100), $selected: true }; $scope.available = [disk]; spyOn($scope, "canCreateCacheSet").and.returnValue(true); spyOn(NodesManager, "createCacheSet"); $scope.createCacheSet(); expect(NodesManager.createCacheSet).toHaveBeenCalledWith( node, disk.block_id, disk.partition_id); expect($scope.available).toEqual([]); }); }); describe("canCreateBcache", function() { it("returns false when isAvailableDisabled is true", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns false if two selected", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { $selected: true }, { $selected: true }]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns false if selected has fstype", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: "ext4", $selected: true } ]; $scope.cachesets = [{}]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns false if selected is volume group", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { type: "lvm-vg", fstype: null, $selected: true } ]; $scope.cachesets = [{}]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns false if selected has no fstype but not cachesets ", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: null, $selected: true } ]; $scope.cachesets = []; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns false if not super user ", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: null, $selected: true } ]; $scope.cachesets = [{}]; $scope.isSuperUser = function() { return false; }; expect($scope.canCreateBcache()).toBe(false); }); it("returns true if selected has no fstype but has cachesets ", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); $scope.available = [ { fstype: null, $selected: true } ]; $scope.cachesets = [{}]; $scope.isSuperUser = function() { return true; }; expect($scope.canCreateBcache()).toBe(true); }); }); describe("createBcache", 
function() { it("does nothing if canCreateBcache returns false", function() { var controller = makeController(); $scope.availableMode = "other"; spyOn($scope, "canCreateBcache").and.returnValue(false); $scope.createBcache(); expect($scope.availableMode).toBe("other"); }); it("sets availableMode and availableNew", function() { var controller = makeController(); $scope.availableMode = "other"; spyOn($scope, "canCreateBcache").and.returnValue(true); // Add bcache name to create a name after that index. var otherBcache = { name: "bcache4" }; node.disks = [otherBcache]; // Will be set as the device. var disk = { $selected: true }; $scope.available = [disk]; // Will be set as the cacheset. var cacheset = {}; $scope.cachesets = [cacheset]; $scope.createBcache(); expect($scope.availableMode).toBe("bcache"); expect($scope.availableNew).toEqual({ name: "bcache5", device: disk, cacheset: cacheset, cacheMode: "writeback", fstype: null, mountPoint: "" }); expect($scope.availableNew.device).toBe(disk); expect($scope.availableNew.cacheset).toBe(cacheset); }); }); describe("fstypeChanged", function() { it("leave mountPoint when fstype is not null", function() { var controller = makeController(); var mountPoint = makeName("srv"); $scope.availableNew = { fstype: "ext4", mountPoint: mountPoint }; $scope.fstypeChanged($scope.availableNew); expect($scope.availableNew.mountPoint).toBe(mountPoint); }); it("clear mountPoint when fstype null", function() { var controller = makeController(); $scope.availableNew = { fstype: null, mountPoint: makeName("srv") }; $scope.fstypeChanged($scope.availableNew); expect($scope.availableNew.mountPoint).toBe(""); }); }); describe("isNewDiskNameInvalid", function() { it("returns true if blank name", function() { var controller = makeController(); $scope.node.disks = []; $scope.availableNew.name = ""; expect($scope.isNewDiskNameInvalid()).toBe(true); }); it("returns true if name used by disk", function() { var controller = makeController(); var name = makeName("disk"); $scope.node.disks = [{ name: name }]; $scope.availableNew.name = name; expect($scope.isNewDiskNameInvalid()).toBe(true); }); it("returns true if name used by partition", function() { var controller = makeController(); var name = makeName("disk"); $scope.node.disks = [{ name: makeName("other"), partitions: [ { name: name } ] }]; $scope.availableNew.name = name; expect($scope.isNewDiskNameInvalid()).toBe(true); }); it("returns false if the name is not already used", function() { var controller = makeController(); var name = makeName("disk"); $scope.node.disks = [{ name: makeName("other"), partitions: [ { name: makeName("part") } ] }]; $scope.availableNew.name = name; expect($scope.isNewDiskNameInvalid()).toBe(false); }); }); describe("createBcacheCanSave", function() { it("returns false if isNewDiskNameInvalid returns true", function() { var controller = makeController(); $scope.availableNew.mountPoint = "/"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(true); expect($scope.createBcacheCanSave()).toBe(false); }); it("returns false if isMountPointInvalid returns true", function() { var controller = makeController(); $scope.availableNew.mountPoint = "not/absolute"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(false); expect($scope.createBcacheCanSave()).toBe(false); }); it("returns true if both return false", function() { var controller = makeController(); $scope.availableNew.mountPoint = "/"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(false); expect($scope.createBcacheCanSave()).toBe(true); 
}); }); describe("availableConfirmCreateBcache", function() { it("does nothing if createBcacheCanSave returns false", function() { var controller = makeController(); spyOn($scope, "createBcacheCanSave").and.returnValue(false); var availableNew = { name: makeName("bcache"), cacheset: { cache_set_id: makeInteger(0, 100) }, cacheMode: "writearound", device: { type: "partition", partition_id: makeInteger(0, 100) }, fstype: null, mountPoint: "" }; $scope.availableNew = availableNew; spyOn(NodesManager, "createBcache"); $scope.availableConfirmCreateBcache(); expect(NodesManager.createBcache).not.toHaveBeenCalled(); }); it("calls NodesManager.createBcache for partition", function() { var controller = makeController(); spyOn($scope, "createBcacheCanSave").and.returnValue(true); var device = { type: "partition", partition_id: makeInteger(0, 100), $selected: true }; var availableNew = { name: makeName("bcache"), cacheset: { cache_set_id: makeInteger(0, 100) }, cacheMode: "writearound", device: device, fstype: "ext4", mountPoint: "/" }; $scope.available = [device]; $scope.availableNew = availableNew; spyOn(NodesManager, "createBcache"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmCreateBcache(); expect(NodesManager.createBcache).toHaveBeenCalledWith( node, { name: availableNew.name, cache_set: availableNew.cacheset.cache_set_id, cache_mode: "writearound", partition_id: device.partition_id, fstype: "ext4", mount_point: "/" }); expect($scope.available).toEqual([]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); it("calls NodesManager.createBcache for block device", function() { var controller = makeController(); spyOn($scope, "createBcacheCanSave").and.returnValue(true); var device = { type: "physical", block_id: makeInteger(0, 100), $selected: true }; var availableNew = { name: makeName("bcache"), cacheset: { cache_set_id: makeInteger(0, 100) }, cacheMode: "writearound", device: device, fstype: null, mountPoint: "/" }; $scope.available = [device]; $scope.availableNew = availableNew; spyOn(NodesManager, "createBcache"); spyOn($scope, "updateAvailableSelection"); $scope.availableConfirmCreateBcache(); expect(NodesManager.createBcache).toHaveBeenCalledWith( node, { name: availableNew.name, cache_set: availableNew.cacheset.cache_set_id, cache_mode: "writearound", block_id: device.block_id }); expect($scope.available).toEqual([]); expect($scope.updateAvailableSelection).toHaveBeenCalledWith( true); }); }); describe("canCreateRAID", function() { it("returns false isAvailableDisabled returns true", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateRAID()).toBe(false); }); it("returns false if less than 2 is selected", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}]); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateRAID()).toBe(false); }); it("returns false if any selected has filesystem", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}, {}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateRAID()).toBe(false); }); it("returns false if any selected is volume group", function() { var 
controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([ { type: "lvm-vg" }, { type: "physical" } ]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateRAID()).toBe(false); }); it("returns false if not super user", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}, {}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return false; }; expect($scope.canCreateRAID()).toBe(false); }); it("returns true if more than 1 selected", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}, {}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canCreateRAID()).toBe(true); }); }); describe("createRAID", function() { it("does nothing if canCreateRAID returns false", function() { var controller = makeController(); spyOn($scope, "canCreateRAID").and.returnValue(false); $scope.availableMode = "other"; $scope.createRAID(); expect($scope.availableMode).toBe("other"); }); it("sets up availableNew", function() { var controller = makeController(); spyOn($scope, "canCreateRAID").and.returnValue(true); $scope.availableMode = "other"; // Add md name to create a name after that index. var otherRAID = { name: "md4" }; node.disks = [otherRAID]; // Will be set as the devices. var disk0 = { $selected: true }; var disk1 = { $selected: true }; $scope.available = [disk0, disk1]; $scope.createRAID(); expect($scope.availableMode).toBe("raid"); expect($scope.availableNew.name).toBe("md5"); expect($scope.availableNew.devices).toEqual([disk0, disk1]); expect($scope.availableNew.mode.level).toEqual("raid-0"); expect($scope.availableNew.spares).toEqual([]); expect($scope.availableNew.fstype).toBeNull(); expect($scope.availableNew.mountPoint).toEqual(""); }); }); describe("getAvailableRAIDModes", function() { it("returns empty list if availableNew null", function() { var controller = makeController(); $scope.availableNew = null; expect($scope.getAvailableRAIDModes()).toEqual([]); }); it("returns empty list if availableNew.devices not defined", function() { var controller = makeController(); $scope.availableNew = {}; expect($scope.getAvailableRAIDModes()).toEqual([]); }); it("returns raid 0 and 1 for 2 disks", function() { var controller = makeController(); $scope.availableNew.devices = [{}, {}]; var modes = $scope.getAvailableRAIDModes(); expect(modes[0].level).toEqual("raid-0"); expect(modes[1].level).toEqual("raid-1"); expect(modes.length).toEqual(2); }); it("returns raid 0,1,5,10 for 3 disks", function() { var controller = makeController(); $scope.availableNew.devices = [{}, {}, {}]; var modes = $scope.getAvailableRAIDModes(); expect(modes[0].level).toEqual("raid-0"); expect(modes[1].level).toEqual("raid-1"); expect(modes[2].level).toEqual("raid-5"); expect(modes[3].level).toEqual("raid-10"); expect(modes.length).toEqual(4); }); it("returns raid 0,1,5,6,10 for 4 disks", function() { var controller = makeController(); $scope.availableNew.devices = [{}, {}, {}, {}]; var modes = $scope.getAvailableRAIDModes(); 
expect(modes[0].level).toEqual("raid-0"); expect(modes[1].level).toEqual("raid-1"); expect(modes[2].level).toEqual("raid-5"); expect(modes[3].level).toEqual("raid-6"); expect(modes[4].level).toEqual("raid-10"); expect(modes.length).toEqual(5); }); }); describe("getTotalNumberOfAvailableSpares", function() { var modes = [ { level: "raid-0", min_disks: 2, allows_spares: false }, { level: "raid-1", min_disks: 2, allows_spares: true }, { level: "raid-5", min_disks: 3, allows_spares: true }, { level: "raid-6", min_disks: 4, allows_spares: true }, { level: "raid-10", min_disks: 3, allows_spares: true } ]; angular.forEach(modes, function(mode) { it("returns current result for " + mode.level, function() { var controller = makeController(); $scope.availableNew.mode = mode; if(!mode.allows_spares) { expect($scope.getTotalNumberOfAvailableSpares()).toBe(0); } else { var count = makeInteger(mode.min_disks, 100); var i, devices = []; for(i = 0; i < count; i++) { devices.push({}); } $scope.availableNew.devices = devices; expect( $scope.getTotalNumberOfAvailableSpares(), count - mode.min_disks); } }); }); }); describe("getNumberOfRemainingSpares", function() { it("returns 0 when getTotalNumberOfAvailableSpares returns 0", function() { var controller = makeController(); spyOn( $scope, "getTotalNumberOfAvailableSpares").and.returnValue(0); expect($scope.getNumberOfRemainingSpares()).toBe(0); }); it("returns allowed minus the current number of spares", function() { var controller = makeController(); var count = makeInteger(10, 100); spyOn( $scope, "getTotalNumberOfAvailableSpares").and.returnValue(count); var sparesCount = makeInteger(0, count); var i, spares = []; for(i = 0; i < sparesCount; i++) { spares.push({}); } $scope.availableNew.spares = spares; expect($scope.getNumberOfRemainingSpares()).toBe( count - sparesCount); }); }); describe("showSparesColumn", function() { it("returns true when getTotalNumberOfAvailableSpares greater than 0", function() { var controller = makeController(); spyOn( $scope, "getTotalNumberOfAvailableSpares").and.returnValue(1); expect($scope.showSparesColumn()).toBe(true); }); it("returns false when getTotalNumberOfAvailableSpares less than 1", function() { var controller = makeController(); spyOn( $scope, "getTotalNumberOfAvailableSpares").and.returnValue(0); expect($scope.showSparesColumn()).toBe(false); }); }); describe("RAIDModeChanged", function() { it("clears availableNew.spares", function() { var controller = makeController(); $scope.availableNew.spares = [{}, {}]; $scope.RAIDModeChanged(); expect($scope.availableNew.spares).toEqual([]); }); }); describe("isActiveRAIDMember", function() { it("returns true when disk key not in spares", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk]; $scope.setAsActiveRAIDMember(disk); expect($scope.isActiveRAIDMember(disk)).toBe(true); }); it("returns false when disk key in spares", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk]; $scope.setAsSpareRAIDMember(disk); expect($scope.isActiveRAIDMember(disk)).toBe(false); }); }); describe("isSpareRAIDMember", function() { it("returns false when disk key not in spares", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = 
[disk]; $scope.setAsActiveRAIDMember(disk); expect($scope.isSpareRAIDMember(disk)).toBe(false); }); it("returns true when disk key in spares", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk]; $scope.setAsSpareRAIDMember(disk); expect($scope.isSpareRAIDMember(disk)).toBe(true); }); }); describe("setAsActiveRAIDMember", function() { it("sets the disk as an active RAID member", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk]; $scope.setAsSpareRAIDMember(disk); expect($scope.isSpareRAIDMember(disk)).toBe(true); $scope.setAsActiveRAIDMember(disk); expect($scope.isActiveRAIDMember(disk)).toBe(true); }); }); describe("setAsSpareRAIDMember", function() { it("sets the disk as a spare RAID member", function() { var controller = makeController(); var disk = { type: "physical", block_id: makeInteger() }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk]; $scope.setAsActiveRAIDMember(disk); expect($scope.isActiveRAIDMember(disk)).toBe(true); $scope.setAsSpareRAIDMember(disk); expect($scope.isSpareRAIDMember(disk)).toBe(true); }); }); describe("getNewRAIDSize", function() { it("gets proper raid-0 size", function() { var controller = makeController(); var disk0 = { original: { available_size: 1000 * 1000 } }; var disk1 = { original: { available_size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[0]; expect($scope.getNewRAIDSize()).toBe("2.0 MB"); }); it("gets proper raid-0 size using size", function() { var controller = makeController(); var disk0 = { original: { size: 1000 * 1000 } }; var disk1 = { original: { size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[0]; expect($scope.getNewRAIDSize()).toBe("2.0 MB"); }); it("gets proper raid-1 size", function() { var controller = makeController(); var disk0 = { original: { available_size: 1000 * 1000 } }; var disk1 = { original: { available_size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[1]; expect($scope.getNewRAIDSize()).toBe("1.0 MB"); }); it("gets proper raid-5 size", function() { var controller = makeController(); var disk0 = { original: { available_size: 2 * 1000 * 1000 } }; var disk1 = { original: { available_size: 2 * 1000 * 1000 } }; var disk2 = { original: { available_size: 2 * 1000 * 1000 } }; var spare0 = { original: { available_size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1, disk2, spare0]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[2]; $scope.setAsSpareRAIDMember(spare0); // The 1MB spare causes us to only use 1MB of each active disk. 
expect($scope.getNewRAIDSize()).toBe("2.0 MB"); }); it("gets proper raid-6 size", function() { var controller = makeController(); var disk0 = { original: { available_size: 2 * 1000 * 1000 } }; var disk1 = { original: { available_size: 2 * 1000 * 1000 } }; var disk2 = { original: { available_size: 2 * 1000 * 1000 } }; var disk3 = { original: { available_size: 2 * 1000 * 1000 } }; var spare0 = { original: { available_size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1, disk2, disk3, spare0]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[3]; $scope.setAsSpareRAIDMember(spare0); // The 1MB spare causes us to only use 1MB of each active disk. expect($scope.getNewRAIDSize()).toBe("2.0 MB"); }); it("gets proper raid-10 size", function() { var controller = makeController(); var disk0 = { original: { available_size: 2 * 1000 * 1000 } }; var disk1 = { original: { available_size: 2 * 1000 * 1000 } }; var disk2 = { original: { available_size: 2 * 1000 * 1000 } }; var spare0 = { original: { available_size: 1000 * 1000 } }; $scope.availableNew.spares = []; $scope.availableNew.devices = [disk0, disk1, disk2, spare0]; $scope.availableNew.mode = $scope.getAvailableRAIDModes()[4]; $scope.setAsSpareRAIDMember(spare0); // The 1MB spare causes us to only use 1MB of each active disk. expect($scope.getNewRAIDSize()).toBe("1.5 MB"); }); }); describe("createRAIDCanSave", function() { it("returns false if isNewDiskNameInvalid returns true", function() { var controller = makeController(); $scope.availableNew.mountPoint = "/"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(true); expect($scope.createRAIDCanSave()).toBe(false); }); it("returns false if isMountPointInvalid returns true", function() { var controller = makeController(); $scope.availableNew.mountPoint = "not/absolute"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(false); expect($scope.createRAIDCanSave()).toBe(false); }); it("returns true if both return false", function() { var controller = makeController(); $scope.availableNew.mountPoint = "/"; spyOn($scope, "isNewDiskNameInvalid").and.returnValue(false); expect($scope.createRAIDCanSave()).toBe(true); }); }); describe("availableConfirmCreateRAID", function() { it("does nothing if createRAIDCanSave returns false", function() { var controller = makeController(); spyOn($scope, "createRAIDCanSave").and.returnValue(false); var partition0 = { type: "partition", block_id: makeInteger(0, 10), partition_id: makeInteger(0, 10) }; var partition1 = { type: "partition", block_id: makeInteger(10, 20), partition_id: makeInteger(10, 20) }; var disk0 = { type: "physical", block_id: makeInteger(0, 10) }; var disk1 = { type: "physical", block_id: makeInteger(10, 20) }; var availableNew = { name: makeName("md"), mode: { level: "raid-1" }, devices: [partition0, partition1, disk0, disk1], spares: [], fstype: null, mountPoint: "" }; $scope.availableNew = availableNew; $scope.setAsSpareRAIDMember(partition0); $scope.setAsSpareRAIDMember(disk0); spyOn(NodesManager, "createRAID"); $scope.availableConfirmCreateRAID(); expect(NodesManager.createRAID).not.toHaveBeenCalled(); }); it("calls NodesManager.createRAID", function() { var controller = makeController(); spyOn($scope, "createRAIDCanSave").and.returnValue(true); var partition0 = { type: "partition", block_id: makeInteger(0, 10), partition_id: makeInteger(0, 10) }; var partition1 = { type: "partition", block_id: makeInteger(10, 20), partition_id: makeInteger(10, 20) }; var disk0 = { type: "physical", 
block_id: makeInteger(0, 10) }; var disk1 = { type: "physical", block_id: makeInteger(10, 20) }; var availableNew = { name: makeName("md"), mode: { level: "raid-1" }, devices: [partition0, partition1, disk0, disk1], spares: [], fstype: null, mountPoint: "" }; $scope.availableNew = availableNew; $scope.setAsSpareRAIDMember(partition0); $scope.setAsSpareRAIDMember(disk0); spyOn(NodesManager, "createRAID"); $scope.availableConfirmCreateRAID(); expect(NodesManager.createRAID).toHaveBeenCalledWith( node, { name: availableNew.name, level: "raid-1", block_devices: [disk1.block_id], partitions: [partition1.partition_id], spare_devices: [disk0.block_id], spare_partitions: [partition0.partition_id] }); }); it("calls NodesManager.createRAID with filesystem", function() { var controller = makeController(); spyOn($scope, "createRAIDCanSave").and.returnValue(true); var partition0 = { type: "partition", block_id: makeInteger(0, 10), partition_id: makeInteger(0, 10) }; var partition1 = { type: "partition", block_id: makeInteger(10, 20), partition_id: makeInteger(10, 20) }; var disk0 = { type: "physical", block_id: makeInteger(0, 10) }; var disk1 = { type: "physical", block_id: makeInteger(10, 20) }; var availableNew = { name: makeName("md"), mode: { level: "raid-1" }, devices: [partition0, partition1, disk0, disk1], spares: [], fstype: "ext4", mountPoint: "/" }; $scope.availableNew = availableNew; $scope.setAsSpareRAIDMember(partition0); $scope.setAsSpareRAIDMember(disk0); spyOn(NodesManager, "createRAID"); $scope.availableConfirmCreateRAID(); expect(NodesManager.createRAID).toHaveBeenCalledWith( node, { name: availableNew.name, level: "raid-1", block_devices: [disk1.block_id], partitions: [partition1.partition_id], spare_devices: [disk0.block_id], spare_partitions: [partition0.partition_id], fstype: "ext4", mount_point: "/" }); }); }); describe("canCreateVolumeGroup", function() { it("returns false isAvailableDisabled returns true", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateVolumeGroup()).toBe(false); }); it("returns false if any selected has filesystem", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(true); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateVolumeGroup()).toBe(false); }); it("returns false if any selected is volume group", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([ { type: "lvm-vg" }, { type: "physical" } ]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateVolumeGroup()).toBe(false); }); it("returns false if not super user", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, "getSelectedAvailable").and.returnValue([{}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return false; }; expect($scope.canCreateVolumeGroup()).toBe(false); }); it("returns true if aleast 1 selected", function() { var controller = makeController(); spyOn($scope, "isAvailableDisabled").and.returnValue(false); spyOn($scope, 
"getSelectedAvailable").and.returnValue([{}]); spyOn($scope, "hasUnmountedFilesystem").and.returnValue(false); $scope.isSuperUser = function() { return true; }; expect($scope.canCreateVolumeGroup()).toBe(true); }); }); describe("createVolumeGroup", function() { it("does nothing if canCreateVolumeGroup returns false", function() { var controller = makeController(); spyOn($scope, "canCreateVolumeGroup").and.returnValue(false); $scope.availableMode = "other"; $scope.createVolumeGroup(); expect($scope.availableMode).toBe("other"); }); it("sets up availableNew", function() { var controller = makeController(); spyOn($scope, "canCreateVolumeGroup").and.returnValue(true); $scope.availableMode = "other"; // Add vg name to create a name after that index. var otherVG = { name: "vg4" }; node.disks = [otherVG]; // Will be set as the devices. var disk0 = { $selected: true }; var disk1 = { $selected: true }; $scope.available = [disk0, disk1]; $scope.createVolumeGroup(); expect($scope.availableMode).toBe("volume-group"); expect($scope.availableNew.name).toBe("vg5"); expect($scope.availableNew.devices).toEqual([disk0, disk1]); }); }); describe("getNewVolumeGroupSize", function() { it("return the total of all devices", function() { var controller = makeController(); $scope.availableNew.devices = [ { original: { available_size: 1000 * 1000 } }, { original: { available_size: 1000 * 1000 } }, { original: { available_size: 1000 * 1000 } } ]; expect($scope.getNewVolumeGroupSize()).toBe("3.0 MB"); }); it("return the total of all devices using size", function() { var controller = makeController(); $scope.availableNew.devices = [ { original: { size: 1000 * 1000 } }, { original: { size: 1000 * 1000 } }, { original: { size: 1000 * 1000 } } ]; expect($scope.getNewVolumeGroupSize()).toBe("3.0 MB"); }); }); describe("createVolumeGroupCanSave", function() { it("return true if isNewDiskNameInvalid returns false", function() { var controller = makeController(); spyOn($scope, "isNewDiskNameInvalid").and.returnValue(false); expect($scope.createVolumeGroupCanSave()).toBe(true); }); it("return false if isNewDiskNameInvalid returns true", function() { var controller = makeController(); spyOn($scope, "isNewDiskNameInvalid").and.returnValue(true); expect($scope.createVolumeGroupCanSave()).toBe(false); }); }); describe("availableConfirmCreateVolumeGroup", function() { it("does nothing if createVolumeGroupCanSave returns false", function() { var controller = makeController(); spyOn($scope, "createVolumeGroupCanSave").and.returnValue( false); var partition0 = { type: "partition", block_id: makeInteger(0, 10), partition_id: makeInteger(0, 10) }; var partition1 = { type: "partition", block_id: makeInteger(10, 20), partition_id: makeInteger(10, 20) }; var disk0 = { type: "physical", block_id: makeInteger(0, 10) }; var disk1 = { type: "physical", block_id: makeInteger(10, 20) }; var availableNew = { name: makeName("vg"), devices: [partition0, partition1, disk0, disk1] }; $scope.availableNew = availableNew; spyOn(NodesManager, "createVolumeGroup"); $scope.availableConfirmCreateVolumeGroup(); expect(NodesManager.createVolumeGroup).not.toHaveBeenCalled(); }); it("calls NodesManager.createVolumeGroup", function() { var controller = makeController(); spyOn($scope, "createVolumeGroupCanSave").and.returnValue(true); var partition0 = { type: "partition", block_id: makeInteger(0, 10), partition_id: makeInteger(0, 10) }; var partition1 = { type: "partition", block_id: makeInteger(10, 20), partition_id: makeInteger(10, 20) }; var disk0 = { type: 
"physical", block_id: makeInteger(0, 10) }; var disk1 = { type: "physical", block_id: makeInteger(10, 20) }; var availableNew = { name: makeName("vg"), devices: [partition0, partition1, disk0, disk1] }; $scope.availableNew = availableNew; spyOn(NodesManager, "createVolumeGroup"); $scope.availableConfirmCreateVolumeGroup(); expect(NodesManager.createVolumeGroup).toHaveBeenCalledWith( node, { name: availableNew.name, block_devices: [disk0.block_id, disk1.block_id], partitions: [ partition0.partition_id, partition1.partition_id] }); }); }); describe("canAddLogicalVolume", function() { it("returns false if not volume group", function() { var controller = makeController(); expect($scope.canAddLogicalVolume({ type: "physical" })).toBe(false); expect($scope.canAddLogicalVolume({ type: "virtual" })).toBe(false); expect($scope.canAddLogicalVolume({ type: "partition" })).toBe(false); }); it("returns false if not enough space", function() { var controller = makeController(); expect($scope.canAddLogicalVolume({ type: "lvm-vg", original: { available_size: 1.5 * 1024 * 1024 } })).toBe(false); }); it("returns true if enough space", function() { var controller = makeController(); expect($scope.canAddLogicalVolume({ type: "lvm-vg", original: { available_size: 10 * 1024 * 1024 } })).toBe(true); }); }); describe("availableLogicalVolume", function() { it("sets availableMode to 'logical-volume'", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", available_size_human: "10 GB" }; $scope.availableMode = "other"; $scope.availableLogicalVolume(disk); expect($scope.availableMode).toBe("logical-volume"); }); it("sets $options to correct values", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", available_size_human: "10 GB" }; $scope.availableLogicalVolume(disk); expect(disk.$options).toEqual({ name: "vg0-lv0", size: "10", sizeUnits: "GB" }); }); }); describe("isLogicalVolumeNameInvalid", function() { it("returns true if doesn't start with volume group", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", $options: { name: "v" } }; expect($scope.isLogicalVolumeNameInvalid(disk)).toBe(true); }); it("returns true if equal to volume group", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", $options: { name: "vg0-" } }; expect($scope.isLogicalVolumeNameInvalid(disk)).toBe(true); }); it("returns false has text after the volume group", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", $options: { name: "vg0-l" } }; expect($scope.isLogicalVolumeNameInvalid(disk)).toBe(false); }); }); describe("newLogicalVolumeNameChanged", function() { it("resets name to volume group name if not present", function() { var controller = makeController(); var disk = { type: "lvm-vg", name: "vg0", $options: { name: "v" } }; $scope.newLogicalVolumeNameChanged(disk); expect(disk.$options.name).toBe("vg0-"); }); }); describe("isAddLogicalVolumeSizeInvalid", function() { it("returns value from isAddPartitionSizeInvalid", function() { var controller = makeController(); var sentinel = {}; spyOn($scope, "isAddPartitionSizeInvalid").and.returnValue( sentinel); expect($scope.isAddLogicalVolumeSizeInvalid({})).toBe(sentinel); }); }); describe("availableConfirmLogicalVolume", function() { it("does nothing if invalid", function() { var controller = makeController(); var disk = { $options: { size: "", sizeUnits: "GB" } }; spyOn(NodesManager, 
"createLogicalVolume"); $scope.availableConfirmLogicalVolume(disk); expect(NodesManager.createLogicalVolume).not.toHaveBeenCalled(); }); it("calls createLogicalVolume with bytes", function() { var controller = makeController(); var disk = { name: "vg0", block_id: makeInteger(0, 100), original: { available_size: 4 * 1000 * 1000 * 1000, available_size_human: "4.0 GB" }, $options: { name: "vg0-lv0", size: "2", sizeUnits: "GB", fstype: null, mountPoint: "" } }; spyOn(NodesManager, "createLogicalVolume"); $scope.availableConfirmLogicalVolume(disk); expect(NodesManager.createLogicalVolume).toHaveBeenCalledWith( node, disk.block_id, "lv0", 2 * 1000 * 1000 * 1000, {}); }); it("calls createLogicalVolume with fstype and mountPoint", function() { var controller = makeController(); var disk = { name: "vg0", block_id: makeInteger(0, 100), original: { available_size: 4 * 1000 * 1000 * 1000, available_size_human: "4.0 GB" }, $options: { name: "vg0-lv0", size: "2", sizeUnits: "GB", fstype: "ext4", mountPoint: "/" } }; spyOn(NodesManager, "createLogicalVolume"); $scope.availableConfirmLogicalVolume(disk); expect(NodesManager.createLogicalVolume).toHaveBeenCalledWith( node, disk.block_id, "lv0", 2 * 1000 * 1000 * 1000, { fstype: "ext4", mount_point: "/" }); }); it("calls createLogicalVolume with available_size bytes", function() { var controller = makeController(); var disk = { name: "vg0", block_id: makeInteger(0, 100), original: { available_size: 2.6 * 1000 * 1000 * 1000, available_size_human: "2.6 GB" }, $options: { name: "vg0-lv0", size: "2.62", sizeUnits: "GB", fstype: null, mountPoint: "" } }; spyOn(NodesManager, "createLogicalVolume"); $scope.availableConfirmLogicalVolume(disk); expect(NodesManager.createLogicalVolume).toHaveBeenCalledWith( node, disk.block_id, "lv0", 2.6 * 1000 * 1000 * 1000, {}); }); // regression test for https://bugs.launchpad.net/maas/+bug/1509535 it("calls createLogicalVolume with available_size bytes" + " even when human size gets rounded down", function() { var controller = makeController(); var disk = { name: "vg0", block_id: makeInteger(0, 100), original: { available_size: 2.035 * 1000 * 1000 * 1000, available_size_human: "2.0 GB" }, $options: { name: "vg0-lv0", size: "2.0", sizeUnits: "GB", fstype: null, mountPoint: "" } }; spyOn(NodesManager, "createLogicalVolume"); $scope.availableConfirmLogicalVolume(disk); expect(NodesManager.createLogicalVolume).toHaveBeenCalledWith( node, disk.block_id, "lv0", 2.035 * 1000 * 1000 * 1000, {}); }); }); describe("canEditTags", function() { it("returns false for partition", function() { var controller = makeController(); expect($scope.canEditTags({ type: "partition" })).toBe(false); }); it("returns false for lvm-vg", function() { var controller = makeController(); expect($scope.canEditTags({ type: "lvm-vg" })).toBe(false); }); it("returns false when not super user", function() { var controller = makeController(); $scope.isSuperUser = function() { return false; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canEditTags({ type: "physical" })).toBe(false); }); it("returns false when isAllStorageDisabled", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(true); expect($scope.canEditTags({ type: "physical" })).toBe(false); }); it("returns true for physical", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; spyOn($scope, 
"isAllStorageDisabled").and.returnValue(false); expect($scope.canEditTags({ type: "physical" })).toBe(true); }); it("returns true for virtual", function() { var controller = makeController(); $scope.isSuperUser = function() { return true; }; spyOn($scope, "isAllStorageDisabled").and.returnValue(false); expect($scope.canEditTags({ type: "virtual" })).toBe(true); }); }); describe("availableEditTags", function() { it("sets $options", function() { var controller = makeController(); var tags = [{}, {}]; var disk = { tags: tags }; $scope.availableEditTags(disk); expect(disk.$options.editingTags).toBe(true); expect(disk.$options.tags).toEqual(tags); expect(disk.$options.tags).not.toBe(tags); }); }); describe("availableCancelTags", function() { it("clears $options", function() { var controller = makeController(); var options = {}; var disk = { $options: options }; $scope.availableCancelTags(disk); expect(disk.$options).toEqual({}); expect(disk.$options).not.toBe(options); }); }); describe("availableSaveTags", function() { it("calls NodesManager.updateDiskTags", function() { var controller = makeController(); var tags = [ { text: "new" }, { text: "old" } ]; var disk = { block_id: makeInteger(0, 100), tags: [], $options: { editingTags:true, tags: tags } }; spyOn(NodesManager, "updateDiskTags"); $scope.availableSaveTags(disk); expect(NodesManager.updateDiskTags).toHaveBeenCalledWith( node, disk.block_id, ["new", "old"]); expect(disk.$options).toEqual({}); expect(disk.tags).toEqual(tags); }); }); describe("isAllStorageDisabled", function() { var RegionConnection, UserManager; beforeEach(inject(function($injector) { UsersManager = $injector.get("UsersManager"); RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. 
webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); it("false when status is Ready", function() { var controller = makeController(); $scope.node.status = "Ready"; spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: true }); expect($scope.isAllStorageDisabled()).toBe(false); }); it("false when status is Allocated", function() { var controller = makeController(); $scope.node.status = "Allocated"; spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: true }); expect($scope.isAllStorageDisabled()).toBe(false); }); it("false when Allocated and owned", function() { var controller = makeController(); var user = makeName("user"); $scope.node.status = "Allocated"; $scope.node.owner = user; spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: false, username: user }); expect($scope.isAllStorageDisabled()).toBe(false); }); it("true when not admin", function() { var controller = makeController(); $scope.node.status = "Allocated"; $scope.node.owner = makeName("user"); spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: false, username: makeName("user") }); expect($scope.isAllStorageDisabled()).toBe(true); }); it("true otherwise", function() { var controller = makeController(); $scope.node.status = makeName("status"); spyOn(UsersManager, "getAuthUser").and.returnValue( { is_superuser: true }); expect($scope.isAllStorageDisabled()).toBe(true); }); }); describe("hasStorageLayoutIssues", function() { it("true when node.storage_layout_issues has issues", function() { var controller = makeController(); $scope.node.storage_layout_issues = [makeName("issue")]; expect($scope.hasStorageLayoutIssues()).toBe(true); }); it("false when node.storage_layout_issues has no issues", function() { var controller = makeController(); $scope.node.storage_layout_issues = []; expect($scope.hasStorageLayoutIssues()).toBe(false); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_events.js0000644000000000000000000002107013056115004030564 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodeEventsController. */ describe("NodeEventsController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $scope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $scope = $rootScope.$new(); $q = $injector.get("$q"); })); // Load the required dependencies for the NodeEventsController and // mock the websocket connection. var NodesManager, EventsManagerFactory, ManagerHelperService, ErrorService; var RegionConnection, webSocket; beforeEach(inject(function($injector) { NodesManager = $injector.get("NodesManager"); EventsManagerFactory = $injector.get("EventsManagerFactory"); ManagerHelperService = $injector.get("ManagerHelperService"); ErrorService = $injector.get("ErrorService"); RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Make a fake node. var _id = 0; function makeNode() { var node = { id: _id++, system_id: makeName("system_id"), fqdn: makeName("fqdn") }; NodesManager._items.push(node); return node; } // Make a fake event. 
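// Only the fields read by getEventText (type.description and description) are faked.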
function makeEvent() { return { type: { description: makeName("type") }, description: makeName("description") }; } // Create the node that will be used and set the routeParams. var node, $routeParams; beforeEach(function() { node = makeNode(); $routeParams = { system_id: node.system_id }; }); // Makes the NodeEventsController function makeController(loadManagerDefer) { var loadManager = spyOn(ManagerHelperService, "loadManager"); if(angular.isObject(loadManagerDefer)) { loadManager.and.returnValue(loadManagerDefer.promise); } else { loadManager.and.returnValue($q.defer().promise); } // Start the connection so a valid websocket is created in the // RegionConnection. RegionConnection.connect(""); return $controller("NodeEventsController", { $scope: $scope, $rootScope: $rootScope, $routeParams: $routeParams, NodesManager: NodesManager, EventsManagerFactory: EventsManagerFactory, ManagerHelperService: ManagerHelperService, ErrorService: ErrorService }); } it("sets title to loading and page to nodes", function() { var controller = makeController(); expect($rootScope.title).toBe("Loading..."); expect($rootScope.page).toBe("nodes"); }); it("sets the initial $scope values", function() { var controller = makeController(); expect($scope.loaded).toBe(false); expect($scope.node).toBeNull(); expect($scope.events).toEqual([]); expect($scope.eventsLoaded).toEqual(false); expect($scope.days).toEqual(30); }); it("calls loadManager with NodesManager", function() { var controller = makeController(); expect(ManagerHelperService.loadManager).toHaveBeenCalledWith( NodesManager); }); it("doesn't call setActiveItem if node already loaded", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; spyOn(NodesManager, "setActiveItem"); defer.resolve(); $rootScope.$digest(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); expect(NodesManager.setActiveItem).not.toHaveBeenCalled(); }); it("calls setActiveItem if node not loaded", function() { var defer = $q.defer(); var controller = makeController(defer); var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); defer.resolve(); $rootScope.$digest(); setActiveDefer.resolve(node); $rootScope.$digest(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); expect(NodesManager.setActiveItem).toHaveBeenCalledWith( node.system_id); }); it("calls raiseError if setActiveItem is rejected", function() { var defer = $q.defer(); var controller = makeController(defer); var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); spyOn(ErrorService, "raiseError"); defer.resolve(); $rootScope.$digest(); var error = makeName("error"); setActiveDefer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).toHaveBeenCalledWith(error); }); it("gets the events manager for the node", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; spyOn(EventsManagerFactory, "getManager").and.callThrough(); defer.resolve(); $rootScope.$digest(); expect(EventsManagerFactory.getManager).toHaveBeenCalledWith(node.id); var manager = EventsManagerFactory.getManager(node.id); expect($scope.events).toBe(manager.getItems()); }); it("calls loadItems on the events manager", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; var manager = EventsManagerFactory.getManager(node.id); spyOn(manager, 
"loadItems").and.returnValue($q.defer().promise); defer.resolve(); $rootScope.$digest(); expect(manager.loadItems).toHaveBeenCalled(); }); it("sets eventsLoaded once events manager loadItems resolves", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; var manager = EventsManagerFactory.getManager(node.id); var loadDefer = $q.defer(); spyOn(manager, "loadItems").and.returnValue(loadDefer.promise); defer.resolve(); $rootScope.$digest(); loadDefer.resolve(); $rootScope.$digest(); expect($scope.eventsLoaded).toBe(true); }); it("watches node.fqdn updates $rootScope.title", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; defer.resolve(); $rootScope.$digest(); node.fqdn = makeName("fqdn"); $rootScope.$digest(); expect($rootScope.title).toBe(node.fqdn + " - events"); }); describe("getEventText", function() { it("returns just event type description without dash", function() { var controller = makeController(); var evt = makeEvent(); delete evt.description; expect($scope.getEventText(evt)).toBe(evt.type.description); }); it("returns event type description with event description", function() { var controller = makeController(); var evt = makeEvent(); expect($scope.getEventText(evt)).toBe( evt.type.description + " - " + evt.description); }); }); describe("loadMore", function() { it("adds 30 days to $scope.days", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; defer.resolve(); $rootScope.$digest(); $scope.loadMore(); expect($scope.days).toBe(60); }); it("calls loadMaximumDays with $scope.days", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; var manager = EventsManagerFactory.getManager(node.id); spyOn(manager, "loadMaximumDays"); defer.resolve(); $rootScope.$digest(); $scope.loadMore(); expect(manager.loadMaximumDays).toHaveBeenCalledWith(60); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_node_result.js0000644000000000000000000001565213056115004030607 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodeResultController. */ describe("NodeResultController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $location, $scope, $q; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $location = $injector.get("$location"); $scope = $rootScope.$new(); $q = $injector.get("$q"); })); // Load the required dependencies for the NodeResultController and // mock the websocket connection. var NodesManager, RegionConnection, ManagerHelperService, ErrorService; var webSocket; beforeEach(inject(function($injector) { NodesManager = $injector.get("NodesManager"); RegionConnection = $injector.get("RegionConnection"); ManagerHelperService = $injector.get("ManagerHelperService"); ErrorService = $injector.get("ErrorService"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Make a fake commissioning result. function makeCommissioningResult() { return { name: makeName("name"), data: makeName("data") }; } // Make a fake node. 
function makeNode() { var node = { system_id: makeName("system_id"), fqdn: makeName("fqdn"), commissioning_results: [ makeCommissioningResult(), makeCommissioningResult(), makeCommissioningResult() ] }; NodesManager._items.push(node); return node; } // Create the node that will be used and set the routeParams. var node, $routeParams; beforeEach(function() { node = makeNode(); $routeParams = { system_id: node.system_id, filename: node.commissioning_results[0].name }; }); // Makes the NodeResultController function makeController(loadManagerDefer) { var loadManager = spyOn(ManagerHelperService, "loadManager"); if(angular.isObject(loadManagerDefer)) { loadManager.and.returnValue(loadManagerDefer.promise); } else { loadManager.and.returnValue($q.defer().promise); } // Start the connection so a valid websocket is created in the // RegionConnection. RegionConnection.connect(""); return $controller("NodeResultController", { $scope: $scope, $rootScope: $rootScope, $routeParams: $routeParams, $location: $location, NodesManager: NodesManager, ManagerHelperService: ManagerHelperService, ErrorService: ErrorService }); } it("sets title to loading and page to nodes", function() { var controller = makeController(); expect($rootScope.title).toBe("Loading..."); expect($rootScope.page).toBe("nodes"); }); it("sets the initial $scope values", function() { var controller = makeController(); expect($scope.loaded).toBe(false); expect($scope.node).toBeNull(); expect($scope.filename).toBe($routeParams.filename); }); it("calls loadManager with NodesManager", function() { var controller = makeController(); expect(ManagerHelperService.loadManager).toHaveBeenCalledWith( NodesManager); }); it("doesn't call setActiveItem if node already loaded", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; spyOn(NodesManager, "setActiveItem"); defer.resolve(); $rootScope.$digest(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); expect(NodesManager.setActiveItem).not.toHaveBeenCalled(); }); it("calls setActiveItem if node not loaded", function() { var defer = $q.defer(); var controller = makeController(defer); var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); defer.resolve(); $rootScope.$digest(); setActiveDefer.resolve(node); $rootScope.$digest(); expect($scope.node).toBe(node); expect($scope.loaded).toBe(true); expect(NodesManager.setActiveItem).toHaveBeenCalledWith( node.system_id); }); it("calls raiseError if setActiveItem is rejected", function() { var defer = $q.defer(); var controller = makeController(defer); var setActiveDefer = $q.defer(); spyOn(NodesManager, "setActiveItem").and.returnValue( setActiveDefer.promise); spyOn(ErrorService, "raiseError"); defer.resolve(); $rootScope.$digest(); var error = makeName("error"); setActiveDefer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).toHaveBeenCalledWith(error); }); it("watches node.fqdn and updates $rootScope.title", function() { var defer = $q.defer(); var controller = makeController(defer); NodesManager._activeItem = node; defer.resolve(); $rootScope.$digest(); node.fqdn = makeName("fqdn"); $rootScope.$digest(); expect($rootScope.title).toBe( node.fqdn + " - " + $routeParams.filename); }); describe("getResultData", function() { it("returns empty string if node not loaded", function() { var controller = makeController(); expect($scope.getResultData()).toBe(""); }); it("returns data from result with newline prepended", 
function() { var controller = makeController(); $scope.node = node; expect($scope.getResultData()).toBe( "\n" + node.commissioning_results[0].data); }); it("returns 'Empty file' for empty data from result", function() { var controller = makeController(); $scope.node = node; node.commissioning_results[0].data = ""; expect($scope.getResultData()).toBe( "\nEmpty file"); }); it("calls $location.path back to node details if result missing", function() { $routeParams.filename = makeName("wrong_name"); var controller = makeController(); $scope.node = node; spyOn($location, "path"); expect($scope.getResultData()).toBe(""); expect($location.path).toHaveBeenCalledWith( "/node/" + node.system_id); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_nodes_list.js0000644000000000000000000016321313056115004030424 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodesListController. */ // Make a fake user. var userId = 0; function makeUser() { return { id: userId++, username: makeName("username"), first_name: makeName("first_name"), last_name: makeName("last_name"), email: makeName("email"), is_superuser: false, sshkeys_count: 0 }; } describe("NodesListController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $scope, $q, $routeParams; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $scope = $rootScope.$new(); $q = $injector.get("$q"); $routeParams = {}; })); // Load the NodesManager, DevicesManager, GeneralManager, // ZonesManager, UsersManager, RegionConnection, SearchService and // mock the websocket connection. var NodesManager, DevicesManager, GeneralManager, ZonesManager, UsersManager; var ManagerHelperService, SearchService; beforeEach(inject(function($injector) { NodesManager = $injector.get("NodesManager"); DevicesManager = $injector.get("DevicesManager"); GeneralManager = $injector.get("GeneralManager"); ZonesManager = $injector.get("ZonesManager"); UsersManager = $injector.get("UsersManager"); RegionConnection = $injector.get("RegionConnection"); ManagerHelperService = $injector.get("ManagerHelperService"); SearchService = $injector.get("SearchService"); })); // Mock the websocket connection to the region var RegionConnection, webSocket; beforeEach(inject(function($injector) { RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Makes the NodesListController function makeController(loadManagersDefer, defaultConnectDefer) { var loadManagers = spyOn(ManagerHelperService, "loadManagers"); if(angular.isObject(loadManagersDefer)) { loadManagers.and.returnValue(loadManagersDefer.promise); } else { loadManagers.and.returnValue($q.defer().promise); } var defaultConnect = spyOn(RegionConnection, "defaultConnect"); if(angular.isObject(defaultConnectDefer)) { defaultConnect.and.returnValue(defaultConnectDefer.promise); } else { defaultConnect.and.returnValue($q.defer().promise); } // Start the connection so a valid websocket is created in the // RegionConnection. RegionConnection.connect(""); // Create the controller. 
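// Only the locals listed below are overridden; GeneralManager, ZonesManager and UsersManager resolve through the normal injector.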
var controller = $controller("NodesListController", { $scope: $scope, $rootScope: $rootScope, $routeParams: $routeParams, NodesManager: NodesManager, DevicesManager: DevicesManager, ManagerHelperService: ManagerHelperService, SearchService: SearchService }); // Since the osSelection directive is not used in this test the // osSelection item on the model needs to have $reset function added // because it will be called throughout many of the tests. $scope.tabs.nodes.osSelection.$reset = jasmine.createSpy("$reset"); return controller; } // Makes a fake node/device. function makeObject(tab) { if (tab === 'nodes') { var node = { system_id: makeName("system_id"), $selected: false }; NodesManager._items.push(node); return node; } else if (tab === 'devices') { var device = { system_id: makeName("system_id"), $selected: false }; DevicesManager._items.push(device); return device; } return null; } it("sets title and page on $rootScope", function() { var controller = makeController(); expect($rootScope.title).toBe("Nodes"); expect($rootScope.page).toBe("nodes"); }); it("sets initial values on $scope", function() { // tab-independent variables. var controller = makeController(); expect($scope.nodes).toBe(NodesManager.getItems()); expect($scope.devices).toBe(DevicesManager.getItems()); expect($scope.osinfo).toBe(GeneralManager.getData("osinfo")); expect($scope.addHardwareOption).toBeNull(); expect($scope.addHardwareOptions).toEqual([ { name: "machine", title: "Machine" }, { name: "chassis", title: "Chassis" } ]); expect($scope.addHardwareScope).toBeNull(); expect($scope.loading).toBe(true); }); it("calls stopPolling when scope destroyed", function() { var controller = makeController(); spyOn(GeneralManager, "stopPolling"); $scope.$destroy(); expect(GeneralManager.stopPolling).toHaveBeenCalledWith( "osinfo"); }); it("saves current filters for nodes and devices when scope destroyed", function() { var controller = makeController(); var nodesFilters = {}, devicesFilters = {}; $scope.tabs.nodes.filters = nodesFilters; $scope.tabs.devices.filters = devicesFilters; $scope.$destroy(); expect(SearchService.retrieveFilters("nodes")).toBe(nodesFilters); expect(SearchService.retrieveFilters("devices")).toBe( devicesFilters); }); it("calls loadManagers with NodesManager, DevicesManager," + "GeneralManager, UsersManager", function() { var controller = makeController(); expect(ManagerHelperService.loadManagers).toHaveBeenCalledWith( [NodesManager, DevicesManager, GeneralManager, ZonesManager, UsersManager]); }); it("sets loading to false with loadManagers resolves", function() { var defer = $q.defer(); var controller = makeController(defer); defer.resolve(); $rootScope.$digest(); expect($scope.loading).toBe(false); }); it("sets nodes search from SearchService", function() { var query = makeName("query"); SearchService.storeFilters( "nodes", SearchService.getCurrentFilters(query)); var controller = makeController(); expect($scope.tabs.nodes.search).toBe(query); }); it("sets devices search from SearchService", function() { var query = makeName("query"); SearchService.storeFilters( "devices", SearchService.getCurrentFilters(query)); var controller = makeController(); expect($scope.tabs.devices.search).toBe(query); }); it("sets nodes search from $routeParams.query", function() { var query = makeName("query"); $routeParams.query = query; var controller = makeController(); expect($scope.tabs.nodes.search).toBe(query); }); it("calls updateFilters for nodes if search from $routeParams.query", function() { var query = 
makeName("query"); $routeParams.query = query; var controller = makeController(); expect($scope.tabs.nodes.filters._).toEqual([query]); }); describe("toggleTab", function() { it("sets $rootScope.title", function() { var controller = makeController(); $scope.toggleTab('devices'); expect($rootScope.title).toBe($scope.tabs.devices.pagetitle); $scope.toggleTab('nodes'); expect($rootScope.title).toBe($scope.tabs.nodes.pagetitle); }); it("sets currentpage", function() { var controller = makeController(); $scope.toggleTab('devices'); expect($scope.currentpage).toBe('devices'); $scope.toggleTab('nodes'); expect($scope.currentpage).toBe('nodes'); }); }); angular.forEach(["nodes", "devices"], function(tab) { describe("tab(" + tab + ")", function() { var manager; beforeEach(function() { if(tab === "nodes") { manager = NodesManager; } else if(tab === "devices") { manager = DevicesManager; } else { throw new Error("Unknown manager for tab: " + tab); } }); it("sets initial values on $scope", function() { var controller = makeController(); var tabScope = $scope.tabs[tab]; expect(tabScope.previous_search).toBe(""); expect(tabScope.search).toBe(""); expect(tabScope.searchValid).toBe(true); expect(tabScope.filtered_items).toEqual([]); expect(tabScope.predicate).toBe("fqdn"); expect(tabScope.allViewableChecked).toBe(false); expect(tabScope.selectedItems).toBe( tabScope.manager.getSelectedItems()); expect(tabScope.metadata).toBe(tabScope.manager.getMetadata()); expect(tabScope.filters).toEqual( SearchService.getEmptyFilter()); expect(tabScope.column).toBe("fqdn"); expect(tabScope.actionOption).toBeNull(); expect(tabScope.takeActionOptions).toEqual([]); expect(tabScope.actionErrorCount).toBe(0); expect(tabScope.zoneSelection).toBeNull(); // Only the nodes tab uses the osSelection and // commissionOptions fields. if(tab === "nodes") { expect(tabScope.osSelection.osystem).toBeNull(); expect(tabScope.osSelection.release).toBeNull(); expect(tabScope.commissionOptions).toEqual({ enableSSH: false, skipNetworking: false, skipStorage: false }); } }); it("resets search matches previous search and empty filtered_items", function() { var controller = makeController(); var tabScope = $scope.tabs[tab]; var search = makeName("search"); // Add item to filtered_items. tabScope.filtered_items.push(makeObject(tab)); tabScope.search = "in:(Selected)"; tabScope.previous_search = search; $scope.$digest(); // Empty the filtered_items, which should clear the search. tabScope.filtered_items.splice(0, 1); tabScope.search = search; $scope.$digest(); expect(tabScope.search).toBe(""); }); it("doesnt reset search matches if not empty filtered_items", function() { var controller = makeController(); var tabScope = $scope.tabs[tab]; var search = makeName("search"); // Add item to filtered_items. tabScope.filtered_items.push( makeObject(tab), makeObject(tab)); tabScope.search = "in:(Selected)"; tabScope.previous_search = search; $scope.$digest(); // Remove one item from filtered_items, which should not // clear the search. tabScope.filtered_items.splice(0, 1); tabScope.search = search; $scope.$digest(); expect(tabScope.search).toBe(search); }); it("doesnt reset search when previous search doesnt match", function() { var controller = makeController(); var tabScope = $scope.tabs[tab]; // Add item to filtered_items. 
tabScope.filtered_items.push(makeObject(tab)); tabScope.search = "in:(Selected)"; tabScope.previous_search = makeName("search"); $scope.$digest(); // Empty the filtered_items, but change the search which // should stop the search from being reset. tabScope.filtered_items.splice(0, 1); var search = makeName("search"); tabScope.search = search; $scope.$digest(); expect(tabScope.search).toBe(search); }); }); }); angular.forEach(["nodes", "devices"], function(tab) { describe("tab(" + tab + ")", function() { describe("clearSearch", function() { it("sets search to empty string", function() { var controller = makeController(); $scope.tabs[tab].search = makeName("search"); $scope.clearSearch(tab); expect($scope.tabs[tab].search).toBe(""); }); it("calls updateFilters", function() { var controller = makeController(); spyOn($scope, "updateFilters"); $scope.clearSearch(tab); expect($scope.updateFilters).toHaveBeenCalledWith(tab); }); }); describe("toggleChecked", function() { var controller, object, tabObj; beforeEach(function() { controller = makeController(); object = makeObject(tab); tabObj = $scope.tabs[tab]; $scope.tabs.nodes.filtered_items = $scope.nodes; $scope.tabs.devices.filtered_items = $scope.devices; }); it("selects object", function() { $scope.toggleChecked(object, tab); expect(object.$selected).toBe(true); }); it("deselects object", function() { tabObj.manager.selectItem(object.system_id); $scope.toggleChecked(object, tab); expect(object.$selected).toBe(false); }); it("sets allViewableChecked to true when all objects selected", function() { $scope.toggleChecked(object, tab); expect(tabObj.allViewableChecked).toBe(true); }); it( "sets allViewableChecked to false when not all objects " + "selected", function() { var object2 = makeObject(tab); $scope.toggleChecked(object, tab); expect(tabObj.allViewableChecked).toBe(false); }); it("sets allViewableChecked to false when selected and " + "deselected", function() { $scope.toggleChecked(object, tab); $scope.toggleChecked(object, tab); expect(tabObj.allViewableChecked).toBe(false); }); it("resets search when in:selected and none selected", function() { tabObj.search = "in:(Selected)"; $scope.toggleChecked(object, tab); $scope.toggleChecked(object, tab); expect(tabObj.search).toBe(""); }); it("ignores search when not in:selected and none selected", function() { tabObj.search = "other"; $scope.toggleChecked(object, tab); $scope.toggleChecked(object, tab); expect(tabObj.search).toBe("other"); }); it("updates actionErrorCount", function() { object.actions = []; tabObj.actionOption = { "name": "deploy" }; $scope.toggleChecked(object, tab); expect(tabObj.actionErrorCount).toBe(1); }); it("clears action option when none selected", function() { object.actions = []; tabObj.actionOption = {}; $scope.toggleChecked(object, tab); $scope.toggleChecked(object, tab); expect(tabObj.actionOption).toBeNull(); }); }); describe("toggleCheckAll", function() { var controller, object1, object2, tabObj; beforeEach(function() { controller = makeController(); object1 = makeObject(tab); object2 = makeObject(tab); tabObj = $scope.tabs[tab]; $scope.tabs.nodes.filtered_items = $scope.nodes; $scope.tabs.devices.filtered_items = $scope.devices; }); it("selects all objects", function() { $scope.toggleCheckAll(tab); expect(object1.$selected).toBe(true); expect(object2.$selected).toBe(true); }); it("deselects all objects", function() { $scope.toggleCheckAll(tab); $scope.toggleCheckAll(tab); expect(object1.$selected).toBe(false); expect(object2.$selected).toBe(false); }); 
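        // The selection tests in this block all hinge on one rule: every
        // node/device carries an `actions` array, and the controller
        // recomputes `actionErrorCount` as the number of selected items
        // that do not support the chosen action. A minimal sketch of that
        // rule, using a hypothetical `countActionErrors` helper for
        // illustration (not the controller's actual private function):
        function countActionErrors(selectedItems, actionOption) {
            var count = 0;
            angular.forEach(selectedItems, function(item) {
                // An item counts as an error when the chosen action is
                // missing from its supported `actions` list.
                if(angular.isObject(actionOption) &&
                        item.actions.indexOf(actionOption.name) === -1) {
                    count += 1;
                }
            });
            return count;
        }
        // E.g. countActionErrors([{actions: []}], {name: "deploy"})
        // returns 1, matching the "updates actionErrorCount" tests.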
it("resets search when in:selected and none selected", function() { tabObj.search = "in:(Selected)"; $scope.toggleCheckAll(tab); $scope.toggleCheckAll(tab); expect(tabObj.search).toBe(""); }); it("ignores search when not in:selected and none selected", function() { tabObj.search = "other"; $scope.toggleCheckAll(tab); $scope.toggleCheckAll(tab); expect(tabObj.search).toBe("other"); }); it("updates actionErrorCount", function() { object1.actions = []; object2.actions = []; tabObj.actionOption = { "name": "deploy" }; $scope.toggleCheckAll(tab); expect(tabObj.actionErrorCount).toBe(2); }); it("clears action option when none selected", function() { $scope.actionOption = {}; $scope.toggleCheckAll(tab); $scope.toggleCheckAll(tab); expect(tabObj.actionOption).toBeNull(); }); }); describe("showSelected", function() { it("sets search to in:selected", function() { var controller = makeController(); $scope.tabs[tab].selectedItems.push(makeObject(tab)); $scope.tabs[tab].actionOption = {}; $scope.showSelected(tab); expect($scope.tabs[tab].search).toBe("in:(Selected)"); }); it("updateFilters with the new search", function() { var controller = makeController(); $scope.tabs[tab].selectedItems.push(makeObject(tab)); $scope.tabs[tab].actionOption = {}; $scope.showSelected(tab); expect($scope.tabs[tab].filters["in"]).toEqual( ["Selected"]); }); }); describe("toggleFilter", function() { it("does nothing if actionOption", function() { var controller = makeController(); $scope.tabs[tab].actionOption = {}; var filters = { _: [], "in": ["Selected"] }; $scope.tabs[tab].filters = filters; $scope.toggleFilter("hostname", "test", tab); expect($scope.tabs[tab].filters).toEqual(filters); }); it("calls SearchService.toggleFilter", function() { var controller = makeController(); spyOn(SearchService, "toggleFilter").and.returnValue( SearchService.getEmptyFilter()); $scope.toggleFilter("hostname", "test", tab); expect(SearchService.toggleFilter).toHaveBeenCalled(); }); it("sets $scope.filters", function() { var controller = makeController(); var filters = { _: [], other: [] }; spyOn(SearchService, "toggleFilter").and.returnValue( filters); $scope.toggleFilter("hostname", "test", tab); expect($scope.tabs[tab].filters).toBe(filters); }); it("calls SearchService.filtersToString", function() { var controller = makeController(); spyOn(SearchService, "filtersToString").and.returnValue( ""); $scope.toggleFilter("hostname", "test", tab); expect(SearchService.filtersToString).toHaveBeenCalled(); }); it("sets $scope.search", function() { var controller = makeController(); $scope.toggleFilter("hostname", "test", tab); expect($scope.tabs[tab].search).toBe("hostname:(=test)"); }); }); describe("isFilterActive", function() { it("returns true when active", function() { var controller = makeController(); $scope.toggleFilter("hostname", "test", tab); expect( $scope.isFilterActive( "hostname", "test", tab)).toBe(true); }); it("returns false when inactive", function() { var controller = makeController(); $scope.toggleFilter("hostname", "test2", tab); expect( $scope.isFilterActive( "hostname", "test", tab)).toBe(false); }); }); describe("updateFilters", function() { it("updates filters and sets searchValid to true", function() { var controller = makeController(); $scope.tabs[tab].search = "test hostname:name"; $scope.updateFilters(tab); expect($scope.tabs[tab].filters).toEqual({ _: ["test"], hostname: ["name"] }); expect($scope.tabs[tab].searchValid).toBe(true); }); it("updates sets filters empty and sets searchValid to false", function() { 
var controller = makeController(); $scope.tabs[tab].search = "test hostname:(name"; $scope.updateFilters(tab); expect( $scope.tabs[tab].filters).toEqual( SearchService.getEmptyFilter()); expect($scope.tabs[tab].searchValid).toBe(false); }); }); describe("sortTable", function() { it("sets predicate", function() { var controller = makeController(); var predicate = makeName('predicate'); $scope.sortTable(predicate, tab); expect($scope.tabs[tab].predicate).toBe(predicate); }); it("reverses reverse", function() { var controller = makeController(); $scope.tabs[tab].reverse = true; $scope.sortTable(makeName('predicate'), tab); expect($scope.tabs[tab].reverse).toBe(false); }); }); describe("selectColumnOrSort", function() { it("sets column if different", function() { var controller = makeController(); var column = makeName('column'); $scope.selectColumnOrSort(column, tab); expect($scope.tabs[tab].column).toBe(column); }); it("calls sortTable if column already set", function() { var controller = makeController(); var column = makeName('column'); $scope.tabs[tab].column = column; spyOn($scope, "sortTable"); $scope.selectColumnOrSort(column, tab); expect($scope.sortTable).toHaveBeenCalledWith( column, tab); }); }); describe("supportsAction", function() { it("returns true if actionOption is null", function() { var controller = makeController(); var object = makeObject(tab); object.actions = ["start", "stop"]; expect($scope.supportsAction(object, tab)).toBe(true); }); it("returns true if actionOption in object.actions", function() { var controller = makeController(); var object = makeObject(tab); object.actions = ["start", "stop"]; $scope.tabs.nodes.actionOption = { name: "start" }; expect($scope.supportsAction(object, tab)).toBe(true); }); it("returns false if actionOption not in object.actions", function() { var controller = makeController(); var object = makeObject(tab); object.actions = ["start", "stop"]; $scope.tabs[tab].actionOption = { name: "deploy" }; expect($scope.supportsAction(object, tab)).toBe(false); }); }); }); }); angular.forEach(["nodes", "devices"], function(tab) { describe("tab(" + tab + ")", function() { describe("actionOptionSelected", function() { it("sets actionErrorCount to zero", function() { var controller = makeController(); $scope.tabs[tab].actionErrorCount = 1; $scope.actionOptionSelected(tab); expect($scope.tabs[tab].actionErrorCount).toBe(0); }); it("sets actionErrorCount to 1 when selected object doesn't " + "support action", function() { var controller = makeController(); var object = makeObject(tab); object.actions = ['start', 'stop']; $scope.tabs[tab].actionOption = { name: 'deploy' }; $scope.tabs[tab].selectedItems = [object]; $scope.actionOptionSelected(tab); expect($scope.tabs[tab].actionErrorCount).toBe(1); }); it("sets search to in:selected", function() { var controller = makeController(); $scope.actionOptionSelected(tab); expect($scope.tabs[tab].search).toBe("in:(Selected)"); }); it("sets previous_search to search value", function() { var controller = makeController(); var search = makeName("search"); $scope.tabs[tab].search = search; $scope.tabs[tab].actionErrorCount = 1; $scope.actionOptionSelected(tab); expect($scope.tabs[tab].previous_search).toBe(search); }); it("action deploy calls startPolling for osinfo", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { "name": "deploy" }; spyOn(GeneralManager, "startPolling"); $scope.actionOptionSelected(tab); expect(GeneralManager.startPolling).toHaveBeenCalledWith( "osinfo"); }); 
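        // The two polling tests here encode a toggle: choosing "deploy"
        // starts GeneralManager polling for "osinfo" (so the OS/release
        // lists stay fresh), and switching to any other action stops it.
        // A minimal sketch of that toggle, assuming a hypothetical helper
        // name `toggleOsinfoPolling` rather than the controller's real
        // implementation:
        function toggleOsinfoPolling(previousOption, currentOption, generalManager) {
            var wasDeploy = angular.isObject(previousOption) &&
                previousOption.name === "deploy";
            var isDeploy = angular.isObject(currentOption) &&
                currentOption.name === "deploy";
            if(isDeploy && !wasDeploy) {
                // Entering deploy: begin refreshing osinfo.
                generalManager.startPolling("osinfo");
            } else if(wasDeploy && !isDeploy) {
                // Leaving deploy: the osinfo data is no longer needed.
                generalManager.stopPolling("osinfo");
            }
        }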
it("changing away from deploy calls startPolling for osinfo", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { "name": "deploy" }; spyOn(GeneralManager, "startPolling"); spyOn(GeneralManager, "stopPolling"); $scope.actionOptionSelected(tab); $scope.tabs[tab].actionOption = { "name": "acquire" }; $scope.actionOptionSelected(tab); var expected = expect(GeneralManager.stopPolling); expected.toHaveBeenCalledWith("osinfo"); }); it("calls hide on addHardwareScope", function() { var controller; if (tab === 'nodes') { controller = makeController(); $scope.addHardwareScope = { hide: jasmine.createSpy("hide") }; $scope.actionOptionSelected(tab); expect( $scope.addHardwareScope.hide).toHaveBeenCalled(); } else if (tab === 'devices') { controller = makeController(); $scope.addDeviceScope = { hide: jasmine.createSpy("hide") }; $scope.actionOptionSelected(tab); expect( $scope.addDeviceScope.hide).toHaveBeenCalled(); } }); }); describe("isActionError", function() { it("returns true if actionErrorCount > 0", function() { var controller = makeController(); $scope.tabs[tab].actionErrorCount = 2; expect($scope.isActionError(tab)).toBe(true); }); it("returns false if actionErrorCount === 0", function() { var controller = makeController(); $scope.tabs[tab].actionErrorCount = 0; expect($scope.isActionError(tab)).toBe(false); }); it("returns true if deploy action missing osinfo", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; $scope.osinfo = { osystems: [] }; expect($scope.isActionError(tab)).toBe(true); }); it("returns true if action missing ssh keys", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; $scope.osinfo = { osystems: [makeName("os")] }; var firstUser = makeUser(); UsersManager._authUser = firstUser; firstUser.sshkeys_count = 0; expect($scope.isActionError(tab)).toBe(true); }); it("returns false if deploy action not missing osinfo or keys", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; $scope.osinfo = { osystems: [makeName("os")] }; var firstUser = makeUser(); firstUser.sshkeys_count = 1; UsersManager._authUser = firstUser; expect($scope.isActionError(tab)).toBe(false); }); }); describe("isSSHKeyError", function() { it("returns false if actionErrorCount > 0", function() { var controller = makeController(); $scope.tabs[tab].actionErrorCount = 2; expect($scope.isSSHKeyError(tab)).toBe(false); }); it("returns true if deploy action missing ssh keys", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; expect($scope.isSSHKeyError(tab)).toBe(true); }); it("returns false if deploy action not missing ssh keys", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; var firstUser = makeUser(); firstUser.sshkeys_count = 1; UsersManager._authUser = firstUser; expect($scope.isSSHKeyError(tab)).toBe(false); }); }); describe("isDeployError", function() { it("returns false if actionErrorCount > 0", function() { var controller = makeController(); $scope.tabs[tab].actionErrorCount = 2; expect($scope.isDeployError(tab)).toBe(false); }); it("returns true if deploy action missing osinfo", function() { var controller = makeController(); 
$scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; $scope.osinfo = { osystems: [] }; expect($scope.isDeployError(tab)).toBe(true); }); it("returns false if deploy action not missing osinfo", function() { var controller = makeController(); $scope.tabs[tab].actionOption = { name: "deploy" }; $scope.tabs[tab].actionErrorCount = 0; $scope.osinfo = { osystems: [makeName("os")] }; expect($scope.isDeployError(tab)).toBe(false); }); }); describe("actionCancel", function() { it("clears search if in:selected", function() { var controller = makeController(); $scope.tabs[tab].search = "in:(Selected)"; $scope.actionCancel(tab); expect($scope.tabs[tab].search).toBe(""); }); it("clears search if in:selected (device)", function() { var controller = makeController(); $scope.tabs.devices.search = "in:(Selected)"; $scope.actionCancel('devices'); expect($scope.tabs.devices.search).toBe(""); }); it("doesnt clear search if not in:Selected", function() { var controller = makeController(); $scope.tabs[tab].search = "other"; $scope.actionCancel(tab); expect($scope.tabs[tab].search).toBe("other"); }); it("sets actionOption to null", function() { var controller = makeController(); $scope.tabs[tab].actionOption = {}; $scope.actionCancel(tab); expect($scope.tabs[tab].actionOption).toBeNull(); }); it("resets actionProgress", function() { var controller = makeController(); $scope.tabs[tab].actionProgress.total = makeInteger(0, 10); $scope.tabs[tab].actionProgress.completed = makeInteger(0, 10); $scope.tabs[tab].actionProgress.errors[makeName("error")] = [{}]; $scope.actionCancel(tab); expect($scope.tabs[tab].actionProgress.total).toBe(0); expect($scope.tabs[tab].actionProgress.completed).toBe(0); expect($scope.tabs[tab].actionProgress.errors).toEqual({}); }); }); describe("actionGo", function() { it("sets actionProgress.total to the number of selectedItems", function() { var controller = makeController(); var object = makeObject(tab); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [ makeObject(tab), makeObject(tab), makeObject(tab) ]; $scope.actionGo(tab); expect($scope.tabs[tab].actionProgress.total).toBe( $scope.tabs[tab].selectedItems.length); }); it("calls performAction for selected object", function() { var controller = makeController(); var object = makeObject(tab); var spy = spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue($q.defer().promise); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); expect(spy).toHaveBeenCalledWith( object, "start", {}); }); it("calls unselectItem after failed action", function() { var controller = makeController(); var object = makeObject(tab); object.action_failed = false; spyOn( $scope, 'hasActionsFailed').and.returnValue(true); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); var spy = spyOn($scope.tabs[tab].manager, "unselectItem"); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect(spy).toHaveBeenCalled(); }); it("keeps items selected after success", function() { var controller = makeController(); var object = makeObject(tab); spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); var spy = 
spyOn($scope.tabs[tab].manager, "unselectItem"); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect($scope.tabs[tab].selectedItems).toEqual([object]); }); it("increments actionProgress.completed after action complete", function() { var controller = makeController(); var object = makeObject(tab); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsFailed').and.returnValue(true); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect( $scope.tabs[tab].actionProgress.completed).toBe(1); }); it("set search to previous search after complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject(tab); var prev_search = makeName("search"); $scope.tabs[tab].manager._items.push(object); $scope.tabs[tab].manager._selectedItems.push(object); $scope.tabs[tab].previous_search = prev_search; $scope.tabs[tab].search = "in:(Selected)"; $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].filtered_items = [makeObject(tab)]; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect($scope.tabs[tab].search).toBe(prev_search); }); it("ignores search when not in:selected after complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject(tab); $scope.tabs[tab].manager._items.push(object); $scope.tabs[tab].manager._selectedItems.push(object); $scope.tabs[tab].search = "other"; $scope.tabs[tab].actionOption = { name: "start" }; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect($scope.tabs[tab].search).toBe("other"); }); it("clears action option when complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject(tab); $scope.tabs[tab].manager._items.push(object); $scope.tabs[tab].manager._selectedItems.push(object); $scope.tabs[tab].actionOption = { name: "start" }; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect($scope.tabs[tab].actionOption).toBeNull(); }); it("increments actionProgress.completed after action error", function() { var controller = makeController(); var object = makeObject(tab); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); $scope.tabs[tab].actionOption = { name: "start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); defer.reject(makeName("error")); $scope.$digest(); expect( $scope.tabs[tab].actionProgress.completed).toBe(1); }); it("adds error to actionProgress.errors on action error", function() { var controller = makeController(); var object = makeObject(tab); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); $scope.tabs[tab].actionOption = { name: 
"start" }; $scope.tabs[tab].selectedItems = [object]; $scope.actionGo(tab); var error = makeName("error"); defer.reject(error); $scope.$digest(); var errorObjects = $scope.tabs[tab].actionProgress.errors[error]; expect(errorObjects[0].system_id).toBe( object.system_id); }); }); describe("hasActionsInProgress", function() { it("returns false if actionProgress.total not > 0", function() { var controller = makeController(); $scope.tabs[tab].actionProgress.total = 0; expect($scope.hasActionsInProgress(tab)).toBe(false); }); it("returns true if actionProgress total != completed", function() { var controller = makeController(); $scope.tabs[tab].actionProgress.total = 1; $scope.tabs[tab].actionProgress.completed = 0; expect($scope.hasActionsInProgress(tab)).toBe(true); }); it("returns false if actionProgress total == completed", function() { var controller = makeController(); $scope.tabs[tab].actionProgress.total = 1; $scope.tabs[tab].actionProgress.completed = 1; expect($scope.hasActionsInProgress(tab)).toBe(false); }); }); describe("hasActionsFailed", function() { it("returns false if no errors", function() { var controller = makeController(); $scope.tabs[tab].actionProgress.errors = {}; expect($scope.hasActionsFailed(tab)).toBe(false); }); it("returns true if errors", function() { var controller = makeController(); var error = makeName("error"); var object = makeObject(tab); var errors = $scope.tabs[tab].actionProgress.errors; errors[error] = [object]; expect($scope.hasActionsFailed(tab)).toBe(true); }); }); describe("actionSetZone", function () { it("calls performAction with zone", function() { var controller = makeController(); var spy = spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue( $q.defer().promise); var object = makeObject(tab); $scope.tabs[tab].actionOption = { name: "set-zone" }; $scope.tabs[tab].selectedItems = [object]; $scope.tabs[tab].zoneSelection = { id: 1 }; $scope.actionGo(tab); expect(spy).toHaveBeenCalledWith( object, "set-zone", { zone_id: 1 }); }); it("clears action option when successfully complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( $scope.tabs[tab].manager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject(tab); $scope.tabs[tab].manager._items.push(object); $scope.tabs[tab].manager._selectedItems.push(object); $scope.tabs[tab].actionOption = { name: "set-zone" }; $scope.tabs[tab].zoneSelection = { id: 1 }; $scope.actionGo(tab); defer.resolve(); $scope.$digest(); expect($scope.tabs[tab].zoneSelection).toBeNull(); }); }); }); }); describe("tab(nodes)", function() { describe("actionGo", function() { it("calls performAction with osystem and distro_series", function() { var controller = makeController(); var object = makeObject("nodes"); var spy = spyOn( $scope.tabs.nodes.manager, "performAction").and.returnValue( $q.defer().promise); $scope.tabs.nodes.actionOption = { name: "deploy" }; $scope.tabs.nodes.selectedItems = [object]; $scope.tabs.nodes.osSelection.osystem = "ubuntu"; $scope.tabs.nodes.osSelection.release = "ubuntu/trusty"; $scope.actionGo("nodes"); expect(spy).toHaveBeenCalledWith( object, "deploy", { osystem: "ubuntu", distro_series: "trusty" }); }); it("clears selected os and release when successfully complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( NodesManager, "performAction").and.returnValue(defer.promise); 
spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject("nodes"); NodesManager._items.push(object); NodesManager._selectedItems.push(object); $scope.tabs.nodes.actionOption = { name: "deploy" }; $scope.tabs.nodes.osSelection.osystem = "ubuntu"; $scope.tabs.nodes.osSelection.release = "ubuntu/trusty"; $scope.actionGo("nodes"); defer.resolve(); $scope.$digest(); expect($scope.tabs.nodes.osSelection.$reset).toHaveBeenCalled(); }); it("calls performAction with commissionOptions", function() { var controller = makeController(); var object = makeObject("nodes"); var spy = spyOn( $scope.tabs.nodes.manager, "performAction").and.returnValue( $q.defer().promise); $scope.tabs.nodes.actionOption = { name: "commission" }; $scope.tabs.nodes.selectedItems = [object]; $scope.tabs.nodes.commissionOptions.enableSSH = true; $scope.tabs.nodes.commissionOptions.skipNetworking = false; $scope.tabs.nodes.commissionOptions.skipStorage = false; $scope.actionGo("nodes"); expect(spy).toHaveBeenCalledWith( object, "commission", { enable_ssh: true, skip_networking: false, skip_storage: false }); }); it("clears commissionOptions when successfully complete", function() { var controller = makeController(); var defer = $q.defer(); spyOn( NodesManager, "performAction").and.returnValue(defer.promise); spyOn( $scope, 'hasActionsFailed').and.returnValue(false); spyOn( $scope, 'hasActionsInProgress').and.returnValue(false); var object = makeObject("nodes"); NodesManager._items.push(object); NodesManager._selectedItems.push(object); $scope.tabs.nodes.actionOption = { name: "commission" }; $scope.tabs.nodes.commissionOptions.enableSSH = true; $scope.tabs.nodes.commissionOptions.skipNetworking = true; $scope.tabs.nodes.commissionOptions.skipStorage = true; $scope.actionGo("nodes"); defer.resolve(); $scope.$digest(); expect($scope.tabs.nodes.commissionOptions).toEqual({ enableSSH: false, skipNetworking: false, skipStorage: false }); }); }); }); describe("addHardwareOptionChanged", function() { it("calls show in addHardwareScope", function() { var controller = makeController(); $scope.addHardwareScope = { show: jasmine.createSpy("show") }; $scope.addHardwareOption = { name: "hardware" }; $scope.addHardwareOptionChanged(); expect($scope.addHardwareScope.show).toHaveBeenCalledWith( "hardware"); }); }); describe("addDevice", function() { it("calls show in addDeviceScope", function() { var controller = makeController(); $scope.addDeviceScope = { show: jasmine.createSpy("show") }; $scope.addDevice(); expect($scope.addDeviceScope.show).toHaveBeenCalled(); }); }); describe("cancelAddDevice", function() { it("calls cancel in addDeviceScope", function() { var controller = makeController(); $scope.addDeviceScope = { cancel: jasmine.createSpy("cancel") }; $scope.cancelAddDevice(); expect($scope.addDeviceScope.cancel).toHaveBeenCalled(); }); }); describe("getDeviceIPAssignment", function() { it("returns 'External' for external assignment", function() { var controller = makeController(); expect($scope.getDeviceIPAssignment("external")).toBe( "External"); }); it("returns 'Dynamic' for dynamic assignment", function() { var controller = makeController(); expect($scope.getDeviceIPAssignment("dynamic")).toBe( "Dynamic"); }); it("returns 'Static' for static assignment", function() { var controller = makeController(); expect($scope.getDeviceIPAssignment("static")).toBe( "Static"); }); }); }); 
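// Every suite in this file avoids real network traffic the same way:
// RegionConnection.buildSocket is replaced with a Jasmine spy returning the
// shared MockWebSocket test helper, and asynchronous manager loading is
// driven by hand-resolved $q deferreds flushed with $digest(). A minimal
// self-contained sketch of that pattern (it assumes the MockWebSocket
// helper from this test tree is on the page, as it is for the suites above):
describe("example: stubbing the region websocket", function() {
    beforeEach(module("MAAS"));
    var RegionConnection, webSocket;
    beforeEach(inject(function($injector) {
        RegionConnection = $injector.get("RegionConnection");
        // No real socket is opened; connect() receives the mock instead.
        webSocket = new MockWebSocket();
        spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket);
    }));
    it("connects without touching the network", function() {
        RegionConnection.connect("");
        expect(RegionConnection.buildSocket).toHaveBeenCalled();
    });
});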
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_subnet_details.js0000644000000000000000000001226713056115004031270 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for SubnetDetailsController. */ describe("SubnetDetailsController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Make a fake subnet function makeSubnet() { var subnet = { id: makeInteger(1, 10000), cidr: '169.254.0.0/24', name: 'Link Local' }; SubnetsManager._items.push(subnet); return subnet; } // Grab the needed angular pieces. var $controller, $rootScope, $location, $scope, $q, $routeParams; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $location = $injector.get("$location"); $scope = $rootScope.$new(); $q = $injector.get("$q"); $routeParams = {}; })); // Load any injected managers and services. var SubnetsManager, ManagerHelperService, ErrorService; beforeEach(inject(function($injector) { SubnetsManager = $injector.get("SubnetsManager"); ManagerHelperService = $injector.get("ManagerHelperService"); ErrorService = $injector.get("ErrorService"); })); var subnet; beforeEach(function() { subnet = makeSubnet(); }); // Makes the SubnetDetailsController function makeController(loadManagersDefer) { var loadManagers = spyOn(ManagerHelperService, "loadManagers"); if(angular.isObject(loadManagersDefer)) { loadManagers.and.returnValue(loadManagersDefer.promise); } else { loadManagers.and.returnValue($q.defer().promise); } // Create the controller. var controller = $controller("SubnetDetailsController", { $scope: $scope, $rootScope: $rootScope, $routeParams: $routeParams, $location: $location, SubnetsManager: SubnetsManager, ManagerHelperService: ManagerHelperService, ErrorService: ErrorService }); return controller; } // Make the controller and resolve the setActiveItem call. 
function makeControllerResolveSetActiveItem() { var setActiveDefer = $q.defer(); spyOn(SubnetsManager, "setActiveItem").and.returnValue( setActiveDefer.promise); var defer = $q.defer(); var controller = makeController(defer); $routeParams.subnet_id = subnet.id; defer.resolve(); $rootScope.$digest(); setActiveDefer.resolve(subnet); $rootScope.$digest(); return controller; } it("sets title and page on $rootScope", function() { var controller = makeController(); expect($rootScope.title).toBe("Loading..."); expect($rootScope.page).toBe("subnets"); }); it("calls loadManagers with SubnetsManager", function() { var controller = makeController(); expect(ManagerHelperService.loadManagers).toHaveBeenCalledWith( [SubnetsManager]); }); it("raises error if subnet identifier is invalid", function() { spyOn(SubnetsManager, "setActiveItem").and.returnValue( $q.defer().promise); spyOn(ErrorService, "raiseError").and.returnValue( $q.defer().promise); var defer = $q.defer(); var controller = makeController(defer); $routeParams.subnet_id = 'xyzzy'; defer.resolve(); $rootScope.$digest(); expect($scope.subnet).toBe(null); expect($scope.loaded).toBe(false); expect(SubnetsManager.setActiveItem).not.toHaveBeenCalled(); expect(ErrorService.raiseError).toHaveBeenCalled(); }); it("doesn't call setActiveItem if subnet is loaded", function() { spyOn(SubnetsManager, "setActiveItem").and.returnValue( $q.defer().promise); var defer = $q.defer(); var controller = makeController(defer); SubnetsManager._activeItem = subnet; $routeParams.subnet_id = subnet.id; defer.resolve(); $rootScope.$digest(); expect($scope.subnet).toBe(subnet); expect($scope.loaded).toBe(true); expect(SubnetsManager.setActiveItem).not.toHaveBeenCalled(); }); it("calls setActiveItem if subnet is not active", function() { spyOn(SubnetsManager, "setActiveItem").and.returnValue( $q.defer().promise); var defer = $q.defer(); var controller = makeController(defer); $routeParams.subnet_id = subnet.id; defer.resolve(); $rootScope.$digest(); expect(SubnetsManager.setActiveItem).toHaveBeenCalledWith( subnet.id); }); it("sets subnet and loaded once setActiveItem resolves", function() { var controller = makeControllerResolveSetActiveItem(); expect($scope.subnet).toBe(subnet); expect($scope.loaded).toBe(true); }); it("title is updated once setActiveItem resolves", function() { var controller = makeControllerResolveSetActiveItem(); expect($rootScope.title).toBe(subnet.cidr + " (" + subnet.name + ")"); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/controllers/tests/test_subnets_list.js0000644000000000000000000003116313056115004030775 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for SubnetsListController. */ describe("SubnetsListController", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $controller, $rootScope, $scope, $q, $routeParams; beforeEach(inject(function($injector) { $controller = $injector.get("$controller"); $rootScope = $injector.get("$rootScope"); $scope = $rootScope.$new(); $q = $injector.get("$q"); $routeParams = {}; })); // Load the SubnetsManager, FabricsManager, SpacesManager, // VLANsManager and ManagerHelperService. 
var SubnetsManager, FabricsManager, SpacesManager, VLANsManager; var ManagerHelperService, RegionConnection; beforeEach(inject(function($injector) { SubnetsManager = $injector.get("SubnetsManager"); FabricsManager = $injector.get("FabricsManager"); SpacesManager = $injector.get("SpacesManager"); VLANsManager = $injector.get("VLANsManager"); ManagerHelperService = $injector.get("ManagerHelperService"); })); // Makes the NodesListController function makeController(loadManagersDefer, defaultConnectDefer) { var loadManagers = spyOn(ManagerHelperService, "loadManagers"); if(angular.isObject(loadManagersDefer)) { loadManagers.and.returnValue(loadManagersDefer.promise); } else { loadManagers.and.returnValue($q.defer().promise); } // Create the controller. var controller = $controller("SubnetsListController", { $scope: $scope, $rootScope: $rootScope, $routeParams: $routeParams, SubnetsManager: SubnetsManager, FabricsManager: FabricsManager, SpacesManager: SpacesManager, VLANsManager: VLANsManager, ManagerHelperService: ManagerHelperService }); return controller; } it("sets title and page on $rootScope", function() { var controller = makeController(); expect($rootScope.title).toBe("Fabrics"); expect($rootScope.page).toBe("subnets"); }); it("sets initial values on $scope", function() { // tab-independent variables. var controller = makeController(); expect($scope.subnets).toBe(SubnetsManager.getItems()); expect($scope.fabrics).toBe(FabricsManager.getItems()); expect($scope.spaces).toBe(SpacesManager.getItems()); expect($scope.vlans).toBe(VLANsManager.getItems()); expect($scope.loading).toBe(true); }); it("calls loadManagers with SubnetsManager, FabricsManager, " + "SpacesManager, VLANsManager", function() { var controller = makeController(); expect(ManagerHelperService.loadManagers).toHaveBeenCalledWith( [SubnetsManager, FabricsManager, SpacesManager, VLANsManager]); }); it("sets loading to false with loadManagers resolves", function() { var defer = $q.defer(); var controller = makeController(defer); defer.resolve(); $rootScope.$digest(); expect($scope.loading).toBe(false); }); describe("toggleTab", function() { it("sets $rootScope.title", function() { var controller = makeController(); $scope.toggleTab('spaces'); expect($rootScope.title).toBe($scope.tabs.spaces.pagetitle); $scope.toggleTab('fabrics'); expect($rootScope.title).toBe($scope.tabs.fabrics.pagetitle); }); it("sets currentpage", function() { var controller = makeController(); $scope.toggleTab('spaces'); expect($scope.currentpage).toBe('spaces'); $scope.toggleTab('fabrics'); expect($scope.currentpage).toBe('fabrics'); }); }); setupController = function(fabrics, spaces, vlans, subnets) { var defer = $q.defer(); var controller = makeController(defer); $scope.fabrics = fabrics; FabricsManager._items = fabrics; $scope.spaces = spaces; SpacesManager._items = spaces; $scope.vlans = vlans; VLANsManager._items = vlans; $scope.subnets = subnets; SubnetsManager._items = subnets; defer.resolve(); $rootScope.$digest(); return controller; }; testUpdates = function(controller, fabrics, spaces, vlans, subnets, expectedFabricsData, expectedSpacesData) { $scope.fabrics = fabrics; FabricsManager._items = fabrics; $scope.spaces = spaces; SpacesManager._items = spaces; $scope.vlans = vlans; VLANsManager._items = vlans; $scope.subnets = subnets; SubnetsManager._items = subnets; $rootScope.$digest(); expect($scope.tabs.fabrics.data).toEqual(expectedFabricsData); expect($scope.tabs.spaces.data).toEqual(expectedSpacesData); }; it("subnet_list initial update 
happens correctly", function() { var fabrics = [ { id: 0, name: "fabric 0" } ]; var spaces = [ { id: 0, name: "space 0" } ]; var vlans = [ { id: 1, name: "vlan4", vid: 4, fabric: 0 } ]; var subnets = [ { id:0, name:"subnet 0", vlan:1, space:0, cidr:"10.20.0.0/16" } ]; var expectedFabricsData = [ { fabric: { id: 0, name: 'fabric 0' }, rows: [ { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16'} } ] } ]; var expectedSpacesData = [ { space: { id: 0, name: 'space 0' }, rows: [ { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16' } } ] } ]; controller = setupController(fabrics, spaces, vlans, subnets); expect($scope.tabs.fabrics.data).toEqual(expectedFabricsData); expect($scope.tabs.spaces.data).toEqual(expectedSpacesData); }); it("adding fabric updates lists", function() { var fabrics = [ { id: 0, name: "fabric 0" } ]; var spaces = [ { id: 0, name: "space 0" } ]; var vlans = [ { id: 1, name: "vlan4", vid: 4, fabric: 0 } ]; var subnets = [ { id:0, name:"subnet 0", vlan:1, space:0, cidr:"10.20.0.0/16" } ]; var expectedFabricsData = [ { fabric: { id: 0, name: 'fabric 0' }, rows: [ { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16'} } ] }, { fabric: { id: 1, name: 'fabric 1' }, rows: [ ] } ]; var expectedSpacesData = [ { space: { id: 0, name: 'space 0' }, rows: [ { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16' } } ] } ]; controller = setupController(fabrics, spaces, vlans, subnets); fabrics.push({id: 1, name: "fabric 1"}); testUpdates(controller, fabrics, spaces, vlans, subnets, expectedFabricsData, expectedSpacesData); }); it("adding space updates lists", function() { var fabrics = [ { id: 0, name: "fabric 0" } ]; var spaces = [ { id: 0, name: "space 0" } ]; var vlans = [ { id: 1, name: "vlan4", vid: 4, fabric: 0 } ]; var subnets = [ { id:0, name:"subnet 0", vlan:1, space:0, cidr:"10.20.0.0/16" } ]; var expectedFabricsData = [ { fabric: { id: 0, name: 'fabric 0' }, rows: [ { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16'} } ] } ]; var expectedSpacesData = [ { space: { id: 0, name: 'space 0' }, rows: [ { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16' } } ] }, { space: { id: 1, name: 'space 1' }, rows: [ ]} ]; controller = setupController(fabrics, spaces, vlans, subnets); spaces.push({id: 1, name: "space 1"}); testUpdates(controller, fabrics, spaces, vlans, subnets, expectedFabricsData, expectedSpacesData); }); it("adding vlan updates lists", function() { var fabrics = [ { id: 0, name: "fabric 0" } ]; var spaces = [ { id: 0, name: "space 0" } ]; var vlans = [ { id: 1, name: "vlan4", vid: 4, fabric: 0 } ]; var subnets = [ { id:0, name:"subnet 0", vlan:1, space:0, cidr:"10.20.0.0/16" } ]; var expectedFabricsData = [ { fabric: { id: 0, name: 'fabric 0' }, rows: [ { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16'} }, { 
vlan: { id: 2, name: 'vlan2', vid: 2, fabric: 0 }, space: null, subnet: null } ] } ]; var expectedSpacesData = [ { space: { id: 0, name: 'space 0' }, rows: [ { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16' } } ] } ]; controller = setupController(fabrics, spaces, vlans, subnets); vlans.push({id: 2, name: "vlan2", vid: 2, fabric: 0}); testUpdates(controller, fabrics, spaces, vlans, subnets, expectedFabricsData, expectedSpacesData); }); it("adding subnet updates lists", function() { var fabrics = [ { id: 0, name: "fabric 0" } ]; var spaces = [ { id: 0, name: "space 0" } ]; var vlans = [ { id: 1, name: "vlan4", vid: 4, fabric: 0 } ]; var subnets = [ { id:0, name:"subnet 0", vlan:1, space:0, cidr:"10.20.0.0/16" } ]; var expectedFabricsData = [ { fabric: { id: 0, name: 'fabric 0' }, rows: [ { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16'} }, { vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, space: { id: 0, name: 'space 0' }, subnet: { id: 1, name: 'subnet 1', vlan: 1, space: 0, cidr: '10.99.34.0/24'} } ] } ]; var expectedSpacesData = [ { space: { id: 0, name: 'space 0' }, rows: [ { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 0, name: 'subnet 0', vlan: 1, space: 0, cidr: '10.20.0.0/16' } }, { fabric: { id: 0, name: 'fabric 0' }, vlan: { id: 1, name: 'vlan4', vid: 4, fabric: 0 }, subnet: { id: 1, name: 'subnet 1', vlan: 1, space: 0, cidr: '10.99.34.0/24' } } ] } ]; controller = setupController(fabrics, spaces, vlans, subnets); subnets.push( {id: 1, name: "subnet 1", vlan: 1, space: 0, cidr: "10.99.34.0/24"} ); testUpdates(controller, fabrics, spaces, vlans, subnets, expectedFabricsData, expectedSpacesData); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/accordion.js0000644000000000000000000000326513056115004025614 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Accordion directive. * * Provides an accordion effect to an element with maas-accordion class and * all child elements with maas-accordion-tab. Only one accordion tab is open * at a time, selecting another accordion will set "active" on that * accordion tab. */ angular.module('MAAS').directive('maasAccordion', function() { return { restrict: "C", link: function(scope, element, attrs) { // Called when accordion tabs are clicked. Removes active on // all other tabs except to the tab that was clicked. var clickHandler = function(evt) { var tab = evt.data.tab; var tabs = evt.data.tabs; angular.forEach(tabs, function(innerTab) { angular.element(innerTab).removeClass("active"); }); angular.element(tab).addClass("active"); }; // Listen for the click event on all tabs in the accordion. var tabs = element.find(".maas-accordion-tab"); angular.forEach(tabs, function(tab) { tab = angular.element(tab); tab.on("click", { tab: tab, tabs: tabs }, clickHandler); }); // Remove the handlers when the scope is destroyed. 
scope.$on("$destroy", function() { angular.forEach(tabs, function(tab) { angular.element(tab).off("click", clickHandler); }); }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/call_to_action.js0000644000000000000000000000660213056115004026623 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Call to action directive. */ angular.module('MAAS').run(['$templateCache', function ($templateCache) { // Inject the cta.html into the template cache. $templateCache.put('directive/templates/cta.html', [ '' ].join('')); }]); angular.module('MAAS').directive('maasCta', function() { return { restrict: "A", replace: true, require: "ngModel", scope: { maasCta: '=', ngModel: '=' }, templateUrl: 'directive/templates/cta.html', link : function(scope, element, attrs, ngModelCtrl) { // Use the link function to grab the ngModel controller. // Title of the button when not active. var defaultTitle = "Take action"; if(angular.isString(attrs.defaultTitle) && attrs.defaultTitle !== "") { defaultTitle = attrs.defaultTitle; } // When an item is selected in the list set the title, hide the // dropdown, and set the value to the given model. scope.selectItem = function(select) { scope.shown = false; ngModelCtrl.$setViewValue(select); }; // Return the title of the dropdown button. scope.getTitle = function() { if(angular.isObject(ngModelCtrl.$modelValue)) { scope.secondary = true; return ngModelCtrl.$modelValue.title; } else { scope.secondary = false; return defaultTitle; } }; // When the model changes in the above selectItem function this // function will be called causing the ngChange directive to be // fired. ngModelCtrl.$viewChangeListeners.push(function() { scope.$eval(attrs.ngChange); }); }, controller: function($scope, $rootScope, $element, $document) { // Default dropdown is hidden. $scope.shown = false; $scope.secondary = false; // Don't propagate the element click. This stops the click event // from firing on the body element. $element.bind('click', function (evt) { evt.stopPropagation(); }); // If a click makes it to the body element then hide the dropdown. $document.find('body').bind('click', function () { // Use $apply because this function will be called outside // of the digest cycle. $rootScope.$apply($scope.shown = false); }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/code_lines.js0000644000000000000000000000307413056115004025755 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Accordion directive. * * Provides an accordion effect to an element with maas-accordion class and * all child elements with maas-accordion-tab. Only one accordion tab is open * at a time, selecting another accordion will set "active" on that * accordion tab. 
*/ angular.module('MAAS').directive('maasCodeLines', function () { return { restrict: "A", scope: { maasCodeLines: '&' }, link: function(scope, element, attributes) { function insertContent() { // Empty the element contents and include again, this asures // its the most up-to-date content element.empty(); element.text(scope.maasCodeLines); // Count the line contents var lines = element.html().split('\n'), insert = ""; // Each line is to be wrapped by a span which is style & given // its appropriate line number $.each(lines, function() { insert += '' + this + '\n'; }); // Re-insert the contents element.html(insert); } // Watch the contents of the element so when it changes to // re-add the line numbers. scope.$watch(scope.maasCodeLines, insertContent); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/contenteditable.js0000644000000000000000000000426013056115004027013 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Content editable directive. * * HTML provides a feature that allows any element to be editable with * contenteditable attribute. This directive uses that attribute to link * the contents of that element to a model. This directive is not prefixed * with maas so Angular can identify all elements with this attribute. */ angular.module('MAAS').directive('contenteditable', function() { return { restrict: "A", require: "ngModel", scope: { ngDisabled: "&", maasEditing: "&" }, link: function(scope, element, attrs, ngModel) { // If the element is disabled then make the element lose focus. var focusHandler = function() { if(scope.ngDisabled()) { element.blur(); } else { // Didn't lose focus, so its now editing. scope.$apply(scope.maasEditing()); } }; element.bind("focus", focusHandler); // Update the value of the model when events occur that // can change the value of the model. var changeHandler = function() { scope.$apply(ngModel.$setViewValue(element.text())); }; element.bind("blur keyup change", changeHandler); // When the model changes set the html content for that element. ngModel.$render = function() { element.html(ngModel.$viewValue || ""); }; // When the model changes this function will be called causing the // ngChange directive to be fired. ngModel.$viewChangeListeners.push(function() { scope.$eval(attrs.ngChange); }); // Remove the event handler on the element when the scope is // destroyed. scope.$on("$destroy", function() { element.unbind("blur keyup change", changeHandler); element.unbind("focus", focusHandler); }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/dbl_click_overlay.js0000644000000000000000000001226113056115004027316 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Double click overlay directive. * * Provides the ability for a disabled element to still accept the * double click event. By default if an element is disabled then it will * receive no click events. This overlays the element with another element * that will still receive click events. */ angular.module('MAAS').run(['$templateCache', function ($templateCache) { // Inject the style for the maas-dbl-overlay class. We inject the style // instead of placing it in maas-styles.css because it is required for // this directive to work at all. 
var styleElement = document.createElement('style'); styleElement.innerHTML = [ '.maas-dbl-overlay {', 'display: inline-block;', 'position: relative;', '}', '.maas-dbl-overlay--overlay {', 'position: absolute;', 'left: 0;', 'right: 0;', 'top: 0;', 'bottom: 0;', '-webkit-touch-callout: none;', '-webkit-user-select: none;', '-khtml-user-select: none;', '-moz-user-select: none;', '-ms-user-select: none;', 'user-select: none;', '}' ].join(''); document.body.appendChild(styleElement); // Inject the double_click_overlay.html into the template cache. $templateCache.put('directive/templates/double_click_overlay.html', [ '
<div class="maas-dbl-overlay">',
        '<span ng-transclude></span>',
        '<div class="maas-dbl-overlay--overlay"></div>',
    '</div>
    ' ].join('')); }]); angular.module('MAAS').directive('maasDblClickOverlay', ['BrowserService', function(BrowserService) { return { restrict: "A", transclude: true, replace: true, scope: { maasDblClickOverlay: '&' }, templateUrl: 'directive/templates/double_click_overlay.html', link: function(scope, element, attrs) { // Create the click function that will be called when the // overlay is clicked. This changes based on the element that // is transcluded into this directive. var overlay = element.find(".maas-dbl-overlay--overlay"); var transclude = element.find( "span[ng-transclude]").children()[0]; var clickElement; if(transclude.tagName === "SELECT") { clickElement = function() { // Have to create a custom mousedown event for the // select click to be handled. Using 'click()' or //'focus()' will not work. var evt = document.createEvent('MouseEvents'); evt.initMouseEvent( 'mousedown', true, true, window, 0, 0, 0, 0, 0, false, false, false, false, 0, null); transclude.dispatchEvent(evt); }; // Selects use a pointer for the cursor. overlay.css({ cursor: "pointer" }); } else if(transclude.tagName === "INPUT") { clickElement = function() { // An input will become in focus when clicked. angular.element(transclude).focus(); }; // Inputs use a text for the cursor. overlay.css({ cursor: "text" }); } else { clickElement = function() { // Standard element just call click on that element. angular.element(transclude).click(); }; // Don't set cursor on other element types. } // Add the click and double click handlers. var overlayClick = function(evt) { clickElement(); evt.preventDefault(); evt.stopPropagation(); }; var overlayDblClick = function(evt) { // Call the double click handler with in the scope. scope.$apply(scope.maasDblClickOverlay); evt.preventDefault(); evt.stopPropagation(); }; // Enable the handlers if not Firefox. It firefox, then hide // the overlay as Firefox does not allow sending click events // to select elements. if(BrowserService.browser !== "firefox") { overlay.on("click", overlayClick); overlay.on("dblclick", overlayDblClick); } else { overlay.addClass("ng-hide"); } // Remove the handlers when the scope is destroyed. scope.$on("$destroy", function() { overlay.off("click", overlayClick); overlay.off("dblclick", overlayDblClick); }); } }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/enter_blur.js0000644000000000000000000000117613056115004026013 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Enter blur directive. * * When the enter key is pressed make the element lose focus (aka. blur event). */ angular.module('MAAS').directive('maasEnterBlur', function() { return { restrict: "A", link: function(scope, element, attrs) { element.bind("keydown keypress", function(evt) { if(evt.which === 13) { element.blur(); evt.preventDefault(); } }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/error_overlay.js0000644000000000000000000001602713056115004026545 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Error overlay. * * Directive overrides the entire transcluded element if an error occurs or * connection to the region over the websocket fails or becomes disconnected. */ angular.module('MAAS').run(['$templateCache', function ($templateCache) { // Inject the error_overlay.html into the template cache. 
$templateCache.put('directive/templates/error_overlay.html', [ '', '
    ', '
    ', '
' ].join('')); // Preload the svg and png error icon. It's possible that it has never been // loaded by the browser, and if the region connection goes down and the // directive gets shown with an error, the icon will be missing. // // Note: This is skipped if unit testing because it will throw 404 errors // continuously. if(!angular.isDefined(window.jasmine)) { var image = new Image(); image.src = "static/img/icons/error.svg"; image = new Image(); image.src = "static/img/icons/error.png"; } }]); angular.module('MAAS').directive('maasErrorOverlay', [ '$window', '$timeout', 'RegionConnection', 'ErrorService', function($window, $timeout, RegionConnection, ErrorService) { return { restrict: "A", transclude: true, scope: true, templateUrl: 'directive/templates/error_overlay.html', link: function(scope, element, attrs) { scope.connected = false; scope.showDisconnected = false; scope.clientError = false; scope.wasConnected = false; // Holds the promise that sets showDisconnected to true. Will // be cleared when the scope is destroyed. var markDisconnected; // Returns true when the overlay should be shown. scope.show = function() { // Always show if clientError. if(scope.clientError) { return true; } // Never show if connected. if(scope.connected) { return false; } // If never connected, always show. if(!scope.wasConnected) { return true; } // Not connected. return scope.showDisconnected; }; // Returns the title for the header. scope.getTitle = function() { if(scope.clientError) { return "Error occurred"; } else if(scope.wasConnected) { return "Connection lost, reconnecting..."; } else { return "Connecting..."; } }; // Called to reload the page. scope.reload = function() { $window.location.reload(); }; // Called when the connection status of the region // changes. Updates the scope connected and error values. var watchConnection = function() { // Do nothing if already a client error. if(scope.clientError) { return; } // Set connected and the time it changed. var connected = RegionConnection.isConnected(); if(connected !== scope.connected) { scope.connected = connected; if(!connected) { scope.showDisconnected = false; // Show disconnected after 1/2 second. This removes // the flicker that can occur if it disconnects // and reconnects quickly. markDisconnected = $timeout(function() { scope.showDisconnected = true; markDisconnected = undefined; }, 500); } } // Set error and whether or not the connection // has ever been made. scope.error = RegionConnection.error; if(!scope.wasConnected && connected) { scope.wasConnected = true; } }; // Watch the isConnected and error value on the // RegionConnection. scope.$watch(function() { return RegionConnection.isConnected(); }, watchConnection); scope.$watch(function() { return RegionConnection.error; }, watchConnection); // Called when the error value on the ErrorService changes. var watchError = function() { var error = ErrorService._error; if(angular.isString(error)) { scope.clientError = true; scope.error = ErrorService._error; } }; // Watch _error on the ErrorService. scope.$watch(function() { return ErrorService._error; }, watchError); // Cancel the timeout on scope destroy. scope.$on("$destroy", function() { if(angular.isDefined(markDisconnected)) { $timeout.cancel(markDisconnected); } }); } }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/error_toggle.js0000644000000000000000000000530513056115004026342 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. 
This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Error toggle. * * Hides the element if an error occurs or no connection to the region * is present. */ angular.module('MAAS').directive('maasErrorToggle', [ '$timeout', 'RegionConnection', 'ErrorService', function($timeout, RegionConnection, ErrorService) { return { restrict: "A", link: function(scope, element, attrs) { // Holds timeout promise for setting ng-hide when // connection is lost. var disconnectedPromise; // Cancel the disconnected timeout. var cancelTimeout = function() { if(angular.isDefined(disconnectedPromise)) { $timeout.cancel(disconnectedPromise); disconnectedPromise = undefined; } }; // Called when the connection status of the region // changes or the error on the ErrorService is set. // The element is shown when connected and there are no errors. var watchConnectionAndError = function() { var connected = RegionConnection.isConnected(); var error = ErrorService._error; if(connected && !angular.isString(error)) { cancelTimeout(); element.removeClass("ng-hide"); } else if(angular.isString(error)) { cancelTimeout(); element.addClass("ng-hide"); } else if(!connected) { // Hide the element after 1/2 second. This stops // flickering when the connection goes down and // reconnects quickly. cancelTimeout(); disconnectedPromise = $timeout(function() { element.addClass("ng-hide"); }, 500); } }; // Watch the RegionConnection.isConnected() and // ErrorService._error value. scope.$watch(function() { return RegionConnection.isConnected(); }, watchConnectionAndError); scope.$watch(function() { return ErrorService._error; }, watchConnectionAndError); // Cancel disconnect timeout on destroy. scope.$on("$destroy", function() { cancelTimeout(); }); } }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/os_select.js0000644000000000000000000001662013056115004025632 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * OS/Release select directive. */ angular.module('MAAS').run(['$templateCache', function ($templateCache) { // Inject the os-select.html into the template cache. $templateCache.put('directive/templates/os-select.html', [ '', '', '' ].join('')); }]); angular.module('MAAS').directive('maasOsSelect', function() { return { restrict: "A", require: "ngModel", scope: { maasOsSelect: '=', ngModel: '=' }, templateUrl: 'directive/templates/os-select.html', controller: function($scope) { // Return only the selectable releases based on the selected os. function getSelectableReleases() { if(angular.isObject($scope.maasOsSelect) && angular.isArray($scope.maasOsSelect.releases)) { var i, allChoices = $scope.maasOsSelect.releases; var choice, choices = []; for(i = 0; i < allChoices.length; i++) { choice = allChoices[i]; if(choice[0].indexOf($scope.ngModel.osystem) > -1) { choices.push(choice); } } return choices; } return []; } // Return only the selectable kernels based on the selected os. 
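// [Illustrative aside, not part of the original source.] The maasOsSelect
// binding mirrors the osinfo payload this controller walks: parallel arrays
// of [value, label] choice pairs plus a kernels mapping keyed by os and
// release. A hypothetical instance:
//
//     $scope.osinfo = {
//         osystems: [["ubuntu", "Ubuntu"]],
//         releases: [["ubuntu/trusty", "Ubuntu Trusty 14.04 (LTS)"]],
//         kernels: {ubuntu: {trusty: [["hwe-t", "hwe-t"]]}},
//         default_osystem: "ubuntu",
//         default_release: "trusty"
//     };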
function getSelectableKernels() { if(angular.isObject($scope.maasOsSelect) && angular.isObject($scope.maasOsSelect.kernels) && angular.isString($scope.ngModel.osystem) && angular.isString($scope.ngModel.release)) { var os = $scope.ngModel.osystem; var release = $scope.ngModel.release.split('/')[1]; var osKernels = $scope.maasOsSelect.kernels[os]; if(angular.isObject(osKernels)) { return osKernels[release]; } } return []; } // Returns the defaultValue if it's in the choices array. Otherwise // it returns the weighted choice if present, followed by the // first choice. function getDefaultOrFirst(array, defaultValue, weightValue) { var i, first, weightedPresent = false; for(i = 0; i < array.length; i++) { if(angular.isUndefined(first)) { first = array[i][0]; } if(array[i][0] === defaultValue) { return defaultValue; } if(angular.isString(weightValue) && array[i][0] === weightValue) { weightedPresent = true; } } if(weightedPresent) { return weightValue; } if(angular.isUndefined(first)) { return null; } return first; } // Sets the default selected values for the ngModel. Only sets the // values once the maasOsSelect is populated. Sets the selected // osystem to default_osystem if present, followed by 'ubuntu' if // present, followed by the first available. Sets the selected // release to the default_release if present, followed by the first // available. function setDefault() { // Do nothing if model is already set. if(angular.isString($scope.ngModel.osystem) && angular.isString($scope.ngModel.release)) { return; } // Do nothing if the default is not set. if(angular.isUndefined($scope.maasOsSelect.default_osystem) || angular.isUndefined($scope.maasOsSelect.default_release)) { return; } // Set the initial defaults. $scope.ngModel.osystem = getDefaultOrFirst( $scope.maasOsSelect.osystems, $scope.maasOsSelect.default_osystem, "ubuntu"); $scope.releases = getSelectableReleases(); $scope.ngModel.release = getDefaultOrFirst( $scope.releases, $scope.ngModel.osystem + "/" + $scope.maasOsSelect.default_release); $scope.ngModel.hwe_kernel = ""; } // Defaults if(!angular.isObject($scope.ngModel)) { $scope.ngModel = { osystem: null, release: null, hwe_kernel: null }; } $scope.releases = getSelectableReleases(); $scope.hwe_kernels = getSelectableKernels(); // Add the reset function to ngModel, allowing users to call // this function to reset the defaults. $scope.ngModel.$reset = function() { $scope.ngModel.osystem = null; $scope.ngModel.release = null; $scope.ngModel.hwe_kernel = null; setDefault(); }; // If the available OSes change, update the available releases and // set the default. $scope.$watch("maasOsSelect.releases", function() { $scope.releases = getSelectableReleases(); setDefault(); }); // If the available releases change, update the available kernels // and set the default. $scope.$watch("maasOsSelect.kernels", function() { $scope.hwe_kernels = getSelectableKernels(); setDefault(); }); // Updates the default and selectable releases. $scope.selectedOSChanged = function() { $scope.releases = getSelectableReleases(); $scope.hwe_kernels = getSelectableKernels(); $scope.ngModel.release = null; $scope.ngModel.hwe_kernel = null; if($scope.releases.length > 0) { $scope.ngModel.release = $scope.releases[0][0]; } }; // Updates the default and selectable kernels. 
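// [Illustrative aside, not part of the original source.] A hypothetical use
// of this directive from a page template, binding the osinfo payload above
// to a selection model:
//
//     <span data-maas-os-select="osinfo" data-ng-model="deploy"></span>
//
// after which deploy.osystem, deploy.release and deploy.hwe_kernel hold
// choice keys such as "ubuntu" and "ubuntu/trusty".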
$scope.selectedReleaseChanged = function() { $scope.hwe_kernels = getSelectableKernels(); $scope.ngModel.hwe_kernel = null; }; } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/placeholder.js0000644000000000000000000000113413056115004026126 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Placeholder directive. * * Allows the placeholder attribute on an element to be dynamic. */ angular.module('MAAS').directive('ngPlaceholder', function() { return { restrict: "A", scope: { ngPlaceholder: "=" }, link: function(scope, element, attrs) { scope.$watch('ngPlaceholder', function() { element[0].placeholder = scope.ngPlaceholder; }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/power_parameters.js0000644000000000000000000001260513056115004027230 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Power parameters directive. */ angular.module('MAAS').run(['$templateCache', function ($templateCache) { // Inject the power-parameters.html into the template cache. $templateCache.put('directive/templates/power-parameters.html', [ '
    ', '', '
    ', '', '
    ', '
    ', '
    ', '', '
    ', '', '
    ', '
' ].join('')); }]); angular.module('MAAS').directive('maasPowerInput', ['$compile', function($compile) { return { restrict: "E", require: "ngModel", scope: { field: '=', ngModel: '=' }, link: function(scope, element, attrs) { var type = scope.field.field_type; var req = scope.field.required ? 'required="required" ' : ''; var html = ""; if(type === "string" || type === "mac_address" || type === "password") { // Build an input element with the correct attributes. var input_type = 'type="text"'; if(type === "password") { // If the input field is a password field, display it // as text or password depending on whether we're // editing the fields. input_type = "data-ng-type=\"ngModel.editing && " + "'text' || 'password'\""; } html = ''; html += ''; // Set the default choice on the model. if(angular.isUndefined(scope.ngModel)) { var i; for(i = 0; i < scope.field.choices.length; i++) { var choice = scope.field.choices[i]; if(scope.field["default"] === choice[0]) { scope.ngModel = choice; break; } } } } else { throw new Error("Unknown power_type: " + type); } // Replace the element with the compiled html using the parent's // scope. The parent scope is used because we want to build the // element as if it was in the parent scope, not the scope that // is defined in this directive. element.replaceWith($compile(html)(scope.$parent)); } }; }]); angular.module('MAAS').directive('maasPowerParameters', function() { return { restrict: "A", require: "ngModel", scope: { maasPowerParameters: '=', ngModel: '=', ngDisabled: '=' }, templateUrl: 'directive/templates/power-parameters.html' }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/sticky_header.js0000644000000000000000000001123613056115004026466 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Sticky header directive. * * Keeps the height of the header in sync with the padding-top css value on * the maas-wrapper element. */ angular.module('MAAS').directive('maasStickyHeader', function() { return { restrict: "A", link: function(scope, element, attrs) { // Amount extra to add to the bottom position of the header. This // gives the correct spacing between the page content and the // header. var EXTRA_OFFSET = 20; // Current height of the header. var headerHeight = -1; // Wrapper element. Grab the element from the root element; if that // fails, search for the element as a parent of this directive. var wrapperElement = angular.element(".maas-wrapper"); if(wrapperElement.length === 0) { wrapperElement = element.parent(".maas-wrapper"); } if(wrapperElement.length === 0) { throw new Error("Unable to find the maas-wrapper element."); } // Holds the number of updateBodyPadding calls that have // been performed. The height of the element is polled every 10ms // for a total of 1 second after checkHeaderHeight has noticed that // the height of the element has changed. This is done to smooth // the transition as a css animation is applied to the height // of the header. var updateCount = 0; // Updates the padding top for the main body, if the height is // different. Function uses setTimeout instead of $timeout in // angular because it does not require a digest cycle to run // after this function completes. Doing so would actually be // a performance hit. Timeout of 10ms was chosen because it // provides a smooth animation as the height of the header is // animated. 
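// [Illustrative aside, not part of the original source.] The polling below
// is plain setTimeout chaining rather than $interval; stripped of the MAAS
// specifics, the pattern is (hypothetical names):
//
//     var poll = function() {
//         syncPadding();                  // cheap DOM write, no $digest
//         handle = setTimeout(poll, 10);  // re-arm until cancelled
//     };
//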
var nextUpdate, updateBodyPadding; updateBodyPadding = function() { // Stop polling once the updateCount reaches 100, // because then a total of 1 second has passed. if(updateCount >= 100) { updateCount = 0; nextUpdate = undefined; return; } updateCount++; // Don't update the padding-top of the main body unless the // height has actually changed. var currentHeight = element.height(); if(headerHeight === currentHeight) { nextUpdate = setTimeout(updateBodyPadding, 10); return; } // Update the padding-top on the main body. headerHeight = currentHeight; var bottomOfHeader = element.offset().top + headerHeight; var paddingTop = bottomOfHeader + EXTRA_OFFSET; wrapperElement.css("padding-top", paddingTop + "px"); nextUpdate = setTimeout(updateBodyPadding, 10); }; // Called every 100ms to check if the height of the element has // changed. When the element height has changed, the polling of // updateBodyPadding will occur for 1 second. var nextCheck, checkHeaderHeight; checkHeaderHeight = function() { // See if height has changed. If not, then do nothing and // check again in 100ms. var currentHeight = element.height(); if(headerHeight === currentHeight) { nextCheck = setTimeout(checkHeaderHeight, 100); return; } // Header height has changed so start the polling of // the updateBodyPadding function. updateCount = 0; if(angular.isDefined(nextUpdate)) { clearTimeout(nextUpdate); } updateBodyPadding(); nextCheck = setTimeout(checkHeaderHeight, 100); }; checkHeaderHeight(); // Clear the timeouts and remove the padding-top on the wrapper // element when the scope is destroyed. scope.$on("$destroy", function() { clearTimeout(nextCheck); if(angular.isDefined(nextUpdate)) { clearTimeout(nextUpdate); } wrapperElement.css("padding-top", ""); }); } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/0000755000000000000000000000000013056115004024451 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/type.js0000644000000000000000000000263213056115004024631 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Type directive. */ angular.module('MAAS').directive('ngType', function() { return { restrict: "A", scope: { ngType: "=" }, link: function(scope, element, attrs) { scope.$watch('ngType', function() { var valid_types = [ 'button', 'checkbox', 'color', 'date', 'datetime', 'datetime-local', 'email', 'file', 'hidden', 'image', 'month', 'number', 'password', 'radio', 'range', 'reset', 'search', 'submit', 'tel', 'text', 'time', 'url', 'week' ]; if(valid_types.indexOf(scope.ngType) !== -1) { element[0].type = scope.ngType; } else { throw new Error("Invalid input type: " + scope.ngType); } }); } }; });maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/version_reloader.js0000644000000000000000000000260413056115004027211 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Version reloader. * * Watches the version reported by the GeneralManager; if it changes, then * the entire page is reloaded, bypassing the local browser cache. 
*/ angular.module('MAAS').directive('maasVersionReloader', [ '$window', 'GeneralManager', 'ManagerHelperService', function($window, GeneralManager, ManagerHelperService) { return { restrict: "A", controller: function($scope) { $scope.version = GeneralManager.getData("version"); // Reload the page, bypassing the browser cache. $scope.reloadPage = function() { // Force cache reload by passing true. $window.location.reload(true); }; ManagerHelperService.loadManager(GeneralManager).then( function() { GeneralManager.enableAutoReload(true); $scope.$watch("version.text", function(newValue, oldValue) { if(newValue !== oldValue) { $scope.reloadPage(); } }); }); } }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_accordion.js0000644000000000000000000000511613056115004030012 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for accordion directive. */ describe("maasAccordion", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. Not used in this test, but // required to compile the directive. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective() { var directive; var html = [ '
    ', '
    ', '

    One

    ', '

    Two

    ', '

    Three

    ', '
    ', '
    ' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find(".maas-accordion"); } // Compile the directive and get the tabs. var directive, tabs; beforeEach(function() { directive = compileDirective(); tabs = directive.find('.maas-accordion-tab'); }); it("sets a new active removing other actives", function() { angular.element(tabs[1]).click(); expect(angular.element(tabs[0]).hasClass("active")).toBe(false); expect(angular.element(tabs[1]).hasClass("active")).toBe(true); expect(angular.element(tabs[2]).hasClass("active")).toBe(false); angular.element(tabs[2]).click(); expect(angular.element(tabs[0]).hasClass("active")).toBe(false); expect(angular.element(tabs[1]).hasClass("active")).toBe(false); expect(angular.element(tabs[2]).hasClass("active")).toBe(true); }); it("leaves current active if clicked", function() { angular.element(tabs[0]).click(); expect(angular.element(tabs[0]).hasClass("active")).toBe(true); expect(angular.element(tabs[1]).hasClass("active")).toBe(false); expect(angular.element(tabs[2]).hasClass("active")).toBe(false); }); it("removes all click handlers on $destroy", function() { directive.scope().$destroy(); angular.forEach(tabs, function(tab) { expect($._data(angular.element(tab)[0], 'events')).toBeUndefined(); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_call_to_action.js0000644000000000000000000001440013056115004031017 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for Call-To-Action dropdown directive. */ describe("maasCta", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Make items for the dropdown. function makeItems() { var i; var items = []; for(i = 0; i < 5; i++) { items.push({ title: makeName("option") }); } return items; } // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); $scope.items = makeItems(); $scope.active = null; })); // Return the compiled directive with the items from the scope. function compileDirective( maas_cta, ng_model, ng_change, ng_click, title) { if(!title) { title = ""; } var directive; var html = '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("div"); } it("default shown is false", function() { var directive = compileDirective("items", "active"); expect(directive.isolateScope().shown).toBe(false); }); it("default secondary is false", function() { var directive = compileDirective("items", "active"); expect(directive.isolateScope().secondary).toBe(false); }); it("sets default title to 'Take action'", function() { var directive = compileDirective("items", "active"); expect(directive.find("a.cta-group__link").text()).toBe("Take action"); }); it("sets default title to another name", function() { var name = makeName("title"); var directive = compileDirective("items", "active", null, null, name); expect(directive.find("a.cta-group__link").text()).toBe(name); }); it("click link sets shown to true", function() { var directive = compileDirective("items", "active"); directive.find("a.cta-group__link").click(); expect(directive.isolateScope().shown).toBe(true); }); it("dropdown hidden when shown is false", function() { var directive = compileDirective("items", "active"); var dropdown = directive.find("ul.cta-group__dropdown"); expect(dropdown.hasClass("ng-hide")).toBe(true); }); it("dropdown shown when shown is true", function() { var directive = compileDirective("items", "active"); directive.isolateScope().shown = true; $scope.$digest(); var dropdown = directive.find("ul.cta-group__dropdown"); expect(dropdown.hasClass("ng-hide")).toBe(false); }); it("dropdown secondary when secondary is true", function() { var directive = compileDirective("items", "active"); directive.isolateScope().secondary = true; $scope.$digest(); expect(directive.hasClass("secondary")).toBe(false); }); it("dropdown list options", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); var listItems = []; angular.forEach(links, function(ele, i) { listItems.push(angular.element(ele).text()); }); var expectTitles = []; angular.forEach($scope.items, function(item) { expectTitles.push(item.title); }); expect(expectTitles).toEqual(listItems); }); it("dropdown select sets shown to false", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); // Open the dropdown. directive.find("a.cta-group__link").click(); expect(directive.isolateScope().shown).toBe(true); // Clicking a link should close the dropdown. 
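// [Illustrative aside, not part of the original source.] All of these suites
// exercise the fully compiled directive rather than its controller in
// isolation; the recurring recipe, with hypothetical names, is:
//
//     var el = $compile('<div data-my-directive></div>')($scope);
//     $scope.$digest();    // flush watchers so bindings render
//     el.find("a").click();
//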
angular.element(links[0]).click(); expect(directive.isolateScope().shown).toBe(false); }); it("dropdown select sets model", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); angular.element(links[0]).click(); expect(directive.scope().active).toBe($scope.items[0]); }); it("dropdown select sets title", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); angular.element(links[0]).click(); var title = directive.find("a.cta-group__link").text(); expect(title).toBe($scope.items[0].title); }); it("dropdown select sets secondary", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); angular.element(links[0]).click(); expect(directive.isolateScope().secondary).toBe(true); }); it("clicking body will set shown to false", function() { var directive = compileDirective("items", "active"); var links = directive.find("li.cta-group__item > a"); // Open the dropdown. directive.find("a.cta-group__link").click(); expect(directive.isolateScope().shown).toBe(true); // Click the body. var $document; inject(function($injector) { $document = $injector.get("$document"); }); angular.element($document.find('body')).click(); expect(directive.isolateScope().shown).toBe(false); }); it("clicking button will fire ng-click", function() { $scope.clicked = jasmine.createSpy("clicked"); var directive = compileDirective("items", "active", null, "clicked()"); var links = directive.find("li.cta-group__item > a"); // Open the dropdown. directive.find("a.cta-group__link").click(); expect($scope.clicked).toHaveBeenCalled(); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_code_lines.js0000644000000000000000000000236113056115004030154 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for code lines directive. */ describe("maasCodeLines", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective(maasCodeLines) { var directive; var html = [ '
    ', '', '
    ' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find('code'); } it("spans should have the class line", function() { $scope.getText = function() { return "codetext"; }; var directive = compileDirective("getText()"); expect(directive.find('span').hasClass("line")).toBe(true); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_contenteditable.js0000644000000000000000000000600313056115004031211 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for contenteditable. */ describe("contenteditable", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the osinfo from the scope. function compileDirective(ngModel, ngDisabled, maasEditing) { var directive; var html = '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("span"); } it("sets the content of span to the value of model", function() { var name = makeName("name"); $scope.name = name; var directive = compileDirective("name"); expect(directive.text()).toBe(name); }); it("change event on the span will change the value", function() { var name = makeName("name"); $scope.name = makeName("name"); var directive = compileDirective("name"); directive.text(name); directive.change(); $scope.$digest(); expect($scope.name).toBe(name); }); it("blur event on the span will change the value", function() { var name = makeName("name"); $scope.name = makeName("name"); var directive = compileDirective("name"); directive.text(name); directive.blur(); $scope.$digest(); expect($scope.name).toBe(name); }); it("keyup event on the span will change the value", function() { var name = makeName("name"); $scope.name = makeName("name"); var directive = compileDirective("name"); directive.text(name); directive.keyup(); $scope.$digest(); expect($scope.name).toBe(name); }); it("cannot gain focus if disabled", function() { $scope.name = makeName("name"); $scope.disabled = function() { return true; }; var directive = compileDirective("name", "disabled()"); directive.focus(); $scope.$digest(); expect(directive.is(":focus")).toBe(false); }); it("calls maasEditing on focus if enabled", function() { $scope.name = makeName("name"); $scope.disabled = function() { return false; }; $scope.nowEditing = jasmine.createSpy("nowEditing"); var directive = compileDirective("name", "disabled()", "nowEditing()"); directive.focus(); $scope.$digest(); expect($scope.nowEditing).toHaveBeenCalled(); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_dbl_click_overlay.js0000644000000000000000000001235713056115004031525 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for the double click overlay. */ describe("maasDblClickOverlay", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Get the BrowserService before each test. var BrowserService; beforeEach(inject(function($injector) { BrowserService = $injector.get("BrowserService"); })); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the osinfo from the scope. function compileDirective(type, dblClickHandler) { var directive; var html = '
    '; if(type === "select") { html += ''; } else if(type === "input") { html += ''; } else if(type === "div") { html += '
    '; } else { throw new Error("Unknown type: " + type); } html += "
    "; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("div[data-maas-dbl-click-overlay]"); } it("creates directive with class maas-dbl-overlay", function() { var directive = compileDirective("select", ""); expect(directive.hasClass("maas-dbl-overlay")).toBe(true); }); it("creates directive with overlay element", function() { var directive = compileDirective("select", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.length).toBe(1); }); it("sets overlay cursor to pointer for select element", function() { var directive = compileDirective("select", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.css("cursor")).toBe("pointer"); }); it("sets overlay cursor to text for input element", function() { var directive = compileDirective("input", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.css("cursor")).toBe("text"); }); it("doesnt sets overlay cursor for div element", function() { var directive = compileDirective("div", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.css("cursor")).toBe(""); }); it("triggers mousedown on select when overlay clicked", function(done) { var directive = compileDirective("select", ""); var select = directive.find("select#test-element"); select.mousedown(function() { // Test will timeout if this handler is not called. done(); }); var overlay = directive.find('div.maas-dbl-overlay--overlay'); overlay.click(); }); it("sets focus on input when overlay clicked", function(done) { var directive = compileDirective("input", ""); var input = directive.find("input#test-element"); input.focus(function() { // Test will timeout if this handler is not called. done(); }); var overlay = directive.find('div.maas-dbl-overlay--overlay'); overlay.click(); }); it("triggers click on div when overlay clicked", function(done) { var directive = compileDirective("div", ""); var div = directive.find("div#test-element"); div.click(function() { // Test will timeout if this handler is not called. done(); }); var overlay = directive.find('div.maas-dbl-overlay--overlay'); overlay.click(); }); it("calls double click handler when the overlay is double clicked", function(done) { $scope.doubleClick = function() { // Test will timeout if this handler is not called. 
done(); }; var directive = compileDirective("div", "doubleClick()"); var overlay = directive.find('div.maas-dbl-overlay--overlay'); overlay.dblclick(); }); it("removes all click handlers on $destroy", function() { var directive = compileDirective("div", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); directive.scope().$destroy(); expect($._data(angular.element(overlay)[0], 'events')).toBeUndefined(); }); it("hides overlay if on firefox", function() { BrowserService.browser = "firefox"; var directive = compileDirective("div", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.hasClass("ng-hide")).toBe(true); }); it("doesn't hide overlay if not on firefox", function() { BrowserService.browser = "chrome"; var directive = compileDirective("div", ""); var overlay = directive.find('div.maas-dbl-overlay--overlay'); expect(overlay.hasClass("ng-hide")).toBe(false); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_enter_blur.js0000644000000000000000000000365513056115004030220 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for enter blur directive. */ describe("maasEnterBlur", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. Not used in this test, but // required to compile the directive. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective() { var directive; var html = [ '
    ', '', '
' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); // Attach to document so it can grab focus. directive.appendTo(document.body); return directive.find("input"); } // Compile the directive. var directive; beforeEach(function() { directive = compileDirective(); }); it("removes focus on enter keydown", function() { directive.focus(); expect(document.activeElement).toBe(directive[0]); // Send enter. var evt = angular.element.Event("keydown"); evt.which = 13; directive.trigger(evt); expect(document.activeElement).not.toBe(directive[0]); }); it("removes focus on enter keypress", function() { directive.focus(); expect(document.activeElement).toBe(directive[0]); // Send enter. var evt = angular.element.Event("keypress"); evt.which = 13; directive.trigger(evt); expect(document.activeElement).not.toBe(directive[0]); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_error_overlay.js0000644000000000000000000001316413056115004030745 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for error overlay. */ describe("maasErrorOverlay", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Get required angular pieces and create a new scope before each test. var $scope, $window, $timeout; beforeEach(inject(function($rootScope, $injector) { $scope = $rootScope.$new(); $window = $injector.get("$window"); $timeout = $injector.get("$timeout"); })); // Load the RegionConnection and ErrorService. var RegionConnection, ErrorService; beforeEach(inject(function($injector) { RegionConnection = $injector.get("RegionConnection"); ErrorService = $injector.get("ErrorService"); })); // Return the compiled directive. function compileDirective() { var directive; var html = '
    ' + '
    ' + '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("span"); } it("sets connected to value of isConnected", function() { spyOn(RegionConnection, "isConnected").and.returnValue(true); var directive = compileDirective(); expect(directive.scope().connected).toBe(true); }); it("sets wasConnected to true once connected", function() { spyOn(RegionConnection, "isConnected").and.returnValue(true); var directive = compileDirective(); expect(directive.scope().wasConnected).toBe(true); }); it("keeps wasConnected to true if becomes disconnected", function() { var spy = spyOn(RegionConnection, "isConnected"); spy.and.returnValue(true); var directive = compileDirective(); spy.and.returnValue(false); $scope.$digest(); expect(directive.scope().wasConnected).toBe(true); }); it("keeps clientError to true if error in ErrorService", function() { ErrorService._error = makeName("error"); var directive = compileDirective(); expect(directive.scope().clientError).toBe(true); }); it("sets error to error in ErrorService", function() { var error = makeName("error"); ErrorService._error = error; var directive = compileDirective(); expect(directive.scope().error).toBe(error); }); it("sets error to error in RegionConnection", function() { var error = makeName("error"); RegionConnection.error = error; var directive = compileDirective(); expect(directive.scope().error).toBe(error); }); it("doesnt sets error to error in RegionConnection if already error in " + "ErrorService", function() { var error = makeName("error"); ErrorService._error = error; RegionConnection.error = makeName("error"); var directive = compileDirective(); expect(directive.scope().error).toBe(error); }); describe("show", function() { it("returns true if not connected", function() { spyOn(RegionConnection, "isConnected").and.returnValue(false); var directive = compileDirective(); expect(directive.scope().show()).toBe(true); }); it("returns true if error in ErrorService", function() { ErrorService._error = makeName("error"); var directive = compileDirective(); expect(directive.scope().show()).toBe(true); }); it("returns false if connected and no error", function() { spyOn(RegionConnection, "isConnected").and.returnValue(true); var directive = compileDirective(); expect(directive.scope().show()).toBe(false); }); it("returns false if disconnected less than 1/2 second", function() { var spy = spyOn(RegionConnection, "isConnected"); spy.and.returnValue(true); var directive = compileDirective(); spy.and.returnValue(false); $scope.$digest(); expect(directive.scope().show()).toBe(false); }); it("returns true if disconnected more than 1/2 second", function() { var spy = spyOn(RegionConnection, "isConnected"); spy.and.returnValue(true); var directive = compileDirective(); spy.and.returnValue(false); $scope.$digest(); $timeout.flush(500); expect(directive.scope().show()).toBe(true); }); }); describe("getTitle", function() { it("returns error title", function() { ErrorService._error = makeName("error"); var directive = compileDirective(); expect(directive.scope().getTitle()).toBe( "Error occurred"); }); it("returns connection lost error", function() { var spy = spyOn(RegionConnection, "isConnected"); spy.and.returnValue(true); var directive = compileDirective(); spy.and.returnValue(false); $scope.$digest(); expect(directive.scope().getTitle()).toBe( "Connection lost, reconnecting..."); }); it("returns connecting", function() { 
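// [Illustrative aside, not part of the original source.] Rather than opening
// a real websocket, these suites stub the region layer with Jasmine spies;
// the recurring idiom is:
//
//     var spy = spyOn(RegionConnection, "isConnected");
//     spy.and.returnValue(true);     // pretend the region is up
//     spy.and.returnValue(false);    // later, simulate a drop
//     $scope.$digest();              // let the $watch observe the change
//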
spyOn(RegionConnection, "isConnected").and.returnValue(false); var directive = compileDirective(); expect(directive.scope().getTitle()).toBe( "Connecting..."); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_error_toggle.js0000644000000000000000000000477213056115004030552 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for error toggle. */ describe("maasErrorToggle", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Get required angular pieces and create a new scope before each test. var $scope, $timeout; beforeEach(inject(function($rootScope, $injector) { $scope = $rootScope.$new(); $timeout = $injector.get("$timeout"); })); // Load the RegionConnection and ErrorService. var RegionConnection, ErrorService; beforeEach(inject(function($injector) { RegionConnection = $injector.get("RegionConnection"); ErrorService = $injector.get("ErrorService"); })); // Return the compiled directive. function compileDirective() { var directive; var html = '
'; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("span"); } it("doesn't hide element instantly if region not connected", function() { spyOn(RegionConnection, "isConnected").and.returnValue(false); var directive = compileDirective(); expect(directive.hasClass("ng-hide")).toBe(false); }); it("hides element if region not connected after 1/2 second", function() { spyOn(RegionConnection, "isConnected").and.returnValue(false); var directive = compileDirective(); $timeout.flush(500); expect(directive.hasClass("ng-hide")).toBe(true); }); it("hides element if error in ErrorService", function() { ErrorService._error = makeName("error"); var directive = compileDirective(); expect(directive.hasClass("ng-hide")).toBe(true); }); it("shows element if connected and no error", function() { spyOn(RegionConnection, "isConnected").and.returnValue(true); var directive = compileDirective(); expect(directive.hasClass("ng-hide")).toBe(false); }); it("shows element if becomes connected", function() { var spy = spyOn(RegionConnection, "isConnected"); spy.and.returnValue(false); var directive = compileDirective(); spy.and.returnValue(true); $scope.$digest(); expect(directive.hasClass("ng-hide")).toBe(false); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_os_select.js0000644000000000000000000002076513056115004030040 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for OS select directive. */ describe("maasOsSelect", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Make OS choice. function makeOS() { var name = makeName("os"); return [name, name]; } // Make release choice for os. function makeRelease(os) { var release = makeName("release"); var osRelease = os[0] + "/" + release; return [osRelease, release]; } // Make fake os information object. function makeOSInfo() { var i, j, os; var osystems = [], releases = []; for(i = 0; i < 5; i++) { os = makeOS(); osystems.push(os); for(j = 0; j < 5; j++) { var release = makeRelease(os); releases.push(release); } } return { osystems: osystems, releases: releases, default_osystem: osystems[osystems.length - 1][0], default_release: releases[releases.length - 1][0].split("/")[1] }; } // Return subset of releases for the os. function getReleasesForOS(os, releases) { var i, choice, available = []; for(i = 0; i < releases.length; i++) { choice = releases[i]; if(choice[0].indexOf(os) > -1) { available.push(choice); } } return available; } // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); $scope.osinfo = makeOSInfo(); $scope.selected = null; })); // Return the compiled directive with the osinfo from the scope. function compileDirective(maasOsSelect, ngModel) { var directive; var html = '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("span"); } it("creates os select with ng-options", function() { var directive = compileDirective("osinfo", "selected"); var select = directive.find('select[name="os"]'); expect(select.attr("data-ng-options")).toBe( "os[0] as os[1] for os in maasOsSelect.osystems"); }); it("creates os select with ng-model", function() { var directive = compileDirective("osinfo", "selected"); var select = directive.find('select[name="os"]'); expect(select.attr("data-ng-model")).toBe("ngModel.osystem"); }); it("creates os select with ng-change", function() { var directive = compileDirective("osinfo", "selected"); var select = directive.find('select[name="os"]'); expect(select.attr("data-ng-change")).toBe("selectedOSChanged()"); }); it("creates release select with ng-options", function() { var directive = compileDirective("osinfo", "selected"); var select = directive.find('select[name="release"]'); expect(select.attr("data-ng-options")).toBe( "release[0] as release[1] for release in releases"); }); it("creates release select with ng-model", function() { var directive = compileDirective("osinfo", "selected"); var select = directive.find('select[name="release"]'); expect(select.attr("data-ng-model")).toBe("ngModel.release"); }); it("adds the $reset function to the model", function() { var directive = compileDirective("osinfo", "selected"); expect(angular.isFunction($scope.selected.$reset)).toBe(true); }); it("model $reset resets the default selection", function() { var directive = compileDirective("osinfo", "selected"); $scope.selected.osystem = makeName("os"); $scope.selected.release = makeName("release"); $scope.selected.$reset(); expect($scope.selected.osystem).toBe($scope.osinfo.default_osystem); expect($scope.selected.release).toBe($scope.osinfo.default_osystem + "/" + $scope.osinfo.default_release); }); it("default $scope.selected to be initialized with defaults", function() { var directive = compileDirective("osinfo", "selected"); expect($scope.selected.osystem).toBe($scope.osinfo.default_osystem); expect($scope.selected.release).toBe($scope.osinfo.default_osystem + "/" + $scope.osinfo.default_release); }); it("default $scope.selected to be initialized with weighted ubuntu os", function() { os = ["ubuntu", "Ubuntu"]; release = ["ubuntu/trusty", "Ubuntu Trusty 14.04 (LTS)"]; $scope.osinfo.osystems.push(os); $scope.osinfo.releases.push(release); $scope.osinfo.default_osystem = makeName("default_os"); $scope.osinfo.default_release = makeName("default_release"); var directive = compileDirective("osinfo", "selected"); expect($scope.selected.osystem).toBe("ubuntu"); expect($scope.selected.release).toBe("ubuntu/trusty"); }); it("default $scope.selected to be initialized with first available", function() { $scope.osinfo.default_osystem = makeName("default_os"); $scope.osinfo.default_release = makeName("default_release"); var directive = compileDirective("osinfo", "selected"); expect($scope.selected.osystem).toBe($scope.osinfo.osystems[0][0]); expect($scope.selected.release).toBe($scope.osinfo.releases[0][0]); }); it("default $scope.selected to be initialized to null when empty osinfo", function() { $scope.osinfo.osystems = []; $scope.osinfo.releases = []; $scope.osinfo.default_osystem = makeName("default_os"); $scope.osinfo.default_release = makeName("default_release"); var directive = compileDirective("osinfo", "selected"); 
expect($scope.selected.osystem).toBeNull(); expect($scope.selected.release).toBeNull(); }); it("default $scope.selected to be untouched", function() { var current = { osystem: "os", release: "release" }; $scope.selected = current; var directive = compileDirective("osinfo", "selected"); expect($scope.selected.osystem).toBe(current.osystem); expect($scope.selected.release).toBe(current.release); }); it("initializes only selectable releases", function() { $scope.selected = { osystem: $scope.osinfo.osystems[0][0], release: "" }; var directive = compileDirective("osinfo", "selected"); expect(directive.isolateScope().releases).toEqual( getReleasesForOS( $scope.osinfo.osystems[0][0], $scope.osinfo.releases)); }); it("updates releases when osinfo changes", function() { var directive = compileDirective("osinfo", "selected"); $scope.osinfo = makeOSInfo(); $scope.selected = { osystem: $scope.osinfo.osystems[0][0], release: "" }; $scope.$digest(); expect(directive.isolateScope().releases).toEqual( getReleasesForOS( $scope.osinfo.osystems[0][0], $scope.osinfo.releases)); }); it("selectedOSChanged updates releases", function() { var directive = compileDirective("osinfo", "selected"); $scope.selected = { osystem: $scope.osinfo.osystems[1][0], release: "" }; $scope.$digest(); directive.isolateScope().selectedOSChanged(); expect(directive.isolateScope().releases).toEqual( getReleasesForOS( $scope.osinfo.osystems[1][0], $scope.osinfo.releases)); }); it("selectedOSChanged sets first release as selected release", function() { var directive = compileDirective("osinfo", "selected"); $scope.selected = { osystem: $scope.osinfo.osystems[1][0], release: "" }; $scope.$digest(); directive.isolateScope().selectedOSChanged(); var releases = getReleasesForOS( $scope.osinfo.osystems[1][0], $scope.osinfo.releases); expect($scope.selected.release).toEqual(releases[0][0]); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_placeholder.js0000644000000000000000000000333213056115004030331 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for placeholder directive. */ describe("ngPlaceholder", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective(ngPlaceholder) { var directive; var html = [ '
    ', '', '
    ' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("input"); } it("sets placeholder attribute on input", function() { var placeholderText = makeName("placeholder"); $scope.placeholder = placeholderText; var directive = compileDirective("placeholder"); expect(directive[0].placeholder).toEqual(placeholderText); }); it("sets placeholder attribute on input when changed", function() { var placeholderText = makeName("placeholder"); $scope.placeholder = placeholderText; var directive = compileDirective("placeholder"); // Change the text. placeholderText = makeName("placeholder"); $scope.placeholder = placeholderText; $scope.$digest(); expect(directive[0].placeholder).toEqual(placeholderText); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_power_parameters.js0000644000000000000000000002171613056115004031434 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for power parameters directive. */ describe("maasPowerParameters", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Make field for directive. function makeField(name, type, required, defaultValue, choices) { if(angular.isUndefined(type)) { type = "string"; } if(angular.isUndefined(required)) { required = false; } if(angular.isUndefined(defaultValue)) { defaultValue = ""; } if(angular.isUndefined(choices)) { choices = []; } return { "name": name, "label": name, "field_type": type, "required": required, "default": defaultValue, "choices": choices }; } // Make power type for directive. function makePowerType(name, description, fields) { if(angular.isUndefined(fields)) { fields = []; } return { "name": name, "description": description, "fields": fields }; } // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); describe("maas-power-input", function() { // Return the compiled directive with the items from the scope. function compileDirective(maas_power_input, ng_model, ng_disabled) { var directive; var html = '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive; } it("creates input for field_type of string", function() { $scope.field = makeField("test", "string"); var directive = compileDirective("field", "value"); var input = directive.find("input"); expect(input.attr("type")).toBe("text"); expect(input.attr("name")).toBe("test"); expect(input.attr("data-ng-model")).toBe("value"); }); it("creates input with required", function() { $scope.field = makeField("test", "string", true); var directive = compileDirective("field", "value"); var input = directive.find("input"); expect(input.attr("required")).toBe("required"); }); it("creates input and sets defaultValue on ng-model", function() { var defaultValue = makeName("default"); $scope.field = makeField("test", "string", false, defaultValue); var directive = compileDirective("field", "value"); var input = directive.find("input"); expect(input.attr("type")).toBe("text"); expect($scope.value).toBe(defaultValue); }); it("creates input with ng-pattern for mac address", function() { $scope.field = makeField("test", "mac_address"); var directive = compileDirective("field", "value"); var input = directive.find("input"); expect(input.attr("data-ng-pattern")).toBe( "/^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$/"); }); it("creates select for field_type of choice", function() { $scope.field = makeField("test", "choice"); var directive = compileDirective("field", "value"); var select = directive.find("select"); expect(select.attr("name")).toBe("test"); expect(select.attr("data-ng-model")).toBe("value"); expect(select.attr("data-ng-options")).toBe( "choice[0] as choice[1] for choice in field.choices"); }); it("creates select with required", function() { $scope.field = makeField("test", "choice", true); var directive = compileDirective("field", "value"); var select = directive.find("select"); expect(select.attr("required")).toBe("required"); }); it("creates select and sets defaultValue on ng-model", function() { var choice1 = ["name1", "title1"]; var choice2 = ["name2", "title2"]; var choices = [ choice1, choice2 ]; $scope.field = makeField( "test", "choice", false, "name2", choices); var directive = compileDirective("field", "value"); var select = directive.find("select"); expect(select.attr("name")).toBe("test"); expect($scope.value).toBe(choice2); }); it("creates input with ng-disabled", function() { $scope.field = makeField("test", "string"); $scope.disabled = true; var directive = compileDirective("field", "value", "disabled"); var input = directive.find("input"); expect(input.attr("data-ng-disabled")).toBe("disabled"); }); it("creates select with ng-disabled", function() { $scope.field = makeField("test", "choice"); $scope.disabled = true; var directive = compileDirective("field", "value", "disabled"); var select = directive.find("select"); expect(select.attr("data-ng-disabled")).toBe("disabled"); }); it("creates password for field_type of password", function() { $scope.field = makeField("test", "password"); var directive = compileDirective("field", "value"); var input = directive.find("input"); expect(input.attr("name")).toBe("test"); expect(input.attr("data-ng-model")).toBe("value"); expect(input.attr("data-ng-type")).toBe( "ngModel.editing && 'text' || 'password'"); }); }); describe("maas-power-parameters", function() { // Return the compiled directive with the items from the scope. 
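// [Illustrative aside, not part of the original source.] makePowerType and
// makeField above mirror the power-type payload the directives consume; a
// hypothetical instance:
//
//     {
//         name: "ipmi", description: "IPMI",
//         fields: [{name: "power_address", label: "power_address",
//                   field_type: "string", required: true,
//                   "default": "", choices: []}]
//     }
//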
function compileDirective( maas_power_parameters, ng_model, ng_disabled) { var directive; var html = '
    '; // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive; } it("creates select with ng-model, ng-options and ng-disabled", function() { var fields = [ makeField("test1"), makeField("test2") ]; var powerType = makePowerType("test", "Test Title", fields); $scope.powerTypes = [powerType]; var directive = compileDirective("powerTypes", "value"); var select = directive.find("select"); expect(select.attr("data-ng-model")).toBe("ngModel.type"); expect(select.attr("data-ng-options")).toBe( "type as type.description for type " + "in maasPowerParameters"); expect(select.attr("data-ng-disabled")).toBe("ngDisabled"); }); it("creates option with description", function() { var fields = [ makeField("test1"), makeField("test2") ]; var powerType = makePowerType("test", "Test Title", fields); $scope.powerTypes = [powerType]; var directive = compileDirective("powerTypes", "value"); var select = directive.find("select"); var option = select.find('option[value="0"]'); expect(option.text()).toBe("Test Title"); }); it("creates fields on power type select", function() { var fields = [ makeField("test1"), makeField("test2") ]; var powerType = makePowerType("test", "Test Title", fields); $scope.powerTypes = [powerType]; $scope.power = { type: null, parameters: {} }; var directive = compileDirective("powerTypes", "power"); var select = directive.find("select"); // Set the power type on the select's scope. select.scope().ngModel.type = powerType; $scope.$digest(); // The two fields should now be shown. expect(directive.find("input").length).toBe(2); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_sticky_header.js0000644000000000000000000000547313056115004030675 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for the sticky header. */ describe("maasStickyHeader", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the header and wrapper from the // scope. function compileDirective() { var directive; var html = [ '
    ', '
    ', '', '
    ', '
    '].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive; } it("changing header height, changes body padding-top", function(done) { var directive = compileDirective(); var body = directive.find("div.maas-wrapper"); var header = directive.find("header"); var height = makeInteger(100, 3000); header.height(height); setTimeout(function() { expect(body.css("padding-top")).toBe(height + 20 + "px"); done(); }, 100); }); it("changing header height quickly keeps body padding-top in sync", function(done) { var directive = compileDirective(); var body = directive.find("div.maas-wrapper"); var header = directive.find("header"); var height = makeInteger(100, 3000); header.height(height); var checkAndIncrement, count = 0; checkAndIncrement = function() { expect(body.css("padding-top")).toBe(height + 20 + "px"); count += 1; height += 1; header.height(height); if(count === 10) { done(); } else { setTimeout(checkAndIncrement, 10); } }; setTimeout(checkAndIncrement, 100); }); it("removes padding-top on $destroy", function(done) { var directive = compileDirective(); var body = directive.find("div.maas-wrapper"); var header = directive.find("header"); var height = makeInteger(100, 3000); header.height(height); setTimeout(function() { expect(body.css("padding-top")).toBe(height + 20 + "px"); $scope.$destroy(); expect(body.css("padding-top")).toBe(''); done(); }, 100); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_type.js0000644000000000000000000000357613056115004027042 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for type directive. */ describe("ngType", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective(ngType) { var directive; var html = [ '
    ', '', '
    ' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("input"); } it("sets type attribute on input", function() { var type = 'text'; $scope.type = type; var directive = compileDirective("type"); expect(directive[0].type).toEqual(type); }); it("sets type attribute on input when changed", function() { var type = 'text'; $scope.type = type; var directive = compileDirective("type"); // Change the type. type = 'password'; $scope.type = type; $scope.$digest(); expect(directive[0].type).toEqual(type); }); it("rejects invalid input type", function() { var type = 'text'; $scope.type = type; var directive = compileDirective("type"); // Change the type to something invalid. type = makeName("type"); $scope.type = type; expect(function() { $scope.$digest(); }).toThrow(new Error("Invalid input type: " + type)); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/directives/tests/test_version_reloader.js0000644000000000000000000000541013056115004031410 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for version reloader. */ describe("maasVersionReloader", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $q; beforeEach(inject(function($injector) { $q = $injector.get("$q"); })); // Load the GeneralManager, ManagerHelperService, RegionConnection and // mock the websocket connection. var GeneralManager, ManagerHelperService, RegionConnection, webSocket; beforeEach(inject(function($injector) { GeneralManager = $injector.get("GeneralManager"); RegionConnection = $injector.get("RegionConnection"); ManagerHelperService = $injector.get("ManagerHelperService"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Create a new scope before each test. var $scope; beforeEach(inject(function($rootScope) { $scope = $rootScope.$new(); })); // Return the compiled directive with the items from the scope. function compileDirective() { var directive; var html = [ '
    ', '
    ', '
    ' ].join(''); // Compile the directive. inject(function($compile) { directive = $compile(html)($scope); }); // Perform the digest cycle to finish the compile. $scope.$digest(); return directive.find("div[data-maas-version-reloader]"); } it("sets version from GeneralManager", function() { var directive = compileDirective(); expect($scope.version).toBe(GeneralManager.getData("version")); }); it("watches version.text once ManagerHelperService resolves", function() { var defer = $q.defer(); spyOn(ManagerHelperService, "loadManager").and.returnValue( defer.promise); var directive = compileDirective(); spyOn($scope, "$watch"); defer.resolve(); $scope.$digest(); expect($scope.$watch.calls.argsFor(0)[0]).toBe("version.text"); }); it("calls reloadPage when version.text changes", function() { var defer = $q.defer(); spyOn(ManagerHelperService, "loadManager").and.returnValue( defer.promise); var directive = compileDirective(); spyOn($scope, "reloadPage"); defer.resolve(); $scope.$digest(); $scope.version.text = makeName("new"); $scope.$digest(); expect($scope.reloadPage).toHaveBeenCalled(); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/clusters.js0000644000000000000000000000203213056115004025324 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Clusters Manager * * Manages all of the clusters in the browser. The manager uses the * RegionConnection to load the clusters, update the clusters, and listen for * notification events about clusters. */ angular.module('MAAS').factory( 'ClustersManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function ClustersManager() { Manager.call(this); this._pk = "id"; this._handler = "cluster"; // Listen for notify events for the cluster object. var self = this; RegionConnection.registerNotifier("cluster", function(action, data) { self.onNotify(action, data); }); } ClustersManager.prototype = new Manager(); return new ClustersManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/devices.js0000644000000000000000000000414413056115004025110 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Devices Manager * * Manages all of the devices in the browser. This manager is used for the * device listing and the device view page. The manager uses the * RegionConnection to load the devices, update the devices, and listen for * notification events about devices. */ angular.module('MAAS').factory( 'DevicesManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function DevicesManager() { Manager.call(this); this._pk = "system_id"; this._handler = "device"; this._metadataAttributes = { "owner": null, "tags": null, "zone": function(device) { return device.zone.name; } }; // Listen for notify events for the device object. var self = this; RegionConnection.registerNotifier("device", function(action, data) { self.onNotify(action, data); }); } DevicesManager.prototype = new Manager(); // Create a device. DevicesManager.prototype.create = function(node) { // We don't add the item to the list because a NOTIFY event will // add the device to the list. Adding it here will cause angular to // complain because the same object exists in the list.
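        // Illustrative usage sketch (not part of the original source):
        //
        //     DevicesManager.create(device).then(function(result) {
        //         // "result" is the region's response; the new device
        //         // appears in the manager's item list only once the
        //         // NOTIFY message arrives.
        //     });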
return RegionConnection.callMethod("device.create", node); }; // Perform the action on the device. DevicesManager.prototype.performAction = function( device, action, extra) { if(!angular.isObject(extra)) { extra = {}; } return RegionConnection.callMethod("device.action", { "system_id": device.system_id, "action": action, "extra": extra }); }; return new DevicesManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/events.js0000644000000000000000000001166613056115004025001 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Event Manager Factory * * Manages all of the events for a node in the browser. The manager uses the * RegionConnection to load the events and listen for event notifications. */ angular.module('MAAS').factory( 'EventsManagerFactory', ['RegionConnection', 'Manager', 'ErrorService', function(RegionConnection, Manager, ErrorService) { function EventsManager(nodeId, factory) { Manager.call(this); this._pk = "id"; this._handler = "event"; this._nodeId = nodeId; this._factory = factory; this._maxDays = 30; } EventsManager.prototype = new Manager(); // Return the initial batch parameters with the id of the node // and the maximum number of days to load. EventsManager.prototype._initBatchLoadParameters = function() { return { "node_id": this._nodeId, "max_days": this._maxDays }; }; // Destroys its self. Removes self from the EventsManagerFactory. EventsManager.prototype.destroy = function(){ this._factory.destroyManager(this); // If this manager has ever loaded then the region is sending // events about this node. Tell the RegionConnection not to // stop sending notification for events from this node. if(this.isLoaded()) { RegionConnection.callMethod("event.clear", { node_id: this._nodeId }); } }; // Get the maximum number of days the manager will load. EventsManager.prototype.getMaximumDays = function() { return this._maxDays; }; // Changes the maximum number of days to load and loads the items. EventsManager.prototype.loadMaximumDays = function(days) { var self = this; var setMaximumDays = function() { self._maxDays = days; self.loadItems(); }; if(this.isLoading()) { // Call loadItems to get an extra defer to know when // the loading is done. this.loadItems().then(function() { setMaximumDays(); }); } else { setMaximumDays(); } }; // Factory that holds all created EventsManagers. function EventsManagerFactory() { // Holds a list of all EventsManager that have been created. this._managers = []; // Listen for notify events for the event object. var self = this; RegionConnection.registerNotifier("event", function(action, data) { self.onNotify(action, data); }); } // Gets the EventManager for the nodes with node_id. EventsManagerFactory.prototype._getManager = function(nodeId) { var i; for(i = 0; i < this._managers.length; i++) { if(this._managers[i]._nodeId === nodeId) { return this._managers[i]; } } return null; }; // Gets the EventManager for the nodes node_id. Creates a new manager // if one does not exist. EventsManagerFactory.prototype.getManager = function(nodeId) { var manager = this._getManager(nodeId); if(!angular.isObject(manager)) { // Not created so create it. manager = new EventsManager(nodeId, this); this._managers.push(manager); return manager; } return manager; }; // Destroy the EventManager. 
EventsManagerFactory.prototype.destroyManager = function(manager) { var idx = this._managers.indexOf(manager); if(idx >= 0) { this._managers.splice(idx, 1); } }; // Called when the RegionConnection gets a notification for an event. EventsManagerFactory.prototype.onNotify = function(action, data) { if(action === "delete") { // Send all delete actions to all managers. Only one will // remove the event with the given id. angular.forEach(this._managers, function(manager) { manager.onNotify(action, data); }); } else if (action === "create" || action === "update") { // Get the manager based on the node_id in data, and send // it the notification. var manager = this._getManager(data.node_id); if(angular.isObject(manager)) { manager.onNotify(action, data); } } }; return new EventsManagerFactory(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/fabrics.js0000644000000000000000000000315213056115004025075 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Fabric Manager * * Manages all of the fabrics in the browser. The manager uses the * RegionConnection to load the fabrics, update the fabrics, and listen for * notification events about fabrics. */ angular.module('MAAS').factory( 'FabricsManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', 'VLANsManager', function($q, $rootScope, RegionConnection, Manager, VLANsManager) { function FabricsManager() { Manager.call(this); this._pk = "id"; this._handler = "fabric"; // Listen for notify events for the fabric object. var self = this; RegionConnection.registerNotifier("fabric", function(action, data) { self.onNotify(action, data); }); } FabricsManager.prototype = new Manager(); // Return the VLAN objects that are part of this fabric. The returned // array is calculated on each call; you should not watch this array, // instead you should watch this function. FabricsManager.prototype.getVLANs = function(fabric) { var vlans = []; angular.forEach(fabric.vlan_ids, function(vlan_id) { var vlan = VLANsManager.getItemFromList(vlan_id); if(angular.isObject(vlan)) { vlans.push(vlan); } }); return vlans; }; return new FabricsManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/general.js0000644000000000000000000002450613056115004025107 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS General Manager * * Manager for general information from the region. The general handler on the * region side does not push information to the client. This manager uses * polling to grab this data periodically from the region. * * This manager provides different pieces of data and is structured * differently than a manager extending the Manager service. It still * provides the Manager service interface, allowing the ManagerHelperService * to load this manager. */ angular.module('MAAS').factory( 'GeneralManager', ['$q', '$timeout', 'RegionConnection', 'ErrorService', function($q, $timeout, RegionConnection, ErrorService) { // Constructor function GeneralManager() { // Holds the available endpoints and their data.
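            // Each entry below follows the same shape (descriptive sketch
            // matching the literals that follow):
            //
            //     {
            //         method: "general.<name>", // websocket method to call
            //         data: [] or {},           // shared, stable reference
            //         loaded: false,            // true after first load
            //         polling: false,           // true while being polled
            //         nextPromise: null,        // pending $timeout promise
            //         isEmpty: function,        // optional override
            //         replaceData: function     // optional override
            //     }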
this._data = { node_actions: { method: "general.node_actions", data: [], loaded: false, polling: false, nextPromise: null }, device_actions: { method: "general.device_actions", data: [], loaded: false, polling: false, nextPromise: null }, architectures: { method: "general.architectures", data: [], loaded: false, polling: false, nextPromise: null }, hwe_kernels: { method: "general.hwe_kernels", data: [], loaded: false, polling: false, nextPromise: null }, default_min_hwe_kernel: { method: "general.default_min_hwe_kernel", data: { text: '' }, loaded: false, polling: false, nextPromise: null, replaceData: function(oldData, newData) { oldData.text = newData; } }, osinfo: { method: "general.osinfo", data: {}, loaded: false, polling: false, nextPromise: null, isEmpty: function(data) { var osystems = data.osystems; return (angular.isUndefined(osystems) || osystems.length === 0); }, replaceData: function(oldData, newData) { angular.copy(newData, oldData); } }, bond_options: { method: "general.bond_options", data: {}, loaded: false, polling: false, nextPromise: null, replaceData: function(oldData, newData) { angular.copy(newData, oldData); } }, version: { method: "general.version", data: { text: null }, loaded: false, polling: false, nextPromise: null, replaceData: function(oldData, newData) { oldData.text = newData; } } }; // Amount of time in milliseconds the manager should wait to poll // for new data. this._pollTimeout = 10000; // Amount of time in milliseconds the manager should wait to poll // for new data when an error occurs. this._pollErrorTimeout = 3000; // Amount of time in milliseconds the manager should wait to poll // for new data when the retrieved data is empty. this._pollEmptyTimeout = 3000; // Set to true when the items list should reload upon re-connection // to the region. this._autoReload = false; } GeneralManager.prototype._getInternalData = function(name) { var data = this._data[name]; if(angular.isUndefined(data)) { throw new Error("Unknown data: " + name); } return data; }; // Return loaded data. GeneralManager.prototype.getData = function(name) { return this._getInternalData(name).data; }; // Return true when all data has been loaded. GeneralManager.prototype.isLoaded = function() { var loaded = true; angular.forEach(this._data, function(data) { if(!data.loaded) { loaded = false; } }); return loaded; }; // Return true when data has been loaded. GeneralManager.prototype.isDataLoaded = function(name) { return this._getInternalData(name).loaded; }; // Returns true when the manager is currently polling. GeneralManager.prototype.isPolling = function() { var polling = false; angular.forEach(this._data, function(data) { if(data.polling) { polling = true; } }); return polling; }; // Returns true when the manager is currently polling for that data. GeneralManager.prototype.isDataPolling = function(name) { return this._getInternalData(name).polling; }; // Starts the manager polling for data. GeneralManager.prototype.startPolling = function(name) { var data = this._getInternalData(name); if(!data.polling) { data.polling = true; this._poll(data); } }; // Stops the manager polling for data. GeneralManager.prototype.stopPolling = function(name) { var data = this._getInternalData(name); data.polling = false; if(angular.isObject(data.nextPromise)) { $timeout.cancel(data.nextPromise); data.nextPromise = null; } }; // Load the data from the region. 
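        // Illustrative usage sketch (not part of the original source): a
        // controller typically holds the stable data reference and polls
        // only while it is visible, e.g.:
        //
        //     $scope.osinfo = GeneralManager.getData("osinfo");
        //     GeneralManager.startPolling("osinfo");
        //     $scope.$on("$destroy", function() {
        //         GeneralManager.stopPolling("osinfo");
        //     });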
GeneralManager.prototype._loadData = function(data, raiseError) { var replaceData = data.replaceData; raiseError = raiseError || false; // Set default replaceData function if data doesn't provide its // own function. if(angular.isUndefined(replaceData)) { replaceData = function(oldData, newData) { oldData.length = 0; oldData.push.apply(oldData, newData); }; } return RegionConnection.callMethod(data.method).then( function(newData) { replaceData(data.data, newData); data.loaded = true; return data.data; }, function(error) { if(raiseError) { ErrorService.raiseError(error); } return error; }); }; GeneralManager.prototype._pollAgain = function(data, timeout) { var self = this; data.nextPromise = $timeout(function() { self._poll(data); }, timeout); }; // Polls for the data from the region. GeneralManager.prototype._poll = function(data) { var self = this; var isEmpty = data.isEmpty; // Set default isEmpty function if data doesn't provide its // own function. if(angular.isUndefined(isEmpty)) { isEmpty = function(data) { return data.length === 0; }; } // Can only poll if connected. if(!RegionConnection.isConnected()) { this._pollAgain(data, this._pollErrorTimeout); return; } return this._loadData(data, false).then(function(newData) { var pollTimeout = self._pollTimeout; if(isEmpty(data.data)) { pollTimeout = self._pollEmptyTimeout; } self._pollAgain(data, pollTimeout); return newData; }, function(error) { // Don't raise the error, just log it and try again. console.log(error); self._pollAgain(data, self._pollErrorTimeout); }); }; // Loads all the items. This is implemented so the ManagerHelperService // can work on this manager just like all the rest. GeneralManager.prototype.loadItems = function() { var self = this; var defer = $q.defer(); var waitingCount = Object.keys(this._data).length; var done = function() { waitingCount -= 1; if(waitingCount === 0) { defer.resolve(); } }; angular.forEach(this._data, function(data) { self._loadData(data, true).then(function() { done(); }); }); return defer.promise; }; // Enables auto reloading of the item list on connection to region. GeneralManager.prototype.enableAutoReload = function() { if(!this._autoReload) { this._autoReload = true; var self = this; this._reloadFunc = function() { self.loadItems(); }; RegionConnection.registerHandler("open", this._reloadFunc); } }; // Disables auto reloading of the item list on connection to region. GeneralManager.prototype.disableAutoReload = function() { if(this._autoReload) { RegionConnection.unregisterHandler("open", this._reloadFunc); this._reloadFunc = null; this._autoReload = false; } }; return new GeneralManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/nodes.js0000644000000000000000000003057413056115004024602 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Nodes Manager * * Manages all of the nodes in the browser. This manager is used for the * node listing and the node view page. The manager uses the RegionConnection * to load the nodes, update the nodes, and listen for notification events * about nodes.
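 *
 * Illustrative usage sketch (not part of the original source; the "deploy"
 * extra mirrors the one used in the unit tests):
 *
 *     NodesManager.performAction(node, "deploy", {osystem: "ubuntu"})
 *         .then(function() {
 *             // The node object itself is refreshed by NOTIFY events.
 *         });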
*/ angular.module('MAAS').factory( 'NodesManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function NodesManager() { Manager.call(this); this._pk = "system_id"; this._handler = "node"; this._metadataAttributes = { "status": null, "owner": null, "tags": null, "zone": function(node) { return node.zone.name; }, "subnets": null, "fabrics": null, "spaces": null, "storage_tags": null }; // Listen for notify events for the node object. var self = this; RegionConnection.registerNotifier("node", function(action, data) { self.onNotify(action, data); }); } NodesManager.prototype = new Manager(); // Create a node. NodesManager.prototype.create = function(node) { // We don't add the item to the list because a NOTIFY event will // add the node to the list. Adding it here will cause angular to // complain because the same object exists in the list. return RegionConnection.callMethod("node.create", node); }; // Perform the action on the node. NodesManager.prototype.performAction = function(node, action, extra) { if(!angular.isObject(extra)) { extra = {}; } return RegionConnection.callMethod("node.action", { "system_id": node.system_id, "action": action, "extra": extra }); }; // Check the power state for the node. NodesManager.prototype.checkPowerState = function(node) { return RegionConnection.callMethod("node.check_power", { "system_id": node.system_id }).then(function(state) { node.power_state = state; return state; }, function(error) { node.power_state = "error"; // Already been logged server side, but log it client // side so if they really care they can see why. console.log(error); // Return the state as error to the remaining callbacks. return "error"; }); }; // Create the physical interface on the node. NodesManager.prototype.createPhysicalInterface = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_physical", params); }; // Create the VLAN interface on the node. NodesManager.prototype.createVLANInterface = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_vlan", params); }; // Create the bond interface on the node. NodesManager.prototype.createBondInterface = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_bond", params); }; // Update the interface for the node. NodesManager.prototype.updateInterface = function( node, interface_id, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; params.interface_id = interface_id; return RegionConnection.callMethod( "node.update_interface", params); }; // Delete the interface for the node. NodesManager.prototype.deleteInterface = function( node, interface_id) { var params = { system_id: node.system_id, interface_id: interface_id }; return RegionConnection.callMethod( "node.delete_interface", params); }; // Create or update the link to the subnet for the interface. NodesManager.prototype.linkSubnet = function( node, interface_id, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; params.interface_id = interface_id; return RegionConnection.callMethod( "node.link_subnet", params); }; // Remove the link to the subnet for the interface.
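        // Illustrative sketch of the link/unlink pair (not part of the
        // original source; the params keys are hypothetical and defined
        // by the region's "node.link_subnet" handler):
        //
        //     NodesManager.linkSubnet(node, nic.id, {subnet: subnet.id});
        //     NodesManager.unlinkSubnet(node, nic.id, link.id);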
NodesManager.prototype.unlinkSubnet = function( node, interface_id, link_id) { var params = { system_id: node.system_id, interface_id: interface_id, link_id: link_id }; return RegionConnection.callMethod( "node.unlink_subnet", params); }; // Send the update information to the region. NodesManager.prototype.updateFilesystem = function( node, block_id, partition_id, fstype, mount_point) { var self = this; var method = this._handler + ".update_filesystem"; var params = { system_id: node.system_id, block_id: block_id, partition_id: partition_id, fstype: fstype, mount_point: mount_point }; return RegionConnection.callMethod(method, params); }; // Update the tags on a disk. NodesManager.prototype.updateDiskTags = function( node, block_id, tags) { var self = this; var method = this._handler + ".update_disk_tags"; var params = { system_id: node.system_id, block_id: block_id, tags: tags }; return RegionConnection.callMethod(method, params); }; // Delete the disk. NodesManager.prototype.deleteDisk = function( node, block_id) { var self = this; var method = this._handler + ".delete_disk"; var params = { system_id: node.system_id, block_id: block_id }; return RegionConnection.callMethod(method, params); }; // Delete the partition. NodesManager.prototype.deletePartition = function( node, partition_id) { var self = this; var method = this._handler + ".delete_partition"; var params = { system_id: node.system_id, partition_id: partition_id }; return RegionConnection.callMethod(method, params); }; // Delete the volume group. NodesManager.prototype.deleteVolumeGroup = function( node, volume_group_id) { var self = this; var method = this._handler + ".delete_volume_group"; var params = { system_id: node.system_id, volume_group_id: volume_group_id }; return RegionConnection.callMethod(method, params); }; // Delete a cache set. NodesManager.prototype.deleteCacheSet = function( node, cache_set_id) { var self = this; var method = this._handler + ".delete_cache_set"; var params = { system_id: node.system_id, cache_set_id: cache_set_id }; return RegionConnection.callMethod(method, params); }; // Create a new partition. NodesManager.prototype.createPartition = function( node, block_id, size, params) { if(!angular.isObject(params)) { params = {}; } var self = this; var method = this._handler + ".create_partition"; params.system_id = node.system_id; params.block_id = block_id; params.partition_size = size; return RegionConnection.callMethod(method, params); }; // Create a new cache set. NodesManager.prototype.createCacheSet = function( node, block_id, partition_id) { var self = this; var method = this._handler + ".create_cache_set"; var params = { system_id: node.system_id, block_id: block_id, partition_id: partition_id }; return RegionConnection.callMethod(method, params); }; // Create a new bcache device. NodesManager.prototype.createBcache = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_bcache", params); }; // Create a new RAID device. NodesManager.prototype.createRAID = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_raid", params); }; // Create a new volume group.
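        // Illustrative sketch (not part of the original source), assuming
        // the region resolves the promise with the created group:
        //
        //     NodesManager.createVolumeGroup(node, {name: "vg0"})
        //         .then(function(vg) {
        //             return NodesManager.createLogicalVolume(
        //                 node, vg.id, "lv0", size);
        //         });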
NodesManager.prototype.createVolumeGroup = function( node, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; return RegionConnection.callMethod( "node.create_volume_group", params); }; // Create a new logical volume. NodesManager.prototype.createLogicalVolume = function( node, volume_group_id, name, size, params) { if(!angular.isObject(params)) { params = {}; } var self = this; var method = this._handler + ".create_logical_volume"; params.system_id = node.system_id; params.volume_group_id = volume_group_id; params.name = name; params.size = size; return RegionConnection.callMethod(method, params); }; // Update a disk. NodesManager.prototype.updateDisk = function( node, block_id, params) { if(!angular.isObject(params)) { params = {}; } params.system_id = node.system_id; params.block_id = block_id; return RegionConnection.callMethod( "node.update_disk", params); }; // Set disk as the boot disk. NodesManager.prototype.setBootDisk = function( node, block_id) { var params = { system_id: node.system_id, block_id: block_id }; return RegionConnection.callMethod( "node.set_boot_disk", params); }; return new NodesManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/region.js0000644000000000000000000002566413056115004024753 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Region Connection * * Provides the websocket connection between the client and the MAAS regiond * service. */ angular.module('MAAS').factory( 'RegionConnection', ['$q', '$rootScope', '$timeout', '$window', '$cookies', function( $q, $rootScope, $timeout, $window, $cookies) { // Message types var MSG_TYPE = { REQUEST: 0, RESPONSE: 1, NOTIFY: 2 }; // Response types var RESPONSE_TYPE = { SUCCESS: 0, ERROR: 1 }; // Constructor function RegionConnection() { this.callbacks = {}; this.requestId = 0; this.url = null; this.websocket = null; this.connected = false; this.autoReconnect = true; this.retryTimeout = 5000; this.error = null; // Defer used for defaultConnect. If defaultConnect is called // quickly only the first one will start the connection. The // remaining will receive this defer. this.defaultConnectDefer = null; // List of functions to call when a WebSocket event occurs. Each // function will get the WebSocket event passed to it. this.handlers = { open: [], error: [], close: [] }; // Object containing fields with lists of functions. When // a NOTIFY message is received it will match the name to a field // in this object. If the field exists in the object the list // of functions will be called with the action and obj_id. this.notifiers = {}; } // Return a new request id. RegionConnection.prototype.newRequestId = function() { this.requestId += 1; return this.requestId; }; // Register event handler. RegionConnection.prototype.registerHandler = function (name, func) { if(!angular.isDefined(this.handlers[name])) { throw new Error("Invalid handler: " + name); } if(!angular.isFunction(func)) { throw new Error("Requires a function to register a handler."); } this.handlers[name].push(func); }; // Unregister event handler. RegionConnection.prototype.unregisterHandler = function (name, func) { if(!angular.isDefined(this.handlers[name])) { throw new Error("Invalid handler: " + name); } var idx = this.handlers[name].indexOf(func); if(idx >= 0) { this.handlers[name].splice(idx, 1); } }; // Register notification handler.
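        // Illustrative sketch (not part of the original source); this is
        // the pattern every manager factory above uses:
        //
        //     RegionConnection.registerNotifier("node",
        //         function(action, data) {
        //             // "action" is "create", "update" or "delete".
        //         });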
RegionConnection.prototype.registerNotifier = function(name, func) { if(!angular.isFunction(func)) { throw new Error("Requires a function to register a notifier."); } if(angular.isUndefined(this.notifiers[name])) { this.notifiers[name] = []; } this.notifiers[name].push(func); }; // Unregister notification handler. RegionConnection.prototype.unregisterNotifier = function(name, func) { if(angular.isUndefined(this.notifiers[name])) { return; } var idx = this.notifiers[name].indexOf(func); if(idx >= 0) { this.notifiers[name].splice(idx, 1); } }; // Return true if currently connected to region. RegionConnection.prototype.isConnected = function() { return this.connected; }; // Builds the websocket connection. RegionConnection.prototype.buildSocket = function(url) { return new WebSocket(url); }; // Opens the websocket connection. RegionConnection.prototype.connect = function() { this.url = this._buildUrl(); this.autoReconnect = true; this.websocket = this.buildSocket(this.url); var self = this; this.websocket.onopen = function(evt) { self.connected = true; angular.forEach(self.handlers.open, function(func) { func(evt); }); }; this.websocket.onerror = function(evt) { angular.forEach(self.handlers.error, function(func) { func(evt); }); }; this.websocket.onclose = function(evt) { self.connected = false; self.error = "Unable to connect to: " + self.url.split("?")[0]; angular.forEach(self.handlers.close, function(func) { func(evt); }); if(self.autoReconnect) { $timeout(function() { self.connect(); }, self.retryTimeout); } }; this.websocket.onmessage = function(evt) { self.onMessage(angular.fromJson(evt.data)); }; }; // Closes the websocket connection. RegionConnection.prototype.close = function() { this.autoReconnect = false; this.websocket.close(); this.websocket = null; }; // Return the protocol used for the websocket connection. RegionConnection.prototype._getProtocol = function() { return $window.location.protocol; }; // Return connection url to websocket from current location and // html options. RegionConnection.prototype._buildUrl = function() { var host = $window.location.hostname; var port = $window.location.port; var path = $window.location.pathname; var proto = 'ws'; if (this._getProtocol() === 'https:') { proto = 'wss'; } // Port can be overridden by data-websocket-port in the base // element. var base = angular.element("base"); if(angular.isDefined(base)) { var newPort = base.data("websocket-port"); if(angular.isDefined(newPort)) { port = newPort; } } // Append final '/' if missing from end of path. if(path[path.length - 1] !== '/') { path += '/'; } // Build the URL. Include the :port only if it has a value. var url = proto + "://" + host; if(angular.isString(port) && port.length > 0){ url += ":" + port; } url += path + "ws"; // Include the csrftoken in the URL if it's defined. var csrftoken = $cookies.csrftoken; if(angular.isDefined(csrftoken)) { url += '?csrftoken=' + encodeURIComponent(csrftoken); } return url; }; // Opens the default websocket connection. RegionConnection.prototype.defaultConnect = function() { // Already been called but the connection has not been completed. if(angular.isObject(this.defaultConnectDefer)) { return this.defaultConnectDefer.promise; } // Already connected. var defer; if(this.isConnected()) { // Create a new defer as the defaultConnectDefer would // have already been resolved. defer = $q.defer(); // Cannot resolve the defer inline as it hasn't been given // back to the caller. It will be called in the next loop.
$timeout(defer.resolve); return defer.promise; } // Start the connection. var self = this, opened, errored; defer = this.defaultConnectDefer = $q.defer(); opened = function(evt) { self.defaultConnectDefer = null; self.unregisterHandler("open", opened); self.unregisterHandler("error", errored); $rootScope.$apply(defer.resolve(evt)); }; errored = function(evt) { self.defaultConnectDefer = null; self.unregisterHandler("open", opened); self.unregisterHandler("error", errored); $rootScope.$apply(defer.reject(evt)); }; this.registerHandler("open", opened); this.registerHandler("error", errored); this.connect(); return defer.promise; }; // Called when a message is received. RegionConnection.prototype.onMessage = function(msg) { // Response if(msg.type === MSG_TYPE.RESPONSE) { this.onResponse(msg); // Notify } else if(msg.type === MSG_TYPE.NOTIFY) { this.onNotify(msg); } }; // Called when a response message is received. RegionConnection.prototype.onResponse = function(msg) { // Grab the registered defer from the callbacks list. var defer = this.callbacks[msg.request_id]; if(angular.isDefined(defer)) { if(msg.rtype === RESPONSE_TYPE.SUCCESS) { // Resolve the defer inside of the digest cycle, so any // update to an object or collection will trigger a // watcher. $rootScope.$apply(defer.resolve(msg.result)); } else if(msg.rtype === RESPONSE_TYPE.ERROR) { // Reject the defer since an error occurred. $rootScope.$apply(defer.reject(msg.error)); } // Remove the defer from the callback list. delete this.callbacks[msg.request_id]; } }; // Called when a notify message is received. RegionConnection.prototype.onNotify = function(msg) { var handlers = this.notifiers[msg.name]; if(angular.isArray(handlers)) { angular.forEach(handlers, function(handler) { handler(msg.action, msg.data); }); } }; // Call method on the region. RegionConnection.prototype.callMethod = function(method, params) { var defer = $q.defer(); var request_id = this.newRequestId(); var request = { type: MSG_TYPE.REQUEST, request_id: request_id, method: method, params: params }; this.callbacks[request_id] = defer; this.websocket.send(angular.toJson(request)); return defer.promise; }; return new RegionConnection(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/spaces.js0000644000000000000000000000316613056115004024747 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Space Manager * * Manages all of the spaces in the browser. The manager uses the * RegionConnection to load the spaces, update the spaces, and listen for * notification events about spaces. */ angular.module('MAAS').factory( 'SpacesManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', 'SubnetsManager', function($q, $rootScope, RegionConnection, Manager, SubnetsManager) { function SpacesManager() { Manager.call(this); this._pk = "id"; this._handler = "space"; // Listen for notify events for the space object. var self = this; RegionConnection.registerNotifier("space", function(action, data) { self.onNotify(action, data); }); } SpacesManager.prototype = new Manager(); // Return the Subnet objects that are part of this space. The returned // array is calculated on each call; you should not watch this array, // instead you should watch this function.
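        // For example (illustrative sketch, not part of the original
        // source):
        //
        //     $scope.$watchCollection(
        //         function() {
        //             return SpacesManager.getSubnets($scope.space);
        //         },
        //         function(subnets) { /* react to changes */ });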
SpacesManager.prototype.getSubnets = function(space) { var subnets = []; angular.forEach(space.subnet_ids, function(subnet_id) { var subnet = SubnetsManager.getItemFromList(subnet_id); if(angular.isObject(subnet)) { subnets.push(subnet); } }); return subnets; }; return new SpacesManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/subnets.js0000644000000000000000000000201513056115004025146 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Subnet Manager * * Manages all of the subnets in the browser. The manager uses the * RegionConnection to load the subnets, update the subnets, and listen for * notification events about subnets. */ angular.module('MAAS').factory( 'SubnetsManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function SubnetsManager() { Manager.call(this); this._pk = "id"; this._handler = "subnet"; // Listen for notify events for the subnet object. var self = this; RegionConnection.registerNotifier("subnet", function(action, data) { self.onNotify(action, data); }); } SubnetsManager.prototype = new Manager(); return new SubnetsManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tags.js0000644000000000000000000000263313056115004024425 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Tag Manager * * Manages all of the tags in the browser. The manager uses the * RegionConnection to load the tags, update the tags, and listen for * notification events about tags. */ angular.module('MAAS').factory( 'TagsManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function TagsManager() { Manager.call(this); this._pk = "id"; this._handler = "tag"; // Listen for notify events for the tag object. var self = this; RegionConnection.registerNotifier("tag", function(action, data) { self.onNotify(action, data); }); } TagsManager.prototype = new Manager(); // Helper for autocomplete that will return an array of tag names that // contain the query text. TagsManager.prototype.autocomplete = function(query) { var matching = []; angular.forEach(this._items, function(item) { if(item.name.indexOf(query) > -1) { matching.push(item.name); } }); return matching; }; return new TagsManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/0000755000000000000000000000000013056115004024267 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/users.js0000644000000000000000000000657213056115004024636 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS User Manager * * Manages all of the users in the browser. The manager uses the * RegionConnection to load the users, update the users, and listen for * notification events about users. */ angular.module('MAAS').factory( 'UsersManager', ['RegionConnection', 'Manager', 'ErrorService', function(RegionConnection, Manager, ErrorService) { function UsersManager() { Manager.call(this); this._pk = "id"; this._handler = "user"; // Holds the authenticated user for the connection. this._authUser = null; // Listen for notify events for the user object.
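        // Illustrative usage sketch (not part of the original source):
        // once ManagerHelperService.loadManager(UsersManager) has
        // resolved, callers can read the logged-in user, e.g.:
        //
        //     var user = UsersManager.getAuthUser();
        //     var keyCount = UsersManager.getSSHKeyCount();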
var self = this; RegionConnection.registerNotifier("user", function(action, data) { self.onNotify(action, data); }); } UsersManager.prototype = new Manager(); // Get the authenticated user for the connection. UsersManager.prototype.getAuthUser = function() { return this._authUser; }; // Return true if the authenticated user has uploaded at // least one SSH key. UsersManager.prototype.getSSHKeyCount = function() { authuser = this._authUser; if(!angular.isObject(authuser)) { return 0; } return authuser.sshkeys_count; }; // Load the authencticated user. UsersManager.prototype._loadAuthUser = function() { var self = this; return RegionConnection.callMethod("user.auth_user", {}).then( function(user) { if(angular.isObject(self._authUser)) { // Copy the user into the authUser. This keeps the // reference the same, not requiring another call to // getAuthUser. angular.copy(user, self._authUser); } else { self._authUser = user; } return self._authUser; }, function(error) { ErrorService.raiseError(error); }); }; UsersManager.prototype._replaceItem = function(item) { Manager.prototype._replaceItem.call(this, item); // Update the authenticated user if updated item has the // same primary key. if(angular.isObject(this._authUser) && this._authUser[this._pk] === item[this._pk]) { // Copy the item into the authUser. This keeps the reference // the same, not requiring another call to getAuthUser. angular.copy(item, this._authUser); } }; UsersManager.prototype.loadItems = function() { // Load the auth user when all the items are loaded as well. this._loadAuthUser(); return Manager.prototype.loadItems.call(this); }; UsersManager.prototype.reloadItems = function() { // Load the auth user when all the items are reloaded as well. this._loadAuthUser(); return Manager.prototype.reloadItems.call(this); }; return new UsersManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/vlans.js0000644000000000000000000000314613056115004024612 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS VLAN Manager * * Manages all of the VLANs in the browser. The manager uses the * RegionConnection to load the VLANs, update the VLANs, and listen for * notification events about VLANs. */ angular.module('MAAS').factory( 'VLANsManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', 'SubnetsManager', function($q, $rootScope, RegionConnection, Manager, SubnetsManager) { function VLANsManager() { Manager.call(this); this._pk = "id"; this._handler = "vlan"; // Listen for notify events for the vlan object. var self = this; RegionConnection.registerNotifier("vlan", function(action, data) { self.onNotify(action, data); }); } VLANsManager.prototype = new Manager(); // Return the Subnet objects that are part of this VLAN. The returned // array is calculated on each call, you should not watch this array, // instead you should watch this function. VLANsManager.prototype.getSubnets = function(vlan) { var subnets = []; angular.forEach(vlan.subnet_ids, function(subnet_id) { var subnet = SubnetsManager.getItemFromList(subnet_id); if(angular.isObject(subnet)) { subnets.push(subnet); } }); return subnets; }; return new VLANsManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/zones.js0000644000000000000000000000176513056115004024632 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. 
This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Zone Manager * * Manages all of the zones in the browser. The manager uses the * RegionConnection to load the zones, update the zones, and listen for * notification events about zones. */ angular.module('MAAS').factory( 'ZonesManager', ['$q', '$rootScope', 'RegionConnection', 'Manager', function( $q, $rootScope, RegionConnection, Manager) { function ZonesManager() { Manager.call(this); this._pk = "id"; this._handler = "zone"; // Listen for notify events for the zone object. var self = this; RegionConnection.registerNotifier("zone", function(action, data) { self.onNotify(action, data); }); } ZonesManager.prototype = new Manager(); return new ZonesManager(); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_clusters.js0000644000000000000000000000116213056115004027530 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ClustersManager. */ describe("ClustersManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the ClustersManager. var ClustersManager; beforeEach(inject(function($injector) { ClustersManager = $injector.get("ClustersManager"); })); it("set requires attributes", function() { expect(ClustersManager._pk).toBe("id"); expect(ClustersManager._handler).toBe("cluster"); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_devices.js0000644000000000000000000000603013056115004027305 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for DevicesManager. */ describe("DevicesManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the DevicesManager and RegionConnection factory. var DevicesManager, RegionConnection, webSocket; beforeEach(inject(function($injector) { DevicesManager = $injector.get("DevicesManager"); RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Open the connection to the region before each test. beforeEach(function(done) { RegionConnection.registerHandler("open", function() { done(); }); RegionConnection.connect(""); }); // Make a random device. 
function makeDevice(selected) { var device = { system_id: makeName("system_id"), name: makeName("name"), owner: makeName("owner") }; if(angular.isDefined(selected)) { device.$selected = selected; } return device; } it("set requires attributes", function() { expect(DevicesManager._pk).toBe("system_id"); expect(DevicesManager._handler).toBe("device"); expect(Object.keys(DevicesManager._metadataAttributes)).toEqual( ["owner", "tags", "zone"]); }); describe("performAction", function() { it("calls device.action with system_id and action", function(done) { var device = makeDevice(); webSocket.returnData.push(makeFakeResponse("deleted")); DevicesManager.performAction(device, "delete").then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("device.action"); expect(sentObject.params.system_id).toBe(device.system_id); expect(sentObject.params.action).toBe("delete"); expect(sentObject.params.extra).toEqual({}); done(); }); }); it("calls device.action with extra", function(done) { var device = makeDevice(); var extra = { osystem: makeName("os") }; webSocket.returnData.push(makeFakeResponse("deployed")); DevicesManager.performAction(device, "deploy", extra).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("device.action"); expect(sentObject.params.system_id).toBe(device.system_id); expect(sentObject.params.action).toBe("deploy"); expect(sentObject.params.extra).toEqual(extra); done(); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_events.js0000644000000000000000000001725013056115004027175 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for EventsManagerFactory. */ describe("EventsManagerFactory", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $q, $rootScope; beforeEach(inject(function($injector) { $q = $injector.get("$q"); $rootScope = $injector.get("$rootScope"); })); // Load the EventsManagerFactory and RegionConnection. 
var EventsManagerFactory, RegionConnection; beforeEach(inject(function($injector) { EventsManagerFactory = $injector.get("EventsManagerFactory"); RegionConnection = $injector.get("RegionConnection"); })); describe("_getManager", function() { it("returns null when no manager with nodeId exists", function() { expect(EventsManagerFactory._getManager(0)).toBeNull(); }); it("returns object from _managers with nodeId", function() { var nodeId = makeInteger(0, 100); var fakeManager = { _nodeId: nodeId }; EventsManagerFactory._managers.push(fakeManager); expect(EventsManagerFactory._getManager(nodeId)).toBe(fakeManager); }); }); describe("getManager", function() { it("returns new manager when nodeId doesn't exist", function() { var nodeId = makeInteger(0, 100); var manager = EventsManagerFactory.getManager(nodeId); expect(manager._nodeId).toBe(nodeId); expect(EventsManagerFactory._managers).toEqual([manager]); }); it("returns same manager when nodeId exists", function() { var nodeId = makeInteger(0, 100); var manager = EventsManagerFactory.getManager(nodeId); expect(EventsManagerFactory.getManager(nodeId)).toBe(manager); }); }); describe("destroyManager", function() { it("removes manager from _managers", function() { var nodeId = makeInteger(0, 100); var manager = EventsManagerFactory.getManager(nodeId); EventsManagerFactory.destroyManager(manager); expect(EventsManagerFactory._managers).toEqual([]); }); }); describe("onNotify", function() { it("sends delete notification to all managers", function() { var i, id = 0; var managers = []; for(i = 0; i < 3; i++) { var manager = EventsManagerFactory.getManager(id++); spyOn(manager, "onNotify"); managers.push(manager); } var deleteId = makeInteger(0, 100); EventsManagerFactory.onNotify("delete", deleteId); angular.forEach(managers, function(manager) { expect(manager.onNotify).toHaveBeenCalledWith( "delete", deleteId); }); }); it("sends create notification to manager with nodeId", function() { var i, id = 0; var otherManagers = []; for(i = 0; i < 3; i++) { var manager = EventsManagerFactory.getManager(id++); spyOn(manager, "onNotify"); otherManagers.push(manager); } var calledManager = EventsManagerFactory.getManager(id); spyOn(calledManager, "onNotify"); var evt = { node_id: id }; EventsManagerFactory.onNotify("create", evt); angular.forEach(otherManagers, function(manager) { expect(manager.onNotify).not.toHaveBeenCalled(); }); expect(calledManager.onNotify).toHaveBeenCalledWith("create", evt); }); it("sends update notification to manager with nodeId", function() { var i, id = 0; var otherManagers = []; for(i = 0; i < 3; i++) { var manager = EventsManagerFactory.getManager(id++); spyOn(manager, "onNotify"); otherManagers.push(manager); } var calledManager = EventsManagerFactory.getManager(id); spyOn(calledManager, "onNotify"); var evt = { node_id: id }; EventsManagerFactory.onNotify("update", evt); angular.forEach(otherManagers, function(manager) { expect(manager.onNotify).not.toHaveBeenCalled(); }); expect(calledManager.onNotify).toHaveBeenCalledWith("update", evt); }); }); describe("EventsManager", function() { var nodeId, eventManager; beforeEach(function() { nodeId = makeInteger(0, 100); eventManager = EventsManagerFactory.getManager(nodeId); }); it("sets required attributes", function() { expect(eventManager._pk).toBe("id"); expect(eventManager._handler).toBe("event"); expect(eventManager._nodeId).toBe(nodeId); expect(eventManager._factory).toBe(EventsManagerFactory);
expect(eventManager._maxDays).toBe(30); }); describe("_initBatchLoadParameters", function() { it("returns parameters with node_id and max_days", function() { expect(eventManager._initBatchLoadParameters()).toEqual({ "node_id": nodeId, "max_days": 30 }); }); }); describe("destroy", function() { it("calls _factory.destroyManager", function() { spyOn(EventsManagerFactory, "destroyManager"); eventManager.destroy(); expect( EventsManagerFactory.destroyManager).toHaveBeenCalledWith( eventManager); }); it("calls event.clear on the RegionConnection if loaded", function() { spyOn(eventManager, "isLoaded").and.returnValue(true); spyOn(RegionConnection, "callMethod"); eventManager.destroy(); expect(RegionConnection.callMethod).toHaveBeenCalledWith( "event.clear", {"node_id": nodeId}); }); }); describe("getMaximumDays", function() { it("returns _maxDays", function() { var sentinel = {}; eventManager._maxDays = sentinel; expect(eventManager.getMaximumDays()).toBe(sentinel); }); }); describe("loadMaximumDays", function() { it("sets _maxDays and calls loadItems", function() { var maxDays = makeInteger(30, 90); spyOn(eventManager, "loadItems"); eventManager.loadMaximumDays(maxDays); expect(eventManager._maxDays).toBe(maxDays); expect(eventManager.loadItems).toHaveBeenCalled(); }); it("doesn't set _maxDays until loadItems resolves", function() { var maxDays = makeInteger(31, 90); var defer = $q.defer(); spyOn(eventManager, "loadItems").and.returnValue( defer.promise); spyOn(eventManager, "isLoading").and.returnValue(true); eventManager.loadMaximumDays(maxDays); expect(eventManager._maxDays).toBe(30); defer.resolve(); $rootScope.$digest(); expect(eventManager._maxDays).toBe(maxDays); expect(eventManager.loadItems.calls.count()).toBe(2); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_fabrics.js0000644000000000000000000000303013056115004027271 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for FabricsManager. */ describe("FabricsManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the FabricsManager. var FabricsManager, VLANsManager; beforeEach(inject(function($injector) { FabricsManager = $injector.get("FabricsManager"); VLANsManager = $injector.get("VLANsManager"); })); // Make a fake VLAN. function makeVLAN() { return { id: makeInteger(0, 5000), name: makeName("vlan") }; } it("set requires attributes", function() { expect(FabricsManager._pk).toBe("id"); expect(FabricsManager._handler).toBe("fabric"); }); describe("getVLANs", function() { it("returns VLAN objects", function() { var i, vlans = [], fabric_vlans = []; for(i = 0; i < 6; i++) { var vlan = makeVLAN(); vlans.push(vlan); if(i < 3) { fabric_vlans.push(vlan); } } var vlan_ids = []; angular.forEach(fabric_vlans, function(vlan) { vlan_ids.push(vlan.id); }); VLANsManager._items = vlans; var fabric = { "vlan_ids": vlan_ids }; expect(fabric_vlans).toEqual(FabricsManager.getVLANs(fabric)); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_general.js0000644000000000000000000004710513056115004027310 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for GeneralManager. */ describe("GeneralManager", function() { // Load the MAAS module.
beforeEach(module("MAAS")); // Grab the needed angular pieces. var $rootScope, $timeout, $q; beforeEach(inject(function($injector) { $rootScope = $injector.get("$rootScope"); $timeout = $injector.get("$timeout"); $q = $injector.get("$q"); })); // Load the GeneralManager, RegionConnection, and ErrorService factory. var GeneralManager, RegionConnection, ErrorService, webSocket; beforeEach(inject(function($injector) { GeneralManager = $injector.get("GeneralManager"); RegionConnection = $injector.get("RegionConnection"); ErrorService = $injector.get("ErrorService"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Open the connection to the region before each test. beforeEach(function(done) { RegionConnection.registerHandler("open", function() { done(); }); RegionConnection.connect(""); }); it("sets timeout values", function() { expect(GeneralManager._pollTimeout).toBe(10000); expect(GeneralManager._pollErrorTimeout).toBe(3000); expect(GeneralManager._pollEmptyTimeout).toBe(3000); }); it("autoReload off by default", function() { expect(GeneralManager._autoReload).toBe(false); }); it("_data has expected keys", function() { expect(Object.keys(GeneralManager._data)).toEqual( ["node_actions", "device_actions", "architectures", "hwe_kernels", "default_min_hwe_kernel", "osinfo", "bond_options", "version"]); }); it("_data.node_actions has correct data", function() { var node_actions = GeneralManager._data.node_actions; expect(node_actions.method).toBe("general.node_actions"); expect(node_actions.data).toEqual([]); expect(node_actions.loaded).toBe(false); expect(node_actions.polling).toBe(false); expect(node_actions.nextPromise).toBeNull(); }); it("_data.device_actions has correct data", function() { var device_actions = GeneralManager._data.device_actions; expect(device_actions.method).toBe("general.device_actions"); expect(device_actions.data).toEqual([]); expect(device_actions.loaded).toBe(false); expect(device_actions.polling).toBe(false); expect(device_actions.nextPromise).toBeNull(); }); it("_data.architectures has correct data", function() { var architectures = GeneralManager._data.architectures; expect(architectures.method).toBe("general.architectures"); expect(architectures.data).toEqual([]); expect(architectures.loaded).toBe(false); expect(architectures.polling).toBe(false); expect(architectures.nextPromise).toBeNull(); }); it("_data.hwe_kernels has correct data", function() { var hwe_kernels = GeneralManager._data.hwe_kernels; expect(hwe_kernels.method).toBe("general.hwe_kernels"); expect(hwe_kernels.data).toEqual([]); expect(hwe_kernels.loaded).toBe(false); expect(hwe_kernels.polling).toBe(false); expect(hwe_kernels.nextPromise).toBeNull(); }); it("_data.default_min_hwe_kernels has correct data", function() { var default_min_hwe_kernel = GeneralManager._data.default_min_hwe_kernel; expect(default_min_hwe_kernel.method).toBe( "general.default_min_hwe_kernel"); expect(default_min_hwe_kernel.data).toEqual({text: ''}); expect(default_min_hwe_kernel.loaded).toBe(false); expect(default_min_hwe_kernel.polling).toBe(false); expect(default_min_hwe_kernel.nextPromise).toBeNull(); }); it("_data.osinfo has correct data", function() { var osinfo = GeneralManager._data.osinfo; expect(osinfo.method).toBe("general.osinfo"); expect(osinfo.data).toEqual({}); expect(osinfo.loaded).toBe(false); expect(osinfo.polling).toBe(false); expect(osinfo.nextPromise).toBeNull(); 
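// Unlike the list-backed entries above, osinfo's data is an object, so it also carries isEmpty/replaceData helpers (asserted next).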
expect(angular.isFunction(osinfo.isEmpty)).toBe(true); expect(angular.isFunction(osinfo.replaceData)).toBe(true); }); it("_data.bond_options has correct data", function() { var bond_options = GeneralManager._data.bond_options; expect(bond_options.method).toBe("general.bond_options"); expect(bond_options.data).toEqual({}); expect(bond_options.loaded).toBe(false); expect(bond_options.polling).toBe(false); expect(bond_options.nextPromise).toBeNull(); expect(angular.isFunction(bond_options.replaceData)).toBe(true); }); it("_data.version has correct data", function() { var version = GeneralManager._data.version; expect(version.method).toBe("general.version"); expect(version.data).toEqual({ text: null }); expect(version.loaded).toBe(false); expect(version.polling).toBe(false); expect(version.nextPromise).toBeNull(); expect(angular.isFunction(version.replaceData)).toBe(true); }); describe("_getInternalData", function() { it("raises error for unknown data", function() { var name = makeName("name"); expect(function() { GeneralManager._getInternalData(name); }).toThrow(new Error("Unknown data: " + name)); }); it("returns data object", function() { expect(GeneralManager._getInternalData("node_actions")).toBe( GeneralManager._data.node_actions); }); }); describe("getData", function() { it("returns data from internal data", function() { expect(GeneralManager.getData("node_actions")).toBe( GeneralManager._data.node_actions.data); }); }); describe("isLoaded", function() { it("returns false if all false", function() { expect(GeneralManager.isLoaded()).toBe(false); }); it("returns false if one false", function() { GeneralManager._data.node_actions.loaded = true; GeneralManager._data.device_actions.loaded = true; GeneralManager._data.architectures.loaded = true; GeneralManager._data.hwe_kernels.loaded = true; GeneralManager._data.osinfo.loaded = true; GeneralManager._data.bond_options.loaded = true; GeneralManager._data.version.loaded = false; expect(GeneralManager.isLoaded()).toBe(false); }); it("returns true if all true", function() { GeneralManager._data.node_actions.loaded = true; GeneralManager._data.device_actions.loaded = true; GeneralManager._data.architectures.loaded = true; GeneralManager._data.hwe_kernels.loaded = true; GeneralManager._data.default_min_hwe_kernel.loaded = true; GeneralManager._data.osinfo.loaded = true; GeneralManager._data.bond_options.loaded = true; GeneralManager._data.version.loaded = true; expect(GeneralManager.isLoaded()).toBe(true); }); }); describe("isDataLoaded", function() { it("returns loaded from internal data", function() { var loaded = {}; GeneralManager._data.node_actions.loaded = loaded; expect(GeneralManager.isDataLoaded("node_actions")).toBe(loaded); }); }); describe("isPolling", function() { it("returns false if all false", function() { expect(GeneralManager.isPolling()).toBe(false); }); it("returns true if one true", function() { GeneralManager._data.node_actions.polling = true; GeneralManager._data.architectures.polling = false; GeneralManager._data.hwe_kernels.polling = false; GeneralManager._data.osinfo.polling = false; expect(GeneralManager.isPolling()).toBe(true); }); it("returns true if all true", function() { GeneralManager._data.node_actions.polling = true; GeneralManager._data.architectures.polling = true; GeneralManager._data.hwe_kernels.polling = true; GeneralManager._data.osinfo.polling = true; expect(GeneralManager.isPolling()).toBe(true); }); }); describe("isDataPolling", function() { it("returns polling from internal data", function() { var 
polling = {}; GeneralManager._data.node_actions.polling = polling; expect(GeneralManager.isDataPolling("node_actions")).toBe(polling); }); }); describe("startPolling", function() { it("sets polling to true and calls _poll", function() { spyOn(GeneralManager, "_poll"); GeneralManager.startPolling("node_actions"); expect(GeneralManager._data.node_actions.polling).toBe(true); expect(GeneralManager._poll).toHaveBeenCalledWith( GeneralManager._data.node_actions); }); it("does nothing if already polling", function() { spyOn(GeneralManager, "_poll"); GeneralManager._data.node_actions.polling = true; GeneralManager.startPolling("node_actions"); expect(GeneralManager._poll).not.toHaveBeenCalled(); }); }); describe("stopPolling", function() { it("sets polling to false and cancels promise", function() { spyOn($timeout, "cancel"); var nextPromise = {}; GeneralManager._data.node_actions.polling = true; GeneralManager._data.node_actions.nextPromise = nextPromise; GeneralManager.stopPolling("node_actions"); expect(GeneralManager._data.node_actions.polling).toBe(false); expect($timeout.cancel).toHaveBeenCalledWith(nextPromise); }); }); describe("_loadData", function() { it("calls callMethod with method", function() { spyOn(RegionConnection, "callMethod").and.returnValue( $q.defer().promise); GeneralManager._loadData(GeneralManager._data.node_actions); expect(RegionConnection.callMethod).toHaveBeenCalledWith( GeneralManager._data.node_actions.method); }); it("sets loaded to true", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); GeneralManager._loadData(GeneralManager._data.node_actions); defer.resolve([]); $rootScope.$digest(); expect(GeneralManager._data.node_actions.loaded).toBe(true); }); it("sets node_actions data without changing reference", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); var actionsData = GeneralManager._data.node_actions.data; var newData = [makeName("action")]; GeneralManager._loadData(GeneralManager._data.node_actions); defer.resolve(newData); $rootScope.$digest(); expect(GeneralManager._data.node_actions.data).toEqual(newData); expect(GeneralManager._data.node_actions.data).toBe(actionsData); }); it("sets osinfo data without changing reference", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); var osinfoData = GeneralManager._data.osinfo.data; var newData = { data: makeName("action") }; GeneralManager._loadData(GeneralManager._data.osinfo); defer.resolve(newData); $rootScope.$digest(); expect(GeneralManager._data.osinfo.data).toEqual(newData); expect(GeneralManager._data.osinfo.data).toBe(osinfoData); }); it("calls raiseError if raiseError is true", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); spyOn(ErrorService, "raiseError"); var error = makeName("error"); GeneralManager._loadData(GeneralManager._data.node_actions, true); defer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).toHaveBeenCalledWith(error); }); it("doesnt calls raiseError if raiseError is false", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); spyOn(ErrorService, "raiseError"); var error = makeName("error"); GeneralManager._loadData(GeneralManager._data.node_actions, false); defer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).not.toHaveBeenCalled(); }); it("doesnt calls 
raiseError if raiseError is undefined", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); spyOn(ErrorService, "raiseError"); var error = makeName("error"); GeneralManager._loadData(GeneralManager._data.node_actions); defer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).not.toHaveBeenCalled(); }); }); describe("_pollAgain", function() { it("sets nextPromise on data", function() { GeneralManager._pollAgain(GeneralManager._data.node_actions); expect( GeneralManager._data.node_actions.nextPromise).not.toBeNull(); }); }); describe("_poll", function() { it("calls _pollAgain with error timeout if not connected", function() { spyOn(RegionConnection, "isConnected").and.returnValue(false); spyOn(GeneralManager, "_pollAgain"); GeneralManager._poll(GeneralManager._data.node_actions); expect(GeneralManager._pollAgain).toHaveBeenCalledWith( GeneralManager._data.node_actions, GeneralManager._pollErrorTimeout); }); it("calls _loadData with raiseError false", function() { spyOn(GeneralManager, "_loadData").and.returnValue( $q.defer().promise); GeneralManager._poll(GeneralManager._data.node_actions); expect(GeneralManager._loadData).toHaveBeenCalledWith( GeneralManager._data.node_actions, false); }); it("calls _pollAgain with empty timeout for node_actions", function() { var defer = $q.defer(); spyOn(GeneralManager, "_pollAgain"); spyOn(GeneralManager, "_loadData").and.returnValue(defer.promise); GeneralManager._poll(GeneralManager._data.node_actions); defer.resolve([]); $rootScope.$digest(); expect(GeneralManager._pollAgain).toHaveBeenCalledWith( GeneralManager._data.node_actions, GeneralManager._pollEmptyTimeout); }); it("calls _pollAgain with empty timeout for osinfo", function() { var defer = $q.defer(); spyOn(GeneralManager, "_pollAgain"); spyOn(GeneralManager, "_loadData").and.returnValue(defer.promise); GeneralManager._poll(GeneralManager._data.osinfo); defer.resolve({}); $rootScope.$digest(); expect(GeneralManager._pollAgain).toHaveBeenCalledWith( GeneralManager._data.osinfo, GeneralManager._pollEmptyTimeout); }); it("calls _pollAgain with timeout for node_actions", function() { var defer = $q.defer(); spyOn(GeneralManager, "_pollAgain"); spyOn(GeneralManager, "_loadData").and.returnValue(defer.promise); var node_actions = [makeName("action")]; GeneralManager._data.node_actions.data = node_actions; GeneralManager._poll(GeneralManager._data.node_actions); defer.resolve(node_actions); $rootScope.$digest(); expect(GeneralManager._pollAgain).toHaveBeenCalledWith( GeneralManager._data.node_actions, GeneralManager._pollTimeout); }); it("calls _pollAgain with error timeout on reject", function() { var defer = $q.defer(); spyOn(GeneralManager, "_pollAgain"); spyOn(GeneralManager, "_loadData").and.returnValue(defer.promise); var error = makeName("error"); spyOn(console, "log"); GeneralManager._poll(GeneralManager._data.node_actions); defer.reject(error); $rootScope.$digest(); expect(console.log).toHaveBeenCalledWith(error); expect(GeneralManager._pollAgain).toHaveBeenCalledWith( GeneralManager._data.node_actions, GeneralManager._pollErrorTimeout); }); }); describe("loadItems", function() { it("calls _loadData for all data", function() { spyOn(GeneralManager, "_loadData").and.returnValue( $q.defer().promise); GeneralManager.loadItems(); expect(GeneralManager._loadData.calls.count()).toBe(8); }); it("resolve defer once all resolve", function(done) { var defers = [ $q.defer(), $q.defer(), $q.defer(), $q.defer(), $q.defer(), 
$q.defer(), $q.defer(), $q.defer() ]; var i = 0; spyOn(GeneralManager, "_loadData").and.callFake(function() { return defers[i++].promise; }); GeneralManager.loadItems().then(function() { done(); }); angular.forEach(defers, function(defer) { defer.resolve(); $rootScope.$digest(); }); }); }); describe("enableAutoReload", function() { it("does nothing if already enabled", function() { spyOn(RegionConnection, "registerHandler"); GeneralManager._autoReload = true; GeneralManager.enableAutoReload(); expect(RegionConnection.registerHandler).not.toHaveBeenCalled(); }); it("adds handler and sets autoReload to true", function() { spyOn(RegionConnection, "registerHandler"); GeneralManager.enableAutoReload(); expect(RegionConnection.registerHandler).toHaveBeenCalled(); expect(GeneralManager._autoReload).toBe(true); }); }); describe("disableAutoReload", function() { it("does nothing if already disabled", function() { spyOn(RegionConnection, "unregisterHandler"); GeneralManager._autoReload = false; GeneralManager.disableAutoReload(); expect(RegionConnection.unregisterHandler).not.toHaveBeenCalled(); }); it("removes handler and sets autoReload to false", function() { spyOn(RegionConnection, "unregisterHandler"); GeneralManager._autoReload = true; GeneralManager.disableAutoReload(); expect(RegionConnection.unregisterHandler).toHaveBeenCalled(); expect(GeneralManager._autoReload).toBe(false); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_nodes.js0000644000000000000000000010111113056115004026767 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for NodesManager. */ describe("NodesManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the NodesManager and RegionConnection factory. var NodesManager, RegionConnection, webSocket; beforeEach(inject(function($injector) { NodesManager = $injector.get("NodesManager"); RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Open the connection to the region before each test. beforeEach(function(done) { RegionConnection.registerHandler("open", function() { done(); }); RegionConnection.connect(""); }); // Make a random node. 
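// Only the fields these tests rely on are faked; $selected is attached only when explicitly requested.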
function makeNode(selected) { var node = { system_id: makeName("system_id"), name: makeName("name"), status: makeName("status"), owner: makeName("owner") }; if(angular.isDefined(selected)) { node.$selected = selected; } return node; } it("set requires attributes", function() { expect(NodesManager._pk).toBe("system_id"); expect(NodesManager._handler).toBe("node"); expect(Object.keys(NodesManager._metadataAttributes)).toEqual( ["status", "owner", "tags", "zone", "subnets", "fabrics", "spaces", "storage_tags"]); }); describe("create", function() { it("calls node.create with node", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse(node)); NodesManager.create(node).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create"); expect(sentObject.params).toEqual(node); done(); }); }); }); describe("performAction", function() { it("calls node.action with system_id and action", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.performAction(node, "delete").then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.action"); expect(sentObject.params.system_id).toBe(node.system_id); expect(sentObject.params.action).toBe("delete"); expect(sentObject.params.extra).toEqual({}); done(); }); }); it("calls node.action with extra", function(done) { var node = makeNode(); var extra = { osystem: makeName("os") }; webSocket.returnData.push(makeFakeResponse("deployed")); NodesManager.performAction(node, "deploy", extra).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.action"); expect(sentObject.params.system_id).toBe(node.system_id); expect(sentObject.params.action).toBe("deploy"); expect(sentObject.params.extra).toEqual(extra); done(); }); }); }); describe("checkPowerState", function() { it("calls node.check_power with system_id", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse("on")); NodesManager.checkPowerState(node).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.check_power"); expect(sentObject.params.system_id).toBe(node.system_id); done(); }); }); it("sets power_state to results", function(done) { var node = makeNode(); var power_state = makeName("state"); webSocket.returnData.push(makeFakeResponse(power_state)); NodesManager.checkPowerState(node).then(function(state) { expect(node.power_state).toBe(power_state); expect(state).toBe(power_state); done(); }); }); it("sets power_state to error on error and logs error", function(done) { var node = makeNode(); var error = makeName("error"); spyOn(console, "log"); webSocket.returnData.push(makeFakeResponse(error, true)); NodesManager.checkPowerState(node).then(function(state) { expect(node.power_state).toBe("error"); expect(state).toBe("error"); expect(console.log).toHaveBeenCalledWith(error); done(); }); }); }); describe("createPhysicalInterface", function() { it("calls node.create_physical with system_id without params", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createPhysicalInterface(node).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_physical"); expect(sentObject.params.system_id).toBe( node.system_id); done(); }); }); it("calls node.create_physical 
with params", function(done) { var node = makeNode(); var params = { vlan: makeInteger(0, 100) }; webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createPhysicalInterface(node, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_physical"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.vlan).toBe(params.vlan); done(); }); }); }); describe("createVLANInterface", function() { it("calls node.create_vlan with system_id without params", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createVLANInterface(node).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_vlan"); expect(sentObject.params.system_id).toBe( node.system_id); done(); }); }); it("calls node.create_vlan with params", function(done) { var node = makeNode(); var params = { vlan: makeInteger(0, 100) }; webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createVLANInterface(node, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_vlan"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.vlan).toBe(params.vlan); done(); }); }); }); describe("createBondInterface", function() { it("calls node.create_bond with system_id without params", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createBondInterface(node).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_bond"); expect(sentObject.params.system_id).toBe( node.system_id); done(); }); }); it("calls node.create_bond with params", function(done) { var node = makeNode(); var params = { vlan: makeInteger(0, 100) }; webSocket.returnData.push(makeFakeResponse("created")); NodesManager.createBondInterface(node, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_bond"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.vlan).toBe(params.vlan); done(); }); }); }); describe("updateInterface", function() { it("calls node.update_interface with system_id and interface_id", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("updated")); NodesManager.updateInterface(node, interface_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.update_interface"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); done(); }); }); it("calls node.update_interface with params", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); var params = { name: makeName("eth0") }; webSocket.returnData.push(makeFakeResponse("updated")); NodesManager.updateInterface(node, interface_id, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.update_interface"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); expect(sentObject.params.name).toBe(params.name); done(); }); }); }); describe("deleteInterface", function() { it("calls 
node.delete_interface with correct params", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.deleteInterface(node, interface_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.delete_interface"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); done(); }); }); }); describe("linkSubnet", function() { it("calls node.link_subnet with system_id and interface_id", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("updated")); NodesManager.linkSubnet(node, interface_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.link_subnet"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); done(); }); }); it("calls node.link_subnet with params", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); var params = { name: makeName("eth0") }; webSocket.returnData.push(makeFakeResponse("updated")); NodesManager.linkSubnet(node, interface_id, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.link_subnet"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); expect(sentObject.params.name).toBe(params.name); done(); }); }); }); describe("unlinkSubnet", function() { it("calls node.unlink_subnet with correct params", function(done) { var node = makeNode(), interface_id = makeInteger(0, 100); var link_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("updated")); NodesManager.unlinkSubnet(node, interface_id, link_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.unlink_subnet"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.interface_id).toBe( interface_id); expect(sentObject.params.link_id).toBe( link_id); done(); }); }); }); describe("updateFilesystem", function() { it("calls node.update_filesystem", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.updateFilesystem( fakeNode.system_id, makeName("block_id"), makeName("partition_id"), makeName("fstype"), makeName("mount_point")).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.update_filesystem"); done(); }); }); it("calls node.update_filesystem with params", function(done) { var fakeNode = makeNode(); var block_id = makeName("block_id"); var partition_id = makeName("partition_id"); var fstype = makeName("fstype"); var mount_point = makeName("mount_point"); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.updateFilesystem( fakeNode, block_id, partition_id, fstype, mount_point).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.update_filesystem"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(block_id); expect(sentObject.params.partition_id).toBe(partition_id); expect(sentObject.params.fstype).toBe(fstype); expect(sentObject.params.mount_point).toBe(mount_point); done(); }); }); }); 
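// The storage tests below all follow the same pattern: queue a canned response on the mock websocket, invoke the manager method, then decode and inspect the JSON frame that was actually sent.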
describe("updateDiskTags", function() { it("calls node.update_disk_tags", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.updateDiskTags( fakeNode, makeName("block_id"), [ makeName("tag") ]).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.update_disk_tags"); done(); }); }); it("calls node.update_disk_tags with params", function(done) { var fakeNode = makeNode(); var block_id = makeName("block_id"); var tags = [ makeName("tag") ]; webSocket.returnData.push(makeFakeResponse(null)); NodesManager.updateDiskTags( fakeNode, block_id, tags).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.update_disk_tags"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(block_id); expect(sentObject.params.tags[0]).toBe(tags[0]); done(); }); }); }); describe("deleteDisk", function() { it("calls node.delete_disk with correct params", function(done) { var node = makeNode(), block_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.deleteDisk(node, block_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.delete_disk"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.block_id).toBe( block_id); done(); }); }); }); describe("deletePartition", function() { it("calls node.delete_partition with correct params", function(done) { var node = makeNode(), partition_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.deletePartition(node, partition_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.delete_partition"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.partition_id).toBe( partition_id); done(); }); }); }); describe("deleteVolumeGroup", function() { it("calls node.delete_volume_group with correct params", function(done) { var node = makeNode(), volume_group_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.deleteVolumeGroup(node, volume_group_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.delete_volume_group"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.volume_group_id).toBe( volume_group_id); done(); }); }); }); describe("deleteCacheSet", function() { it("calls node.delete_cache_set with correct params", function(done) { var node = makeNode(), cache_set_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.deleteCacheSet(node, cache_set_id).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.delete_cache_set"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.cache_set_id).toBe( cache_set_id); done(); }); }); }); describe("createPartition", function() { it("calls node.create_partition with correct params", function(done) { var node = makeNode(), block_id = makeInteger(0, 100); var size = makeInteger(1024 * 1024, 1024 * 1024 * 1024); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.createPartition(node, block_id, size).then( function() { var sentObject = angular.fromJson( 
webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_partition"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.block_id).toBe( block_id); expect(sentObject.params.partition_size).toBe( size); done(); }); }); it("calls node.create_partition with extra params", function(done) { var params = { fstype: "ext4" }; var node = makeNode(), block_id = makeInteger(0, 100); var size = makeInteger(1024 * 1024, 1024 * 1024 * 1024); webSocket.returnData.push(makeFakeResponse("deleted")); NodesManager.createPartition(node, block_id, size, params).then( function() { var sentObject = angular.fromJson( webSocket.sentData[0]); expect(sentObject.method).toBe( "node.create_partition"); expect(sentObject.params.system_id).toBe( node.system_id); expect(sentObject.params.block_id).toBe( block_id); expect(sentObject.params.partition_size).toBe( size); expect(sentObject.params.fstype).toBe("ext4"); done(); }); }); }); describe("createCacheSet", function() { it("calls node.create_cache_set", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createCacheSet( fakeNode, "", "").then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_cache_set"); done(); }); }); it("calls node.create_cache_set with params", function(done) { var fakeNode = makeNode(); var block_id = makeName("block_id"); var partition_id = makeName("block_id"); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createCacheSet( fakeNode, block_id, partition_id).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_cache_set"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(block_id); expect(sentObject.params.partition_id).toBe(partition_id); done(); }); }); }); describe("createBcache", function() { it("calls node.create_bcache", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createBcache( fakeNode, {}).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_bcache"); done(); }); }); it("calls node.create_bcache with params", function(done) { var fakeNode = makeNode(); var params = { block_id: makeName("block_id"), partition_id: makeName("block_id") }; webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createBcache( fakeNode, params).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_bcache"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(params.block_id); expect(sentObject.params.partition_id).toBe( params.partition_id); done(); }); }); }); describe("createRAID", function() { it("calls node.create_raid", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createRAID( fakeNode, {}).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_raid"); done(); }); }); it("calls node.create_raid with params", function(done) { var fakeNode = makeNode(); var params = { block_id: makeName("block_id"), partition_id: makeName("block_id") }; webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createRAID( fakeNode, params).then( function() { var sentObject = 
angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_raid"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(params.block_id); expect(sentObject.params.partition_id).toBe( params.partition_id); done(); }); }); }); describe("createVolumeGroup", function() { it("calls node.create_volume_group", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createVolumeGroup( fakeNode, {}).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_volume_group"); done(); }); }); it("calls node.create_volume_group with params", function(done) { var fakeNode = makeNode(); var params = { block_id: makeName("block_id"), partition_id: makeName("block_id") }; webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createVolumeGroup( fakeNode, params).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_volume_group"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe(params.block_id); expect(sentObject.params.partition_id).toBe( params.partition_id); done(); }); }); }); describe("createLogicalVolume", function() { it("calls node.create_logical_volume", function(done) { var fakeNode = makeNode(); var volume_group_id = makeInteger(0, 100); var name = makeName("lv"); var size = makeInteger(1000 * 1000, 1000 * 1000 * 1000); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createLogicalVolume( fakeNode, volume_group_id, name, size).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_logical_volume"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.volume_group_id).toBe( volume_group_id); expect(sentObject.params.name).toBe(name); expect(sentObject.params.size).toBe(size); done(); }); }); it("calls node.create_logical_volume with extra", function(done) { var fakeNode = makeNode(); var volume_group_id = makeInteger(0, 100); var name = makeName("lv"); var size = makeInteger(1000 * 1000, 1000 * 1000 * 1000); var extra = { fstype: "ext4" }; webSocket.returnData.push(makeFakeResponse(null)); NodesManager.createLogicalVolume( fakeNode, volume_group_id, name, size, extra).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.create_logical_volume"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.volume_group_id).toBe( volume_group_id); expect(sentObject.params.name).toBe(name); expect(sentObject.params.size).toBe(size); expect(sentObject.params.fstype).toBe("ext4"); done(); }); }); }); describe("setBootDisk", function() { it("calls node.set_boot_disk", function(done) { var fakeNode = makeNode(); var block_id = makeInteger(0, 100); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.setBootDisk( fakeNode, block_id).then( function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.set_boot_disk"); expect(sentObject.params.system_id).toBe(fakeNode.system_id); expect(sentObject.params.block_id).toBe( block_id); done(); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_region.js0000644000000000000000000004702313056115004027155 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. 
This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for RegionConnection. */ describe("RegionConnection", function() { // Load the MAAS module to test. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $timeout, $rootScope, $q, $cookies, $window; beforeEach(inject(function($injector) { $timeout = $injector.get("$timeout"); $rootScope = $injector.get("$rootScope"); $q = $injector.get("$q"); $cookies = $injector.get("$cookies"); $window = $injector.get("$window"); })); // Load the RegionConnection factory. var RegionConnection, webSocket; beforeEach(inject(function($injector) { RegionConnection = $injector.get("RegionConnection"); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); describe("newRequestId", function() { it("starts at 1", function() { expect(RegionConnection.newRequestId()).toBe(1); }); it("increments by 1", function() { expect(RegionConnection.newRequestId()).toBe(1); expect(RegionConnection.newRequestId()).toBe(2); expect(RegionConnection.newRequestId()).toBe(3); }); }); describe("registerHandler", function() { var testHandler1, testHandler2; beforeEach(function() { testHandler1 = function() {}; testHandler2 = function() {}; }); it("throws error on unknown handler", function() { expect(function() { RegionConnection.registerHandler("unknown", function() {}); }).toThrow(new Error("Invalid handler: unknown")); }); it("throws error non-functions", function() { expect(function() { RegionConnection.registerHandler("open", {}); }).toThrow(new Error( "Requires a function to register a handler.")); }); it("registers open handlers", function() { RegionConnection.registerHandler("open", testHandler1); RegionConnection.registerHandler("open", testHandler2); expect(RegionConnection.handlers.open).toEqual( [testHandler1, testHandler2]); }); it("registers error handlers", function() { RegionConnection.registerHandler("error", testHandler1); RegionConnection.registerHandler("error", testHandler2); expect(RegionConnection.handlers.error).toEqual( [testHandler1, testHandler2]); }); it("registers close handlers", function() { RegionConnection.registerHandler("close", testHandler1); RegionConnection.registerHandler("close", testHandler2); expect(RegionConnection.handlers.close).toEqual( [testHandler1, testHandler2]); }); }); describe("unregisterHandler", function() { var testHandler1, testHandler2; beforeEach(function() { testHandler1 = function() {}; testHandler2 = function() {}; }); it("throws error on unknown handler", function() { expect(function() { RegionConnection.unregisterHandler("unknown", function() {}); }).toThrow(new Error("Invalid handler: unknown")); }); it("ignores unregistered handler", function() { RegionConnection.registerHandler("open", testHandler1); RegionConnection.unregisterHandler("open", testHandler2); expect(RegionConnection.handlers.open).toEqual( [testHandler1]); }); it("unregisters open handler", function() { RegionConnection.registerHandler("open", testHandler1); RegionConnection.unregisterHandler("open", testHandler1); expect(RegionConnection.handlers.open.length).toBe(0); }); it("unregisters error handler", function() { RegionConnection.registerHandler("error", testHandler1); RegionConnection.unregisterHandler("error", testHandler1); expect(RegionConnection.handlers.error.length).toBe(0); }); it("unregisters close handler", function() { 
RegionConnection.registerHandler("close", testHandler1); RegionConnection.unregisterHandler("close", testHandler1); expect(RegionConnection.handlers.close.length).toBe(0); }); }); describe("registerNotifier", function() { it("throws error non-functions", function() { expect(function() { RegionConnection.registerNotifier("testing", {}); }).toThrow(new Error( "Requires a function to register a notifier.")); }); it("adds handler", function() { var handler = function() {}; RegionConnection.registerNotifier("testing", handler); expect(RegionConnection.notifiers.testing).toEqual([handler]); }); it("adds multiple handlers", function() { var handler1 = function() {}; var handler2 = function() {}; RegionConnection.registerNotifier("testing", handler1); RegionConnection.registerNotifier("testing", handler2); expect(RegionConnection.notifiers.testing).toEqual( [handler1, handler2]); }); }); describe("unregisterNotifier", function() { it("removes handler", function() { var handler = function() {}; RegionConnection.registerNotifier("testing", handler); RegionConnection.unregisterNotifier("testing", handler); expect(RegionConnection.notifiers.testing.length).toBe(0); }); it("removes only one handler", function() { var handler1 = function() {}; var handler2 = function() {}; RegionConnection.registerNotifier("testing", handler1); RegionConnection.registerNotifier("testing", handler2); RegionConnection.unregisterNotifier("testing", handler1); expect(RegionConnection.notifiers.testing).toEqual( [handler2]); }); it("does nothing if notification name never registered", function() { RegionConnection.unregisterNotifier("testing", {}); expect(RegionConnection.notifiers.testing).toBeUndefined(); }); it("does nothing if handler never registered", function() { var handler1 = function() {}; var handler2 = function() {}; RegionConnection.registerNotifier("testing", handler1); RegionConnection.unregisterNotifier("testing", handler2); expect(RegionConnection.notifiers.testing).toEqual( [handler1]); }); }); describe("isConnected", function() { it("returns true", function() { RegionConnection.connected = true; expect(RegionConnection.isConnected()).toBe(true); }); it("returns false", function() { RegionConnection.connected = false; expect(RegionConnection.isConnected()).toBe(false); }); }); describe("connect", function() { var url = "http://test-url", buildUrlSpy; beforeEach(function() { buildUrlSpy = spyOn(RegionConnection, "_buildUrl"); buildUrlSpy.and.returnValue(url); }); it("sets url", function() { RegionConnection.connect(); expect(RegionConnection.url).toBe(url); }); it("sets autoReconnect to true", function() { RegionConnection.autoReconnect = false; RegionConnection.connect(); expect(RegionConnection.autoReconnect).toBe(true); }); it("calls buildSocket with url", function() { RegionConnection.connect(); expect(RegionConnection.buildSocket).toHaveBeenCalledWith(url); }); it("sets websocket handlers", function() { RegionConnection.connect(); expect(webSocket.onopen).not.toBeNull(); expect(webSocket.onerror).not.toBeNull(); expect(webSocket.onclose).not.toBeNull(); }); it("sets connect to true when onopen called", function() { RegionConnection.connect(); webSocket.onopen({}); expect(RegionConnection.connected).toBe(true); }); it("calls error handler when onerror called", function(done) { var evt_obj = {}; RegionConnection.registerHandler("error", function(evt) { expect(evt).toBe(evt_obj); done(); }); RegionConnection.connect(); webSocket.onerror(evt_obj); }); it("sets connect to false when onclose called", 
function() { RegionConnection.autoReconnect = false; RegionConnection.connect(); webSocket.onclose({}); expect(RegionConnection.connected).toBe(false); }); it("calls close handler when onclose called", function(done) { var evt_obj = {}; RegionConnection.autoReconnect = false; RegionConnection.connect(); RegionConnection.registerHandler("close", function(evt) { expect(evt).toBe(evt_obj); done(); }); webSocket.onclose(evt_obj); }); it("onclose calls connect when autoReconnect is true", function() { RegionConnection.connect(); var new_url = "http://new-url"; buildUrlSpy.and.returnValue(new_url); webSocket.onclose({}); $timeout.flush(); expect(RegionConnection.url).toBe(new_url); }); it("onclose sets error", function() { RegionConnection.connect(); webSocket.onclose(); $timeout.flush(); expect(RegionConnection.error).toBe( "Unable to connect to: " + url); }); it("calls onMessage when onmessage called", function() { var sampleData = { sample: "data" }; spyOn(RegionConnection, "onMessage"); RegionConnection.connect(); webSocket.onmessage({ data: angular.toJson(sampleData) }); expect(RegionConnection.onMessage).toHaveBeenCalledWith( sampleData); }); }); describe("close", function() { beforeEach(function() { spyOn(webSocket, "close"); }); it("sets autoReconnect to false", function() { RegionConnection.connect(""); RegionConnection.close(); expect(RegionConnection.autoReconnect).toBe(false); }); it("calls close on websocket", function() { RegionConnection.connect(""); RegionConnection.close(); expect(webSocket.close).toHaveBeenCalled(); }); it("sets websocket to null", function() { RegionConnection.connect(""); RegionConnection.close(); expect(RegionConnection.websocket).toBeNull(); }); }); describe("_getProtocol", function() { it("returns window protocol", function() { expect(RegionConnection._getProtocol()).toBe( $window.location.protocol); }); }); describe("_buildUrl", function() { it("returns url from $window.location", function() { expect(RegionConnection._buildUrl()).toBe( "ws://" + $window.location.hostname + ":" + $window.location.port + $window.location.pathname + "/ws"); }); it("uses wss connection if https protocol", function() { spyOn(RegionConnection, "_getProtocol").and.returnValue("https:"); expect(RegionConnection._buildUrl()).toBe( "wss://" + $window.location.hostname + ":" + $window.location.port + $window.location.pathname + "/ws"); }); it("uses port from data-websocket-port", function() { var port = "8888"; var fakeElement = { data: function(attr) { expect(attr).toBe("websocket-port"); return port; } }; spyOn(angular, "element").and.returnValue(fakeElement); expect(RegionConnection._buildUrl()).toBe( "ws://" + $window.location.hostname + ":" + port + $window.location.pathname + "/ws"); // Reset angular.element so the test will complete successfully as // angular.mock requires the actual call to work for afterEach. angular.element.and.callThrough(); }); it("doesn't include ':' when no port given", function() { if(angular.isString($window.location.port) && $window.location.port.length > 0) { expect(RegionConnection._buildUrl()).toBe( "ws://" + $window.location.hostname + ":" + $window.location.port + $window.location.pathname + "/ws"); } else { expect(RegionConnection._buildUrl()).toBe( "ws://" + $window.location.hostname + $window.location.pathname + "/ws"); } }); it("includes csrftoken if cookie defined", function() { var csrftoken = makeName('csrftoken'); // No need to organize a cleanup: cookies are reset before each // test.
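// (The token should end up in the websocket URL as a query parameter, asserted below.)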
$cookies.csrftoken = csrftoken; expect(RegionConnection._buildUrl()).toBe( "ws://" + $window.location.hostname + ":" + $window.location.port + $window.location.pathname + "/ws" + '?csrftoken=' + csrftoken); }); }); describe("defaultConnect", function() { it("resolve defer if already connected", function(done) { RegionConnection.connected = true; RegionConnection.defaultConnect().then(function() { done(); }); $timeout.flush(); }); it("resolves defer once open handler is called", function(done) { RegionConnection.defaultConnect().then(function() { expect(RegionConnection.handlers.open).toEqual([]); expect(RegionConnection.handlers.error).toEqual([]); done(); }); }); it("rejects defer once error handler is called", function(done) { spyOn(RegionConnection, "connect"); RegionConnection.defaultConnect().then(null, function() { expect(RegionConnection.handlers.open).toEqual([]); expect(RegionConnection.handlers.error).toEqual([]); done(); }); angular.forEach(RegionConnection.handlers.error, function(func) { func(); }); }); }); describe("onMessage", function() { it("calls onResponse for a response message", function() { spyOn(RegionConnection, "onResponse"); var msg = { type: 1 }; RegionConnection.onMessage(msg); expect(RegionConnection.onResponse).toHaveBeenCalledWith(msg); }); it("calls onNotify for a notify message", function() { spyOn(RegionConnection, "onNotify"); var msg = { type: 2 }; RegionConnection.onMessage(msg); expect(RegionConnection.onNotify).toHaveBeenCalledWith(msg); }); }); describe("onResponse", function() { it("resolves defer inside of rootScope", function(done) { var result = {}; var requestId = RegionConnection.newRequestId(); var defer = $q.defer(); defer.promise.then(function(msg_result) { expect(msg_result).toBe(result); done(); }); spyOn($rootScope, "$apply").and.callThrough(); RegionConnection.callbacks[requestId] = defer; RegionConnection.onResponse({ type: 1, rtype: 0, request_id: requestId, result: result }); expect($rootScope.$apply).toHaveBeenCalled(); expect(RegionConnection.callbacks[requestId]).toBeUndefined(); }); it("rejects defer inside of rootScope", function(done) { var error = {}; var requestId = RegionConnection.newRequestId(); var defer = $q.defer(); defer.promise.then(null, function(msg_error) { expect(msg_error).toBe(error); done(); }); spyOn($rootScope, "$apply").and.callThrough(); RegionConnection.callbacks[requestId] = defer; RegionConnection.onResponse({ type: 1, rtype: 1, request_id: requestId, error: error }); expect($rootScope.$apply).toHaveBeenCalled(); expect(RegionConnection.callbacks[requestId]).toBeUndefined(); }); }); describe("onNotify", function() { it("calls handler for notification", function(done) { var name = "test"; var action = "update"; var data = 12; RegionConnection.registerNotifier( name, function(msg_action, msg_data) { expect(msg_action).toBe(action); expect(msg_data).toBe(data); done(); }); RegionConnection.onNotify({ type: 2, name: name, action: action, data: data }); }); it("calls all handlers for notification", function() { var name = "test"; var handler1 = jasmine.createSpy(); var handler2 = jasmine.createSpy(); RegionConnection.registerNotifier(name, handler1); RegionConnection.registerNotifier(name, handler2); RegionConnection.onNotify({ type: 2, name: name, action: "delete", data: 12 }); expect(handler1).toHaveBeenCalled(); expect(handler2).toHaveBeenCalled(); }); }); describe("callMethod", function() { var promise, defer; beforeEach(function() { promise = {}; defer = { promise: promise }; spyOn($q, 
"defer").and.returnValue(defer); spyOn(webSocket, "send"); RegionConnection.connect(""); }); it("adds defer to callbacks", function() { RegionConnection.callMethod("testing_method", {}); expect( RegionConnection.callbacks[RegionConnection.requestId]).toBe( defer); }); it("returns defer promise", function() { expect( RegionConnection.callMethod("testing_method", {})).toBe( promise); }); it("sends JSON encoded message", function() { var method = "testing_method"; var params = { "arg1": 1, "arg2": 2}; RegionConnection.callMethod(method, params); expect(webSocket.send).toHaveBeenCalledWith(angular.toJson({ type: 0, request_id: RegionConnection.requestId, method: method, params: params })); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_spaces.js0000644000000000000000000000307613056115004027150 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for SpacesManager. */ describe("SpacesManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the SpacesManager. var SpacesManager, SubnetsManager; beforeEach(inject(function($injector) { SpacesManager = $injector.get("SpacesManager"); SubnetsManager = $injector.get("SubnetsManager"); })); // Make a fake subnet. function makeSubnet() { return { id: makeInteger(0, 5000), name: makeName("subnet") }; } it("set requires attributes", function() { expect(SpacesManager._pk).toBe("id"); expect(SpacesManager._handler).toBe("space"); }); describe("getSubnets", function() { it("returns subnet objects", function() { var i, subnets = [], space_subnets = []; for(i = 0; i < 6; i++) { var subnet = makeSubnet(); subnets.push(subnet); if(i < 3) { space_subnets.push(subnet); } } var subnet_ids = []; angular.forEach(space_subnets, function(subnet) { subnet_ids.push(subnet.id); }); SubnetsManager._items = subnets; var space = { "subnet_ids": subnet_ids }; expect(space_subnets).toEqual(SpacesManager.getSubnets(space)); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_subnets.js0000644000000000000000000000115113056115004027345 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for SubnetsManager. */ describe("SubnetsManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the SubnetsManager. var SubnetsManager; beforeEach(inject(function($injector) { SubnetsManager = $injector.get("SubnetsManager"); })); it("set requires attributes", function() { expect(SubnetsManager._pk).toBe("id"); expect(SubnetsManager._handler).toBe("subnet"); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_tags.js0000644000000000000000000000222213056115004026620 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for TagsManager. */ describe("TagsManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the TagsManager. 
var TagsManager; beforeEach(inject(function($injector) { TagsManager = $injector.get("TagsManager"); })); it("set requires attributes", function() { expect(TagsManager._pk).toBe("id"); expect(TagsManager._handler).toBe("tag"); }); describe("autocomplete", function() { it("returns array of matching tags", function() { var tags = [ "apple", "banana", "cake", "donut" ]; angular.forEach(tags, function(tag) { TagsManager._items.push({ name: tag }); }); expect(TagsManager.autocomplete("a")).toEqual( ["apple", "banana", "cake"]); expect(TagsManager.autocomplete("do")).toEqual( ["donut"]); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_users.js0000644000000000000000000001113513056115004027026 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for UsersManager. */ describe("UsersManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $q, $rootScope; beforeEach(inject(function($injector) { $q = $injector.get("$q"); $rootScope = $injector.get("$rootScope"); })); // Load the UsersManager, RegionConnection, and ErrorService. var UsersManager, RegionConnection, ErrorService; beforeEach(inject(function($injector) { UsersManager = $injector.get("UsersManager"); RegionConnection = $injector.get("RegionConnection"); ErrorService = $injector.get("ErrorService"); })); // Make a fake user. var userId = 0; function makeUser() { return { id: userId++, username: makeName("username"), first_name: makeName("first_name"), last_name: makeName("last_name"), email: makeName("email"), is_superuser: false }; } it("set requires attributes", function() { expect(UsersManager._pk).toBe("id"); expect(UsersManager._handler).toBe("user"); expect(UsersManager._authUser).toBeNull(); }); describe("getAuthUser", function() { it("returns _authUser", function() { var user = {}; UsersManager._authUser = user; expect(UsersManager.getAuthUser()).toBe(user); }); }); describe("_loadAuthUser", function() { it("calls callMethod with user.auth_user", function() { spyOn(RegionConnection, "callMethod").and.returnValue( $q.defer().promise); UsersManager._loadAuthUser(); expect(RegionConnection.callMethod).toHaveBeenCalledWith( "user.auth_user", {}); }); it("sets _authUser to resolved user", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); UsersManager._loadAuthUser(); var user = makeUser(); defer.resolve(user); $rootScope.$digest(); expect(UsersManager._authUser).toBe(user); }); it("doesnt change _authUser reference when user resolved", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); UsersManager._loadAuthUser(); var firstUser = makeUser(); UsersManager._authUser = firstUser; var secondUser = makeUser(); defer.resolve(secondUser); $rootScope.$digest(); expect(UsersManager._authUser).toBe(firstUser); expect(UsersManager._authUser).toEqual(secondUser); }); it("raises error on error", function() { var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); spyOn(ErrorService, "raiseError"); UsersManager._loadAuthUser(); var error = makeName("error"); defer.reject(error); $rootScope.$digest(); expect(ErrorService.raiseError).toHaveBeenCalledWith(error); }); }); describe("_replaceItem", function() { it("replaces the _authUser without changing reference", function() { var 
firstUser = makeUser(); UsersManager._authUser = firstUser; var secondUser = makeUser(); secondUser.id = firstUser.id; UsersManager._replaceItem(secondUser); expect(UsersManager._authUser).toBe(firstUser); expect(UsersManager._authUser).toEqual(secondUser); }); }); describe("loadItems", function() { it("calls _loadAuthUser", function() { spyOn(RegionConnection, "callMethod").and.returnValue( $q.defer().promise); spyOn(UsersManager, "_loadAuthUser"); UsersManager.loadItems(); expect(UsersManager._loadAuthUser).toHaveBeenCalled(); }); }); describe("reloadItems", function() { it("calls _loadAuthUser", function() { spyOn(RegionConnection, "callMethod").and.returnValue( $q.defer().promise); spyOn(UsersManager, "_loadAuthUser"); UsersManager.reloadItems(); expect(UsersManager._loadAuthUser).toHaveBeenCalled(); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_vlans.js0000644000000000000000000000305613056115004027013 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for VLANsManager. */ describe("VLANsManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the VLANsManager. var VLANsManager, SubnetsManager; beforeEach(inject(function($injector) { VLANsManager = $injector.get("VLANsManager"); SubnetsManager = $injector.get("SubnetsManager"); })); // Make a fake subnet. function makeSubnet() { return { id: makeInteger(0, 5000), name: makeName("subnet") }; } it("set requires attributes", function() { expect(VLANsManager._pk).toBe("id"); expect(VLANsManager._handler).toBe("vlan"); }); describe("getSubnets", function() { it("returns subnet objects", function() { var i, subnets = [], vlan_subnets = []; for(i = 0; i < 6; i++) { var subnet = makeSubnet(); subnets.push(subnet); if(i < 3) { vlan_subnets.push(subnet); } } var subnet_ids = []; angular.forEach(vlan_subnets, function(subnet) { subnet_ids.push(subnet.id); }); SubnetsManager._items = subnets; var vlan = { "subnet_ids": subnet_ids }; expect(vlan_subnets).toEqual(VLANsManager.getSubnets(vlan)); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/factories/tests/test_zones.js0000644000000000000000000000112713056115004027023 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ZonesManager. */ describe("ZonesManager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the ZonesManager. var ZonesManager; beforeEach(inject(function($injector) { ZonesManager = $injector.get("ZonesManager"); })); it("set requires attributes", function() { expect(ZonesManager._pk).toBe("id"); expect(ZonesManager._handler).toBe("zone"); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/by_fabric.js0000644000000000000000000000110013056115004025064 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Filter VLANs by Fabric. 
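 * A usage sketch (hypothetical data, not from the test suite): given vlans = [{id: 1, fabric: 0}, {id: 2, fabric: 1}], calling $filter('filterByFabric')(vlans, {id: 0}) returns only the first VLAN, because the filter compares each vlan.fabric against fabric.id.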
*/ angular.module('MAAS').filter('filterByFabric', function() { return function(vlans, fabric) { var filtered = []; if(!angular.isObject(fabric)) { return filtered; } angular.forEach(vlans, function(vlan) { if(vlan.fabric === fabric.id) { filtered.push(vlan); } }); return filtered; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/by_space.js0000644000000000000000000000110613056115004024739 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Filter Subnets by Space. */ angular.module('MAAS').filter('filterBySpace', function() { return function(subnets, space) { var filtered = []; if(!angular.isObject(space)) { return filtered; } angular.forEach(subnets, function(subnet) { if(subnet.space === space.id) { filtered.push(subnet); } }); return filtered; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/by_vlan.js0000644000000000000000000000110013056115004024602 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Filter Subnets by VLAN. */ angular.module('MAAS').filter('filterByVLAN', function() { return function(subnets, vlan) { var filtered = []; if(!angular.isObject(vlan)) { return filtered; } angular.forEach(subnets, function(subnet) { if(subnet.vlan === vlan.id) { filtered.push(subnet); } }); return filtered; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/nodes.js0000644000000000000000000002020613056115004024264 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Nodes Filter */ angular.module('MAAS').filter('nodesFilter', ['$filter', 'SearchService', function($filter, SearchService) { // The default filter built in to Angular. Used for all search terms that // do not specify a specific attribute. var standardFilter = $filter('filter'); // Helpers that convert a pseudo field on the node to an actual // value from the node. var mappings = { cpu: function(node) { return node.cpu_count; }, cores: function(node) { return node.cpu_count; }, ram: function(node) { return node.memory; }, mac: function(node) { var macs = []; macs.push(node.pxe_mac); macs.push.apply(macs, node.extra_macs); return macs; }, zone: function(node) { return node.zone.name; }, power: function(node) { return node.power_state; } }; // Return true when value is an integer. function isInteger(value) { // +1 done to silence js-lint. return value % +1 === 0; } // Return true when the lowercased value matches the already // lowercased lowerTerm. function _matches(value, lowerTerm, exact) { if(angular.isNumber(value)) { if(exact) { if(isInteger(value)) { return value === parseInt(lowerTerm, 10); } else { return value === parseFloat(lowerTerm); } } else { if(isInteger(value)) { return value >= parseInt(lowerTerm, 10); } else { return value >= parseFloat(lowerTerm); } } } else if(angular.isString(value)) { if(exact) { return value.toLowerCase() === lowerTerm; } else { return value.toLowerCase().indexOf(lowerTerm) >= 0; } } else { return value === lowerTerm; } } // Return true if value matches lowerTerm; if negate is true the // result is inverted.
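// For example (illustrative values): matches(2048, "2000", false, false) is true because non-exact numeric terms use a >= comparison, and matches("maas-node", "node", false, false) is true because non-exact string terms use a case-insensitive substring match.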
function matches(value, lowerTerm, exact, negate) { var match = _matches(value, lowerTerm, exact); if(negate) { return !match; } return match; } return function(nodes, search) { if(angular.isUndefined(nodes) || angular.isUndefined(search) || nodes.length === 0) { return nodes; } var filtered = nodes; var filters = SearchService.getCurrentFilters(search); angular.forEach(filters, function(terms, attr) { if(attr === '_') { // Use the standard filter on terms that do not specify // an attribute. angular.forEach(terms, function(term) { filtered = standardFilter(filtered, term); }); } else if(attr === 'in') { // "in:" is used to filter the nodes by those that are // currently selected. angular.forEach(terms, function(term) { var matched = []; angular.forEach(filtered, function(node) { if(node.$selected && term.toLowerCase() === "selected") { matched.push(node); } else if(!node.$selected && term.toLowerCase() === "!selected") { matched.push(node); } }); filtered = matched; }); } else { // Mapping function for the attribute. var mapFunc = mappings[attr]; // Loop through each item and only select the matching. var matched = []; angular.forEach(filtered, function(node) { var value; if(angular.isFunction(mapFunc)) { value = mapFunc(node); } else if(node.hasOwnProperty(attr)) { value = node[attr]; } // Unable to get value for this node. So skip it. if(angular.isUndefined(value)) { return; } var i, j; for(i = 0; i < terms.length; i++) { var term = terms[i].toLowerCase(); var exact = false, negate = false; // '!' at the beginning means the term is negated. while(term.indexOf('!') === 0) { negate = !negate; term = term.substring(1); } // '=' at the beginning means to match exactly. if(term.indexOf('=') === 0) { exact = true; term = term.substring(1); } // Allow '!' after the '=' as well. while(term.indexOf('!') === 0) { negate = !negate; term = term.substring(1); } if(angular.isArray(value)) { // If value is an array check if the // term matches any value in the // array. If negated, check whether no // value in the array matches. if(negate) { // Push to matched only if no value in // the array matches term. var no_match = true; for(j = 0; j < value.length; j++) { if(matches( value[j], term, exact, false)) { no_match = false; break; // Skip remaining tests. } } if(no_match) { matched.push(node); return; } } else { for(j = 0; j < value.length; j++) { if(matches( value[j], term, exact, false)) { matched.push(node); return; } } } } else { // Standard value check that it matches the // term. if(matches(value, term, exact, negate)) { matched.push(node); return; } } } }); filtered = matched; } }); return filtered; }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/remove_default_vlan.js0000644000000000000000000000077113056115004027202 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Filter to remove the default VLAN as an option. 
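 * A small sketch (made-up data): removeDefaultVLAN applied to [{vid: 0}, {vid: 10}] yields [{vid: 10}]; the default VLAN always has vid 0 and is the only entry this filter drops.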
*/ angular.module('MAAS').filter('removeDefaultVLAN', function() { return function(vlans) { var filtered = []; angular.forEach(vlans, function(vlan) { if(vlan.vid !== 0) { filtered.push(vlan); } }); return filtered; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/0000755000000000000000000000000013056115004023760 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/test_by_fabric.js0000644000000000000000000000264213056115004027301 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for filterByFabric. */ describe("filterByFabric", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the filterByFabric. var filterByFabric; beforeEach(inject(function($filter) { filterByFabric = $filter("filterByFabric"); })); it("returns empty if undefined fabric", function() { var i, vlan, vlans = []; for(i = 0; i < 3; i++) { vlan = { fabric: 0 }; vlans.push(vlan); } expect(filterByFabric(vlans)).toEqual([]); }); it("only returns vlans with fabric id", function() { var i, vlan, fabric_id = 1, other_fabric_id = 2; var fabric_vlans = [], other_fabric_vlans = [], all_vlans = []; for(i = 0; i < 3; i++) { vlan = { fabric: fabric_id }; fabric_vlans.push(vlan); all_vlans.push(vlan); } for(i = 0; i < 3; i++) { vlan = { fabric: other_fabric_id }; other_fabric_vlans.push(vlan); all_vlans.push(vlan); } var fabric = { id: fabric_id }; expect(filterByFabric(all_vlans, fabric)).toEqual(fabric_vlans); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/test_by_space.js0000644000000000000000000000266713056115004027155 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for filterBySpace. */ describe("filterBySpace", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the filterBySpace. var filterBySpace; beforeEach(inject(function($filter) { filterBySpace = $filter("filterBySpace"); })); it("returns empty if undefined space", function() { var i, subnet, subnets = []; for(i = 0; i < 3; i++) { subnet = { space: 0 }; subnets.push(subnet); } expect(filterBySpace(subnets)).toEqual([]); }); it("only returns subnets with space id", function() { var i, subnet, space_id = 1, other_space_id = 2; var subnet_spaces = [], other_subnet_spaces = [], all_subnets = []; for(i = 0; i < 3; i++) { subnet = { space: space_id }; subnet_spaces.push(subnet); all_subnets.push(subnet); } for(i = 0; i < 3; i++) { subnet = { space: other_space_id }; other_subnet_spaces.push(subnet); all_subnets.push(subnet); } var space = { id: space_id }; expect(filterBySpace(all_subnets, space)).toEqual(subnet_spaces); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/test_by_vlan.js0000644000000000000000000000263713056115004027017 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for filterByVLAN. */ describe("filterByVLAN", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the filterByVLAN. 
var filterByVLAN; beforeEach(inject(function($filter) { filterByVLAN = $filter("filterByVLAN"); })); it("returns empty if undefined space", function() { var i, subnet, subnets = []; for(i = 0; i < 3; i++) { subnet = { vlan: 0 }; subnets.push(subnet); } expect(filterByVLAN(subnets)).toEqual([]); }); it("only returns subnets with vlan id", function() { var i, subnet, vlan_id = 1, other_vlan_id = 2; var subnet_vlans = [], other_subnet_vlans = [], all_subnets = []; for(i = 0; i < 3; i++) { subnet = { vlan: vlan_id }; subnet_vlans.push(subnet); all_subnets.push(subnet); } for(i = 0; i < 3; i++) { subnet = { vlan: other_vlan_id }; other_subnet_vlans.push(subnet); all_subnets.push(subnet); } var vlan = { id: vlan_id }; expect(filterByVLAN(all_subnets, vlan)).toEqual(subnet_vlans); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/test_nodes.js0000644000000000000000000002330713056115004026472 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for nodesFilter. */ describe("nodesFilter", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the nodesFilter. var nodesFilter; beforeEach(inject(function($filter) { nodesFilter = $filter("nodesFilter"); })); it("matches using standard filter", function() { var matchingNode = { hostname: "name" }; var otherNode = { hostname: "other" }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "nam")).toEqual([matchingNode]); }); it("matches selected", function() { var matchingNode = { $selected: true }; var otherNode = { $selected: false }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:selected")).toEqual([matchingNode]); }); it("matches selected uppercase", function() { var matchingNode = { $selected: true }; var otherNode = { $selected: false }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:Selected")).toEqual([matchingNode]); }); it("matches selected uppercase in brackets", function() { var matchingNode = { $selected: true }; var otherNode = { $selected: false }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:(Selected)")).toEqual([matchingNode]); }); it("matches non-selected", function() { var matchingNode = { $selected: false }; var otherNode = { $selected: true }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:!selected")).toEqual([matchingNode]); }); it("matches non-selected uppercase", function() { var matchingNode = { $selected: false }; var otherNode = { $selected: true }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:!Selected")).toEqual([matchingNode]); }); it("matches non-selected uppercase in brackets", function() { var matchingNode = { $selected: false }; var otherNode = { $selected: true }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "in:(!Selected)")).toEqual([matchingNode]); }); it("matches on attribute", function() { var matchingNode = { hostname: "name" }; var otherNode = { hostname: "other" }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "hostname:name")).toEqual([matchingNode]); }); it("matches with contains on attribute", function() { var matchingNode = { hostname: "name" }; var otherNode = { hostname: "other" }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "hostname:na")).toEqual([matchingNode]); }); it("matches on negating attribute", function() { var matchingNode = { hostname: 
"name" }; var otherNode = { hostname: "other" }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "hostname:!other")).toEqual([matchingNode]); }); it("matches on exact attribute", function() { var matchingNode = { hostname: "other" }; var otherNode = { hostname: "other2" }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "hostname:=other")).toEqual([matchingNode]); }); it("matches on array", function() { var matchingNode = { hostnames: ["name", "first"] }; var otherNode = { hostnames: ["other", "second"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "hostnames:first")).toEqual([matchingNode]); }); it("matches integer values", function() { var matchingNode = { count: 4 }; var otherNode = { count: 2 }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "count:3")).toEqual([matchingNode]); }); it("matches float values", function() { var matchingNode = { count: 2.2 }; var otherNode = { count: 1.1 }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "count:1.5")).toEqual([matchingNode]); }); it("matches using cpu mapping function", function() { var matchingNode = { cpu_count: 4 }; var otherNode = { cpu_count: 2 }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "cpu:3")).toEqual([matchingNode]); }); it("matches using cores mapping function", function() { var matchingNode = { cpu_count: 4 }; var otherNode = { cpu_count: 2 }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "cores:3")).toEqual([matchingNode]); }); it("matches using ram mapping function", function() { var matchingNode = { memory: 2048 }; var otherNode = { memory: 1024 }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "ram:2000")).toEqual([matchingNode]); }); it("matches using mac mapping function", function() { var matchingNode = { pxe_mac: "00:11:22:33:44:55", extra_macs: ["aa:bb:cc:dd:ee:ff"] }; var otherNode = { pxe_mac: "66:11:22:33:44:55", extra_macs: ["00:bb:cc:dd:ee:ff"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "mac:aa:bb:cc:dd:ee:ff")).toEqual( [matchingNode]); }); it("matches using zone mapping function", function() { var matchingNode = { zone: { name: "first" } }; var otherNode = { zone: { name: "second" } }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "zone:first")).toEqual([matchingNode]); }); it("matches using power mapping function", function() { var matchingNode = { power_state: 'on' }; var otherNode = { power_state: 'off' }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "power:on")).toEqual([matchingNode]); }); it("matches accumulate", function() { var matchingNode = { power_state: 'on', zone: { name: "first" } }; var otherNode = { power_state: 'on', zone: { name: "second" } }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "power:on zone:first")).toEqual( [matchingNode]); }); it("matches a tag", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["second", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:first")).toEqual( [matchingNode]); }); it("matches a negated tag", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["second", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:!third")).toEqual( [matchingNode]); expect(nodesFilter(nodes, "tags:!(third)")).toEqual( [matchingNode]); expect(nodesFilter(nodes, "tags:(!third)")).toEqual( [matchingNode]); }); 
it("matches a double negated tag", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["second", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:!!first")).toEqual( [matchingNode]); expect(nodesFilter(nodes, "tags:!(!first)")).toEqual( [matchingNode]); expect(nodesFilter(nodes, "tags:(!!first)")).toEqual( [matchingNode]); }); it("matches a direct and a negated tag", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["second", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:(first,!third)")).toEqual( [matchingNode]); }); it("matches an exact direct and a negated tag", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["first1", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:(=first,!third)")).toEqual( [matchingNode]); }); it("matches two negated tags", function() { var matchingNode = { tags: ["first", "second"] }; var otherNode = { tags: ["second", "third"] }; var nodes = [matchingNode, otherNode]; expect(nodesFilter(nodes, "tags:(!second,!third)")).toEqual( [matchingNode]); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/filters/tests/test_remove_default_vlan.js0000644000000000000000000000146313056115004031402 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for removeDefaultVLAN. */ describe("removeDefaultVLAN", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the removeDefaultVLAN. var removeDefaultVLAN; beforeEach(inject(function($filter) { removeDefaultVLAN = $filter("removeDefaultVLAN"); })); it("only returns vlans without vid 0", function() { var i, vlan, vlans = []; for(i = 0; i < 3; i++) { vlan = { id: i, vid: i, fabric: 0 }; vlans.push(vlan); } expect(removeDefaultVLAN(vlans)).toEqual([vlans[1], vlans[2]]); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/browser.js0000644000000000000000000000326113056115004025014 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Browser Service * * Detects the browser used by the client. This is very simple case, because * at the moment we really only care about if the browser is Firefox. This * could be improved for other browsers, but this should only be used as a * last resort to prevent something bad happening on a misbehaving browser. */ angular.module('MAAS').service('BrowserService', ['$window', function($window) { // The first items in the array will be matched first. So if the user // agent for the browser contains both you need to make the more // specific one first. E.g. Chrome contains both "Chrome" and "Safari" // in its user-agent string. Since "Safari" does not chrome comes first // so it matches that browser more specifically. var BROWSERS = [ { name: "chrome", regex: /chrome/i }, { name: "safari", regex: /safari/i }, { name: "firefox", regex: /firefox/i }, { name: "ie", regex: /MSIE/ } ]; this.browser = "other"; // Set the browser if a regex matches. The first to match wins. 
var self = this; angular.forEach(BROWSERS, function(matcher) { if(matcher.regex.test($window.navigator.userAgent) && self.browser === "other") { self.browser = matcher.name; } }); }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/converter.js0000644000000000000000000000606013056115004025340 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Converter Service * * Used by controllers to convert user inputs. */ angular.module('MAAS').service('ConverterService', function() { var UNITS = ['Bytes', 'KB', 'MB', 'GB', 'TB']; var KB = 1000.0; var MB = 1000.0 * 1000.0; var GB = 1000.0 * 1000.0 * 1000.0; var TB = 1000.0 * 1000.0 * 1000.0 * 1000.0; // Convert the bytes to a unit. this.bytesToUnits = function(bytes) { // Support string being passed. if(angular.isString(bytes)) { bytes = parseInt(bytes, 10); } var i, unit, converted = bytes; for(i = 0; i < UNITS.length; i++) { unit = UNITS[i]; if(Math.abs(converted) < 1000.0 || unit === 'TB') { var string = converted.toFixed(1) + " " + unit; if(unit === 'Bytes') { string = converted + " " + unit; } return { original: bytes, converted: converted, units: unit, string: string }; } converted /= 1000.0; } }; // Convert the data based on the unit to bytes. this.unitsToBytes = function(data, unit) { // Support string being passed. if(angular.isString(data)) { data = parseFloat(data); } if(unit === 'Bytes') { return Math.floor(data); } else if(unit === 'KB') { return Math.floor(data * KB); } else if(unit === 'MB') { return Math.floor(data * MB); } else if(unit === 'GB') { return Math.floor(data * GB); } else if(unit === 'TB') { return Math.floor(data * TB); } }; // Convert the data based on unit down to the lowest tolerance to still // be the same value in that unit. this.roundUnits = function(data, unit) { // Support string being passed. if(angular.isString(data)) { data = parseFloat(data); } if(unit === 'Bytes') { return Math.floor(data); } else if(unit === 'KB') { return Math.floor(data * KB) - (0.05 * KB); } else if(unit === 'MB') { return Math.floor(data * MB) - (0.05 * MB); } else if(unit === 'GB') { return Math.floor(data * GB) - (0.05 * GB); } else if(unit === 'TB') { return Math.floor(data * TB) - (0.05 * TB); } }; // Round the bytes down to size based on the block size. this.roundByBlockSize = function(bytes, block_size) { return block_size * Math.floor(bytes / block_size); }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/error.js0000644000000000000000000000113713056115004024462 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Error Service */ angular.module('MAAS').service('ErrorService', function() { // Holds the client error. this._error = null; // Raise this error in the UI. this.raiseError = function(error) { // Possible that this method is called more than once. // Only take the first error. if(!angular.isString(this._error)) { this._error = error; } }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/json.js0000644000000000000000000000140213056115004024275 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS JSON Service * * Used by controllers to parse JSON. 
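 * For example (illustrative inputs): tryParse('{"a": 1}') returns the object {a: 1}, while tryParse('not json'), tryParse('42') and tryParse('null') all return null, because only a successfully parsed, non-null object is returned.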
*/ angular.module('MAAS').service('JSONService', function() { // Return the JSON object for the string, or null if it cannot be parsed. this.tryParse = function(jsonString) { try { var obj = JSON.parse(jsonString); // JSON.parse('false') or JSON.parse('1234') parse successfully but // do not produce objects, and JSON.parse('null') returns null (for // which typeof is also "object"), so the check below rejects all of // these. if (obj && typeof obj === "object" && obj !== null) { return obj; } } catch (e) { } return null; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/manager.js0000644000000000000000000006601413056115004024750 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Base Manager * * Manages a collection of items from the websocket in the browser. The manager * uses the RegionConnection to load the items, update the items, and listen * for notification events about the items. */ angular.module('MAAS').service( 'Manager', ['$q', '$rootScope', '$timeout', 'RegionConnection', function( $q, $rootScope, $timeout, RegionConnection) { // Actions that are used to update the statuses metadata. var METADATA_ACTIONS = { CREATE: "create", UPDATE: "update", DELETE: "delete" }; // Constructor function Manager() { // Primary key on the items in the list. Used to match items. this._pk = "id"; // Handler on the region to call to list, create, update, delete, // and listen for notifications. Must be set by the overriding // manager. this._handler = null; // Holds all items in the system. This list must always be the same // object. this._items = []; // True when all of the items have been loaded. This is done on // initial connection to the region. this._loaded = false; // True when the items list is currently being loaded or reloaded. // Actions will not be processed while this is true. this._isLoading = false; // Holds the list of defers that need to be called once the loading // of items has finished. This is used when a caller calls loadItems // while it is already loading. this._extraLoadDefers = []; // Holds the list of defers that need to be called once the // reloading of items has finished. This is used when a caller calls // reloadItems while it is already reloading. this._extraReloadDefers = []; // Holds all of the notify actions that need to be processed. This // is used to hold the actions while the items are being loaded. // Once all of the items are loaded the queue will be processed. this._actionQueue = []; // Holds the list of all currently selected items. This is held // in a separate list to remove the need to loop through the full // listing to grab the selected items. this._selectedItems = []; // Set to true when the items list should reload upon re-connection // to the region. this._autoReload = false; // Holds the item that is currently being viewed. This object will // be updated if any notify events are received for it. This avoids // having to repeatedly pull the item out of the items list. this._activeItem = null; // Holds metadata information that is used to help filtering. this._metadata = {}; // List of attributes to track on the loaded items. Each attribute // in this list will be placed in _metadata to track its current // values and the number of items with each value. this._metadataAttributes = []; } // Return the index of the item in the given array.
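// For example, with the default _pk of "id", _getIndexOfItem([{id: 3}, {id: 7}], 7) returns 1; it returns -1 when no element has a matching primary key.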
Manager.prototype._getIndexOfItem = function(array, pk_value) { var i, len; for(i = 0, len = array.length; i < len; i++) { if(array[i][this._pk] === pk_value) { return i; } } return -1; }; // Replace the item in the array at the same index. Manager.prototype._replaceItemInArray = function(array, item) { var idx = this._getIndexOfItem(array, item[this._pk]); if(idx >= 0) { // Keep the current selection on the item. item.$selected = array[idx].$selected; angular.copy(item, array[idx]); } }; // Remove the item from the array. Manager.prototype._removeItemByIdFromArray = function( array, pk_value) { var idx = this._getIndexOfItem(array, pk_value); if(idx >= 0) { array.splice(idx, 1); } }; // Return the parameters that should be used for the batch load // request. Should be overridden by subclasses that want to add extra // parameters to the batch request. By default it returns an empty // object. Manager.prototype._initBatchLoadParameters = function() { return {}; }; // Batch load items from the region in groups of 50. Manager.prototype._batchLoadItems = function(array, extra_func) { var self = this; var defer = $q.defer(); var method = this._handler + ".list"; function callLoad() { var params = self._initBatchLoadParameters(); params.limit = 50; // Get the last pk in the list so the region knows to // start at that offset. if(array.length > 0) { params.start = array[array.length-1][self._pk]; } RegionConnection.callMethod( method, params).then(function(items) { // Pass each item to the extra_func function if given. if(angular.isFunction(extra_func)) { angular.forEach(items, function(item) { extra_func(item); }); } array.push.apply(array, items); if(items.length === 50) { // Could be more items, request the next 50. callLoad(); } else { defer.resolve(array); } }, defer.reject); } callLoad(); return defer.promise; }; // Resolves an array of defers with item. Manager.prototype._resolveDefers = function(defersArray, item) { angular.forEach(defersArray, function(defer) { defer.resolve(item); }); }; // Rejects an array of defers with error. Manager.prototype._rejectDefers = function(defersArray, error) { angular.forEach(defersArray, function(defer) { defer.reject(error); }); }; // Return the list of items. Manager.prototype.getItems = function() { return this._items; }; // Load all the items. Manager.prototype.loadItems = function() { // If the items have already been loaded, then we need to reload // the items list, not load the initial list. if(this._loaded) { return this.reloadItems(); } // If it's already loading then the caller just needs to be informed // of when it has finished loading. if(this._isLoading) { var defer = $q.defer(); this._extraLoadDefers.push(defer); return defer.promise; } var self = this; this._isLoading = true; return this._batchLoadItems(this._items, function(item) { item.$selected = false; self._updateMetadata(item, METADATA_ACTIONS.CREATE); }).then(function() { self._loaded = true; self._isLoading = false; self.processActions(); self._resolveDefers(self._extraLoadDefers, self._items); self._extraLoadDefers = []; return self._items; }, function(error) { self._rejectDefers(self._extraLoadDefers, error); self._extraLoadDefers = []; return $q.reject(error); }); }; // Reload the items list. Manager.prototype.reloadItems = function() { // If the items have not been loaded yet, then we need to load the // initial list. if(!this._loaded) { return this.loadItems(); } // If it's already reloading then the caller just needs to be // informed of when it has finished reloading.
if(this._isLoading) { var defer = $q.defer(); this._extraReloadDefers.push(defer); return defer.promise; } // Updates the items list with the reloaded items. var self = this; function updateItems(items) { // Iterate in reverse so we can remove items inline, without // having to adjust the index. var i = self._items.length; while(i--) { var item = self._items[i]; var updatedIdx = self._getIndexOfItem( items, item[self._pk]); if(updatedIdx === -1) { self._updateMetadata(item, METADATA_ACTIONS.DELETE); self._items.splice(i, 1); self._removeItemByIdFromArray( self._selectedItems, item[self._pk]); } else { var updatedItem = items[updatedIdx]; self._updateMetadata( updatedItem, METADATA_ACTIONS.UPDATE); updatedItem.$selected = item.$selected; angular.copy(items[updatedIdx], item); items.splice(updatedIdx, 1); } } // The remain items in items array are the new items. self._items.push.apply(self._items, items); } // The reload action loads all of the items into this list // instead of the items list. This list will then be used to // update the items list. var currentItems = []; // Start the reload process and once complete call updateItems. self._isLoading = true; return this._batchLoadItems(currentItems).then(function(items) { updateItems(items); self._isLoading = false; self.processActions(); // Set the activeItem again so the region knows that its // the active item. if(angular.isObject(self._activeItem)) { self.setActiveItem(self._activeItem[self._pk]); } self._resolveDefers(self._extraReloadDefers, self._items); self._extraReloadDefers = []; return self._items; }, function(error) { self._rejectDefers(self._extraReloadDefers, error); self._extraReloadDefers = []; return $q.reject(error); }); }; // Enables auto reloading of the item list on connection to region. Manager.prototype.enableAutoReload = function() { if(!this._autoReload) { this._autoReload = true; var self = this; this._reloadFunc = function() { self.reloadItems(); }; RegionConnection.registerHandler("open", this._reloadFunc); } }; // Disable auto reloading of the item list on connection to region. Manager.prototype.disableAutoReload = function() { if(this._autoReload) { RegionConnection.unregisterHandler("open", this._reloadFunc); this._reloadFunc = null; this._autoReload = false; } }; // True when the initial item list has finished loading. Manager.prototype.isLoaded = function() { return this._loaded; }; // True when the item list is currently being loaded or reloaded. Manager.prototype.isLoading = function() { return this._isLoading; }; // Replace item in the items and selectedItems list. Manager.prototype._replaceItem = function(item) { this._updateMetadata(item, METADATA_ACTIONS.UPDATE); this._replaceItemInArray(this._items, item); }; // Remove item in the items and selectedItems list. Manager.prototype._removeItem = function(pk_value) { var idx = this._getIndexOfItem(this._items, pk_value); if(idx >= 0) { this._updateMetadata(this._items[idx], METADATA_ACTIONS.DELETE); } this._removeItemByIdFromArray(this._items, pk_value); this._removeItemByIdFromArray(this._selectedItems, pk_value); }; // Get the item from the list. Does not make a get request to the // region to load more data. Manager.prototype.getItemFromList = function(pk_value) { var idx = this._getIndexOfItem(this._items, pk_value); if(idx >= 0) { return this._items[idx]; } else { return null; } }; // Get the item from the region. 
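// For example, a manager whose _handler is "zone" (as in ZonesManager) would issue the websocket call "zone.get" with params {id: 5} for getItem(5); the returned item then replaces the stale copy in _items via _replaceItem.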
Manager.prototype.getItem = function(pk_value) { var self = this; var method = this._handler + ".get"; var params = {}; params[this._pk] = pk_value; return RegionConnection.callMethod( method, params).then(function(item) { self._replaceItem(item); return item; }); }; // Send the update information to the region. Manager.prototype.updateItem = function(item) { var self = this; var method = this._handler + ".update"; item = angular.copy(item); delete item.$selected; return RegionConnection.callMethod( method, item).then(function(item) { self._replaceItem(item); return item; }); }; // Send the delete call for item to the region. Manager.prototype.deleteItem = function(item) { var self = this; var method = this._handler + ".delete"; var params = {}; params[this._pk] = item[this._pk]; return RegionConnection.callMethod( method, params).then(function() { self._removeItem(item[self._pk]); }); }; // Return the active item. Manager.prototype.getActiveItem = function() { return this._activeItem; }; // Set the active item. Manager.prototype.setActiveItem = function(pk_value) { if(!this._loaded) { throw new Error( "Cannot set active item unless the manager is loaded."); } var idx = this._getIndexOfItem(this._items, pk_value); if(idx === -1) { this._activeItem = null; // Item with pk_value does not exists. Reject the returned // deferred. var defer = $q.defer(); $timeout(function() { defer.reject("No item with pk: " + pk_value); }); return defer.promise; } else { this._activeItem = this._items[idx]; // Data that is loaded from the list call is limited and // doesn't contain all of the needed data for an activeItem. // Call set_active on the handler for the region to know // this item needs all information when updated. var self = this; var method = this._handler + ".set_active"; var params = {}; params[this._pk] = pk_value; return RegionConnection.callMethod( method, params).then(function(item) { self._replaceItem(item); return self._activeItem; }); } }; // Clears the active item. Manager.prototype.clearActiveItem = function() { this._activeItem = null; }; // True when the item list is stable and not being loaded or reloaded. Manager.prototype.canProcessActions = function() { return !this._isLoading; }; // Handle notify from RegionConnection about an item. Manager.prototype.onNotify = function(action, data) { // Place the notification in the action queue. this._actionQueue.push({ action: action, data: data }); // Processing incoming actions is enabled. Otherwise they // will be queued until processActions is called. if(this.canProcessActions()) { $rootScope.$apply(this.processActions()); } }; // Process all actions to keep the item information up-to-date. Manager.prototype.processActions = function() { while(this._actionQueue.length > 0) { var action = this._actionQueue.shift(); if(action.action === "create") { // Check that the received data doesn't already exists // in the _items list. If it does then this is actually // an update action not a create action. var idx = this._getIndexOfItem( this._items, action.data[this._pk]); if(idx >= 0) { // Actually this is an update action not a create // action. So replace the item instead of adding it. this._replaceItem(action.data); } else { action.data.$selected = false; this._updateMetadata( action.data, METADATA_ACTIONS.CREATE); this._items.push(action.data); } } else if(action.action === "update") { this._replaceItem(action.data); } else if(action.action === "delete") { this._removeItem(action.data); } } }; // Return list of selected items. 
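// Note: the returned array holds live references into _items, so after selectItem(4) and selectItem(8) the entries here are the same objects (with $selected set to true) that getItems() exposes.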
Manager.prototype.getSelectedItems = function() { return this._selectedItems; }; // Mark the given item as selected. Manager.prototype.selectItem = function(pk_value) { var idx = this._getIndexOfItem(this._items, pk_value); if(idx === -1) { console.log( "WARN: selection of " + this._handler + "(" + pk_value + ") failed because its missing in the items list."); return; } var item = this._items[idx]; item.$selected = true; idx = this._selectedItems.indexOf(item); if(idx === -1) { this._selectedItems.push(item); } }; // Mark the given item as unselected. Manager.prototype.unselectItem = function(pk_value) { var idx = this._getIndexOfItem(this._items, pk_value); if(idx === -1) { console.log( "WARN: de-selection of " + this._handler + "(" + pk_value + ") failed because its missing in the " + "nodes list."); return; } var item = this._items[idx]; item.$selected = false; idx = this._selectedItems.indexOf(item); if(idx >= 0) { this._selectedItems.splice(idx, 1); } }; // Determine if a item is selected. Manager.prototype.isSelected = function(pk_value) { var idx = this._getIndexOfItem(this._items, pk_value); if(idx === -1) { console.log( "WARN: unable to determine if " + this._handler + "(" + pk_value + ") is selected because its missing in the " + "nodes list."); return false; } return this._items[idx].$selected === true; }; // Return the metadata object value from `metadatas` matching `name`. Manager.prototype._getMetadataValue = function(metadatas, name) { var i; for(i = 0; i < metadatas.length; i++) { if(metadatas[i].name === name) { return metadatas[i]; } } return null; }; // Add new value to metadatas if it doesnt exists or increment the // count if it already does. Manager.prototype._addMetadataValue = function(metadatas, value) { var metadata = this._getMetadataValue(metadatas, value); if(metadata) { metadata.count += 1; } else { metadata = { name: value, count: 1 }; metadatas.push(metadata); } }; // Remove value from metadatas. Manager.prototype._removeMetadataValue = function(metadatas, value) { var metadata = this._getMetadataValue(metadatas, value); if(metadata) { metadata.count -= 1; if(metadata.count <= 0) { metadatas.splice(metadatas.indexOf(metadata), 1); } } }; // Update the metadata entry in `metadatas` for the array value and // based on the action. Manager.prototype._updateMetadataArrayEntry = function( metadatas, newValue, action, oldValue) { var self = this; if(action === METADATA_ACTIONS.CREATE) { angular.forEach(newValue, function(value) { // On create ignore empty values. if(value === '') { return; } self._addMetadataValue(metadatas, value); }); } else if(action === METADATA_ACTIONS.DELETE) { angular.forEach(newValue, function(value) { self._removeMetadataValue(metadatas, value); }); } else if(action === METADATA_ACTIONS.UPDATE && angular.isDefined(oldValue)) { // Any values in added are new on the item, and any values left // in oldArray have been removed. var added = []; var oldArray = angular.copy(oldValue); angular.forEach(newValue, function(value) { var idx = oldArray.indexOf(value); if(idx === -1) { // Value not in oldArray so it has been added. added.push(value); } else { // Value already in oldArray so its already tracked. oldArray.splice(idx, 1); } }); // Add the new values. angular.forEach(added, function(value) { self._addMetadataValue(metadatas, value); }); // Remove the old values. 
angular.forEach(oldArray, function(value) { self._removeMetadataValue(metadatas, value); }); } }; // Update the metadata entry in `metadatas` for the newValue and based // on the action. Method does not work with array values, use // _updateMetadataArrayEntry for values that are arrays. Manager.prototype._updateMetadataValueEntry = function( metadatas, newValue, action, oldValue) { if(action === METADATA_ACTIONS.CREATE) { // On create ignore empty values. if(newValue === '') { return; } this._addMetadataValue(metadatas, newValue); } else if(action === METADATA_ACTIONS.DELETE) { this._removeMetadataValue(metadatas, newValue); } else if(action === METADATA_ACTIONS.UPDATE && angular.isDefined(oldValue)) { if(oldValue !== newValue) { if(oldValue !== "") { // Decrement the old value this._removeMetadataValue(metadatas, oldValue); } // Increment the new value with the "create" // operation. this._updateMetadataEntry( metadatas, newValue, METADATA_ACTIONS.CREATE, oldValue); } } }; // Update the metadata entry in `metadatas` with the newValue and based // on the action. Update action will use the oldValue to remove it from // the metadata. Manager.prototype._updateMetadataEntry = function( metadatas, newValue, action, oldValue) { if(angular.isArray(newValue)) { this._updateMetadataArrayEntry( metadatas, newValue, action, oldValue); } else { this._updateMetadataValueEntry( metadatas, newValue, action, oldValue); } }; // Return the metadata object. Manager.prototype.getMetadata = function() { return this._metadata; }; // Update the metadata objects based on the given item and action. Manager.prototype._updateMetadata = function(item, action) { var self = this; var oldItem, idx; if(action === METADATA_ACTIONS.UPDATE) { // Update actions require the oldItem if it exist in the // current item listing. idx = this._getIndexOfItem(this._items, item[this._pk]); if(idx >= 0) { oldItem = this._items[idx]; } } angular.forEach(this._metadataAttributes, function(func, attr) { if(angular.isUndefined(self._metadata[attr])) { self._metadata[attr] = []; } var newValue, oldValue; if(angular.isFunction(func)) { newValue = func(item); if(angular.isObject(oldItem)) { oldValue = func(oldItem); } } else { newValue = item[attr]; if(angular.isObject(oldItem)) { oldValue = oldItem[attr]; } } self._updateMetadataEntry( self._metadata[attr], newValue, action, oldValue); }); }; return Manager; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/managerhelper.js0000644000000000000000000000431213056115004026141 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Manager Helper Service * * Used by controllers to load managers. It helps the initialization of * managers and makes sure that all items in the manager are loaded * before resolving the defer. */ angular.module('MAAS').service('ManagerHelperService', [ '$q', '$timeout', 'ErrorService', 'RegionConnection', function($q, $timeout, ErrorService, RegionConnection) { // Loads the manager. this.loadManager = function(manager) { // Do this entire operation with in the context of the region // connection is connected. var defer = $q.defer(); RegionConnection.defaultConnect().then(function() { if(manager.isLoaded()) { $timeout(function() { defer.resolve(manager); }); } else { manager.loadItems().then(function() { defer.resolve(manager); }, function(error) { ErrorService.raiseError(error); }); } // Always enable auto reload. 
This will make sure the items // are reloaded if the connection goes down. manager.enableAutoReload(); }); return defer.promise; }; // Loads the list of managers. this.loadManagers = function(managers) { var defer = $q.defer(); var loadedManagers = []; // Resolves the defer once all managers are loaded. var resolveAllLoaded = function() { if(loadedManagers.length === managers.length) { defer.resolve(managers); } }; var self = this; angular.forEach(managers, function(manager) { self.loadManager(manager).then(function(loadedManager) { loadedManagers.push(loadedManager); resolveAllLoaded(); }); }); return defer.promise; }; }]); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/search.js0000644000000000000000000001357713056115004024605 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Nodes Search Service */ angular.module('MAAS').service('SearchService', function() { // Holds an empty filter object. var emptyFilter = { _: [] }; // Return a new empty filter. this.getEmptyFilter = function() { return angular.copy(emptyFilter); }; // Splits the search string into separate terms based on whitespace. // This handles whitespace appearing inside of '(' and ')'. // // XXX blake_r 28-01-15: This could be improved with a regex, but was // unable to come up with one that would allow me to validate the end // ')' in the string. this.getSplitSearch = function(search) { var terms = search.split(' '); var fixedTerms = []; var spanningParentheses = false; angular.forEach(terms, function(term, idx) { if(spanningParentheses) { // The previous term had an opening '(' but not a ')'. This // term should join that previous term. fixedTerms[fixedTerms.length - 1] += ' ' + term; // If the term contains the ending ')' then it's the last // in the group. if(term.indexOf(')') !== -1) { spanningParentheses = false; } } else { // Term is not part of a previous '(' span. fixedTerms.push(term); var startIdx = term.indexOf('('); if(startIdx !== -1) { if(term.indexOf(')', startIdx) === -1) { // Contains a starting '(' but not an ending ')'. spanningParentheses = true; } } } }); if(spanningParentheses) { // Missing the ending parenthesis, so signal the error by // returning null. return null; } return fixedTerms; }; // Return all of the currently active filters for the given search. this.getCurrentFilters = function(search) { var filters = this.getEmptyFilter(); if(search.length === 0) { return filters; } var searchTerms = this.getSplitSearch(search); if(!searchTerms) { return null; } angular.forEach(searchTerms, function(terms) { terms = terms.split(':'); if(terms.length === 1) { // The search term does not specify a field, so it gets // added to the '_' section of the filters. if(filters._.indexOf(terms[0]) === -1) { filters._.push(terms[0]); } } else { var field = terms.shift(); var values = terms.join(":"); // Remove the starting '(' and ending ')'. values = values.replace('(', ''); values = values.replace(')', ''); // If empty values then do nothing. if(values.length === 0) { return; } // Split the values based on comma. values = values.split(','); // Add the values to filters. if(angular.isUndefined(filters[field])) { filters[field] = []; } angular.forEach(values, function(value) { if(filters[field].indexOf(value) === -1) { filters[field].push(value); } }); } }); return filters; }; // Convert "filters" into a search string.
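// For example (illustrative values): {_: ["foo"], zone: ["bar", "baz"]} becomes 'foo zone:(bar,baz)'; typed filters render as type:(v1,v2) and the untyped '_' terms are prepended, separated by spaces.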
this.filtersToString = function(filters) { var search = ""; angular.forEach(filters, function(terms, type) { // Skip empty and skip "_" as it gets appended at the // beginning of the search. if(terms.length === 0 || type === "_") { return; } search += type + ":(" + terms.join(",") + ") "; }); if(filters._.length > 0) { search = filters._.join(" ") + " " + search; } return search.trim(); }; // Return the index of the value in the type for the filter. this._getFilterValueIndex = function(filters, type, value) { var values = filters[type]; if(angular.isUndefined(values)) { return -1; } var lowerValues = values.map(function(value) { return value.toLowerCase(); }); return lowerValues.indexOf(value.toLowerCase()); }; // Return true if the type and value are in the filters. this.isFilterActive = function(filters, type, value, exact) { var values = filters[type]; if(angular.isUndefined(values)) { return false; } if(angular.isUndefined(exact)) { exact = false; } if(exact) { value = "=" + value; } return this._getFilterValueIndex(filters, type, value) !== -1; }; // Toggles a filter on or off based on type and value. this.toggleFilter = function(filters, type, value, exact) { if(angular.isUndefined(filters[type])) { filters[type] = []; } if(exact) { value = "=" + value; } var idx = this._getFilterValueIndex(filters, type, value); if(idx === -1) { filters[type].push(value); } else { filters[type].splice(idx, 1); } return filters; }; // Holds all stored filters. var storedFilters = {}; // Store a filter for later. this.storeFilters = function(name, filters) { storedFilters[name] = filters; }; // Retrieve a stored fitler. this.retrieveFilters = function(name) { return storedFilters[name]; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/0000755000000000000000000000000013056115004024133 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/validation.js0000644000000000000000000002045213056115004025464 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * MAAS Validation Service * * Used by controllers to validate user inputs. */ angular.module('MAAS').service('ValidationService', function() { // Pattern that matches a hostname. var hostnamePattern = /^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])*$/; // Pattern that matches a MAC. var macPattern = /^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$/; // Pattern used to match IPv4. var ipv4Pattern = new RegExp([ '^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.', '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.', '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.', '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$' ].join('')); // Returns true if the octets in one equal two with the cidr mask in // bits applied to both. function cidrMatcher(one, two, size, bits) { var part = 0; while(bits > 0) { var shift = size - bits; if(shift < 0) { shift = 0; } var oneShift = one[part] >> shift; var twoShift = two[part] >> shift; if(oneShift !== twoShift) { return false; } bits -= size; part += 1; } return true; } // Convert string ipv4 address into octets array. function ipv4ToOctets(ipAddress) { var parts = ipAddress.split('.'); var octets = []; angular.forEach(parts, function(part) { octets.push(parseInt(part, 10)); }); return octets; } // Convert ipv6 address to a full ipv6 address, removing the // '::' shortcut. 
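// For example, ipv6Expand("fe80::1") returns "fe80:0000:0000:0000:0000:0000:0000:1". Note that the existing groups are left as-is; only the groups elided by '::' are inserted as "0000".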
function ipv6Expand(ipAddress) { var i, expandedAddress = ipAddress; if(expandedAddress.indexOf("::") !== -1) { // '::' is present so replace it with the required // number of '0000:' based on its location in the string. var split = ipAddress.split("::"); var groups = 0; for(i = 0; i < split.length; i++) { groups += split[i].split(":").length; } expandedAddress = split[0] + ":"; for(i = 0; i < 8 - groups; i++) { expandedAddress += "0000:"; } expandedAddress += split[1]; } return expandedAddress; } // Convert string ipv6 into octets array. function ipv6ToOctets(ipAddress) { var octets = []; var parts = ipv6Expand(ipAddress).split(":"); angular.forEach(parts, function(part) { octets.push(parseInt(part, 16)); }); return octets; } // Return true if the hostname is valid, false otherwise. this.validateHostname = function(hostname) { // Invalid if the hostname is not a string, empty, or more than // 63 characters. if(!angular.isString(hostname) || hostname.length === 0 || hostname.length > 63) { return false; } return hostnamePattern.test(hostname); }; // Return true if the MAC is valid, false otherwise. this.validateMAC = function(macAddress) { // Invalid if the macAddress is not a string. if(!angular.isString(macAddress)) { return false; } return macPattern.test(macAddress.trim()); }; // Return true if the IP is valid IPv4 address, false otherwise. this.validateIPv4 = function(ipAddress) { // Invalid if the ipAddress is not a string or empty. if(!angular.isString(ipAddress) || ipAddress.length === 0) { return false; } return ipv4Pattern.test(ipAddress); }; // Return true if the IP is valid IPv6 address, false otherwise. this.validateIPv6 = function(ipAddress) { // Invalid if the ipAddress is not a string, empty, or missing // at least one ':'. if(!angular.isString(ipAddress) || ipAddress.length === 0 || ipAddress.indexOf(':') === -1) { return false; } var expandedAddress = ipv6Expand(ipAddress); var octets = ipv6ToOctets(expandedAddress); if(octets.length !== 8) { return false; } // Make sure all octets are in range. var i; for(i = 0; i < 8; i++) { if(isNaN(octets[i]) || octets[i] < 0 || octets[i] > 0xffff) { // Out of range. return false; } } // Don't allow unspecified, loopback, multicast, link-local // unicast, or anything out of range. if(octets[0] < 1 || octets[0] === 0xff00 || octets[0] === 0xfe80) { return false; } return true; }; // Return true if the IP is valid, false otherwise. this.validateIP = function(ipAddress) { return ( this.validateIPv4(ipAddress) || this.validateIPv6(ipAddress)); }; // Return true if the ipAddress is in the network. this.validateIPInNetwork = function(ipAddress, network) { var networkSplit = network.split('/'); var networkAddress = networkSplit[0]; var cidrBits = parseInt(networkSplit[1], 10); if(this.validateIPv4(ipAddress) && this.validateIPv4(networkAddress)) { return cidrMatcher( ipv4ToOctets(ipAddress), ipv4ToOctets(networkAddress), 8, cidrBits); } else if(this.validateIPv6(ipAddress) && this.validateIPv6(networkAddress)) { return cidrMatcher( ipv6ToOctets(ipAddress), ipv6ToOctets(networkAddress), 16, cidrBits); } return false; }; // Return true if the ipAddress is in the network and between the // lowAddress and highAddress inclusive. this.validateIPInRange = function( ipAddress, network, lowAddress, highAddress) { // If the ip address is not even in the network then its // not in the range. 
if(!this.validateIPInNetwork(ipAddress, network)) { return false; } var i, ipOctets, lowOctets, highOctets; if(this.validateIPv4(ipAddress) && this.validateIPv4(lowAddress) && this.validateIPv4(highAddress)) { // Check that each octet of the ip address is greater than or // equal to the low address octet and less than or equal to // the high address octet. ipOctets = ipv4ToOctets(ipAddress); lowOctets = ipv4ToOctets(lowAddress); highOctets = ipv4ToOctets(highAddress); for(i = 0; i < 4; i++) { if(ipOctets[i] > highOctets[i] || ipOctets[i] < lowOctets[i]) { return false; } } return true; } else if(this.validateIPv6(ipAddress) && this.validateIPv6(lowAddress) && this.validateIPv6(highAddress)) { // Check that each octet of the ip address is greater than or // equal to the low address octet and less than or equal to // the high address octet. ipOctets = ipv6ToOctets(ipAddress); lowOctets = ipv6ToOctets(lowAddress); highOctets = ipv6ToOctets(highAddress); for(i = 0; i < 8; i++) { if(ipOctets[i] > highOctets[i] || ipOctets[i] < lowOctets[i]) { return false; } } return true; } return false; }; }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_browser.js0000644000000000000000000000456213056115004027222 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for BrowserService. */ describe("BrowserService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Inject a fake $window allowing the test // to set the user agent string. var $window; beforeEach(function() { $window = { navigator: { userAgent: "" } }; // Inject the fake $window into the provider so // when the service is created it will use this // $window object instead of the one provided by // angular. module(function($provide) { $provide.value("$window", $window); }); }); // Get the $injector so the test can grab the BrowserService. var $injector; beforeEach(inject(function(_$injector_) { $injector = _$injector_; })); it("browser set to other if none of the regex match", function() { $window.navigator.userAgent = makeName("randomBrowser"); var BrowserService = $injector.get("BrowserService"); expect(BrowserService.browser).toBe("other"); }); var scenarios = [ { browser: "chrome", userAgent: "Mozilla/5.0 (X11; Linux x86_64) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/41.0.2272.89 Safari/537.36" }, { browser: "safari", userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) " + "AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 " + "Safari/7046A194A" }, { browser: "firefox", userAgent: "Mozilla/5.0 (X11; Ubuntu; " + "Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0" }, { browser: "ie", userAgent: "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; " + "Trident/7.0; rv:11.0) like Gecko" } ]; angular.forEach(scenarios, function(scenario) { it("browser set to " + scenario.browser, function() { $window.navigator.userAgent = scenario.userAgent; var BrowserService = $injector.get("BrowserService"); expect(BrowserService.browser).toBe(scenario.browser); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_converter.js0000644000000000000000000001270013056115004027539 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ConverterService. */ describe("ConverterService", function() { // Load the MAAS module.
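// Note (added for clarity, not an original comment): the scenarios // below assume decimal units, i.e. 1 KB === 1000 Bytes, which is why // 8100 Bytes renders as "8.1 KB".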
beforeEach(module("MAAS")); // Load the ConverterService. var ConverterService; beforeEach(inject(function($injector) { ConverterService = $injector.get("ConverterService"); })); describe("bytesToUnits", function() { var scenarios = [ { input: "99", output: { original: 99, converted: 99, units: "Bytes", string: "99 Bytes" } }, { input: 99, output: { original: 99, converted: 99, units: "Bytes", string: "99 Bytes" } }, { input: 8100, output: { original: 8100, converted: 8.1, units: "KB", string: "8.1 KB" } }, { input: 8100000, output: { original: 8100000, converted: 8.1, units: "MB", string: "8.1 MB" } }, { input: 8100000000, output: { original: 8100000000, converted: 8.1, units: "GB", string: "8.1 GB" } }, { input: 8100000000000, output: { original: 8100000000000, converted: 8.1, units: "TB", string: "8.1 TB" } }, { input: 8100000000000000, output: { original: 8100000000000000, converted: 8100, units: "TB", string: "8100.0 TB" } } ]; angular.forEach(scenarios, function(scenario) { it("converts: " + scenario.input, function() { var result = ConverterService.bytesToUnits( scenario.input); expect(result).toEqual(scenario.output); }); }); }); describe("unitsToBytes", function() { var scenarios = [ { input: "99", units: "Bytes", output: 99 }, { input: 99, units: "Bytes", output: 99 }, { input: 8.1, units: "KB", output: 8100 }, { input: 8.1, units: "MB", output: 8100000 }, { input: 8.1, units: "GB", output: 8100000000 }, { input: 8.1, units: "TB", output: 8100000000000 }, { input: 8100, units: "TB", output: 8100000000000000 } ]; angular.forEach(scenarios, function(scenario) { it("converts: " + scenario.input + scenario.units, function() { var result = ConverterService.unitsToBytes( scenario.input, scenario.units); expect(result).toBe(scenario.output); }); }); }); describe("roundUnits", function() { var scenarios = [ { input: "99", units: "Bytes", output: 99 }, { input: 99, units: "Bytes", output: 99 }, { input: 8.14, units: "KB", output: 8090 }, { input: 8.14, units: "MB", output: 8090000 }, { input: 8.14, units: "GB", output: 8090000000 }, { input: 8.14, units: "TB", output: 8090000000000 } ]; angular.forEach(scenarios, function(scenario) { it("converts: " + scenario.input + scenario.units, function() { var result = ConverterService.roundUnits( scenario.input, scenario.units); expect(result).toBe(scenario.output); }); }); }); describe("roundByBlockSize", function() { it("rounds down a block", function() { var bytes = 8.1 * 1000 * 1000; var block_size = 1024; expect(ConverterService.roundByBlockSize(bytes, block_size)).toBe( 8099840); }); it("doesnt round down a block", function() { var bytes = 1024 * 1024 * 1024; var block_size = 1024; expect(ConverterService.roundByBlockSize(bytes, block_size)).toBe( 1024 * 1024 * 1024); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_error.js0000644000000000000000000000213713056115004026664 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ErrorService. */ describe("ErrorService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the ErrorService. 
var ErrorService; beforeEach(inject(function($injector) { ErrorService = $injector.get("ErrorService"); })); it("initializes _error to null", function() { expect(ErrorService._error).toBeNull(); }); describe("raiseError", function() { it("sets _error", function() { var error = makeName("error"); ErrorService.raiseError(error); expect(ErrorService._error).toBe(error); }); it("only sets _error once", function() { var errors = [ makeName("error"), makeName("error") ]; ErrorService.raiseError(errors[0]); ErrorService.raiseError(errors[1]); expect(ErrorService._error).toBe(errors[0]); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_json.js0000644000000000000000000000261513056115004026505 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for JSONService. */ describe("JSONService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the JSONService. var JSONService; beforeEach(inject(function($injector) { JSONService = $injector.get("JSONService"); })); describe("tryParse", function() { var scenarios = [ { input: null, output: null }, { input: false, output: null }, { input: 123, output: null }, { input: undefined, output: null }, { input: "string", output: null }, { input: angular.toJson({ data: "string" }), output: { data: "string" } } ]; angular.forEach(scenarios, function(scenario) { it("parses: " + scenario.input, function() { var result = JSONService.tryParse( scenario.input); expect(result).toEqual(scenario.output); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_manager.js0000644000000000000000000013734713056115004027161 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for Manager. */ describe("Manager", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $rootScope, $timeout, $q; beforeEach(inject(function($injector) { $rootScope = $injector.get("$rootScope"); $timeout = $injector.get("$timeout"); $q = $injector.get("$q"); })); // Load the Manager and RegionConnection factory. var NodesManager, RegionConnection, webSocket; beforeEach(inject(function($injector) { var Manager = $injector.get("Manager"); RegionConnection = $injector.get("RegionConnection"); // Create a fake node manager function FakeNodesManager() { Manager.call(this); this._pk = "system_id"; this._handler = "node"; this._metadataAttributes = { "status": null, "owner": null, "tags": null, "zone": function(node) { return node.zone.name; } }; // Listen for notify events for the node object. var self = this; RegionConnection.registerNotifier("node", function(action, data) { self.onNotify(action, data); }); } FakeNodesManager.prototype = new Manager(); NodesManager = new FakeNodesManager(); // Mock buildSocket so an actual connection is not made. webSocket = new MockWebSocket(); spyOn(RegionConnection, "buildSocket").and.returnValue(webSocket); })); // Open the connection to the region before each test. beforeEach(function(done) { RegionConnection.registerHandler("open", function() { done(); }); RegionConnection.connect(""); }); // Copy node and remove $selected field. function stripSelected(node) { node = angular.copy(node); delete node.$selected; return node; } // Copy all nodes and remove the $selected field. 
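// (These strip/add helpers exist because the manager tags list items // with a $selected flag, which Jasmine's toEqual treats as a // significant property; rationale added for clarity.)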
function stripSelectedNodes(nodes) { nodes = angular.copy(nodes); angular.forEach(nodes, function(node) { delete node.$selected; }); return nodes; } // Add $selected field to node with value. function addSelected(node, selected) { node.$selected = selected; return node; } // Add $selected field to all nodes with value. function addSelectedOnNodes(nodes, selected) { angular.forEach(nodes, function(node) { node.$selected = selected; }); return nodes; } // Make a random node. function makeNode(selected) { var node = { system_id: makeName("system_id"), name: makeName("name"), status: makeName("status"), owner: makeName("owner"), tags: [ makeName("tag"), makeName("tag") ], zone: { name: makeName("zone") } }; if(angular.isDefined(selected)) { node.$selected = selected; } return node; } // Make a list of nodes. function makeNodes(count, selected) { var i, nodes = []; for(i = 0; i < count; i++) { nodes.push(makeNode(selected)); } return nodes; } describe("getItems", function() { it("returns items array", function() { var array = [ makeNode() ]; NodesManager._items = array; expect(NodesManager.getItems()).toBe(array); }); }); describe("loadItems", function() { it("calls reloadItems if the items are already loaded", function() { NodesManager._loaded = true; spyOn(NodesManager, "reloadItems"); NodesManager.loadItems(); expect(NodesManager.reloadItems).toHaveBeenCalled(); }); it("calls node.list", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.loadItems().then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.list"); done(); }); }); it("loads items list without replacing it", function(done) { var fakeNode = makeNode(); var existingNodes = NodesManager.getItems(); webSocket.returnData.push(makeFakeResponse([fakeNode])); NodesManager.loadItems().then(function(nodes) { expect(nodes).toEqual([addSelected(fakeNode, false)]); expect(nodes).toBe(existingNodes); done(); }); }); it("batch calls in groups of 50", function(done) { var i, fakeNodes = []; for(i = 0; i < 3; i++) { var groupOfNodes = makeNodes(50); fakeNodes.push.apply(fakeNodes, groupOfNodes); webSocket.returnData.push(makeFakeResponse(groupOfNodes)); } // A total of 4 calls should be completed, with the last one // being an empty list of nodes. webSocket.returnData.push(makeFakeResponse([])); NodesManager.loadItems().then(function(nodes) { expect(nodes).toEqual(addSelectedOnNodes(fakeNodes, false)); expect(webSocket.sentData.length).toBe(4); expect(webSocket.receivedData.length).toBe(4); expect( angular.fromJson( webSocket.sentData[0]).params.limit).toBe(50); expect( angular.fromJson( webSocket.receivedData[3]).result).toEqual([]); done(); }); }); it("batch calls with the last system_id", function(done) { var fakeNodes = makeNodes(50); var system_id = fakeNodes[fakeNodes.length-1].system_id; webSocket.returnData.push(makeFakeResponse(fakeNodes)); // A total of 2 calls should be completed, with the last one // being an empty list of nodes. webSocket.returnData.push(makeFakeResponse([])); NodesManager.loadItems().then(function(nodes) { // Expect first message to not have a start. first_msg = angular.fromJson(webSocket.sentData[0]); expect(first_msg.params.start).toBeUndefined(); // Expect the second message to have the last system_id.
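// (That is, params.start presumably carries the previous batch's // final primary key so the listing resumes where it left off; an // inference drawn from this expectation.)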
second_msg = angular.fromJson(webSocket.sentData[1]); expect(second_msg.params.start).toEqual(system_id); done(); }); }); it("sets loaded true when complete", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.loadItems().then(function() { expect(NodesManager._loaded).toBe(true); done(); }); }); it("sets isLoading to true while loading", function(done) { NodesManager._isLoading = false; webSocket.returnData.push(makeFakeResponse("error", true)); NodesManager.loadItems().then(null, function() { expect(NodesManager._isLoading).toBe(true); done(); }); }); it("sets isLoading to false after loading", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.loadItems().then(function() { expect(NodesManager._isLoading).toBe(false); done(); }); }); it("resolves all load defers once done loading", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); var count = 0; var loadDone = function() { count += 1; if(count === 2) { expect(NodesManager._extraLoadDefers).toEqual([]); done(); } }; // Both need to resolve for the done() to be called. If both // are not called then the test will timeout. NodesManager.loadItems().then(function(items) { expect(items).toBe(NodesManager.getItems()); loadDone(); }); NodesManager.loadItems().then(function(items) { expect(items).toBe(NodesManager.getItems()); loadDone(); }); }); it("calls processActions after loading", function(done) { spyOn(NodesManager, "processActions"); webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.loadItems().then(function() { expect(NodesManager.processActions).toHaveBeenCalled(); done(); }); }); it("calls defer error handler on error", function(done) { var errorMsg = "Unable to load the nodes."; webSocket.returnData.push(makeFakeResponse(errorMsg, true)); NodesManager.loadItems().then(null, function(error) { expect(error).toBe(errorMsg); done(); }); }); it("rejects all load defers on error", function(done) { var fakeError = makeName("error"); webSocket.returnData.push( makeFakeResponse(fakeError, true)); var count = 0; var errorDone = function() { count += 1; if(count === 2) { expect(NodesManager._extraLoadDefers).toEqual([]); done(); } }; // Both need to reject for the done() to be called. If both // are not called then the test will timeout. 
NodesManager.loadItems().then(null, function(error) { expect(error).toBe(fakeError); errorDone(); }); NodesManager.loadItems().then(null, function(error) { expect(error).toBe(fakeError); errorDone(); }); }); it("doesn't set loaded to true on error", function(done) { var errorMsg = "Unable to load the nodes."; webSocket.returnData.push(makeFakeResponse(errorMsg, true)); NodesManager.loadItems().then(null, function() { expect(NodesManager._loaded).toBe(false); done(); }); }); it("returns nodes list in defer", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.loadItems().then(function(nodes) { expect(nodes).toBe(NodesManager.getItems()); done(); }); }); it("updates the node status", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse([node])); NodesManager.loadItems().then(function(nodes) { expect(NodesManager._metadata.status).toEqual([{ name: node.status, count: 1 }]); done(); }); }); it("updates the node owner", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse([node])); NodesManager.loadItems().then(function(nodes) { expect(NodesManager._metadata.owner).toEqual([{ name: node.owner, count: 1 }]); done(); }); }); it("updates the node tags", function(done) { var node = makeNode(); webSocket.returnData.push(makeFakeResponse([node])); NodesManager.loadItems().then(function(nodes) { expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 1 }, { name: node.tags[1], count: 1 }]); done(); }); }); }); describe("reloadItems", function() { beforeEach(function() { NodesManager._loaded = true; }); it("calls loadItems if the nodes are not loaded", function() { NodesManager._loaded = false; spyOn(NodesManager, "loadItems"); NodesManager.reloadItems(); expect(NodesManager.loadItems).toHaveBeenCalled(); }); it("sets isLoading to true while reloading", function(done) { NodesManager._isLoading = false; webSocket.returnData.push(makeFakeResponse("error", true)); NodesManager.reloadItems().then(null, function() { expect(NodesManager._isLoading).toBe(true); done(); }); }); it("sets isLoading to false after reloading", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.reloadItems().then(function() { expect(NodesManager._isLoading).toBe(false); done(); }); }); it("calls processActions after loading", function(done) { spyOn(NodesManager, "processActions"); webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.reloadItems().then(function() { expect(NodesManager.processActions).toHaveBeenCalled(); done(); }); }); it("calls setActiveItem after loading", function(done) { var activeNode = makeNode(); NodesManager._activeItem = activeNode; spyOn(NodesManager, "setActiveItem"); webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.reloadItems().then(function() { expect(NodesManager.setActiveItem).toHaveBeenCalledWith( activeNode.system_id); done(); }); }); it("resolves all reload defers once done reloading", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); var count = 0; var reloadDone = function() { count += 1; if(count === 2) { expect(NodesManager._extraReloadDefers).toEqual([]); done(); } }; // Both need to resolve for the done() to be called. If both // are not called then the test will timeout. 
NodesManager.reloadItems().then(function(items) { expect(items).toBe(NodesManager.getItems()); reloadDone(); }); NodesManager.reloadItems().then(function(items) { expect(items).toBe(NodesManager.getItems()); reloadDone(); }); }); it("calls defer error handler on error", function(done) { var errorMsg = "Unable to reload the nodes."; webSocket.returnData.push(makeFakeResponse(errorMsg, true)); NodesManager.reloadItems().then(null, function(error) { expect(error).toBe(errorMsg); done(); }); }); it("rejects all reload defers on error", function(done) { var fakeError = makeName("error"); webSocket.returnData.push( makeFakeResponse(fakeError, true)); var count = 0; var errorDone = function() { count += 1; if(count === 2) { expect(NodesManager._extraReloadDefers).toEqual([]); done(); } }; // Both need to reject for the done() to be called. If both // are not called then the test will timeout. NodesManager.reloadItems().then(null, function(error) { expect(error).toBe(fakeError); errorDone(); }); NodesManager.reloadItems().then(null, function(error) { expect(error).toBe(fakeError); errorDone(); }); }); it("returns nodes list in defer", function(done) { webSocket.returnData.push(makeFakeResponse([makeNode()])); NodesManager.reloadItems().then(function(nodes) { expect(nodes).toBe(NodesManager.getItems()); done(); }); }); it("adds new nodes to items list", function(done) { var currentNodes = [makeNode(), makeNode()]; var newNodes = [makeNode(), makeNode()]; var allNodes = currentNodes.concat(newNodes); NodesManager._items = currentNodes; webSocket.returnData.push(makeFakeResponse(allNodes)); NodesManager.reloadItems().then(function(nodes) { expect(nodes).toEqual(allNodes); done(); }); }); it("removes missing nodes from items list", function(done) { var currentNodes = [ makeNode(false), makeNode(false), makeNode(false)]; var removedNodes = angular.copy(currentNodes); removedNodes.splice(1, 1); NodesManager._items = currentNodes; webSocket.returnData.push(makeFakeResponse(removedNodes)); NodesManager.reloadItems().then(function(nodes) { expect(nodes).toEqual(addSelectedOnNodes(removedNodes, false)); done(); }); }); it("removes missing nodes from selected items list", function(done) { var currentNodes = [makeNode(), makeNode(), makeNode()]; var removedNodes = angular.copy(currentNodes); removedNodes.splice(1, 1); NodesManager._items = currentNodes; NodesManager._selectedItems = [currentNodes[0], currentNodes[1]]; webSocket.returnData.push(makeFakeResponse(removedNodes)); NodesManager.reloadItems().then(function(nodes) { expect(NodesManager._selectedItems).toEqual([currentNodes[0]]); done(); }); }); it("updates nodes in items list", function(done) { var currentNodes = [makeNode(false), makeNode(false)]; var updatedNodes = angular.copy(currentNodes); updatedNodes[0].name = makeName("name"); updatedNodes[1].name = makeName("name"); NodesManager._items = currentNodes; webSocket.returnData.push(makeFakeResponse(updatedNodes)); NodesManager.reloadItems().then(function(nodes) { expect(nodes).toEqual( addSelectedOnNodes(updatedNodes, false)); done(); }); }); it("updates nodes in selected items list", function(done) { var currentNodes = [makeNode(true), makeNode(true)]; var updatedNodes = stripSelectedNodes(currentNodes); updatedNodes[0].name = makeName("name"); updatedNodes[1].name = makeName("name"); NodesManager._items = currentNodes; NodesManager._selectedItems = [currentNodes[0], currentNodes[1]]; webSocket.returnData.push(makeFakeResponse(updatedNodes)); NodesManager.reloadItems().then(function(nodes) { 
expect(NodesManager._selectedItems).toEqual( addSelectedOnNodes(updatedNodes, true)); done(); }); }); }); describe("enableAutoReload", function() { it("does nothing if already enabled", function() { spyOn(RegionConnection, "registerHandler"); NodesManager._autoReload = true; NodesManager.enableAutoReload(); expect(RegionConnection.registerHandler).not.toHaveBeenCalled(); }); it("adds handler and sets autoReload to true", function() { spyOn(RegionConnection, "registerHandler"); NodesManager.enableAutoReload(); expect(RegionConnection.registerHandler).toHaveBeenCalled(); expect(NodesManager._autoReload).toBe(true); }); }); describe("disableAutoReload", function() { it("does nothing if already disabled", function() { spyOn(RegionConnection, "unregisterHandler"); NodesManager._autoReload = false; NodesManager.disableAutoReload(); expect(RegionConnection.unregisterHandler).not.toHaveBeenCalled(); }); it("removes handler and sets autoReload to false", function() { spyOn(RegionConnection, "unregisterHandler"); NodesManager._autoReload = true; NodesManager.disableAutoReload(); expect(RegionConnection.unregisterHandler).toHaveBeenCalled(); expect(NodesManager._autoReload).toBe(false); }); }); describe("getItemFromList", function() { it("returns node from _items", function() { var fakeNode = makeNode(); NodesManager._items.push(fakeNode); expect(NodesManager.getItemFromList(fakeNode.system_id)).toBe( fakeNode); }); it("returns null if system_id not in _items", function() { var fakeNode = makeNode(); expect( NodesManager.getItemFromList(fakeNode.system_id)).toBeNull(); }); }); describe("getItem", function() { it("calls node.get", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(fakeNode)); NodesManager.getItem(fakeNode.system_id).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.get"); done(); }); }); it("calls node.get with node system_id", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(fakeNode)); NodesManager.getItem(fakeNode.system_id).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.params.system_id).toBe(fakeNode.system_id); done(); }); }); it("updates node in items and selectedItems list", function(done) { var fakeNode = makeNode(); var updatedNode = angular.copy(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); webSocket.returnData.push(makeFakeResponse(updatedNode)); NodesManager.getItem(fakeNode.system_id).then(function() { expect(NodesManager._items[0].name).toBe(updatedNode.name); expect(NodesManager._selectedItems[0].name).toBe( updatedNode.name); done(); }); }); it("calls defer error handler on error", function(done) { var errorMsg = "No node with the given system_id."; webSocket.returnData.push(makeFakeResponse(errorMsg, true)); NodesManager.getItem(makeName("system_id")).then( null, function(error) { expect(error).toBe(errorMsg); done(); }); }); }); describe("updateItem", function() { it("calls node.update", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(fakeNode)); NodesManager.updateItem(fakeNode).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.update"); done(); }); }); it("calls node.update with node", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(fakeNode)); 
NodesManager.updateItem(fakeNode).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.params).toEqual(fakeNode); done(); }); }); it("updates node in items and selectedItems list", function(done) { var fakeNode = makeNode(); var updatedNode = angular.copy(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); webSocket.returnData.push(makeFakeResponse(updatedNode)); NodesManager.updateItem(updatedNode).then(function() { expect(NodesManager._items[0].name).toBe(updatedNode.name); expect(NodesManager._selectedItems[0].name).toBe( updatedNode.name); done(); }); }); it("calls defer error handler on error", function(done) { var errorMsg = "Unable to update node"; webSocket.returnData.push(makeFakeResponse(errorMsg, true)); NodesManager.updateItem(makeNode()).then(null, function(error) { expect(error).toBe(errorMsg); done(); }); }); }); describe("deleteItem", function() { it("calls node.delete", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.deleteItem(fakeNode).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.method).toBe("node.delete"); done(); }); }); it("calls node.delete with node system_id", function(done) { var fakeNode = makeNode(); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.deleteItem(fakeNode).then(function() { var sentObject = angular.fromJson(webSocket.sentData[0]); expect(sentObject.params.system_id).toBe(fakeNode.system_id); done(); }); }); it("deletes node in items and selectedItems list", function(done) { var fakeNode = makeNode(); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); webSocket.returnData.push(makeFakeResponse(null)); NodesManager.deleteItem(fakeNode).then(function() { expect(NodesManager._items.length).toBe(0); expect(NodesManager._selectedItems.length).toBe(0); done(); }); }); }); describe("getActiveItem", function() { it("returns active item", function() { var node = makeNode(); NodesManager._activeItem = node; expect(NodesManager.getActiveItem()).toBe(node); }); }); describe("setActiveItem", function() { it("raises error if not loaded", function() { expect(NodesManager.setActiveItem).toThrow( new Error( "Cannot set active item unless the manager is loaded.")); }); it("sets _activeItem to null if doesn't exist", function(done) { NodesManager._loaded = true; NodesManager._activeItem = {}; var system_id = makeName("system_id"); NodesManager.setActiveItem(system_id).then( null, function(error) { expect(error).toBe("No item with pk: " + system_id); expect(NodesManager._activeItem).toBeNull(); done(); }); $timeout.flush(); }); it("sets _activeItem to item", function(done) { NodesManager._loaded = true; var node = makeNode(); NodesManager._items.push(node); var defer = $q.defer(); spyOn(RegionConnection, "callMethod").and.returnValue( defer.promise); NodesManager.setActiveItem(node.system_id).then( function(activeItem) { expect(NodesManager._activeItem).toBe(activeItem); expect(NodesManager._activeItem).toBe(node); expect(RegionConnection.callMethod).toHaveBeenCalledWith( NodesManager._handler + ".set_active", { system_id: node.system_id }); done(); }); defer.resolve(angular.copy(node)); $rootScope.$digest(); }); }); describe("clearActiveItem", function() { it("clears activeNode", function() { var node = makeNode(); NodesManager._activeItem = node; NodesManager.clearActiveItem(); 
expect(NodesManager._activeItem).toBeNull(); }); }); describe("onNotify", function() { it("adds notify to queue", function() { var node = makeNode(); NodesManager._isLoading = true; NodesManager.onNotify("create", node); expect(NodesManager._actionQueue).toEqual([{ action: "create", data: node }]); }); it("skips processActions when isLoading is true", function() { spyOn(NodesManager, "processActions"); NodesManager._isLoading = true; NodesManager.onNotify("create", makeName("system_id")); expect(NodesManager.processActions).not.toHaveBeenCalled(); }); it("calls processActions when isLoading is false", function() { spyOn(NodesManager, "processActions"); NodesManager._isLoading = false; NodesManager.onNotify("create", makeName("system_id")); expect(NodesManager.processActions).toHaveBeenCalled(); }); }); describe("processActions", function() { it("adds node to items list on create action", function() { var fakeNode = makeNode(); NodesManager._actionQueue.push({ action: "create", data: fakeNode }); NodesManager.processActions(); expect(NodesManager._items).toEqual( [addSelected(fakeNode, false)]); }); it("updates node in items list on create action if already exists", function() { var fakeNode = makeNode(false); var updatedNode = stripSelected(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._actionQueue.push({ action: "create", data: updatedNode }); NodesManager.processActions(); expect(NodesManager._items).toEqual( [addSelected(updatedNode, false)]); }); it("updates node in items list on update action", function() { var fakeNode = makeNode(false); var updatedNode = stripSelected(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._actionQueue.push({ action: "update", data: updatedNode }); NodesManager.processActions(); expect(NodesManager._items).toEqual( [addSelected(updatedNode, false)]); }); it("updates node in selected items on update action", function() { var fakeNode = makeNode(true); var updatedNode = stripSelected(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); NodesManager._actionQueue.push({ action: "update", data: updatedNode }); NodesManager.processActions(); expect(NodesManager._selectedItems).toEqual( [addSelected(updatedNode, true)]); }); it("updates _activeItem on update action", function() { var fakeNode = makeNode(true); var updatedNode = stripSelected(fakeNode); updatedNode.name = makeName("name"); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); NodesManager._activeItem = fakeNode; NodesManager._actionQueue.push({ action: "update", data: updatedNode }); NodesManager.processActions(); expect(NodesManager._activeItem).toEqual( addSelected(updatedNode, true)); // The _activeItem object should still be the same object. 
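// (i.e. the manager updates the active item in place rather than // replacing the reference, so controller bindings to it stay valid; // note added for clarity.)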
expect(NodesManager._activeItem).toBe(fakeNode); }); it("deletes node in items list on delete action", function() { var fakeNode = makeNode(); NodesManager._items.push(fakeNode); NodesManager._actionQueue.push({ action: "delete", data: fakeNode.system_id }); NodesManager.processActions(); expect(NodesManager._items.length).toBe(0); }); it("deletes node in selected items on delete action", function() { var fakeNode = makeNode(); NodesManager._items.push(fakeNode); NodesManager._selectedItems.push(fakeNode); NodesManager._actionQueue.push({ action: "delete", data: fakeNode.system_id }); NodesManager.processActions(); expect(NodesManager._selectedItems.length).toBe(0); }); it("processes multiple actions in one call", function() { NodesManager._actionQueue = [ { action: "delete", data: makeName("system_id") }, { action: "delete", data: makeName("system_id") } ]; NodesManager.processActions(); expect(NodesManager._actionQueue.length).toBe(0); }); }); describe("getSelectedItems", function() { it("returns selected items", function() { var nodes = [makeNode()]; NodesManager._selectedItems = nodes; expect(NodesManager.getSelectedItems()).toBe(nodes); }); }); describe("selectItem", function() { it("adds node to selected items", function() { var node = makeNode(false); NodesManager._items.push(node); NodesManager.selectItem(node.system_id); expect(NodesManager._selectedItems).toEqual( [addSelected(node, true)]); }); it("doesn't add the same node twice", function() { var node = makeNode(false); NodesManager._items.push(node); NodesManager.selectItem(node.system_id); NodesManager.selectItem(node.system_id); expect(NodesManager._selectedItems).toEqual( [addSelected(node, true)]); }); }); describe("unselectItem", function() { var node; beforeEach(function() { node = makeNode(false); NodesManager._items.push(node); NodesManager.selectItem(node.system_id); }); it("removes node from selected items", function() { NodesManager.unselectItem(node.system_id); expect(NodesManager._selectedItems).toEqual([]); expect(node.$selected).toBe(false); }); it("doesn't error on unselect twice", function() { NodesManager.unselectItem(node.system_id); NodesManager.unselectItem(node.system_id); expect(NodesManager._selectedItems).toEqual([]); expect(node.$selected).toBe(false); }); }); describe("isSelected", function() { var node; beforeEach(function() { node = makeNode(false); NodesManager._items.push(node); }); it("returns true when selected", function() { NodesManager.selectItem(node.system_id); expect(NodesManager.isSelected(node.system_id)).toBe(true); }); it("returns false when not selected", function() { NodesManager.selectItem(node.system_id); NodesManager.unselectItem(node.system_id); expect(NodesManager.isSelected(node.system_id)).toBe(false); }); }); var scenarios = ['status', 'owner', 'zone']; angular.forEach(scenarios, function(scenario) { describe("_updateMetadata:" + scenario, function() { // Helper that gets the value from the node.
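// A metadata attribute maps either to null, meaning node[attr] is // read directly, or to a function, e.g. "zone" resolves to // node.zone.name in the fake manager above. (Note added for clarity.)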
function getValueFromNode(node, attr) { var func = NodesManager._metadataAttributes[attr]; if(angular.isFunction(func)) { return func(node); } else { return node[attr]; } } it("adds value if missing", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(node, scenario), count: 1 }]); }); it("increments count for value", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(node, scenario), count: 1 }]); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(node, scenario), count: 2 }]); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(node, scenario), count: 3 }]); }); it("decrements count for value", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "delete"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(node, scenario), count: 1 }]); }); it("removes value when count is 0", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "delete"); expect(NodesManager._metadata[scenario]).toEqual([]); }); it("update doesn't add value if missing", function() { var node = makeNode(); NodesManager._updateMetadata(node, "update"); expect(NodesManager._metadata[scenario]).toEqual([]); }); it("update decrements value then increments new value", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node); updatedNode[scenario] = makeName(scenario); NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata[scenario]).toEqual([ { name: getValueFromNode(node, scenario), count: 1 }, { name: getValueFromNode(updatedNode, scenario), count: 1 }]); }); it("update removes old value then adds new value", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node); updatedNode[scenario] = makeName(scenario); NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(updatedNode, scenario), count: 1 }]); }); it("ignores empty values", function() { var node = makeNode(); node.owner = ""; node.status = ""; node.zone.name = ""; NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata[scenario]).toEqual([]); }); it("update handles empty old values", function() { var node = makeNode(); node[scenario] = ""; NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node); updatedNode[scenario] = makeName(scenario); NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata[scenario]).toEqual([{ name: getValueFromNode(updatedNode, scenario), count: 1 }]); }); it("update handles empty new values", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node); if(scenario === "zone") { updatedNode.zone.name = ""; } else { updatedNode[scenario] = ""; }
NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata[scenario]).toEqual([]); }); }); }); describe("_updateMetadata:tags", function() { it("adds items in array", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 1 }, { name: node.tags[1], count: 1 }]); }); it("increments count for items in array", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 1 }, { name: node.tags[1], count: 1 }]); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 2 }, { name: node.tags[1], count: 2 }]); NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 3 }, { name: node.tags[1], count: 3 }]); }); it("decrements count for an item in the array", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "delete"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 1 }, { name: node.tags[1], count: 1 }]); }); it("removes items in array when count is 0", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "delete"); expect(NodesManager._metadata.tags).toEqual([]); }); it("update doesn't add value if node missing", function() { var node = makeNode(); NodesManager._updateMetadata(node, "update"); expect(NodesManager._metadata.tags).toEqual([]); }); it("update decrements values then increments new values", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var oldTag = node.tags[1]; var updatedNode = angular.copy(node); updatedNode.tags[1] = makeName("tag"); NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata.tags).toEqual([ { name: node.tags[0], count: 2 }, { name: oldTag, count: 1 }, { name: updatedNode.tags[1], count: 1 }]); }); it("update removes old values then adds new values", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var oldTags = angular.copy(node.tags); var updatedNode = angular.copy(node); updatedNode.tags = [ makeName("tag"), makeName("tag") ]; NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata.tags).toEqual([ { name: updatedNode.tags[0], count: 1 }, { name: updatedNode.tags[1], count: 1 }]); }); it("ignores empty arrays", function() { var node = makeNode(); node.tags = []; NodesManager._updateMetadata(node, "create"); expect(NodesManager._metadata.tags).toEqual([]); }); it("update handles empty old values", function() { var node = makeNode(); node.tags = []; NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node); updatedNode.tags = [ makeName("tag"), makeName("tag") ]; NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata.tags).toEqual([ { name: updatedNode.tags[0], count: 1 }, { name: updatedNode.tags[1], count: 1 }]); }); it("update handles empty new values", function() { var node = makeNode(); NodesManager._updateMetadata(node, "create"); NodesManager._items.push(node); var updatedNode = angular.copy(node);
updatedNode.tags = []; NodesManager._updateMetadata(updatedNode, "update"); expect(NodesManager._metadata.tags).toEqual([]); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_managerhelper.js0000644000000000000000000001045013056115004030342 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ManagerHelperService. */ describe("ManagerHelperService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Grab the needed angular pieces. var $rootScope, $timeout, $q; beforeEach(inject(function($injector) { $rootScope = $injector.get("$rootScope"); $timeout = $injector.get("$timeout"); $q = $injector.get("$q"); })); // Load the ManagerHelperService. var ManagerHelperService, RegionConnection, ErrorService; beforeEach(inject(function($injector) { ManagerHelperService = $injector.get("ManagerHelperService"); RegionConnection = $injector.get("RegionConnection"); ErrorService = $injector.get("ErrorService"); })); // Makes a fake manager. function makeManager() { var manager = { isLoaded: jasmine.createSpy(), loadItems: jasmine.createSpy(), enableAutoReload: jasmine.createSpy() }; manager.isLoaded.and.returnValue(false); manager.loadItems.and.returnValue($q.defer().promise); return manager; } describe("loadManager", function() { it("calls RegionConnection.defaultConnect", function() { spyOn(RegionConnection, "defaultConnect").and.returnValue( $q.defer().promise); var manager = makeManager(); ManagerHelperService.loadManager(manager); expect(RegionConnection.defaultConnect).toHaveBeenCalled(); }); it("doesn't call loadItems if manager already loaded", function(done) { var defer = $q.defer(); spyOn(RegionConnection, "defaultConnect").and.returnValue( defer.promise); var manager = makeManager(); manager.isLoaded.and.returnValue(true); ManagerHelperService.loadManager(manager).then(function() { expect(manager.loadItems).not.toHaveBeenCalled(); done(); }); defer.resolve(); $timeout.flush(); }); it("calls loadItems if manager not loaded", function(done) { var defer = $q.defer(); spyOn(RegionConnection, "defaultConnect").and.returnValue( defer.promise); var manager = makeManager(); var loadItemsDefer = $q.defer(); manager.loadItems.and.returnValue(loadItemsDefer.promise); ManagerHelperService.loadManager(manager).then(function() { expect(manager.loadItems).toHaveBeenCalled(); done(); }); defer.resolve(); $rootScope.$digest(); loadItemsDefer.resolve(); $rootScope.$digest(); }); it("calls enableAutoReload", function(done) { var defer = $q.defer(); spyOn(RegionConnection, "defaultConnect").and.returnValue( defer.promise); var manager = makeManager(); manager.isLoaded.and.returnValue(true); ManagerHelperService.loadManager(manager).then(function() { expect(manager.enableAutoReload).toHaveBeenCalled(); done(); }); defer.resolve(); $timeout.flush(); }); }); describe("loadManagers", function() { it("calls loadManager for all managers", function(done) { var managers = [ makeManager(), makeManager() ]; var defers = [ $q.defer(), $q.defer() ]; var i = 0; spyOn(ManagerHelperService, "loadManager").and.callFake( function(manager) { expect(manager).toBe(managers[i]); return defers[i++].promise; }); ManagerHelperService.loadManagers(managers).then( function(loadedManagers) { expect(loadedManagers).toBe(managers); done(); }); defers[0].resolve(); $rootScope.$digest(); defers[1].resolve(); $rootScope.$digest(); }); }); }); 
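/* A minimal usage sketch (added for illustration; not part of the * MAAS source). A controller would typically resolve its managers * before rendering, along these lines, where ZonesManager is a * hypothetical second manager and $scope.loading a hypothetical flag, * both named only for the example: * * ManagerHelperService.loadManagers( * [NodesManager, ZonesManager]).then(function(managers) { * $scope.loading = false; * }); */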
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_search.js0000644000000000000000000001543213056115004027002 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for SearchService. */ describe("SearchService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the SearchService. var SearchService; beforeEach(inject(function($injector) { SearchService = $injector.get("SearchService"); })); var scenarios = [ { input: "", split: [""], filters: { _: [] } }, { input: "moon", split: ["moon"], filters: { _: ["moon"] } }, { input: "moon status:(new)", split: ["moon", "status:(new)"], filters: { _: ["moon"], status: ["new"] } }, { input: "moon status:(deployed)", split: ["moon", "status:(deployed)"], filters: { _: ["moon"], status: ["deployed"] } }, { input: "moon status:(new,deployed)", split: ["moon", "status:(new,deployed)"], filters: { _: ["moon"], status: ["new", "deployed"] } }, { input: "moon status:(new,failed disk erasing)", split: ["moon", "status:(new,failed disk erasing)"], filters: { _: ["moon"], status: ["new", "failed disk erasing"] } }, { input: "moon status:(new,failed disk erasing", split: null, filters: null } ]; angular.forEach(scenarios, function(scenario) { describe("input:" + scenario.input, function() { it("getSplitSearch", function() { expect(SearchService.getSplitSearch( scenario.input)).toEqual(scenario.split); }); it("getCurrentFilters", function() { expect(SearchService.getCurrentFilters( scenario.input)).toEqual(scenario.filters); }); it("filtersToString", function() { // Skip the ones with filters equal to null. if(!scenario.filters) { return; } expect(SearchService.filtersToString( scenario.filters)).toEqual(scenario.input); }); }); }); describe("isFilterActive", function() { it("returns false if type not in filter", function() { expect(SearchService.isFilterActive( {}, "type", "invalid")).toBe(false); }); it("returns false if value not in type", function() { expect(SearchService.isFilterActive( { type: ["not"] }, "type", "invalid")).toBe(false); }); it("returns true if value in type", function() { expect(SearchService.isFilterActive( { type: ["valid"] }, "type", "valid")).toBe(true); }); it("returns false if exact value not in type", function() { expect(SearchService.isFilterActive( { type: ["valid"] }, "type", "valid", true)).toBe(false); }); it("returns true if exact value in type", function() { expect(SearchService.isFilterActive( { type: ["=valid"] }, "type", "valid", true)).toBe(true); }); it("returns true if lowercase value in type", function() { expect(SearchService.isFilterActive( { type: ["=Valid"] }, "type", "valid", true)).toBe(true); }); }); describe("toggleFilter", function() { it("adds type to filters", function() { expect(SearchService.toggleFilter( {}, "type", "value")).toEqual({ type: ["value"] }); }); it("adds value to type in filters", function() { var filters = { type: ["exists"] }; expect(SearchService.toggleFilter( filters, "type", "value")).toEqual({ type: ["exists", "value"] }); }); it("removes value to type in filters", function() { var filters = { type: ["exists", "value"] }; expect(SearchService.toggleFilter( filters, "type", "value")).toEqual({ type: ["exists"] }); }); it("adds exact value to type in filters", function() { var filters = { type: ["exists"] }; expect(SearchService.toggleFilter( filters, "type", "value", true)).toEqual({ type: ["exists", "=value"] }); }); 
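// (Note added for clarity: the "=" prefix marks an exact-match term, // so a plain "value" and an exact "=value" can coexist in the same // filter list, as the next spec demonstrates.)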
it("removes exact value to type in filters", function() { var filters = { type: ["exists", "value", "=value"] }; expect(SearchService.toggleFilter( filters, "type", "value", true)).toEqual({ type: ["exists", "value"] }); }); it("removes lowercase value to type in filters", function() { var filters = { type: ["exists", "=Value"] }; expect(SearchService.toggleFilter( filters, "type", "value", true)).toEqual({ type: ["exists"] }); }); }); describe("getEmptyFilter", function() { it("includes _ empty list", function() { expect(SearchService.getEmptyFilter()).toEqual({ _: [] }); }); it("returns different object on each call", function() { var one = SearchService.getEmptyFilter(); var two = SearchService.getEmptyFilter(); expect(one).not.toBe(two); }); }); describe("storeFilters/retrieveFilters", function() { it("stores and retrieves the same object", function() { var i, names = [], objects = []; for(i = 0; i < 3; i++) { names.push(makeName("name")); objects.push({}); } angular.forEach(names, function(name, idx) { SearchService.storeFilters(name, objects[idx]); }); angular.forEach(names, function(name, idx) { expect(SearchService.retrieveFilters(name)).toBe(objects[idx]); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/services/tests/test_validation.js0000644000000000000000000003427113056115004027671 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Unit tests for ValidationService. */ describe("ValidationService", function() { // Load the MAAS module. beforeEach(module("MAAS")); // Load the ValidationService. var ValidationService; beforeEach(inject(function($injector) { ValidationService = $injector.get("ValidationService"); })); describe("validateHostname", function() { var scenarios = [ { input: null, valid: false }, { input: "", valid: false }, { input: "aB0-", valid: false }, { input: "aB0-z", valid: true }, { input: "aB0-z.", valid: false }, { input: "abc_alpha", valid: false }, { input: "abc^&alpha", valid: false }, { input: "abcalpha", valid: true }, { input: "aB0-z.local", valid: false }, { input: "abc_alpha.local", valid: false }, { input: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" + "abcdefghijk", valid: true }, { input: "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz" + "abcdefghijkl", valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.input, function() { var result = ValidationService.validateHostname( scenario.input); expect(result).toBe(scenario.valid); }); }); }); describe("validateMAC", function() { var scenarios = [ { input: null, valid: false }, { input: "", valid: false }, { input: "00:", valid: false }, { input: "::", valid: false }, { input: "00:11:", valid: false }, { input: "00:11:22:", valid: false }, { input: "00:11:22:33:", valid: false }, { input: "00:11:22:33:44:", valid: false }, { input: "00:11:22:33:44:55", valid: true }, { input: "aa:bb:cc:dd:ee:ff", valid: true }, { input: "AA:BB:CC:DD:EE:00", valid: true }, { input: "aa:bb:cc:dd:ee:ff:", valid: false }, { input: "gg:bb:cc:zz:ee:ff", valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.input, function() { var result = ValidationService.validateMAC( scenario.input); expect(result).toBe(scenario.valid); }); }); }); describe("validateIPv4", function() { var scenarios = [ { input: null, valid: false }, { input: "", valid: false }, { input: "192.168", valid: false }, { input: 
"192.168.1", valid: false }, { input: "192.168.1.1", valid: true }, { input: "256.168.1.1", valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.input, function() { var result = ValidationService.validateIPv4( scenario.input); expect(result).toBe(scenario.valid); }); }); }); describe("validateIPv6", function() { var scenarios = [ { input: null, valid: false }, { input: "", valid: false }, { input: "2001", valid: false }, { input: "2001:", valid: false }, { input: "2001:db8::1", valid: true }, { input: "2001:67C:1562::16", valid: true }, { input: "200001:db8::1", valid: false }, { input: "2001:db008::1", valid: false }, { input: "2001::db8::1", valid: false }, { input: "ff00:db8::1", valid: false }, { input: "fe80:db8::1", valid: false }, { input: "::1", valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.input, function() { var result = ValidationService.validateIPv6( scenario.input); expect(result).toBe(scenario.valid); }); }); }); describe("validateIP", function() { it("returns true if validateIPv4 returns true", function() { spyOn(ValidationService, "validateIPv4").and.returnValue(true); spyOn(ValidationService, "validateIPv6").and.returnValue(false); expect(ValidationService.validateIP("192.168.1.1")).toBe(true); }); it("returns true if validateIPv6 returns true", function() { spyOn(ValidationService, "validateIPv4").and.returnValue(false); spyOn(ValidationService, "validateIPv6").and.returnValue(true); expect(ValidationService.validateIP("::1")).toBe(true); }); it("returns false if validateIPv4 and validateIPv6 returns false", function() { spyOn(ValidationService, "validateIPv4").and.returnValue(false); spyOn(ValidationService, "validateIPv6").and.returnValue(false); expect(ValidationService.validateIP("invalid")).toBe(false); }); }); describe("validateIPInNetwork", function() { var scenarios = [ { ip: "192.168.2.1", network: "192.168.1.0/24", valid: false }, { ip: "192.168.1.1", network: "192.168.1.0/24", valid: true }, { ip: "192.168.1.1", network: "172.16.0.0/16", valid: false }, { ip: "172.17.1.1", network: "172.16.0.0/16", valid: false }, { ip: "172.16.1.1", network: "172.16.0.0/16", valid: true }, { ip: "11.1.1.1", network: "10.0.0.0/8", valid: false }, { ip: "10.1.1.1", network: "10.0.0.0/8", valid: true }, { ip: "2001:67C:1562::16", network: "2001:67C:1562::0/32", valid: true }, { ip: "2002:67C:1562::16", network: "2001:67C:1562::0/32", valid: false }, { ip: "2001:67C:1561::16", network: "2001:67C:1562::0/64", valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.ip + " in network: " + scenario.network, function() { var result = ValidationService.validateIPInNetwork( scenario.ip, scenario.network); expect(result).toBe(scenario.valid); }); }); }); describe("validateIPInRange", function() { var scenarios = [ { ip: "192.168.1.1", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: false }, { ip: "192.168.1.2", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: true }, { ip: "192.168.1.3", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: true }, { ip: "192.168.1.100", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: true }, { ip: "192.168.1.101", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: false }, { ip: "192.168.1.2", network: "192.168.2.0/24", range: { low: 
"192.168.2.2", high: "192.168.2.100" }, valid: false }, // Tests the parseInt is using radix 10 for ipv4ToOctets. // See bug 1462079 for more information. { ip: "192.168.1.8", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: true }, // Tests the parseInt is using radix 10 for ipv4ToOctets. // See bug 1462079 for more information. { ip: "192.168.1.9", network: "192.168.1.0/24", range: { low: "192.168.1.2", high: "192.168.1.100" }, valid: true }, { ip: "172.16.1.9", network: "172.16.0.0/16", range: { low: "172.16.1.1", high: "172.16.2.254" }, valid: true }, { ip: "10.1.1.9", network: "10.0.0.0/8", range: { low: "10.0.0.1", high: "10.5.255.254" }, valid: true }, { ip: "2001:67C:1562::1", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: false }, { ip: "2001:67C:1562::2", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: true }, { ip: "2001:67C:1562::3", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: true }, { ip: "2001:67C:1562::FFFF:FFFE", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: true }, { ip: "2001:67C:1562::FFFF:FFFF", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: true }, { ip: "2001:67C:1562::1:0:0", network: "2001:67C:1562::0/32", range: { low: "2001:67C:1562::2", high: "2001:67C:1562::FFFF:FFFF" }, valid: false }, { ip: "2001:67C:1562::2", network: "2001:67C:1563::0/64", range: { low: "2001:67C:1563::2", high: "2001:67C:1563::FFFF:FFFF" }, valid: false } ]; angular.forEach(scenarios, function(scenario) { it("validates: " + scenario.ip + " in range: " + scenario.range.low + " - " + scenario.range.high, function() { var result = ValidationService.validateIPInRange( scenario.ip, scenario.network, scenario.range.low, scenario.range.high); expect(result).toBe(scenario.valid); }); }); }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/testing/utils.js0000644000000000000000000000204713056115004024324 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Testing Utilities * * Helper functions that make testing easier. */ function makeString(size) { var chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "0123456789"; if(!angular.isNumber(size)) { size = 10; } var i; var text = ""; for(i = 0; i < size; i++) { text += chars.charAt(Math.floor(Math.random() * chars.length)); } return text; } function makeName(name, size) { return name + "_" + makeString(size); } function makeFakeResponse(data, error) { if(error) { return angular.toJson({ type: 1, rtype: 1, error: data }); } else { return angular.toJson({ type: 1, rtype: 0, result: data }); } } function makeInteger(min, max) { return Math.floor(Math.random() * (max - min)) + min; } maas-1.9.5+bzr4599.orig/src/maasserver/static/js/angular/testing/websocket.js0000644000000000000000000000613413056115004025153 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * Mock WebSocket * * Provides a mock websocket connection. 
 */
var READY_STATES = {
    CONNECTING: 0,
    OPEN: 1,
    CLOSING: 2,
    CLOSED: 3
};

var MSG_TYPE = {
    REQUEST: 0,
    RESPONSE: 1
};

function MockWebSocket(url) {
    this.readyState = READY_STATES.CONNECTING;
    this.url = url;
    this.onclose = null;
    this.onerror = null;
    this.onmessage = null;
    this.onopen = null;
    this.sentData = [];
    this.returnData = [];
    this.receivedData = [];

    // Simulate the connection opening.
    var self = this;
    setTimeout(function() {
        self.readyState = READY_STATES.OPEN;
        if(angular.isFunction(self.onopen)) {
            self.onopen();
        }
    });
}

MockWebSocket.prototype.close = function() {
    this.readyState = READY_STATES.CLOSING;

    // Simulate the connection closing.
    var self = this;
    setTimeout(function() {
        self.readyState = READY_STATES.CLOSED;
        if(angular.isFunction(self.onclose)) {
            self.onclose();
        }
    });
};

MockWebSocket.prototype.send = function(data) {
    this.sentData.push(data);

    // Exit early if no fake data to return.
    if(this.returnData.length === 0) {
        return;
    }

    // Possible that the amount of data to receive for this
    // send message is only one packet.
    var receivedData = this.returnData.shift();
    if(!angular.isArray(receivedData)) {
        receivedData = [receivedData];
    }

    var self = this;

    // Send the response.
    setTimeout(function() {
        var sentObject = angular.fromJson(data);
        var sentType = sentObject.type;
        var sentId = sentObject.request_id;
        if(angular.isNumber(sentType) && angular.isNumber(sentId)) {
            // Patch the request_id so the response is the
            // same as the request.
            angular.forEach(receivedData, function(rData) {
                var rObject = angular.fromJson(rData);
                var rType = rObject.type;
                // Patch the request_id if the send message was a request and
                // the return message is a response. This allows the response
                // message in the queue to not know the request_id.
                if(angular.isNumber(rType) &&
                        sentType === MSG_TYPE.REQUEST &&
                        rType === MSG_TYPE.RESPONSE) {
                    rObject.request_id = sentId;
                }
                rData = angular.toJson(rObject);
                self.receivedData.push(rData);
                if(angular.isFunction(self.onmessage)) {
                    self.onmessage({ data: rData });
                }
            });
        } else {
            // Nothing to patch; just send the response.
            angular.forEach(receivedData, function(rData) {
                self.receivedData.push(rData);
                if(angular.isFunction(self.onmessage)) {
                    self.onmessage({ data: rData });
                }
            });
        }
    });
};
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/testing/testing.js0000644000000000000000000001252313056115004023210 0ustar 00000000000000/* Copyright 2012 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE). */

YUI().add('maas.testing', function(Y) {

Y.log('loading maas.testing');
var module = Y.namespace('maas.testing');

/**
 * Create a fake http response.
 */
function make_fake_response(response_text, status_code) {
    var out = {};
    // status_code defaults to undefined, since it's not always set.
    if (Y.Lang.isValue(status_code)) {
        out.status = status_code;
    }
    out.responseText = response_text;
    /* We probably shouldn't rely on the response attribute: according to
     * http://yuilibrary.com/yui/docs/io/#the-response-object it doesn't
     * always have to be populated. We do get a guarantee for responseText
     * or responseXML.
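     *
     * For reference, the object assembled here looks like this (status
     * is only present when a status_code was supplied):
     *
     *   {status: 404, responseText: '...', response: '...'}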
     */
    out.response = response_text;
    return out;
}

module.TestCase = Y.Base.create('ioMockableTestCase', Y.Test.Case, [], {

    _setUp: function() {
        if (!Y.Lang.isValue(this._cleanups)) {
            this._cleanups = [];
        }
    },

    addCleanup: function(func) {
        this._setUp();
        this._cleanups.push(func);
    },

    tearDown: function() {
        this._setUp();
        while (this._cleanups.length) {
            this._cleanups.pop()();
        }
    },

    /**
     * Mock the '_io' field of the provided module. This assumes that
     * the module has an internal reference to its io module named '_io'.
     *
     * @method mockIO
     * @param mock the mock object that should replace the module's io
     * @param module the module to monkey patch
     */
    mockIO: function(mock, module) {
        var io = module._io;
        module._io = mock;
        this.addCleanup(function() { module._io = io; });
    },

    /**
     * Mock the '_io' field of the provided module with a silent method that
     * simply records the call to 'send'. Returns an array where calls will
     * be recorded.
     * This assumes that the module has an internal reference to its io
     * module named '_io' and that all its io is done via module._io.send(...).
     *
     * @method logIO
     * @param module the module to monkey patch
     */
    logIO: function(module) {
        var log = [];
        var mockXhr = new Y.Base();
        mockXhr.send = function(url, cfg) {
            log.push([url, cfg]);
        };
        this.mockIO(mockXhr, module);
        return log;
    },

    /**
     * Mock the '_io' field to silence io.
     * This assumes that the module has an internal reference to its io
     * module named '_io' and that all its io is done via module._io.send(...).
     *
     * @method silentIO
     * @param module the module to monkey patch
     */
    silentIO: function(module) {
        var mockXhr = new Y.Base();
        mockXhr.send = function(url, cfg) { };
        this.mockIO(mockXhr, module);
    },

    /**
     * Register a method to be fired when the event 'name' is triggered on
     * 'source'. The handle will be cleaned up when the test finishes.
     *
     * @method registerListener
     * @param source the source of the event
     * @param name the name of the event to listen to
     * @param method the method to run
     * @param context the context in which the method should be run
     */
    registerListener: function(source, name, method, context) {
        var handle = source.on(name, method, context);
        this.addCleanup(Y.bind(handle.detach, handle));
        return handle;
    },

    /**
     * Set up mockIO to feign successful I/O completion. Returns an array
     * where calls will be recorded.
     *
     * @method mockSuccess
     * @param response_text The response text to fake. It will be available
     *     as request.responseText in the request passed to the success
     *     handler.
     * @param module The module to be instrumented.
     * @param status_code Optional HTTP status code. This defaults to
     *     undefined, since the attribute may not always be available.
     */
    mockSuccess: function(response_text, module, status_code) {
        var log = [];
        var mockXhr = new Y.Base();
        mockXhr.send = function(url, cfg) {
            log.push([url, cfg]);
            var response = make_fake_response(response_text, status_code);
            var arbitrary_txn_id = '4';
            cfg.on.success(arbitrary_txn_id, response);
        };
        this.mockIO(mockXhr, module);
        return log;
    },

    /**
     * Set up mockIO to feign I/O failure. Returns an array
     * where calls will be recorded.
     *
     * @method mockFailure
     * @param response_text The response text to fake. It will be available
     *     as request.responseText in the request passed to the failure
     *     handler.
     * @param module The module to be instrumented.
     * @param status_code Optional HTTP status code. This defaults to
     *     undefined, since the attribute may not always be available.
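     *
     * A hedged example of a test body using this helper; the response
     * text and status code here are invented for illustration:
     *
     *   var log = this.mockFailure('{"error": "boom"}', module, 500);
     *   // ...exercise code that does I/O via module._io.send()...
     *   Y.Assert.areEqual(1, log.length);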
*/ mockFailure: function(response_text, module, status_code) { var log = []; var mockXhr = new Y.Base(); mockXhr.send = function(url, cfg) { log.push([url, cfg]); var response = make_fake_response(response_text, status_code); var arbitrary_txn_id = '4'; cfg.on.failure(arbitrary_txn_id, response); }; this.mockIO(mockXhr, module); return log; } }); }, '0.1', {'requires': ['test', 'base']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/testing/testrunner.js0000644000000000000000000000323313056115004023742 0ustar 00000000000000/* Copyright 2012 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ /** * Merely loading this script into a page will cause it to look for a * single suite using the selector span#suite. If found, the text * within the span is considered to be a test module name. This is * then loaded, and its "suite" property is used to drive * Y.Test.Runner. * * Here's how to declare the suite to run: * * maas.something.test * */ YUI().use("event", function(Y) { Y.on("domready", function() { var suite_node = Y.one("#suite"); if (Y.Lang.isValue(suite_node)) { var suite_name = suite_node.get("text"); Y.use(suite_name, "test", function(y) { var module = y, parts = suite_name.split("."); while (parts.length > 0) { module = module[parts.shift()]; } var Runner = y.Test.Runner; Runner.add(module.suite); var testsFinished = function(){ var results = y.Test.Runner.getResults(y.Test.Format.JSON); // Publish the results in a new node. var result_node = Y.Node.create('
    ') .set('id', 'test_results') .set('text', results); Y.one('body').append(result_node); // Set the suite_node content to 'done'. suite_node.set('text', 'done'); }; Runner.subscribe(Runner.COMPLETE_EVENT, testsFinished); Runner.run(); }); } }); }); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/testing/yui_test_conf.js0000644000000000000000000000011513056115004024377 0ustar 00000000000000var YUI_config = { debug: true, combine: false, filter: 'raw' }; maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_enums.html0000644000000000000000000000216113056115004023733 0ustar 00000000000000 Test maas.enums maas.enums.tests
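The page above names its suite module, maas.enums.tests, inside span#suite; testrunner.js resolves that dotted name and runs the module's `suite` property. For clarity, here is a stripped-down suite module in exactly the shape the runner expects (the module name "maas.sample.tests" is hypothetical, used only to illustrate the contract; test_enums.js just below is a real instance):

YUI().add('maas.sample.tests', function(Y) {
    var namespace = Y.namespace('maas.sample.tests');
    var suite = new Y.Test.Suite("maas.sample Tests");
    suite.add(new Y.Test.Case({
        name: 'test-sample',
        testSomething: function() {
            Y.Assert.isTrue(true);
        }
    }));
    // testrunner.js looks up this property by walking the dotted
    // module name taken from the page's span#suite element.
    namespace.suite = suite;
}, '0.1', {'requires': ['test']});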
    maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_enums.js0000644000000000000000000000206213056115004023403 0ustar 00000000000000/* Copyright 2012-2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add('maas.enums.tests', function(Y) { Y.log('loading maas.enums.tests'); var namespace = Y.namespace('maas.enums.tests'); var module = Y.maas.enums; var suite = new Y.Test.Suite("maas.enums Tests"); suite.add(new Y.maas.testing.TestCase({ name: 'test-enums', testDefinesEnums: function() { Y.Assert.isObject(Y.maas.enums.NODE_STATUS); }, testHasEnumValues: function() { Y.Assert.isNotUndefined(Y.maas.enums.NODE_STATUS.READY); Y.Assert.isNotNull(Y.maas.enums.NODE_STATUS.READY); }, testDistinguishesValues: function() { Y.Assert.areNotEqual( Y.maas.enums.NODE_STATUS.READY, Y.maas.enums.NODE_STATUS.RETIRED, "Different values of an enum were equal somehow."); } })); namespace.suite = suite; }, '0.1', {'requires': [ 'node-event-simulate', 'test', 'maas.testing', 'maas.enums']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_image.html0000644000000000000000000000141613056115004023670 0ustar 00000000000000 Test maas.image maas.image.tests maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_image.js0000644000000000000000000000217413056115004023342 0ustar 00000000000000/* Copyright 2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add('maas.image.tests', function(Y) { Y.log('loading maas.image.tests'); var namespace = Y.namespace('maas.image.tests'); var module = Y.maas.image; var suite = new Y.Test.Suite("maas.image Tests"); suite.add(new Y.Test.Case({ name: 'test-image', testImageList: function() { var image_list = new module.ImageList(); Y.Assert.areSame(module.Image, image_list.model); }, testImageListSortsByTitle: function() { var image_list = new module.ImageList(); image_list.add({title: 'b_image'}); image_list.add({title: 'a_image'}); image_list.add({title: 'c_image'}); title_list = []; image_list.each(function(model) { title_list.push(model.get('title')); }); Y.ArrayAssert.itemsAreEqual( ['a_image', 'b_image', 'c_image'], title_list); } })); namespace.suite = suite; }, '0.1', {'requires': [ 'node-event-simulate', 'test', 'maas.image']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_image_views.html0000644000000000000000000000336713056115004025114 0ustar 00000000000000 Test maas.image_views maas.image_views.tests
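testImageListSortsByTitle in test_image.js above depends on maas.image's ImageList keeping its models ordered by title. In YUI that behaviour is usually obtained with a ModelList comparator; the following is a generic sketch of the pattern, not the actual maas.image implementation:

YUI().use('model', 'model-list', function(Y) {
    var Image = Y.Base.create('image', Y.Model, [], {}, {
        ATTRS: {
            title: {value: ''}
        }
    });
    var ImageList = Y.Base.create('imageList', Y.ModelList, [], {
        model: Image,
        // ModelList inserts models in the order defined by comparator.
        comparator: function(model) {
            return model.get('title');
        }
    });
    var list = new ImageList();
    list.add([{title: 'b_image'}, {title: 'a_image'}, {title: 'c_image'}]);
    // Iteration now yields a_image, b_image, c_image.
});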
    maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_image_views.js0000644000000000000000000003753613056115004024571 0ustar 00000000000000/* Copyright 2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add('maas.image_views.tests', function(Y) { Y.log('loading maas.image_views.tests'); var namespace = Y.namespace('maas.image_views.tests'); var module = Y.maas.image_views; var suite = new Y.Test.Suite("maas.image_views Tests"); // Dump this HTML into #placeholder to get DOM hooks for the view. var view_hooks = Y.one('#view-hooks').getContent(); suite.add(new Y.maas.testing.TestCase({ name: 'test-image-views-ImageListLoader', exampleResponse: { region_import_running: true, cluster_import_running: false, resources: [ {id: '3', name: 'ubuntu/trusty'}, {id: '4', name: 'ubtunu/utopic'} ] }, makeImageListLoader: function() { var view = new Y.maas.image_views.ImageListLoader(); this.addCleanup(Y.bind(view.destroy, view)); return view; }, testInitialization: function() { var view = this.makeImageListLoader(); Y.Assert.areEqual('imageList', view.modelList.name); }, testRenderDoesNotCallLoad: function() { // The initial call to .render() does *not* trigger the loading // of the images. var self = this; var mockXhr = Y.Mock(); Y.Mock.expect(mockXhr, { method: 'send', args: [MAAS_config.uris.images_handler, Y.Mock.Value.Any], run: function(uri, cfg) { var out = new Y.Base(); out.response = Y.JSON.stringify(self.exampleResponse); cfg.on.success(Y.guid(), out); } }); this.mockIO(mockXhr, module); var view = this.makeImageListLoader(); view.render(); // The model list has not been populated. Y.Assert.areEqual(0, view.modelList.size()); }, testAddLoader: function() { // A mock loader. var loader = new Y.Base(); // Capture event registrations. var events = {}; loader.on = function(event, callback) { events[event] = callback; }; var view = this.makeImageListLoader(); view.addLoader(loader); // Several events are registered. 
Y.Assert.areSame(view.loadImagesStarted, events["io:start"]); Y.Assert.areSame(view.loadImagesEnded, events["io:end"]); Y.Assert.areSame(view.loadImagesFailed, events["io:failure"]); Y.Assert.isFunction(events["io:success"]); }, testLoadImages: function() { var response = Y.JSON.stringify(this.exampleResponse); var view = this.makeImageListLoader(); view.loadImages(response); Y.Assert.isTrue(view.loaded); Y.Assert.areEqual(2, view.modelList.size()); Y.Assert.isTrue(view.regionImportRunning); Y.Assert.isFalse(view.clusterImportRunning); Y.Assert.areEqual('ubtunu/utopic', view.modelList.item(0).get('name')); Y.Assert.areEqual('ubuntu/trusty', view.modelList.item(1).get('name')); }, testLoadImages_invalid_data: function() { var response = "{garbled data}"; var view = this.makeImageListLoader(); var loadImagesFailedCalled = false; view.loadImagesFailed = function() { loadImagesFailedCalled = true; }; view.loadImages(response); Y.Assert.isTrue(view.loaded); Y.Assert.areEqual(0, view.modelList.size()); Y.Assert.isTrue(loadImagesFailedCalled); }, testLoadImages_calls_render: function() { var response = Y.JSON.stringify(this.exampleResponse); var view = this.makeImageListLoader(); var renderCalled = false; view.render = function() { renderCalled = true; }; view.loadImages(response); Y.Assert.isTrue(renderCalled); }, assertModelListMatchesImages: function(modelList, images) { Y.Assert.areEqual(images.length, modelList.size()); Y.Array.each(images, function(image) { var model = modelList.getById(image.id); Y.Assert.isObject(model); Y.Assert.areEqual(image.name, model.get("name")); Y.Assert.areEqual(image.title, model.get("title")); }); }, test_mergeImages_when_modelList_is_empty: function() { var view = this.makeImageListLoader(); var images = [ {id: 1, name: "name1", title: "title1"}, {id: 2, name: "name2", title: "title2"}, {id: 3, name: "name3", title: "title2"} ]; Y.Assert.areEqual(0, view.modelList.size()); view.mergeImages(images); this.assertModelListMatchesImages(view.modelList, images); }, test_mergeImages_when_modelList_is_not_empty: function() { var view = this.makeImageListLoader(); var images_before = [ {id: 1, name: "name1", title: "title1"}, {id: 3, name: "name3", title: "title3"} ]; var images_after = [ {id: 1, name: "name1after", title: "title1after"}, {id: 2, name: "name2after", title: "title2after"} ]; view.mergeImages(images_before); this.assertModelListMatchesImages(view.modelList, images_before); view.mergeImages(images_after); this.assertModelListMatchesImages(view.modelList, images_after); } })); suite.add(new Y.maas.testing.TestCase({ name: 'test-images-views-ImagesView', setUp : function () { Y.one('#placeholder').empty(); this.regionImporting = true; this.clusterImporting = false; this.ubuntuImages = [ { id: 1, rtype: Y.maas.enums.BOOT_RESOURCE_TYPE.SYNCED, name: 'ubuntu/trusty', title: '14.04 LTS', arch: 'amd64', size: '150 MB', complete: true, status: "Complete", downloading: false, numberOfNodes: 1, lastUpdate: '10/1/14' }, { id: 2, rtype: Y.maas.enums.BOOT_RESOURCE_TYPE.SYNCED, name: 'ubuntu/precise', title: '12.04 LTS', arch: 'amd64', size: '125 MB', complete: true, status: "Complete", downloading: false, numberOfNodes: 0, lastUpdate: '10/1/14' }, { id: 3, rtype: Y.maas.enums.BOOT_RESOURCE_TYPE.SYNCED, name: 'ubuntu/utopic', title: '14.10', arch: 'amd64', size: '155 MB', complete: false, status: "Downloading 13%", downloading: true, numberOfNodes: 0, lastUpdate: '10/1/14' } ]; }, /** * Counter to generate unique numbers. 
*/ counter: 0, /** * Get next value of this.counter, and increment. */ getNumber: function() { return this.counter++; }, /** * Create a images view, render it, and arrange for its cleanup. * * The "regionImporting" parameter defaults to this.regionImporting. * The "clusterImporting" parameter defaults to this.clusterImporting. * The "ubuntuImages" parameter defaults to this.ubuntuImages. */ makeImagesView: function(regionImporting, clusterImporting, ubuntuImages) { if (regionImporting === undefined) { regionImporting = this.regionImporting; } if (clusterImporting === undefined) { clusterImporting = this.clusterImporting; } if (ubuntuImages === undefined) { ubuntuImages = this.ubuntuImages; } var root_node_id = 'widget-' + this.getNumber().toString(); var new_view = Y.Node.create('
    ').set('id', root_node_id); this.addCleanup(function() { new_view.remove(); }); new_view.append(Y.Node.create(view_hooks)); Y.one('#placeholder').append(new_view); var view = create_images_view( regionImporting, clusterImporting, ubuntuImages, this, '#' + root_node_id); this.addCleanup(function() { view.destroy(); }); return view; }, testLoaderHiddenAndContentShown: function() { var view = this.makeImagesView(); Y.Assert.isTrue(view.srcNode.one('#loader').hasClass('hidden')); Y.Assert.isFalse(view.srcNode.one('#content').hasClass('hidden')); }, testLoaderShownAndContentHidden: function() { var view = this.makeImagesView(); view.loaded = false; view.render(); Y.Assert.isFalse(view.srcNode.one('#loader').hasClass('hidden')); Y.Assert.isTrue(view.srcNode.one('#content').hasClass('hidden')); }, testImportingHidden: function() { var view = this.makeImagesView(false, false); Y.Assert.isTrue(view.srcNode.one('#importer').hasClass('hidden')); Y.Assert.areSame( '', view.srcNode.one('#importer').one('.importing-text').getContent()); }, testImportingRegion: function() { var view = this.makeImagesView(true, false); Y.Assert.isFalse(view.srcNode.one('#importer').hasClass('hidden')); Y.Assert.areSame( view.regionImportingText, view.srcNode.one('#importer').one('.importing-text').getContent()); }, testImportingCluster: function() { var view = this.makeImagesView(false, true); Y.Assert.isFalse(view.srcNode.one('#importer').hasClass('hidden')); Y.Assert.areSame( view.clusterImportingText, view.srcNode.one('#importer').one('.importing-text').getContent()); }, testHidesUbuntuOptionsWhenRegionImporting: function() { var view = this.makeImagesView(true); Y.Assert.isTrue( view.srcNode.one('#ubuntu-options').hasClass('hidden')); }, testShowsUbuntuOptionsWhenRegionNotImporting: function() { var view = this.makeImagesView(false); Y.Assert.isFalse( view.srcNode.one('#ubuntu-options').hasClass('hidden')); }, testHidesUbuntuButtonWhenRegionImporting: function() { var view = this.makeImagesView(true); Y.Assert.isTrue( view.srcNode.one('#ubuntu-apply').hasClass('hidden')); }, testShowsUbuntuButtonWhenRegionNotImporting: function() { var view = this.makeImagesView(false); Y.Assert.isFalse( view.srcNode.one('#ubuntu-apply').hasClass('hidden')); }, testShowsMissingIfEmptyImages: function() { var view = this.makeImagesView(false, false, []); Y.Assert.isFalse( view.srcNode.one('#missing-ubuntu-images').hasClass('hidden')); Y.Assert.isTrue( view.srcNode.one('#ubuntu-resources').hasClass('hidden')); }, testShowsMissingIfEmptyUbuntuImages: function() { var none_ubuntu_images = [ { id: 1, rtype: Y.maas.enums.BOOT_RESOURCE_TYPE.SYNCED, name: "centos/centos65" }, { id: 2, rtype: Y.maas.enums.BOOT_RESOURCE_TYPE.SYNCED, name: "centos/centos70" } ]; var view = this.makeImagesView(false, false, none_ubuntu_images); Y.Assert.isFalse( view.srcNode.one('#missing-ubuntu-images').hasClass('hidden')); Y.Assert.isTrue( view.srcNode.one('#ubuntu-resources').hasClass('hidden')); }, testHidesMissingIfUbuntuImages: function() { var view = this.makeImagesView(); Y.Assert.isTrue( view.srcNode.one('#missing-ubuntu-images').hasClass('hidden')); Y.Assert.isFalse( view.srcNode.one('#ubuntu-resources').hasClass('hidden')); }, testRendersUbuntuTableData: function() { var view = this.makeImagesView(); var tableBody = view.srcNode.one('#ubuntu-resources').one('tbody'); var tableRows = tableBody.get('children'); Y.each(view.getUbuntuImages(), function(image, i) { var row = tableRows.item(i); var columns = row.get('children'); 
Y.Assert.areSame(image.get('title'), columns.item(1).getContent()); Y.Assert.areSame(image.get('arch'), columns.item(2).getContent()); Y.Assert.areSame(image.get('size'), columns.item(3).getContent()); Y.Assert.areSame( image.get('numberOfNodes').toString(), columns.item(4).getContent()); Y.Assert.areSame( image.get('lastUpdate'), columns.item(5).getContent()); }); }, testUpdateUbuntuButtonSetValueForApply: function() { var view = this.makeImagesView(); var ubuntuButton = view.srcNode.one('#ubuntu-apply'); view.updateUbuntuButton(true); Y.Assert.areSame('Apply changes', ubuntuButton.get('value')); }, testUpdateUbuntuButtonSetValueForImport: function() { var view = this.makeImagesView(); var ubuntuButton = view.srcNode.one('#ubuntu-apply'); view.updateUbuntuButton(false); Y.Assert.areSame('Import images', ubuntuButton.get('value')); }, testUpdateUbuntuButtonDoesNothingIfLockValueExists: function() { var view = this.makeImagesView(); var ubuntuButton = view.srcNode.one('#ubuntu-apply'); ubuntuButton.set('value', 'testing'); ubuntuButton.setData('lock-value', 'true'); view.updateUbuntuButton(true); Y.Assert.areSame('testing', ubuntuButton.get('value')); }, testGetSpinnerReturnsEmptyForComplete: function() { var view = this.makeImagesView(); var model = new Y.maas.image.Image({complete: true}); Y.Assert.areSame('', view.getSpinner(model)); }, testGetSpinnerReturnsStatusInTitle: function() { var view = this.makeImagesView(); var model = new Y.maas.image.Image({status: 'Testing'}); var html = view.getSpinner(model); var node = Y.Node.create(html); Y.Assert.areSame('Testing', node.get('title')); }, testGetSpinnerHasSpinnerClass: function() { var view = this.makeImagesView(); var model = new Y.maas.image.Image(); var html = view.getSpinner(model); var node = Y.Node.create(html); Y.Assert.isTrue(node.hasClass('spinner')); }, testGetSpinnerDoesntIncludeSpinWhenNotDownloading: function() { var view = this.makeImagesView(); var model = new Y.maas.image.Image({downloading: false}); var html = view.getSpinner(model); var node = Y.Node.create(html); Y.Assert.isFalse(node.hasClass('spin')); }, testGetSpinnerIncludesSpinWhenDownloading: function() { var view = this.makeImagesView(); var model = new Y.maas.image.Image({downloading: true}); var html = view.getSpinner(model); var node = Y.Node.create(html); Y.Assert.isTrue(node.hasClass('spin')); } })); function create_images_view( regionImporting, clusterImporting, ubuntuImages, self, root_node_descriptor) { var response = Y.JSON.stringify({ region_import_running: regionImporting, cluster_import_running: clusterImporting, resources: ubuntuImages }); var view = new Y.maas.image_views.ImagesView({ srcNode: root_node_descriptor, loader: '#loader', content: '#content', importer: '#importer', ubuntuOptions: '#ubuntu-options', ubuntuTable: '#ubuntu-resources', ubuntuMissingImages: '#missing-ubuntu-images', ubuntuButton: '#ubuntu-apply'}); view.loadImages(response); return view; } namespace.suite = suite; }, '0.1', {'requires': [ 'node-event-simulate', 'test', 'maas.testing', 'maas.enums', 'maas.image', 'maas.image_views']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_os_distro_select.html0000644000000000000000000000337513056115004026160 0ustar 00000000000000 Test maas.os_distro_select maas.os_distro_select.tests
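The OSReleaseWidget tests that follow rely on a value convention for distro series options: "" is the overall default, "os/" is an OS-level default, and "os/release" names a concrete release. A small helper sketch that makes the convention explicit (hypothetical code, not part of the widget under test):

function splitDistroSeries(value) {
    // "ubuntu/trusty" -> {os: "ubuntu", release: "trusty"}
    // "ubuntu/"       -> {os: "ubuntu", release: ""}
    // ""              -> {os: "", release: ""}
    var idx = value.indexOf('/');
    if (idx === -1) {
        return {os: value, release: ''};
    }
    return {
        os: value.substring(0, idx),
        release: value.substring(idx + 1)
    };
}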
    maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_os_distro_select.js0000644000000000000000000001351313056115004025623 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add( 'maas.os_distro_select.tests', function(Y) { Y.log('loading maas.os_distro_select.tests'); var namespace = Y.namespace('maas.os_distro_select.tests'); var module = Y.maas.os_distro_select; var suite = new Y.Test.Suite("maas.os_distro_select Tests"); var select_node_template = Y.one('#select_node').getContent(); var target_node_template = Y.one('#target_node').getContent(); suite.add(new Y.maas.testing.TestCase({ name: 'test-os_distro_select', setUp: function () { Y.one('#placeholder').empty().append( Y.Node.create(select_node_template).append( Y.Node.create(target_node_template))); this.widget = new Y.maas.os_distro_select.OSReleaseWidget({ srcNode: '#id_distro_series' }); }, testBindCallsSwitchTo: function() { var called = false; this.widget.switchTo = function() { called = true; }; this.widget.bindTo(Y.one('#id_osystem'), 'change'); Y.Assert.isTrue(called); }, testSwitchToCalledModifyOptionOnAll: function() { var options = []; this.widget.modifyOption = function(option, value) { options.push(option); }; this.widget.bindTo(Y.one('#id_osystem'), 'change'); var expected = Y.one('#id_distro_series').all('option'); Y.ArrayAssert.containsItems(expected, options); }, testSwitchToTogglesInitialSkip: function() { this.widget.bindTo(Y.one('#id_osystem'), 'change'); Y.Assert.isFalse(this.widget.initialSkip); }, testSwitchToCallsSelectVisableOption: function() { var called = false; this.widget.selectVisableOption = function() { called = true; }; this.widget.initialSkip = false; this.widget.bindTo(Y.one('#id_osystem'), 'change'); Y.Assert.isTrue(called); }, testModifyOptionSelectsDefault: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "" }); Y.Mock.expect(option, { method: "removeClass", args: ["hidden"] }); Y.Mock.expect(option, { method: "set", args: ["selected", "selected"] }); var selected = this.widget.modifyOption(option, ''); Y.Mock.verify(option); Y.Assert.isFalse(selected); }, testModifyOptionHidesNonDefault: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "value1" }); Y.Mock.expect(option, { method: "addClass", args: ["hidden"] }); var selected = this.widget.modifyOption(option, ''); Y.Mock.verify(option); Y.Assert.isFalse(selected); }, testModifyOptionShowsOSMatch: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "os/release" }); Y.Mock.expect(option, { method: "removeClass", args: ["hidden"] }); var selected = this.widget.modifyOption(option, 'os'); Y.Mock.verify(option); Y.Assert.isFalse(selected); }, testModifyOptionSelectsOSDefault: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "os/" }); Y.Mock.expect(option, { method: "removeClass", args: ["hidden"] }); Y.Mock.expect(option, { method: "set", args: ["selected", "selected"] }); this.widget.initialSkip = false; var selected = this.widget.modifyOption(option, 'os'); Y.Mock.verify(option); Y.Assert.isTrue(selected); }, testModifyOptionSelectsOSDefaultSkippedOnInitial: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "os/" }); Y.Mock.expect(option, { 
method: "removeClass", args: ["hidden"] }); var selected = this.widget.modifyOption(option, 'os'); Y.Mock.verify(option); Y.Assert.isFalse(selected); }, testModifyOptionHidesOSMismatch: function() { var option = Y.Mock(); Y.Mock.expect(option, { method: "get", args: ["value"], returns: "os/release" }); Y.Mock.expect(option, { method: "addClass", args: ["hidden"] }); var selected = this.widget.modifyOption(option, 'other'); Y.Mock.verify(option); Y.Assert.isFalse(selected); }, testSelectVisableOptionShowsFirstVisable: function() { var option = Y.Mock(); var option2 = Y.Mock(); Y.Mock.expect(option, { method: "hasClass", args: ["hidden"], returns: true }); Y.Mock.expect(option2, { method: "hasClass", args: ["hidden"], returns: false }); Y.Mock.expect(option2, { method: "set", args: ["selected", "selected"] }); var options = Y.Array([option, option2]); this.widget.selectVisableOption(options); Y.Mock.verify(option); Y.Mock.verify(option2); } })); namespace.suite = suite; }, '0.1', {'requires': [ 'node-event-simulate', 'test', 'maas.testing', 'maas.os_distro_select']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_prefs.html0000644000000000000000000000337013056115004023726 0ustar 00000000000000 Test maas.prefs maas.prefs.tests maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_prefs.js0000644000000000000000000001560113056115004023376 0ustar 00000000000000/* Copyright 2012 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add('maas.prefs.tests', function(Y) { Y.log('loading maas.prefs.tests'); var namespace = Y.namespace('maas.prefs.tests'); var module = Y.maas.prefs; var suite = new Y.Test.Suite("maas.prefs Tests"); var api_template = Y.one('#api-template').getContent(); suite.add(new Y.maas.testing.TestCase({ name: 'test-prefs', setUp: function() { Y.one("body").append(Y.Node.create(api_template)); }, createWidget: function() { var widget = new module.TokenWidget({srcNode: '#placeholder'}); this.addCleanup(function() { widget.destroy(); }); this.patchWidgetConfirm(widget, true); return widget; }, patchWidgetConfirm: function(widget, result) { // Monkey patch widget.confirm. widget.confirm = function(message) { return result; }; }, testInitializer: function() { var widget = this.createWidget(); widget.render(); // The "create a new API token" has been created. var create_link = widget.get('srcNode').one('#create_token'); Y.Assert.isNotNull(create_link); Y.Assert.areEqual( "+ Generate MAAS key", create_link.get('text')); // The placeholder node for errors has been created. var status_node = widget.get('srcNode').one('#create_error'); Y.Assert.isNotNull(status_node); Y.Assert.areEqual( '', widget.get('srcNode').one('#create_error').get('text')); }, test_nb_tokens: function() { var widget = this.createWidget(); widget.render(); Y.Assert.areEqual(2, widget.get('nb_tokens')); }, testDeleteTokenCall: function() { // A click on the delete link calls the API to delete a token. 
var log = this.logIO(module); var widget = this.createWidget(); widget.render(); var link = widget.get('srcNode').one('.delete-link'); link.simulate('click'); var request_info = log.pop(); Y.Assert.areEqual(MAAS_config.uris.account_handler, request_info[0]); Y.Assert.areEqual( "op=delete_authorisation_token&token_key=tokenkey1", request_info[1].data); }, testDeleteTokenCallsAPI: function() { var log = this.logIO(module); var widget = this.createWidget(); widget.render(); var link = widget.get('srcNode').one('.delete-link'); link.simulate('click'); Y.Assert.areEqual(1, log.length); }, testDeleteTokenFail404DisplaysErrorMessage: function() { // If the API call to delete a token fails with a 404 error, // an error saying that the key has already been deleted is displayed. this.mockFailure('unused', module, 404); var widget = this.createWidget(); widget.render(); var link = widget.get('srcNode').one('.delete-link'); link.simulate('click'); Y.Assert.areEqual( "The key has already been deleted.", widget.get('srcNode').one('#create_error').get('text')); }, testDeleteTokenFailDisplaysErrorMessage: function() { // If the API call to delete a token fails, an error is displayed. this.mockFailure('unused', module, 500); var widget = this.createWidget(); widget.render(); var link = widget.get('srcNode').one('.delete-link'); link.simulate('click'); Y.Assert.areEqual( "Unable to delete the key.", widget.get('srcNode').one('#create_error').get('text')); }, testDeleteTokenDisplay: function() { // When the token is successfully deleted by the API, the // corresponding row is deleted. var log = this.mockSuccess('unused', module); var widget = this.createWidget(); widget.render(); var link = widget.get('srcNode').one('.delete-link'); Y.Assert.isNotNull(Y.one('#tokenkey1')); link.simulate('click'); Y.Assert.areEqual(1, log.length); Y.Assert.isNull(Y.one('#tokenkey1')); Y.Assert.isNotNull(Y.one('#tokenkey2')); Y.Assert.areEqual(1, widget.get('nb_tokens')); }, testDontDeleteIfConfirmReturnsFalse: function() { var mockXhr = new Y.Base(); var widget = this.createWidget(); this.patchWidgetConfirm(widget, false); widget.render(); var link = widget.get('srcNode').one('.delete-link'); Y.Assert.isNotNull(Y.one('#tokenkey1')); link.simulate('click'); Y.Assert.isNotNull(Y.one('#tokenkey1')); Y.Assert.areEqual(2, widget.get('nb_tokens')); }, test_createTokenFromKeys: function() { var widget = this.createWidget(); var token = widget.createTokenFromKeys( 'consumer_key', 'token_key', 'token_secret'); Y.Assert.areEqual('consumer_key:token_key:token_secret', token); }, testCreateTokenCall: function() { // A click on the "create a new token" link calls the API to // create a token. var log = this.logIO(module); var widget = this.createWidget(); widget.render(); var create_link = widget.get('srcNode').one('#create_token'); create_link.simulate('click'); var request_infos = log.pop(); Y.Assert.areEqual(MAAS_config.uris.account_handler, request_infos[0]); Y.Assert.areEqual( "op=create_authorisation_token", request_infos[1].data); }, testCreateTokenFail: function() { // If the API call to create a token fails, an error is displayed. 
var log = this.mockFailure('unused', module); var widget = this.createWidget(); widget.render(); var create_link = widget.get('srcNode').one('#create_token'); create_link.simulate('click'); Y.Assert.areEqual(1, log.length); Y.Assert.areEqual( 'Unable to create a new token.', widget.get('srcNode').one('#create_error').get('text')); }, testCreateTokenDisplay: function() { // When a new token is successfully created by the API, a new // corresponding row is added. var response = { consumer_key: 'consumer_key', token_key: 'token_key', token_secret: 'token_secret' }; var log = this.mockSuccess(Y.JSON.stringify(response), module); var widget = this.createWidget(); widget.render(); var create_link = widget.get('srcNode').one('#create_token'); create_link.simulate('click'); Y.Assert.areEqual(1, log.length); Y.Assert.areEqual(3, widget.get('nb_tokens')); Y.Assert.isNotNull(Y.one('#token_key')); } })); namespace.suite = suite; }, '0.1', {'requires': [ 'node-event-simulate', 'node', 'test', 'maas.testing', 'maas.prefs']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_reveal.html0000644000000000000000000000171513056115004024066 0ustar 00000000000000 Test maas.reveal
    maas.reveal.tests maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_reveal.js0000644000000000000000000002221713056115004023536 0ustar 00000000000000/* Copyright 2012-2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). */ YUI({ useBrowserConsole: true }).add('maas.reveal.tests', function(Y) { Y.log('loading maas.reveal.tests'); var namespace = Y.namespace('maas.reveal.tests'); var module = Y.maas.reveal; var suite = new Y.Test.Suite("maas.reveal Tests"); suite.add(new Y.maas.testing.TestCase({ name: 'test-revealing', setUp: function() { Y.one('#placeholder').setHTML(''); }, // Create a content div (in its visible state). make_div: function(html_content) { if (html_content === undefined) { html_content = "
<pre>Arbitrary content</pre>
    "; } // Hook this new DOM node into the document, so that it has proper // display attributes. Otherwise we can't simulate and verify its // appearing or disappearing. return Y.one('#placeholder').appendChild( '
<div>' + html_content + '</div>
    '); }, // Create a button link. make_link: function(link_content) { if (link_content === undefined) { link_content = "Arbitrary link text"; } return Y.Node.create('' + link_content + ''); }, // Make a content div look to the widget as if it's been revealed. show_div: function(node) { node.setStyle('height', '20px'); }, test_is_visible_returns_true_for_nonzero_height: function() { var div = this.make_div(); var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: div, quick: true }); revealer.render(); div.setStyle('height', '20px'); Y.assert( revealer.is_visible(), "is_visible() fails to recognize div as visible."); }, test_is_visible_returns_false_for_zero_height: function() { var div = this.make_div(); var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: div, quick: true }); revealer.render(); div.setStyle('height', '0'); Y.assert( !revealer.is_visible(), "is_visible() thinks that div is visible when it isn't."); }, test_get_animation_duration_defaults_to_suggested_duration: function() { var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: this.make_div() }); Y.Assert.areEqual(5, revealer.get_animation_duration(5)); }, test_get_animation_duration_returns_mere_wink_if_quick_is_set: function() { var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: this.make_div(), quick: true }); var suggested_duration = 5; var duration = revealer.get_animation_duration(suggested_duration); Y.Assert.areNotEqual(suggested_duration, duration); Y.assert(duration < suggested_duration, "'Quick' duration is longer."); Y.assert(duration < 0.1, "'Quick' duration is still fairly long."); }, test_set_hidden_link_sets_show_text: function() { var link = this.make_link("Original link"); var revealer = new module.Reveal({ linkNode: link, targetNode: this.make_div(), showText: "Show content", hideText: "Hide content", quick: true }); revealer.set_hidden_link(link); Y.Assert.areEqual("Show content", link.get('text')); }, test_set_hidden_link_does_nothing_if_show_text_not_set: function() { var link = this.make_link("Original link"); var revealer = new module.Reveal({ linkNode: link, targetNode: this.make_div(), hideText: "Hide content", quick: true }); revealer.set_hidden_link(link); Y.Assert.areEqual("Original link", link.get('text')); }, test_set_visible_link_sets_hide_text: function() { var link = this.make_link("Original link"); var revealer = new module.Reveal({ linkNode: link, targetNode: this.make_div(), showText: "Show content", hideText: "Hide content", quick: true }); revealer.set_visible_link(link); Y.Assert.areEqual("Hide content", link.get('text')); }, test_set_visible_link_does_nothing_if_hide_text_not_set: function() { var link = this.make_link("Original link"); var revealer = new module.Reveal({ linkNode: link, targetNode: this.make_div(), showText: "Show content", quick: true }); revealer.set_visible_link(link); Y.Assert.areEqual("Original link", link.get('text')); }, test_div_slides_out_when_revealing: function() { var self = this; var div = this.make_div('
<pre>Content here</pre>
    '); var content = div.one('pre'); var original_height = ( parseInt(content.getStyle('height'), 10) + parseInt(content.getStyle('marginTop'), 10) + parseInt(content.getStyle('marginBottom'), 10) + parseInt(content.getStyle('paddingTop'), 10) + parseInt(content.getStyle('paddingBottom'), 10)); var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: div, quick: true }); revealer.render(); revealer.on('revealed', function() { self.resume(function() { Y.assert( revealer.is_visible(), "The content div was not revealed."); Y.Assert.areEqual( original_height, parseInt(div.getStyle('height'), 10), "The content div was not resized to its original height."); }); }); revealer.reveal(); this.wait(); }, test_replaces_link_text_when_revealing: function() { var self = this; var link = this.make_link("Original link"); var div = this.make_div(); var revealer = new module.Reveal({ linkNode: link, targetNode: div, hideText: "Hide content", quick: true }); revealer.render(); revealer.on('revealed', function() { self.resume(function() { Y.Assert.areEqual("Hide content", link.get('text')); }); }); revealer.reveal(); this.wait(); }, test_div_slides_in_when_hiding: function() { var self = this; var div = this.make_div(); var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: div, quick: true }); revealer.render(); this.show_div(div); revealer.on('hidden', function() { self.resume(function() { Y.assert( !revealer.is_visible(), "The content div was not hidden."); }); }); revealer.reveal(); this.wait(); }, test_replaces_link_text_when_hiding: function() { var self = this; var link = this.make_link("Original link"); var div = this.make_div(); var revealer = new module.Reveal({ linkNode: link, targetNode: div, showText: "Show content", quick: true }); revealer.render(); this.show_div(div); revealer.on('hidden', function() { self.resume(function() { Y.Assert.areEqual("Show content", link.get('text')); }); }); revealer.reveal(); this.wait(); }, test_renders_in_hidden_state: function() { var link = this.make_link(); var div = this.make_div(); var revealer = new module.Reveal({ linkNode: link, targetNode: div, showText: "Show content", hideText: "Hide content", quick: true }); revealer.render(); Y.Assert.areEqual(div.getStyle('height'), '0px'); Y.assert( !revealer.is_visible(), "Widget thinks it's visible after rendering."); Y.Assert.areEqual("Show content", link.get('text')); }, test_fires_hiding_events_immediately_when_rendering: function() { var revealer = new module.Reveal({ linkNode: this.make_link(), targetNode: this.make_div(), quick: true }); var hiding_fired = false, hidden_fired = false; revealer.on('hiding', function() { hiding_fired = true; }); revealer.on('hidden', function() { hidden_fired = true; }); // This fires the events immediately and synchronously. revealer.render(); Y.assert(hiding_fired, "The 'hiding' signal was not fired."); Y.assert(hidden_fired, "The 'hidden' signal was not fired."); } })); namespace.suite = suite; }, '0.1', {'requires': [ 'test', 'maas.testing', 'maas.reveal']} ); maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_shortpoll.html0000644000000000000000000000151213056115004024631 0ustar 00000000000000 Test maas.shortpoll maas.shortpoll.tests maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_shortpoll.js0000644000000000000000000002044313056115004024305 0ustar 00000000000000/* Copyright 2014 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). 
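 *
 * The retry policy these tests pin down, reconstructed from the
 * assertions below rather than from the shortpoll module itself:
 * repoll() keeps the delay at SHORT_DELAY until
 * MAX_SHORT_DELAY_FAILED_ATTEMPTS consecutive failures accumulate, then
 * switches to LONG_DELAY; a success drops it back to SHORT_DELAY.
 * Roughly:
 *
 *   delay = (failed_attempts < MAX_SHORT_DELAY_FAILED_ATTEMPTS)
 *       ? SHORT_DELAY : LONG_DELAY;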
*/ YUI({ useBrowserConsole: true }).add('maas.shortpoll.tests', function(Y) { Y.log('loading maas.shortpoll.tests'); var namespace = Y.namespace('maas.shortpoll.tests'); var shortpoll = Y.maas.shortpoll; var suite = new Y.Test.Suite("maas.shortpoll Tests"); suite.add(new Y.maas.testing.TestCase({ name: 'test-shortpoll', setUp: function() { this.constructor.superclass.setUp(); var old_repoll = shortpoll._repoll; shortpoll._repoll = false; this.addCleanup(function() {shortpoll._repoll = old_repoll; }); }, testInitShortPollManager: function() { var manager = new shortpoll.ShortPollManager( {uri: '/shortpoll/', eventKey: 'event-key'}); Y.Assert.areEqual('/shortpoll/', manager.get("uri")); Y.Assert.areEqual('event-key', manager.get("eventKey")); }, testInitShortPollManagerDefaults: function() { var manager = new shortpoll.ShortPollManager(); // The default URI is the empty string, i.e. here. Y.Assert.areEqual("", manager.get("uri")); // The default eventKey is generated by Y.guid() with a custom prefix. Y.Assert.areEqual( "shortpoll_", manager.get("eventKey").substring(0, 10)); // The default eventKey is stable. Y.Assert.areEqual(manager.get("eventKey"), manager.get("eventKey")); }, testIOAttribute: function() { // The IO attribute/property returns the module's `_io` object. var manager = new shortpoll.ShortPollManager(); Y.Assert.areSame(shortpoll._io, manager.get("io")); // Changes to the module's `_io` object are reflected immediately. var io = shortpoll._io; this.addCleanup(function() { shortpoll._io = io; }); shortpoll._io = Y.guid(); Y.Assert.areSame(shortpoll._io, manager.get("io")); }, testPollStarted: function() { var fired = false; Y.on(shortpoll.shortpoll_start_event, function() { fired = true; }); var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); manager.poll(); Y.Assert.isTrue(fired, "Start event not fired."); }, testPollFailure: function() { var fired = false; Y.on(shortpoll.shortpoll_fail_event, function() { fired = true; }); var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); // Simulate failure. this.mockFailure('unused', shortpoll); manager.poll(); Y.Assert.isTrue(fired, "Failure event not fired."); }, testSuccessPollInvalidData: function() { var manager = new shortpoll.ShortPollManager(); var custom_response = "{{"; var response = { responseText: custom_response }; var res = manager.successPoll("2", response); Y.Assert.isFalse(res); }, testSuccessPollMalformedData: function() { var manager = new shortpoll.ShortPollManager(); var response = { responseText: '{ 1234: "6" }' }; var res = manager.successPoll("2", response); Y.Assert.isFalse(res); }, testSuccessPollWellformedData: function() { var manager = new shortpoll.ShortPollManager(); var response = { responseText: '{ "event_key": "4", "something": "6"}' }; var res = manager.successPoll("2", response); Y.Assert.isTrue(res); }, testPollDelay: function() { var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); Y.Assert.areEqual(0, manager._failed_attempts); var delay = manager.repoll(true); // Simulate failure. Y.Assert.areEqual(shortpoll.SHORT_DELAY, delay); Y.Assert.areEqual(1, manager._failed_attempts); // While the number of failures is small the delay between polls // remains at its initial value, SHORT_DELAY. var max_failures = shortpoll.MAX_SHORT_DELAY_FAILED_ATTEMPTS; for (; manager._failed_attempts < max_failures - 1;) { delay = manager.repoll(true); // Simulate failure. 
Y.Assert.areEqual(shortpoll.SHORT_DELAY, delay); } // After MAX_SHORT_DELAY_FAILED_ATTEMPTS failed attempts, the // delay changes to LONG_DELAY. delay = manager.repoll(true); // Simulate failure. Y.Assert.areEqual(shortpoll.LONG_DELAY, delay); // After a success, the delay returns to SHORT_DELAY. delay = manager.repoll(false); // Simulate success. Y.Assert.areEqual(shortpoll.SHORT_DELAY, delay); }, testPollURISequence: function() { // Each new polling increases the sequence parameter: // /shortpoll/?sequence=1 // /shortpoll/?sequence=2 // /shortpoll/?sequence=3 // ... var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); // Simulate success. var log = this.mockSuccess('{"i":2}', shortpoll); manager.poll(); var request; for (request = 1; request < 10; request++) { manager.poll(); Y.Assert.areEqual( '/shortpoll/?sequence=' + (request + 1), log.pop()[0]); } }, _testDoesNotFail: function(error_code) { // Assert that, when the shortpoll request receives an error // with code error_code, it is not treated as a failed // connection attempt. var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); // Simulate a request timeout. this.mockFailure('{"i":2}', shortpoll, error_code); Y.Assert.areEqual(0, manager._failed_attempts); manager.poll(); Y.Assert.areEqual(0, manager._failed_attempts); }, test408RequestTimeoutHandling: function() { this._testDoesNotFail(408); }, test504GatewayTimeoutHandling: function() { this._testDoesNotFail(504); }, testPollPayloadBad: function() { // If a non valid response is returned, shortpoll_fail_event // is fired. var fired = false; Y.on(shortpoll.shortpoll_fail_event, function() { fired = true; }); var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); var response = "{non valid json"; this.mockSuccess(response, shortpoll); manager.poll(); Y.Assert.isTrue(fired, "Failure event not fired."); }, testPollPayloadOk: function() { // Create a valid message. var custom_response = [ {'something': {something_else: 1234}}, {'thisisit': {thatisnot: 5678}} ]; var manager = new shortpoll.ShortPollManager({uri: '/shortpoll/'}); var event_payload = null; Y.on(manager.get("eventKey"), function(data) { event_payload = data; }); // Simulate success. this.mockSuccess(Y.JSON.stringify(custom_response), shortpoll); manager.poll(); // Note that a utility to compare objects does not yet exist in YUI. // http://yuilibrary.com/projects/yui3/ticket/2529868. Y.Assert.areEqual(1234, event_payload[0].something.something_else); Y.Assert.areEqual(5678, event_payload[1].thisisit.thatisnot); }, testPollURI_appends_sequence_to_existing_query_args: function() { // When the URI already contains query arguments, a sequence key is // added to the end. var manager = new shortpoll.ShortPollManager({uri: 'somewhere?k=v'}); var log = this.mockSuccess("[]", shortpoll); manager.poll(); Y.Assert.areEqual(1, log.length); Y.Assert.areEqual('somewhere?k=v&sequence=1', log[0][0]); }, testPollURI_adds_sequence_as_new_query_arg: function() { // When the URI does not already contain query arguments, a sequence // key is set as a new query arg. 
var manager = new shortpoll.ShortPollManager({uri: 'somewhere'});
var log = this.mockSuccess("[]", shortpoll);
manager.poll();
Y.Assert.areEqual(1, log.length);
Y.Assert.areEqual('somewhere?sequence=1', log[0][0]);
}
}));

namespace.suite = suite;

}, '0.1', {'requires': [
    'node-event-simulate', 'test', 'maas.testing', 'maas.shortpoll']}
);
maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_user_panel.html0000644000000000000000000000226413056115004024745 0ustar 00000000000000 Test maas.user_panel maas.user_panel.tests maas-1.9.5+bzr4599.orig/src/maasserver/static/js/tests/test_user_panel.js0000644000000000000000000000365513056115004024420 0ustar 00000000000000/* Copyright 2012 Canonical Ltd. This software is licensed under the
 * GNU Affero General Public License version 3 (see the file LICENSE). */

YUI({ useBrowserConsole: true }).add('maas.user_panel.tests', function(Y) {

Y.log('loading maas.user_panel.tests');
var namespace = Y.namespace('maas.user_panel.tests');

var module = Y.maas.user_panel;
var suite = new Y.Test.Suite("maas.user_panel Tests");

suite.add(new Y.maas.testing.TestCase({
    name: 'test-user-panel-widget-singleton',

    testSingletonCreation: function() {
        Y.Assert.isNull(
            module._user_panel_singleton,
            'module._user_panel_singleton is originally null.');
        module.createUserPanelWidget();
        Y.Assert.isNotNull(
            module._user_panel_singleton,
            'module._user_panel_singleton is populated after the ' +
            'call to module.createUserPanelWidget.');
    }
}));

suite.add(new Y.maas.testing.TestCase({
    name: 'test-user-panel-widget-visibility',

    testWidgetShowing: function() {
        var overlay = module._user_panel_singleton;
        Y.Assert.isFalse(
            overlay.get('visible'),
            'When created the widget should not be visible');
        module.showUserPanelWidget();
        Y.Assert.isTrue(
            overlay.get('visible'),
            'We should be able to show the panel with showUserPanelWidget');
    },

    testWidgetHiding: function() {
        var overlay = module._user_panel_singleton;
        Y.Assert.isTrue(
            overlay.get('visible'),
            'The widget should currently be visible');
        var link = Y.one('#user-options-link');
        link.simulate('click');
        Y.Assert.isFalse(
            overlay.get('visible'),
            'If an element outside the panel is clicked the ' +
            'panel should hide.');
    }
}));

namespace.suite = suite;

}, '0.1', {'requires': [
    'node-event-simulate', 'test', 'maas.testing', 'maas.user_panel']}
);
maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/error.html0000644000000000000000000000124113056115004022735 0ustar 00000000000000

    {$ error $}

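The `{$ ... $}` markers in this partial and the next are custom AngularJS interpolation delimiters (Angular's default is `{{ ... }}`). An application opts into them via $interpolateProvider; below is a configuration sketch against the "MAAS" module loaded by the unit tests above, not MAAS's actual setup code:

angular.module('MAAS').config(function($interpolateProvider) {
    // Keep template markers distinct from any server-side templating
    // that already claims the default {{ }} syntax.
    $interpolateProvider.startSymbol('{$');
    $interpolateProvider.endSymbol('$}');
});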
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/node-details.html0000644000000000000000000035765013056115004024176 0ustar 00000000000000
    • {$ error.message $}

    Machine summary

    Owner
    {$ node.owner || "Unassigned" $}
    Tags
    Cluster
    {$ node.nodegroup.name $}
    Zone
    {$ node.zone.name $}
    Architecture
    {$ node.architecture || "Missing" $}
    Owner
    {$ node.owner || "Unassigned" $}
    Tags
    {$ node.tags.join(', ') $}
    CPU
    {$ node.cpu_count $} cores
    RAM
    {$ node.memory $}GiB
    Storage
    {$ node.storage $}GB over {$ node.physical_disk_count $} disks
    Operating System
    {$ getOSText() $}
    Kernel
    {$ node.hwe_kernel $}
    Third Party Driver
    {$ node.third_party_driver.module $} "{$ node.third_party_driver.comment $}"
    Cancel

    Containers and VMs

    Name
    MAC
    IP Address
    {$ device.name $}
    {$ device.mac_address $}
    {$ device.ip_address $}

    Power

    • Power control software for this power type is missing from the cluster controller. To proceed, install the {$ getPowerErrors() $} on the {$ node.nodegroup.cluster_name $} cluster.
    Cancel

    Network

    Loading...

    Network

    • Network configuration cannot be modified unless the node is Ready or Broken.
    • Node must be connected to a network.
• Custom network configuration is only supported on Ubuntu. Using the OS default configuration.
    PXE
    Type
    Fabric
    VLAN
    Subnet
    IP Address
    {$ interface.name $} Show members Hide members
    {$ interface.mac_address $} Show members Hide members
    {$ getInterfaceTypeText(interface) $}
    {$ getSubnetText(getSubnet(interface.discovered[0].subnet_id)) $}
    • {$ getInterfaceError(interface) $}
    {$ interface.ip_address $} ({$ getLinkModeText(interface) $}) {$ interface.discovered[0].ip_address $} (DHCP)
    {$ member.name $}
    {$ member.mac_address $}
    {$ getInterfaceTypeText(member) $}
    {$ getAddName() $}
    {$ newInterface.parent.fabric.name $}
    {$ getVLANText(newInterface.parent.vlan) $}

    Are you sure you want to remove this {$ getRemoveTypeText(interface) $}?

    Cancel
    Cancel
    Bond
    Name
    Type
    Primary
    {$ parent.name $}
    {$ getInterfaceTypeText(parent) $}
    Cancel
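The bond form above gathers a name, a type, and a primary member from the listed parent interfaces. Roughly the parameter shape it would submit — the field names here are illustrative assumptions, not the confirmed websocket API:

// Build bond-creation parameters from the selected parent NICs.
function makeBondParams(name, parents, primary) {
    return {
        name: name,                         // the Name field
        parents: parents.map(function(nic) { return nic.id; }),
        mac_address: primary.mac_address    // the interface picked as Primary
    };
}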
    Physical
    {$ newInterface.errorMsg $}
    Cancel

    Storage

    • No storage information. Commissioning this node will gather the storage information.
    • Storage configuration cannot be modified unless the node is Ready or Allocated.
    • Storage configuration cannot be modified unless the node is Ready or Allocated and you own the node.
• Custom storage configuration is only supported on Ubuntu. Using the flat layout.
    • {$ issue $}

    File systems

    Name
    Size
    Mountpoint
    File system
    No filesystems defined.
    {$ filesystem.name $}
    {$ filesystem.size_human $}
    {$ filesystem.mount_point $}
    {$ filesystem.fstype $}

    Are you sure you want to unmount this filesystem?

    Cancel

    Are you sure you want to remove this {$ getRemoveTypeText(filesystem) $}?

    Cancel

    Available cache sets

    Name
    Size
    Used by
    {$ cacheset.name $}
    {$ cacheset.size_human $}
    {$ cacheset.used_by $}

    Are you sure you want to delete this cache set?

    Cancel

    Available disks and partitions

    Boot
    Size
    Device Type
    File system
    Tags
    No available disks or partitions.
    {$ item.model $}
    {$ item.serial $}
    {$ item.size_human $} Free: {$ item.available_size_human $}
    {$ getDeviceType(item) $}
    {$ getAddPartitionName(item) $}
    Partition
    Logical volume

    Are you sure you want to unformat this '{$ item.fstype $}' filesystem?

    Cancel
    Cancel

    Are you sure you want to remove this {$ getRemoveTypeText(item) $}?

    Cancel
    Cancel
    Cancel
    Remove {$ getRemoveTypeText(item) $}
    {$ availableNew.device.size_human $}
    Bcache
    Name
    Size
    Device type
    {$ availableNew.device.name $}
    {$ availableNew.device.size_human $}
    {$ getDeviceType(availableNew.device) $}
    Cancel
    {$ getNewRAIDSize() $}
    {$ availableNew.mode.title $}
    Name
    Size
    Device type
    Active
    Spare (Maximum {$ getTotalNumberOfAvailableSpares() $})
    {$ device.name $}
    {$ device.size_human $}
    {$ getDeviceType(device) $}
    Cancel
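``getTotalNumberOfAvailableSpares()`` above caps how many of the selected devices may be marked Spare: each RAID level needs a minimum number of active members, and only devices beyond that minimum can be spares. A sketch under that assumption — the per-level minimums are illustrative, not taken from the controller:

// Assumed minimum active-disk counts per RAID level.
var MIN_ACTIVE_DISKS = {
    'raid-0': 2,
    'raid-1': 2,
    'raid-5': 3,
    'raid-6': 4,
    'raid-10': 3
};

function getTotalNumberOfAvailableSpares(mode, selectedDevices) {
    var minimum = MIN_ACTIVE_DISKS[mode] || 2;
    return Math.max(0, selectedDevices.length - minimum);
}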
    {$ getNewVolumeGroupSize() $}
    Volume group
    Name
    Size
    Device type
    {$ device.name $}
    {$ device.size_human $}
    {$ getDeviceType(device) $}
    Cancel
Create RAID Create volume group Create cache set Create bcache

    Used disks and partitions

    Boot
    Device type
    Used for
    No disk or partition has been fully utilized.
    {$ item.name $}
    {$ item.model $}
    {$ item.serial $}
    {$ getDeviceType(item) $}
    {$ item.used_for $}

    Latest machine events

    No events.

    Event Time
    {$ getEventText(event) $} {$ event.created $}

    Load next 10 events
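"Load next 10 events" is most simply a growing display limit rather than a request per page. A sketch under that assumption, with illustrative names:

function EventsController($scope) {
    // "Latest machine events" starts with the ten most recent entries.
    $scope.eventsLimit = 10;
    // Each click widens the window; the template would then render with
    // something like: event in node.events | limitTo:eventsLimit
    $scope.loadMoreEvents = function() {
        $scope.eventsLimit += 10;
    };
}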

    Machine output YAML XML

                        
                    
    Filename Time Output
    {$ output.name $} {$ output.created $} {$ output.line_count $} lines
                        
                    
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/node-events.html0000644000000000000000000000471213056115004024041 0ustar 00000000000000
    Event Time
    {$ getEventText(event) $} {$ event.created $}
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/node-result.html0000644000000000000000000000161413056115004024051 0ustar 00000000000000
                    {$ getResultData() $}
                
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/nodes-list.html0000644000000000000000000014132213056115004023672 0ustar 00000000000000
    FQDN MAC Power Status Owner Cores RAM (GiB) Disks Storage (GB)
    {$ node.fqdn $} {$ node.pxe_mac $} (+{$ node.extra_macs.length $}) {$ node.status $} {$ node.owner $} {$ node.cpu_count $} {$ node.memory $} {$ node.physical_disk_count $} {$ node.storage $}
    FQDN MAC IP Assignment IP Address Owner
    {$ device.fqdn $} {$ device.primary_mac $} (+{$ device.extra_macs.length $}) {$ getDeviceIPAssignment(device.ip_assignment) $} {$ device.ip_address $} {$ device.owner $}
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/subnet-details.html0000644000000000000000000001063413056115004024535 0ustar 00000000000000
    DNS
    {$ server $}{$ $last ? '' : ', ' $}
    Default Gateway
    {$ subnet.gateway_ip $}
    Utilisation
    {$ subnet.statistics.usage_string $} used ({$ subnet.statistics.num_unavailable $}/{$ subnet.statistics.total_addresses $})
    Largest available range
    {$ subnet.statistics.largest_available $}
    IP Address
    Owner
    Usage
    Usage type
    Allocation type
    {$ ip.ip $}
    {$ ip.user $}
    {$ ip.node_summary.hostname $}{$ ip.node_summary.hostname $}
    {$ ip.node_summary.installable ? "Node" : "Device" $}
    Automatic Sticky User reserved DHCP Observed Unknown
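The subnet view maps each address's numeric allocation type onto one of the labels above. A sketch of that mapping — the numeric values mirror MAAS's IPADDRESS_TYPE choices but are assumptions here:

// Assumed IPADDRESS_TYPE values -> display labels.
var ALLOC_TYPE_LABELS = {
    0: 'Automatic',
    1: 'Sticky',
    4: 'User reserved',
    5: 'DHCP',
    6: 'Observed'
};

function getAllocTypeLabel(allocType) {
    return ALLOC_TYPE_LABELS[allocType] || 'Unknown';
}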
    maas-1.9.5+bzr4599.orig/src/maasserver/static/partials/subnets-list.html0000644000000000000000000001370113056115004024244 0ustar 00000000000000

    {$ data.fabric.name $}

    VLAN
    Space
    Subnet
    Available IP Addresses
    {$ getVLANName(row, data.sortedData) $}
    {$ getSpaceName(row, data.sortedData) $}
    {$ row.subnet.statistics.available_string $}

    {$ data.space.name $}

    Fabric
    VLAN
    Subnet
    Available IP Addresses
    There are currently no subnets in this space.
    {$ getFabricName(row, data.sortedData) $}
    {$ getVLANName(row, data.sortedData) $}
    {$ row.subnet.statistics.available_string $}
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/0000755000000000000000000000000013056115004021162 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/0000755000000000000000000000000013056115004020775 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas-styles.scss0000644000000000000000000000020213056115004023205 0ustar 00000000000000@charset "UTF-8"; /* import Ubuntu files */ @import "ubuntu/ubuntu-styles"; @import "cloud/cloud-styles"; @import "maas/styles"; maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/0000755000000000000000000000000013056115004021376 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/base/0000755000000000000000000000000013056115004022074 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/cloud-styles.scss0000644000000000000000000000213613056115004024510 0ustar 00000000000000@charset "UTF-8"; @import "helpers/variables"; @import "helpers/mixins"; @import "base/typography"; @import "base/utils"; @import "layout/header"; @import "layout/grid"; @import "layout/footer"; @import "components/actions_box"; @import "components/anchor"; @import "components/box"; @import "components/buttons"; @import "components/charm-list"; @import "components/code"; @import "components/cookie-policy"; @import "components/deploy-command"; @import "components/dropdown"; @import "components/file-list"; @import "components/hero"; @import "components/how-to"; @import "components/links"; @import "components/lists"; @import "components/maintainer-list"; @import "components/rating"; @import "components/revision-list"; @import "components/section"; @import "components/see-more"; @import "components/strips"; @import "components/tag-list"; @import "components/twitter-feed"; @import "components/segments"; @import "components/tooltip"; // XXX pyscss issue, cannot render first selector // This masks the issue by being removed as the first style.
.fake { display: block; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/0000755000000000000000000000000013056115004023347 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/core-print.scss0000644000000000000000000000000013056115004024137 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/helpers/0000755000000000000000000000000013056115004022624 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/layout/0000755000000000000000000000000013056115004022477 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/base/_typography.scss0000644000000000000000000000077713056115004025345 0ustar 00000000000000/* @section general -------------------------------------------------------------- */ body, a:link, a:visited { -webkit-font-smoothing: antialiased; } code, pre, p { line-height: 1.5; } body { font-size: $base + px; } @media only screen and (min-width : 768px) { code, pre, p { line-height: 1.6; } body { font-size: $base + px; } } @media (-webkit-min-device-pixel-ratio: 2), (min-resolution: 192dpi) { body { font-size: $hd_base + px; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/base/_utils.scss0000644000000000000000000000016513056115004024272 0ustar 00000000000000.not-for-medium { display: none; @media only screen and (min-width: 985px) { display: block; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_actions_box.scss0000644000000000000000000000100313056115004026705 0ustar 00000000000000.actions { .actions__social-item--twitter, .actions__social-item--google-plus { text-indent: -99999px; background-image: url("../img/icons/icon-social.svg"); background-repeat: no-repeat; height: 44px; width: 44px; overflow: hidden; display: block; } .actions__social-item--twitter { background-position: 0 0; &:hover { background-position: 0 -45px; } } .actions__social-item--google-plus { background-position: -45px 0; &:hover { background-position: -45px -45px; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_anchor.scss0000644000000000000000000000072613056115004025662 0ustar 00000000000000.anchor { display: inline-block; margin-left: 3px; opacity: .0; position: relative; top: 1px; width: 1em; height: 1em; background: url('../img/icons/anchor_16.svg') 0 80% no-repeat; background-size: 16px; -moz-transition: opacity .1s; -webkit-transition: opacity .1s; transition: opacity .1s; } h1:hover .anchor, h2:hover .anchor, h3:hover .anchor, h4:hover .anchor, dt:hover .anchor, li:hover .anchor { opacity: 1; }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_box.scss0000644000000000000000000000107313056115004025174 0ustar 00000000000000 .box { @include rounded_corners(0); @include box_shadow(none); display: block; margin-bottom: 0; border-top: 0; border-left: 0; border-right: 0; padding-left: 0; padding-right: 0; } .box-dim { background-color: #fafafa; } @media only screen and (min-width : 768px) { .box { @include rounded_corners(4px); @include box_shadow(0px 1px 1px 0px rgba(0, 0, 0, 0.15)); display: inline-block; margin-bottom: 20px; padding-left: 20px; padding-right: 20px; border: 0; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_buttons.scss0000644000000000000000000000226513056115004026106 0ustar 00000000000000 a.indent { @include box_shadow(inset 0 1px 2px 0 #333 ); background: rgba(0, 0, 0, 0.1); padding: 10px 30px; font-weight: normal; &:hover { background: rgba(0, 0, 0, 0.2); } }
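// For reference, a usage sketch of the helpers this sheet pulls in from
// helpers/_mixins.scss; the expansion shown is the expected output, not a
// verbatim compile from this tree:
//
//   a.demo { @include rounded_corners(3px); @include box_shadow(none); }
//
// compiles to roughly:
//
//   a.demo { -webkit-border-radius: 3px; -moz-border-radius: 3px;
//            border-radius: 3px; -moz-box-shadow: none;
//            -webkit-box-shadow: none; box-shadow: none; }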
a.link-cta-positive { @include rounded_corners(3px); @include box_sizing(); background-color: $ubuntu_orange; color: #fff; display: inline-block; font-size: 1.14286em; font-weight: 300; text-decoration: none; margin: 0; padding: 8px 14px; text-align: center; -moz-transition: background .2s; -webkit-transition: background .2s; transition: background .2s; width: 100%; &:hover { background-color: darken( $ubuntu_orange, 10% ) } .external { padding-right: 1em; background-image: url("../img/icons/external-link-white.svg"); background-repeat: no-repeat; background-size: 11px; background-position: right top; @media only screen and (min-width : 769px) { padding-right: .7em; } } } a.link-cta-negative { @extend a.link-cta-positive; background-color: $coolish_grey; &:hover { background-color: $warm_grey; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_charm-list.scss0000644000000000000000000000356613056115004026460 0ustar 00000000000000 .charms__list { list-style: none; margin-bottom: 1em; margin-left: 0; border-bottom: 1px dotted $light_mid_grey; .charms__list--config { display: none; } .charms__list--toggle { display: block; &.is-open + .charms__list--config { display: block; } } .charms__list--config-name { border-top: 0; } } .charms__list--item { font-size: 1em; border-top: 1px dotted $light_mid_grey; padding: 10px 0 0 10px; margin-bottom: 10px; &:last-of-type { margin-bottom: 10px; } } .charms__list--icon { margin-right: .4em; width: 25px; height: 25px; } .charms__list--toggle { background: url("../img/shared/icon-arrow-down.svg") no-repeat center center; width: 16px; height: 100%; float: right; text-indent: -99999px; margin-right: 20px; &.is-open { background-image: url("../img/shared/icon-arrow-up.svg"); background-size: 14px; } } .charms__list--config { padding-left: 35px; padding-bottom: 20px; } .charms__list--config-name { font-size: 1em; margin-top: 15px; padding-top: 15px; font-weight: 400; border-top: 1px dotted $light_mid_grey; &:first-of-type { border-top: 0; } } .charms__list--config-type { font-weight: 400; } .charms__list--config-description, .charms__list--config-setting { margin-left: 30px; margin-top: 8px; } .charms__list--config-setting { font-size: 0.875em; color: $warm_grey; font-family: "Ubuntu Mono","Consolas","Monaco","Lucida Console","Courier New",Courier,monospace; } body.no-svg { .charms__list .charms__list--toggle { background-image: url("../img/shared/icon-arrow-down.png"); } .charms__list .charms__list--toggle.is-open { background-image: url("../img/shared/icon-arrow-up.png"); } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_code.scss0000644000000000000000000000033013056115004025311 0ustar 00000000000000pre { background: transparent; border: 1px solid $warm_grey; margin: 0 0 1.5em 0; } pre:not(:first-child) { margin-top: 1.5em; } code.language-bash { .comment { color: $warm_grey; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_cookie-policy.scss0000644000000000000000000000146213056115004027154 0ustar 00000000000000 .cookie-policy { @include box_shadow(0 -1px 2px rgba(0, 0, 0, 0.2)); background-color: #fae4dc; bottom: 0; position: fixed; width: 100%; z-index: 100; p { @include box_sizing(); font-size: 13px; margin-bottom: 0; margin-left: 0; padding: 8px 0; width: 100%; } .link-cta { background-image: url(../img/icons/close-orange.svg); background-repeat: no-repeat; color: #fff; float: right; font-size: 1em; height: 15px; margin: 12px 0; margin-top: 12px; padding: 0; text-decoration: none; 
text-indent: -9999px; width: 16px; } } html.no-svg, html.opera-mini { .cookie-policy .link-cta { background-image: url(../img/icons/close-orange.png); } } html.opera-mini { .cookie-policy { position: relative; top: 0; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_deploy-command.scss0000644000000000000000000000257113056115004027320 0ustar 00000000000000 .deploy-command { margin-bottom: 10px; position: relative; .deploy-command__field { @include box_sizing(); @include rounded_corners(); @include box_shadow(inset 0 1px 2px 0 rgba(0,0,0,0.12)); background-image: url("../img/icons/code-snippet_16.svg"); background-repeat: no-repeat; background-position: 5px center; background-color: #fff; background-size: 1.142857143em; width: 100%; height: 37px; border: 1px solid #c1c1c1; padding: .6em; color: $warm_grey; padding-left: 2em; font-size: 0.875em; white-space: nowrap; overflow: hidden; cursor: text; } .command2clipboard__clip { cursor: pointer; @include rounded_corners( 0 4px 4px 0); line-height: 1; position: absolute; right: 1px; top: 1px; background-color: $white; padding: 9px 8px 7px; border-left: 1px solid $coolish_grey; display: none; } .command2clipboard__clip.zeroclipboard-is-hover { background-color: $background_color; } } .box { .deploy-command { .deploy-command__field { } } } @media only screen and (min-width : $page-maxwidth) { .deploy-command { .command2clipboard__clip { display: inline-block; } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_dropdown.scss0000644000000000000000000000276013056115004026244 0ustar 00000000000000.dropdown-menu { position: relative; display: block; &.open { .menu-link { background-color: $header_bg; } .dropdown { display: block; } } .menu-link { .border-box { display: block; color: $nav_border_light; } } .dropdown { @include rounded_corners(0 0 4px 4px); display: none; position: absolute; z-index: 1000; top: 0; left: 0; right: 0; width: auto; background-color: $white; box-shadow: 0 1px 5px rgba(0, 0, 0, 0.2); &.right { left: auto; right: 0; text-align: right; } &.narrow { min-width: 140px; width: auto; } a, p, li { color: $cool_grey; } p { padding: 11px 20px; } header, footer { background-color: $white; } header { padding: 11px 20px; color: $nav_border_light; font-size: 16px; font-weight: 300; } footer { padding: 20px; } ul { width: auto; left: 0; right: 0; li { a { width: auto; display: block; padding: 15px 20px; } } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_file-list.scss0000644000000000000000000000470213056115004026276 0ustar 00000000000000 .files { .files__list { list-style: none; margin-bottom: 1em; border-left: 1px solid #cbcbcb; margin-left: 1em; @media only screen and (min-width : 768px) { margin-left: 0; } li { position: relative; a:link, a:visited { color: #333; text-decoration: none; } &:before { content: ''; width: 12px; height: 1px; background: $light_mid_grey; display: inline-block; position: relative; top: -4px; margin-right: 5px; } &:last-child { &:after { content: ''; width: 4px; height: 1em; position: absolute; display: block; left: -2px; top: .85em; background: #fff; } } } ul:last-child > li a:after { content: ''; width: 4px; height: 3em; position: absolute; display: block; left: -1.8em; top: -1.45em; background: white; cursor: default; } .files__list { margin-left: 1.3em; } .files__list--item, .files__list--item-folder { font-size: 0.875em; margin-bottom: 0.75em; } .files__list--item-folder { background-position: center right; background-size: 12px; 
cursor: pointer; &:after { font-size: 14px; display: block; content: "-"; position: absolute; left: -7px; top: 4px; padding: 0 4px; line-height: 0.9em; background: $white; border: 1px solid $warm_grey; } } .files__list--item-folder.is-closed { & + ul { display: none; } &:after { content: "+"; padding: 0 2px; } } } .files__actions--launchpad { background: url("../img/icons/icon-launchpad.svg") no-repeat; padding-left: 1.4em; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_hero.scss0000644000000000000000000000062613056115004025344 0ustar 00000000000000 /* @section heros -------------------------------------------------------------- */ #main-content .row-hero { padding-top: 20px; margin-top: 0; .intro { font-size: 16px; } } @media only screen and (min-width : 769px) { #main-content .row-hero { padding-top: 60px; .intro { font-size: 1.4375em; margin-bottom: 40px; } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_how-to.scss0000644000000000000000000000013013056115004025612 0ustar 00000000000000 .how-to { div div img { float: left; margin: 0 20px 20px 0; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_links.scss0000644000000000000000000000242213056115004025523 0ustar 00000000000000header.banner { a.external, a.external:hover { background-image: url('../img/icons/external-link-grey.png'); } } a.external, a.external:hover, header.banner nav.nav-primary ul li a.external:link, header.banner nav.nav-primary ul li a.external:visited, header.banner nav.nav-primary ul li a.external:hover { background-repeat: no-repeat; } a.external, a.external:hover, header.banner nav.nav-primary ul li a.external:link, header.banner nav.nav-primary ul li a.external:visited, header.banner nav.nav-primary ul li a.external:hover { background-position: right 14px top 14px; padding-right: 35px; background-size: auto; } @media only screen and (max-width : 769px) { header.banner nav.nav-primary ul li a.external:link, header.banner nav.nav-primary ul li a.external:visited, header.banner nav.nav-primary ul li a.external:hover { background: none; } header.banner nav.nav-primary ul li a.external:after { display: inline-block; width: 11px; height: 11px; margin-left: 0.25em; // For mobile we need to display a dark external link as the background // has been changed to white.
background-image: url('../img/icons/external-link-dark.png'); vertical-align: text-top; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_lists.scss0000644000000000000000000000443113056115004025543 0ustar 00000000000000 .list__icons { margin-left: 0; margin-bottom: 5px; li { list-style: none; float: left; padding: 8px 8px 0 0; margin-bottom: 0; img { width: 24px; height: 24px; vertical-align: top; } } } .list__tick { list-style-image: url('../img/icons/tick.png'); } .list__middot { margin-left: 0; list-style: none; li { display: inline; &:after { content: "•"; color: #888; margin: 0 5px 0 8px; vertical-align: middle; } &.files__actions--last:after { content: ""; } } } .combined-list { .list li { border-bottom: 1px dotted #888; padding: 10px 0; } @media only screen and (max-width : 767px) { .last-col .list li:last-of-type { border-bottom: 0; padding-bottom: 0; } } @media only screen and (min-width : 768px) { .list li:last-of-type { border-bottom: 0; padding-bottom: 0; } } } .events-list { li { position: relative; padding-bottom: 20px; } dd { margin-left: 0; background-position: 0 center; background-repeat: no-repeat; background-size: 20px 20px; padding: 6px 20px 6px 24px; } .event-map { display: none; } .event-date { background-image: url("../img/icons/calendar.svg"); } .location { background-image: url("../img/icons/location.svg"); } @media only screen and (min-width : 769px) { .event-details-wrapper { padding-left: 120px; } .event-map { @include rounded_corners(10px); position: absolute; left: 0; top: 0; height: 100px; width: 100px; float: left; margin-right: 10px; margin-top: 5px; overflow: hidden; display: block; } } } body.no-svg { .events-list { .event-date { background-image: url("../img/icons/calendar.png"); } .location { background-image: url("../img/icons/location.png"); } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_maintainer-list.scss0000644000000000000000000000011113056115004027474 0ustar 00000000000000 .maintainers { .maintainer__email { display: block; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_rating.scss0000644000000000000000000000044013056115004025665 0ustar 00000000000000 .ratings { ul { margin-left: 2px; margin-bottom: 0; li { margin-bottom: 0; img { vertical-align: text-top; } &:first-of-type { margin-left: 0; } } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_revision-list.scss0000644000000000000000000000042013056115004027206 0ustar 00000000000000 .revisions__list { list-style: none; margin-left: 0; .revisions__list-item { margin-bottom: 1em; } .revisions__list_meta { color: #888; margin-bottom: .2em; } .revisions__list_meta_date { float: right; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_rows.scss0000644000000000000000000000056513056115004025403 0ustar 00000000000000.row__tiled { -moz-box-shadow: inset 0 1px 2px 0 rgba(0,0,0,0.12), inset 0 -1px 2px 0 rgba(0,0,0,0.12); -webkit-box-shadow: inset 0 1px 2px 0 rgba(0,0,0,0.12), inset 0 -1px 2px 0 rgba(0,0,0,0.12); box-shadow: inset 0 1px 2px 0 rgba(0,0,0,0.12), inset 0 -1px 2px 0 rgba(0,0,0,0.12); background: url(../img/shared/pattern_tile.png); } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_section.scss0000644000000000000000000000206513056115004026052 0ustar 00000000000000.section { @include box_sizing(); overflow: hidden; padding-bottom: 20px; padding-top: 20px; .section__title { background: url(../img/shared/icon-arrow-up.svg) no-repeat center right; cursor: 
pointer; margin-bottom: 1em; } &.is-closed { height: 60px; .section__title { background-image: url(../img/shared/icon-arrow-down.svg); } } } .no-svg { .row.section .section__title { background: url(../img/shared/icon-arrow-up.png); } .row.section.is-closed .section__title { background: url(../img/shared/icon-arrow-down.png); } } footer .section { margin-bottom: 1em; padding-bottom: 1em; &.is-closed { height: auto; padding-bottom: 0; ul { display: none; } } } @media only screen and (min-width : 769px) { .row .section { .section__title { background-image: none; cursor: auto; } &.is-closed { height: auto; } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_see-more.scss0000644000000000000000000000236613056115004026126 0ustar 00000000000000.list--concealed { .list-item { display: none; } .list-item:first-of-type { display: list-item; } &.list--visible-6 { .list-item:nth-child(-n+6) { display: list-item; } } &.list--visible-4 { .list-item:nth-child(-n+4) { display: list-item; } } &.list--visible-2 { .list-item:nth-child(-n+2) { display: list-item; } } a.btn__see--less { display: none; } a.btn__see--more { display: inline; } } .list--revealed .list__controls, .list--concealed .list__controls { display: block; margin-top: 2em; } .list--revealed { .list--item { display: list-item; } a.btn__see--less { display: inline; } a.btn__see--more { display: none; } } a.btn__see--more, a.btn__see--less { -moz-transition: background .2s; -webkit-transition: background .2s; transition: background .2s; @include rounded_corners(3px); color: #333; border-radius: 2px; border: 1px solid $coolish_grey; background: #fff; padding: 0.384615385em 1.153846154em; &:hover { background: #eee; text-decoration: none; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_segments.scss0000644000000000000000000000013313056115004026225 0ustar 00000000000000.spaced-segment { margin-bottom: 50px; h3 { margin-bottom: 1.3em; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_strips.scss0000644000000000000000000000414413056115004025732 0ustar 00000000000000 /* @section strips -------------------------------------------------------------- */ .strip-dark, .strip-light { clear: both; } .strip-dark { background-color: $dark_aubergine; background-image: none; background-repeat: repeat; color: #fff; } .strip-dark.solid { background-image: none; background-color: $dark_aubergine; } .strip-dark ul, .strip-dark ol { margin: 0; padding: 0; } .strip-dark .icon, .strip-dark ol span { @include background_size(40px 40px); background-image: url(../img/icons/list-icon-background.png); background-repeat: no-repeat; display: block; margin: 0 20px 20px 0; padding: 24px; float: left; width: 16px; height: 16px; padding: 12px; } .strip-light .icon, .strip-dark .icon { position: absolute; } .strip-light { background-color: rgba(255,255,255,0.6); } .strip-dark ol, .strip-dark ul { padding: 20px 0; } .strip-light .icon { display: block; background-image: url(../img/icons/list-icon-background.png); padding: 24px; margin: 0 auto 48px; } .strip-dark .connected-list li, .strip-light .connected-list li { margin-bottom: 10px; min-height: 52px; } .strip-dark .connected-list li p, .strip-light .connected-list li p, .strip-dark .connected-list li h3, .strip-light .connected-list li h3 { padding-left: 50px; } .strip-dark ol.connected-list li p, .strip-dark ol.connected-list li h3 { padding-left: 50px; } .strip-dark ol.connected-list li span { float: left; font-size: 22px; font-weight: normal; height: 26px;
margin-left: 0; margin-right: 20px; padding-top: 2px; position: absolute; text-align: center; width: 16px; } .strip-white { background: #fff; } .strip-trans { background: transparent; } .strip-green { background-image: linear-gradient(to right, #6fad23 0%, #7cc227 100%); overflow: hidden; &, a { color: #fff; } } .strip-blue { background-image: linear-gradient(to right, rgba(16,118,162,1) 0%, rgba(53,159,205,1) 100%); overflow: hidden; &, a { color: #fff; } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_tag-list.scss0000644000000000000000000000043213056115004026126 0ustar 00000000000000.tag-list { list-style: none; margin: 0; } .tag-list--item { display: inline-block; text-transform: lowercase; a:link, a:visited { color: #333; } a:after { content: ','; } &:last-child a:after { content: ''; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_tooltip.scss0000644000000000000000000000230613056115004026076 0ustar 00000000000000 @media only screen and (min-width : 769px) { .tip, .command2clipboard__clip { position: relative; display: inline-block; } .tip .tip-content, .command2clipboard__clip .tip-content { position: absolute; z-index: 98; left: -1000px; right: -1000px; top: -30px; font-weight: 300; margin: auto; display: block; text-align: center; white-space: nowrap; } .tip:hover .tip-content:after, .command2clipboard__clip.zeroclipboard-is-hover .tip-content:after { display: table; z-index: 98; margin: auto; color: #fff; border-radius: 3px; background: #000; box-shadow: none; font-size: 12px; content: attr(data-tooltip); padding: 4px 6px; white-space:nowrap; text-align:center; } .tip:hover .tip-content:before, .command2clipboard__clip.zeroclipboard-is-hover .tip-content:before { position: absolute; top: 100%; left: 50%; margin-left: -5px; content: ''; border: solid transparent; border-width: 5px; border-top-color: #000; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/components/_twitter-feed.scss0000644000000000000000000000150313056115004027005 0ustar 00000000000000#twitter-feed, #blog-feed { margin: 35px 0; > ul { list-style: none; margin-left: 0; li { position: relative; margin-bottom: 30px; } } .user { font-size: 0.875em; margin-bottom: 0.5em; img { @include rounded_corners(4px); width: 24px; height: 24px; vertical-align: middle; margin-right: 5px; } a:link, a:visited, a:hover { text-decoration: none; } span[data-scribe="element:name"] { color: #333; } } .tweet { padding-left: 33px; margin-bottom: .3em; } .timePosted { padding-left: 33px; font-size: 0.875em; } .interact { padding-left: 33px; a:link, a:visited { margin-right: 20px; } } } @media only screen and (min-width : 769px) { #twitter-feed { .timePosted { position: absolute; top: 0; right: 0; padding-left: 0; } } }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/helpers/_mixins.scss0000644000000000000000000000412313056115004025167 0ustar 00000000000000/** * mixins * * @section mixins */ @mixin font_size ($size: 16) { font-size: ($size / $base)em; margin-bottom: (12 / $size)em; } @mixin box_sizing ($type: border-box) { -webkit-box-sizing: $type; -moz-box-sizing: $type; box-sizing: $type; } @mixin rounded_corners($radius: 4px 4px 4px 4px) { -webkit-border-radius: $radius; -moz-border-radius: $radius; border-radius: $radius; } @mixin box_shadow($shadow: 0 2px 2px 0 #c2c2c2) { -moz-box-shadow: $shadow; -webkit-box-shadow: $shadow; box-shadow: $shadow; } @mixin gradient($from, $to) { background-color: $to; background-image: -moz-linear-gradient($from, $to); background-image: 
-webkit-gradient(linear, 0% 0%, 0% 100%, from($from), to($to)); background-image: -webkit-linear-gradient($from, $to); background-image: -o-linear-gradient($from, $to); } @mixin footer($background) { padding: $gutter_width $two_col $gutter_width $four_col; margin-bottom: 0; background: url($background) no-repeat scroll $one_col center #F7F7F7; } @mixin clearfix() { *zoom:1; &:before, &:after { content:""; display:table; } &:after { clear:both; } } // CSS3 columns @mixin columns($num: 3, $gap: 20) { -moz-column-count: $num; -moz-column-gap: ($gap / $base)em; -webkit-column-count: $num; -webkit-column-gap: ($gap / $base)em; column-count: $num; column-gap: ($gap / $base)em; } @mixin background_size($size: 100% 100%) { -moz-background-size: $size; -webkit-background-size: $size; -o-background-size: $size; background-size: $size; } /* row mixins */ @mixin strip-dark() { background-color: #505050; color: $white; code { color: $light_grey; } } @mixin strip-black() { background-color: #0D0B0A; color: $white; code { color: $light_grey; } } @mixin strip-light { background-color: $white; } @mixin row-expand { padding: 0; .inner-wrapper { max-width: $page-maxwidth + $page-padding * 2; } } @mixin light-gradient($to) { background-image: linear-gradient(to $to, $light-gradient, transparent); } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/helpers/_variables.scss0000644000000000000000000000611213056115004025630 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Patterns Stylesheet * * The CSS file required by Ubuntu patterns page * * @project Ubuntu Patterns * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * * @see http://design.ubuntu.com */ $one_col: 6.38297%; $two_col: 14.89361%; $three_col: 23.40425%; $four_col: 31.91489%; $five_col: 40.42553%; $six_col: 48.93617%; $seven_col: 57.4468%; $eight_col: 65.95744%; $nine_col: 74.46808%; $ten_col: 82.97872%; $eleven-col: 91.48936%; /** * standard colors * * @colordef standard colors */ $header-bg: #000000; /* header background colour */ $nav_bg: #ffffff; /* navigation's background colour */ $nav_link_color: #333333; /* navigation's link colour */ $nav_border_dark: #262626; /* navigation's dividing border colour */ $nav_border_light: #f2f2f4; /* navigation's lighter dividing border colour */ $nav_hover_bg: #d0d0d0; /* navigation's hover background colour */ $nav_active_bg: #dddddd; /* navigation's active background colour */ $ubuntu_orange: #dd4814; /* used for text links also on any site except canonical */ $light_orange: #fdf6f2; /* used as background on pre text */ $canonical_aubergine: #772953; $light_aubergine: #77216f; /* consumer */ $mid_aubergine: #5e2750; /* consumer & enterprise */ $dark_aubergine: #2c001e; /* enterprise */ $warm_grey: #888888; $mid_grey: #cdcdcd; $light_mid_grey: #d4d4d4; $cool_grey: #333333; $light_grey: #f7f7f7; $white: #ffffff; $black: #000000; $light-gradient: #221e1c; $coolish_grey: #b2b2b2; /* notifications */ $error: #df382c; /* red */ $warning: #eca918; /* yellow */ $success: #38b44a; /* green */ $information: #19b6ee; /* cyan */ /* colour coded status - from negative to positive (Icon: canonical circle) */ $status_red: #df382c; /* red, for status that require immediate attention */ $status_grey: #aea79f; /* grey, for disabled status or ones that don’t require attention */ $status_yellow: #efb73e; /* yellow, for status that require attention */ $status_blue: #19b6ee; /* blue, for status that don’t require action */ $status_green: #38b44a; /* green, for positive status */ /* misc colours */
$background_color: #eeeeee; $box_solid_grey: #efefef; $link_color: $ubuntu_orange; /* This is the global link color, mainly used for links in content */ /* grid variables */ $base: 16; $hd_base: 18; $gutter_width: 20px; $grid_gutter: 20px; $gutter: 2.12766%; $page-maxwidth: 1030px; $page-padding: 40px;maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/layout/_footer.scss0000644000000000000000000001250413056115004025033 0ustar 00000000000000 /* @section footer -------------------------------------------------------------- */ .footer-cta { background: #fff; padding-bottom: 20px; } .footer-wrapper.strip-light { background-color: #fff; } .solutions-cta { height: 60px; background-color: $ubuntu_orange; line-height: 60px; text-align: center; a { color: #fff; font-size: 1.25em; } } footer.global { background-color: white; box-shadow: none; padding-top: 0; .row { padding: 10px 0 0; } .two-col { width: 46%; float: left; display: inline-block; min-height: 200px; } h2 { padding-bottom: 0; color: $warm_grey; font-size: 16px; } nav .canonlist ul li, nav .additional-info ul li { min-height: 0; width: 48%; float: left; } ul.bullets li:after { line-height: 1; color: $warm_grey; content: "•"; vertical-align: middle; margin: 0 5px; } ul.inline li:last-child { width: auto; } a.external { background-image: url('../img/icons/external-link-dark.png'); } .top-link { margin-bottom: 10px; } a { color: $cool_grey; } a:hover { color: $ubuntu_orange; } .legal { background-image: none; } .legal.has-cookie { padding-bottom: 70px; } .inner-wrapper { overflow: visible; } a.link-cta-positive { width: auto; margin-top: 10px; padding-left: 20px; padding-right: 20px; color: #fff; font-size: 14px; } .section__title { background: none; cursor: default; } } .legal-inner { clear: both; overflow: hidden; float: left; width: 100%; padding: 20px 10px 0; margin: -3px -10px 0; } .social, .social--right { margin-left: 0; .social__item { display: inline; float: left; padding-right: 1em; } .social__google, .social__facebook, .social__twitter { background-image: url("../img/icons/icon-social.png"); display: block; width: 45px; height: 44px; &.social__twitter:hover { background-position: 0 -45px; } &.social__facebook { background-position: 90px 0; &:hover { background-position: 90px -45px; } } &.social__google { background-position: 135px 0; &:hover { background-position: 135px -45px; } } } } @media only screen and (min-width : 768px) { .social--right { float: right; } } #additional-info { border-bottom: 0; h2:before { background-image: url("../img/icons/external-link-grey.svg"), none; background-repeat: no-repeat; background-size: 14px 14px; content: ""; display: inline-block; height: 15px; margin-right: 3px; position: relative; top: 3px; width: 15px; } div li { border-left: 1px solid #d4d7d4; box-sizing: border-box; display: block; float: left; margin: 0; padding: 0; width: 50%; a { border-bottom: 1px solid #d4d7d4; box-sizing: border-box; color: #333333; display: block; float: left; margin: 0; overflow: hidden; padding: 8px 10px; text-align: left; white-space: normal; width: 100%; } } .section__title { border-bottom: 1px solid #d4d7d4; background-position: 100% .1em; } } html.opera-mini footer #nav-global h2:before, html.opera-mini footer #additional-info h2:before, html.no-svg footer #nav-global h2:before, html.no-svg footer #additional-info h2:before { background-image: url('../img/icons/external-link-grey.png'); } @media only screen and (min-width : 769px) { .footer-wrapper.strip-light { white-space:nowrap; } footer.global { 
padding-top: 40px; padding-bottom: 40px; .two-col { width: 14.89361%; display: inline-block; min-height: 0; } .section { min-height: 160px; margin-right: 40px; padding-bottom: 0; border-right: 1px dotted #aaa; border-bottom: 0; &:last-child { margin-right: 0; border-right: 0; } } li { display: inline; float: left; } ul.no-bullets li { a { font-size: 16px; &:hover { color: $ubuntu_orange; } } border-right: 1px dotted #aaa; padding-right: 15px; padding-left: 15px; &:last-child { border-right: none; } &:first-child { padding-left: 0px; } } } #additional-info { .section__title { border-bottom: 0; } div li, div a:link { width: 100%; border: 0; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/layout/_grid.scss0000644000000000000000000000176513056115004024471 0ustar 00000000000000body { background-repeat: repeat; } .row { border: 0; background-color: rgba(255,255,255,0.6); } @media only screen and (min-width : 769px) { .append-one { margin-right: 10.6%; } .row { padding: 50px $page-padding 30px; } } .inner-wrapper { @include clearfix(); @include rounded_corners(0); @include box_shadow(none); background-color: transparent; background-image: none; margin: 0 auto; padding-bottom: 0; float: none; } .wrapper { position: static; background: transparent; width: 100%; overflow: hidden; } img.touch-border { margin-bottom: -50px; } .inner-wrapper, footer.global .legal { max-width: $page-maxwidth; width: auto; padding-left: 0; padding-right: 0; } .touch-top { margin-top: -50px; } @media only screen and (max-width : $page-maxwidth) { .inner-wrapper, footer.global .legal { padding-left: 8px; padding-right: 8px; width: auto; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/cloud/layout/_header.scss0000644000000000000000000001471213056115004024770 0ustar 00000000000000 /* @section header -------------------------------------------------------------- */ header.banner { background: $header-bg; -moz-box-shadow: inset 0 2px 2px -2px darken($header-bg, 7%); -webkit-box-shadow: inset 0 2px 2px -2px darken($header-bg, 7%); box-shadow: inset 0 2px 2px -2px darken($header-bg, 7%); margin-bottom: 0; .nav-primary { @include box_shadow(none); @include clearfix; } nav.nav-primary { border-bottom: 1px solid $nav_border_dark; overflow: visible; .user-nav { float: right; margin-right: 20px; } .user-dropdown { &:hover ul:after { display: none; } .menu-link { img { margin-right: 10px; &.hover { display: none; } &.normal { display: inline-block; } } } .open .menu-link, .menu-link:hover { img.hover { display: inline-block; } img.normal { display: none; } } ul { @include rounded_corners(0 0 4px 4px); margin-top: -2px; background-color: #fff; border-width: 0; a:hover { background-color: transparent; } } } /* * These styles need a higher level of specificity that can be provided * in the dropdown CSS. */ #user-dropdown .dropdown ul { width: auto; } } .nav-primary.nav-right .logo-ubuntu { @include background_size(73px 30px); background-image: url(../img/logos/logo.svg); background-position: 20px; background-repeat: no-repeat; min-width: 120px; padding-top: 6px; margin-left: 0; } } body.no-svg { header.banner .nav-primary.nav-right .logo-ubuntu { background-image: url(../img/logos/logo.png); } } // Hide the default browser search clear button. 
input[type=text]::-ms-reveal, input[type=text]::-ms-clear { display: none; width : 0; height: 0; } input[type="search"]::-webkit-search-decoration, input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-results-button, input[type="search"]::-webkit-search-results-decoration { display: none; } .contextual-bar { overflow: hidden; background-color: $nav_bg; border-bottom: 1px solid $light_mid_grey; } form.search-form { overflow: hidden; float: right; width: 100%; position: relative; input { @include rounded_corners(0); @include box_sizing(); border: 0; border-left: 0; margin: 0; width: 100%; height: 50px; float: left; font-size: 1em; padding-top: 0; padding-bottom: 0; padding-right: 30px; background-color: transparent; -webkit-appearance: none; } input:focus { border-color: $ubuntu_orange; } button[type=submit], button[type=submit]:hover { position: absolute; top: 10px; right: 10px; display: block; height: 30px; width: 30px; padding: 0; line-height: 0; -webkit-appearance: none; background: transparent; } button img { height: 16px; } } .contextual-nav { border: 0; display: block; margin: 0; padding-left: 10px; background-color: transparent; overflow: hidden; float: left; li, li:last-child { font-size: 0.875em; float: left; list-style-type: none; margin: 0; margin-left: 5px; } li a:link, li a:visited, .contextual-nav__label { display: block; color: $cool_grey; font-weight: 300; text-align: center; // bottom padding is adjusted to account for bottom border. padding: 16px 10px 10px 10px; border-bottom: 3px solid transparent; } .contextual-nav__label { color: $mid_grey; } li a:hover { border-bottom-color: $ubuntu_orange; text-decoration: none; color: $ubuntu_orange; } li a.active { border-bottom: 3px solid $ubuntu_orange; } } .opera-mini header.banner .logo-ubuntu, .no-svg header.banner .logo-ubuntu { background-image: url(../img/logos/logo.png); } @media only screen and (min-width : 769px) { header.banner .nav-primary ul li, header.banner .nav-primary ul li:last-child { border-bottom: 0; width: auto; } header.banner nav.nav-primary li a:link, header.banner nav.nav-primary li a:visited { border-left: 1px solid $nav_border_dark; font-weight: 400; } header.banner nav.nav-primary ul li a.active { // Adjust the padding to account for the active border. 
padding-bottom: 10px; background-color: #0e0c0b; border-bottom: 3px solid $ubuntu_orange; border-left: 1px solid $nav_border_dark; } header.banner nav.nav-primary ul li { border-left: 1px solid $nav_border_dark; a:hover { background-color: $ubuntu_orange; } } header.banner nav.nav-primary ul { background-color: transparent; border-right: 1px solid $nav_border_dark; display: block; } header.banner nav.nav-primary ul li:last-child { border-left: 1px solid $nav_border_dark; border-right: 0; } header.banner .nav-primary ul li a:active, header.banner .nav-primary ul li a:hover, header.banner .nav-primary ul li a:visited, header.banner nav.nav-primary ul li a:link { border-left: 0; } header.banner .nav-primary ul li a.external:hover { background-image: url("../img/icons/external-link-grey.png"); } form.search-form { width: 325px; input { border-left: 1px solid $light_mid_grey; margin: 0 20px; width: 250px; font-size: 0.875em; } } } @media only screen and (min-width : 1030px) { header.banner { height: 48px; overflow: hidden; .nav-primary { width: 100%; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/_mixins.scss0000644000000000000000000000254113056115004023342 0ustar 00000000000000//// /// MAAS Mixins /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// /// // String Replace /// @function str-replace($string, $search, $replace: "") { $index: str-index($string, $search); @if $index { @return str-slice($string, 1, $index - 1) + $replace + str-replace(str-slice($string, $index + str-length($search)), $search, $replace); } @return $string; } /// // Font Face /// @mixin font-face($name, $path, $weight: null, $style: null, $exts: eot woff2 woff ttf svg) { $src: null; $extmods: ( eot: "?", svg: "#" + str-replace($name, " ", "_") ); $formats: ( otf: "opentype", ttf: "truetype" ); @each $ext in $exts { $extmod: if(map-has-key($extmods, $ext), $ext + map-get($extmods, $ext), $ext); $format: if(map-has-key($formats, $ext), map-get($formats, $ext), $ext); $src: append($src, url(quote($path + "." 
+ $extmod)) format(quote($format)), comma); } @font-face { font-family: quote($name); font-style: $style; font-weight: $weight; src: $src; } } @mixin user-select { -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/base/0000755000000000000000000000000013056115004021707 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/0000755000000000000000000000000013056115004023162 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/layout/0000755000000000000000000000000013056115004022312 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/0000755000000000000000000000000013056115004022074 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/styles.scss0000644000000000000000000000136013056115004023215 0ustar 00000000000000@charset 'UTF-8'; // import required files @import "mixins"; @import "base/base"; @import "base/fonts"; @import "components/accordion"; @import "components/button"; @import "components/flashmessages"; @import "components/forms"; @import "components/icons"; @import "components/lists"; @import "components/ng-tags-input"; @import "components/pagination"; @import "components/search"; @import "components/spinner"; @import "components/tables"; @import "components/typography"; @import "components/yui-modules"; @import "components/placeholders"; @import "layout/footers"; @import "layout/grids"; @import "layout/headers"; @import "pages/accounts"; @import "pages/dashboard"; @import "pages/images"; @import "pages/networks"; @import "pages/nodes"; maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/base/_base.scss0000644000000000000000000000174213056115004023661 0ustar 00000000000000@charset "UTF-8"; /** * MAAS base styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ /** * Dependencies * * Importing mixin file from Ubuntu guidelines: core-mixins.scss */ /** * General * * General element styles * * @section general */ * { -webkit-backface-visibility: hidden; margin: 0; } html { height: 100%; } body { height: 100%; font-size: 1.0em; font-family: 'Ubuntu', Arial, 'libra sans', sans-serif; font-weight: 300; } hr { border: none; background: #B2B2B2; width: 100%; height: 1px; display: block; width: 100%; float: left; margin-bottom: 20px; } ul, ol { margin-left: 0; } a.alt { color: #333; &:hover { color: $ubuntu-orange; } } a:active, a:focus { outline: none; } ::selection { color: #FFF; background: $ubuntu-orange; } ::-moz-selection { color: #FFF; background: $ubuntu-orange; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/base/_fonts.scss0000644000000000000000000000165113056115004024077 0ustar 00000000000000@include font-face('Ubuntu', '../fonts/ubuntu-l-webfont', 300, normal, $exts: eot woff ttf svg); @include font-face('Ubuntu', '../fonts/ubuntu-li-webfont', 300, italic); @include font-face('Ubuntu', '../fonts/ubuntu-r-webfont', 400, normal, $exts: eot woff ttf svg); @include font-face('Ubuntu', '../fonts/ubuntu-m-webfont', 500, normal, $exts: eot woff ttf svg); @include font-face('Ubuntu', '../fonts/ubuntu-mi-webfont', 500, italic); @include font-face('Ubuntu', '../fonts/ubuntu-b-webfont', 700, normal); @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 400; src: url('https://themes.googleusercontent.com/static/fonts/ubuntu/v5/GZMdC02DTXXx8AdUvU2etw.woff') 
format('woff'); } @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 700; src: url('https://themes.googleusercontent.com/static/fonts/ubuntu/v5/pqisLQoeO9YTDCNnlQ9bfz8E0i7KZn-EPnyo3HZu7kw.woff') format('woff'); } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_accordion.scss0000644000000000000000000000564013056115004026164 0ustar 00000000000000@charset "UTF-8"; //// /// MAAS accordion styles /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// .accordion { @include box-sizing(); @include rounded-corners(2px); list-style: none; background:#FFF; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); margin-bottom: 40px; .disabled & { opacity: .5; pointer-events: none; } // accordion main sidebar title .accordion__title { border-bottom: 1px dotted #B2B2B2; padding: 13px 20px 12px; margin: 0; font-size: 1.3em; } // accordion data block, contains all filter links and controls. .accordion__tab { border-bottom: 1px dotted #B2B2B2; &:last-of-type { border: none; } // Block level title .accordion__tab-title { padding: 12px 20px; margin: 0; color: #888; cursor: pointer; background: transparent url('../img/icons/accordion-open.svg') top 20px right 20px no-repeat; &.active { background-image: url('../img/icons/accordion-close.svg'); + .accordion__tab-content { max-height: 3000px; transition: max-height .5s ease-in; } } } // Filter list .accordion__tab-content { max-height: 0; transition: max-height .5s ease-out; overflow: hidden; .accordion__tab-list { list-style-type: none; padding: 0 20px 14px; margin: 0; .accordion__tab-item { margin-bottom: 0.15em; .accordion__tab-link { @include box-sizing(); color: #333; width: 100%; display: inline-block; padding-right: 20px; &:hover { color: $ubuntu-orange; text-decoration: none; } .disabled & { color: #333; } } &.active { font-weight: 400; .accordion__tab-link { background: transparent url('../img/icons/cross.svg') top 7px right 0px no-repeat; } &:hover { color: $ubuntu-orange; .accordion__tab-link { color: $ubuntu-orange; background-image: url('../img/icons/cross-orange.svg'); } } } } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_button.scss0000644000000000000000000001447513056115004025534 0ustar 00000000000000@charset 'UTF-8'; /* ------------------------------------------------------------------- ---------------------------------------------------------------------- MAAS button styles @project MAAS @author Web Team at Canonical Ltd @copyright 2015 Canonical Ltd ---------------------------------------------------------------------- ------------------------------------------------------------------- */ /* INTRODUCTION ------------------------------------------------------ ---------------------------------------------------------------------- 1. VARIABLES & PLACEHOLDERS 2. BUTTON -- Form and link cta button styles 3. BUTTON GROUP -- Button group styles ---------------------------------------------------------------------- ------------------------------------------------------------------- */ /* 1. VARIABLES & PLACEHOLDERS --------------------------------------- --------------------------------------------------------------------*/ /** * General call to action styling and hover state placeholder.
*/ %cta-link { @include box-sizing(); @include rounded-corners(3px 0px 0px 3px); display: inline-block; padding: 10px 14px; text-align: center; color: $white; background-color: $ubuntu-orange; &:hover { cursor: pointer; text-decoration: none; background-color: darken($ubuntu-orange, 6.2%); } } %secondary-cta { color: $ubuntu-orange; border: 1px solid #b2b2b2; background-color: #FFF; line-height: 1; &:hover { cursor: pointer; background-color: #F2F2F2; } } /* 2. BUTTON --------------------------------------------------------- --------------------------------------------------------------------*/ a.link-cta-ubuntu, button.cta-ubuntu, input[type='submit'], form button[type='submit'], form input[type='submit'] { font-size: 1em; border: none; max-height: 37px; &[disabled], &.disabled { cursor: default; opacity: .5; } &.clear { background: none; color: #333; } &.secondary { color: $ubuntu-orange; border: 1px solid #b2b2b2; background: #FFF; &.external { background-image: url('../img/external-link-black.svg'); background-size: 16px 16px; background-repeat: no-repeat; background-position: top 8px right 8px; } &:hover { background-color: #F2F2F2; cursor: pointer; } &[disabled], &.disabled { cursor: default; color: lighten($ubuntu-orange, 30%); border: 1px solid #ddd; background: #FFF; opacity: 1; &:hover { background: #FFF; } } } &.text-button { background-color: transparent; color: #333; &:hover { text-decoration: underline; } } &.full { display: block; width: 100%; } @media screen and (max-width: 768px) { margin-bottom: 20px; } } a.link-cta-ubuntu { line-height: 20px; } /* 3. BUTTON GROUP --------------------------------------------------- --------------------------------------------------------------------*/ /** * Button group to trigger a dropdown and to change any active * button state to a different option where required. 
*/ .cta-group { float: left; width: auto; clear: both; position: relative; overflow: hidden; /** * cta link style and dropdown toggle */ .cta-group__link { @extend %cta-link; @include rounded-corners(3px); max-height: 36px; padding-right: 49px; width: 100%; line-height: 1.2; position: relative; margin: 0; text-align: left; &:after { @include rounded-corners(0px 3px 3px 0px); content: ''; display: block; height: 36px; width: 34px; background: red; position: absolute; top: 0; right: 0; background-image: url('../img/chevron-white.svg'); background-color: $ubuntu-orange; background-repeat: no-repeat; background-position: center; } &:hover { background-color: $ubuntu-orange; &:after { background-color: darken($ubuntu-orange, 6.2%); } } } /** * cta group dropdown styles */ .cta-group__dropdown { @include box-sizing(); @include rounded-corners(3px); right: 0; list-style: none; background: #fff; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); z-index: 20; max-height: 1000px; transition: max-height 0.3s ease-in; overflow: hidden; position: relative; clear: both; &.ng-hide { display: block !important; max-height: 0; overflow: hidden; transition: max-height 0.3s ease-out; } .cta-group__item { @include box-sizing(); float: left; clear: both; padding: 5px 10px; margin: 0; a { color: #333; cursor: pointer; width: 100%; float: left; margin: 0; &:hover { color: $ubuntu-orange; text-decoration: none; } } } } /** * cta group secondary style */ &.secondary { .cta-group__link { @extend %secondary-cta; float: left; max-height: 36px; width: 100%; &:after { @include box-sizing(); background-image: url('../img/icons/accordion-open.svg'); background-repeat: no-repeat; background-color: #fff; border: 1px solid #b2b2b2; border-left: none; top: -1px; right: -1px; } &:hover { background-color: #FFF; &:after { background-color: #F2F2F2; } } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_flashmessages.scss0000644000000000000000000000254713056115004027053 0ustar 00000000000000@charset "UTF-8"; /** * MAAS flash messages * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .flash-messages { margin: 0px auto; padding: 0; max-width: 1440px; @media screen and (max-width: 1030px) { margin: 0px 10px 20px; } .flash-messages__item { @include box-sizing(); @include rounded-corners(2px); list-style: none; padding: 15px 20px 15px 45px; margin: 0; font-weight: 400; font-size: 0.875em; background:#FFF; background-position: top 50% left 15px; background-repeat: no-repeat; margin: 0 0 20px; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); &.info { background-image: url('../img/icons/info.png'); background-image: url('../img/icons/info.svg'), none; } &.success { background-image: url('../img/icons/success.png'); background-image: url('../img/icons/success.svg'), none; } &.warning { background-image: url('../img/icons/warning.png'); background-image: url('../img/icons/warning.svg'), none; } &.error { background-image: url('../img/icons/error.png'); background-image: url('../img/icons/error.svg'), none; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_forms.scss0000644000000000000000000001773213056115004025356 0ustar 00000000000000@charset 'UTF-8'; /* ------------------------------------------------------------------- ---------------------------------------------------------------------- MAAS form styles @project MAAS @author Web Team at Canonical Ltd @copyright 2015 Canonical Ltd ---------------------------------------------------------------------- 
------------------------------------------------------------------- */ /* INTRODUCTION ------------------------------------------------------ ---------------------------------------------------------------------- 1. VARIABLES & PLACEHOLDERS 2. FORM 3. FEEDBACK STYLES 4. INLINE ELEMENTS 5. INLINE FORM ---------------------------------------------------------------------- ------------------------------------------------------------------- */ /* 1. VARIABLES & PLACEHOLDERS -------------------------------------- --------------------------------------------------------------------*/ %input--styles { @include rounded-corners(2px); @include box-sizing(); -webkit-appearance: none; background: #FFF; color: #333; border: 1px solid #D2D2D2; display: block; font-family: Ubuntu,Arial,"libra sans",sans-serif; font-size: 1em; font-weight: 300; padding: 0px 13px; &:hover { } &:active { border-color: #B2B2B2; outline: none; } &:focus { border-color: #B2B2B2; outline: none; } &.invalid, &.ng-invalid { border-color: #d90000; } &[disabled="disabled"] { -webkit-text-fill-color: #333; border-color: #D2D2D2; background-color: transparent; } } label { position: relative; .disabled & { cursor: default; } } form { li { &.help-msg { margin-bottom: 1em; .help { color: #888; font-size: 0.875em; } } } label span { color: #333; } input { &[type='text'], &[type='number'], &[type='search'], &[type='password'], &[type='email'], &[type='url'] { @extend %input--styles; padding: 7px 10px; &::-webkit-input-placeholder { color: #888; } &:-moz-placeholder { /* Firefox 18- */ color: #888; } &::-moz-placeholder { /* Firefox 19+ */ color: #888; } &:-ms-input-placeholder { color: #888; } } &[type='number'] { padding-right: 15px; } &[type='search'] { -webkit-appearance: textfield; } &[type='search']::-webkit-search-decoration, &[type='search']::-webkit-search-cancel-button { -webkit-appearance: none; } &[type='radio'], &[type='image'] { display: inline-block; margin-right: 10px; } } textarea { @extend %input--styles; overflow: auto; height: auto; min-height: 175px; padding: 7px 10px; vertical-align: top; } select { @extend %input--styles; display: block; clear: both; cursor: pointer; margin: 0; background-image: url('../img/icons/accordion-open.svg'); background-repeat: no-repeat; background-position: top 16px right 10px; padding: 6px 30px 6px 10px; /* Removes firefox select styles & arrow */ -moz-appearance: none; text-indent: 0.01px; text-overflow: ''; &[multiple], &[size] { height: auto; background-image: none; padding-top: 10px; } /* Removes firefox dotted outline focusing */ &:-moz-focusring { color: transparent; text-shadow: 0 0 0 #000; } &[disabled] { color: #888; background-image: none; } &::-ms-expand { display: none; } } fieldset { background: none; margin-left: 0; padding: 0; } } .checkbox { visibility: hidden; width: 0; height: 0; margin-right: 0; position: absolute; & + .checkbox-label { @include user-select(); padding-left: 20px; position: relative; &:before { content: ""; display: inline-block; width: 20px; height: 20px; position: absolute; top: 0; left: 0; background: url('../img/checkbox.svg') no-repeat; background-size: 13px 13px; background-position: 0px 3px; } } &:checked + .checkbox-label:before { background: url('../img/checkbox-checked.svg') no-repeat; background-position: 0px 3px; } &[disabled="disabled"] + .checkbox-label:before { cursor: default; opacity: 0.5; } } /* 3. 
FEEDBACK STYLES ------------------------------------------------ --------------------------------------------------------------------*/ .field-error, .errors { color: #DF382C; .errorlist { margin: 0; li { margin: 0 0 14px 0; } } } /* 4. INLINE ELEMENTS ------------------------------------------------ --------------------------------------------------------------------*/ .inline { display: inline-block; width: 100%; font-size: 0; margin-bottom: 10px; &.error { background-color: #fdf5f5; box-shadow: 0px 0px 0px 5px #fdf5f5; .ng-invalid { border-color: #D2D2D2; } } &:last-of-type { margin-bottom: 0; } label { display: inline-block; float: none; /** * Required to fix the inline block invisible margin */ font-size: 16px; margin: 0; } input[type='submit'], input[type='text'], input[type='number'], input[type='search'], input[type='password'], input[type='email'], input[type='checkbox'], select { display: inline-block; clear: none; margin: 0; float: none; /** * Required to fix the inline block invisible margin */ font-size: 16px; &:invalid { -moz-box-shadow: none; } &:-moz-submit-invalid { box-shadow: none; } &:-moz-ui-invalid { box-shadow: none; } } div { float: none; margin: 0; } input.cta-ubuntu, a.link-cta-ubuntu, button.cta-ubuntu { /** * Required to fix the inline block invisible margin */ font-size: 16px; } .icon { position: absolute; top: 11px; right: 10px; cursor: pointer; } .error-message { font-size: 12px; color: #e85232; margin-top: 10px; margin-bottom: 10px; font-weight: normal; } } /* 5. INLINE FORM ---------------------------------------------------- --------------------------------------------------------------------*/ .form-inline { label, button, input[type='submit'], input[type='text'], input[type='number'], input[type='search'], input[type='password'], input[type='email'], input[type='checkbox'], select { display: inline-block; width: auto; vertical-align: middle; margin-bottom: 0; } input, input[type='submit'], input[type='text'], input[type='number'], input[type='search'], input[type='password'], input[type='email'], input[type='checkbox'], select { margin-left: 20px; } fieldset { width: auto; display: inline-block; margin: 0 40px 0 0; } } .controls { position: absolute; top: 0; right: 20px; a, button { margin-left: 20px; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_icons.scss0000644000000000000000000000102713056115004025331 0ustar 00000000000000//// /// MAAS icons and symbols /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// .icon { background-size: 16px 16px; width: 16px; height: 16px; padding: 0; display: inline-block; &.icon__loading { background: url('../img/in_progress.png') no-repeat; -webkit-animation: spin 1s infinite linear; -moz-animation: spin 1s infinite linear; animation: spin 1s infinite linear; } } a.icon { cursor: pointer; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_lists.scss0000644000000000000000000000057713056115004025365 0ustar 00000000000000/** * MAAS list styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ dl { dt { clear: left; } dd { color: #888; margin-left: 0; } dt, dd { display: inline-block; float: left; line-height: 37px; margin-bottom: 10px !important; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_ng-tags-input.scss0000644000000000000000000000446413056115004026723 0ustar 00000000000000//// /// MAAS ngTagsInput Styles /// /// @project MAAS /// @author Web Team at Canonical
Ltd /// @copyright 2015 Canonical Ltd /// //// tags-input { outline: none; .host:focus { outline: none; } .tags { &:focus, &.focused { outline: none; } .tag-list { margin: 4px 0 0; padding: 0; list-style-type: none; width: 100%; float: left; } .tag-item { display: inline-block; float: left; font-family: Ubuntu,Arial,"libra sans",sans-serif; font-size: 1em; font-weight: 300; height: 30px; line-height: 30px; cursor: default; color: #000; padding-right: 15px; position: relative; margin: 0 11px 0 0; .remove-button { display: inline-block; width: 12px; height: 12px; text-indent: -999em; background: url('../img/icons/cross.svg') no-repeat; background-size: 12px 12px; position: absolute; right: 0; top: 9px; cursor: pointer; &:hover { text-decoration: none; } &:active { } } } .input { @extend %input--styles; padding: 7px 10px; width: 100% !important; float: left; position: relative !important; left: 0; &.invalid-tag { } &::-ms-clear { display: none; } } } .autocomplete { float: left; width: 100%; .suggestion-list { background: #FFF; padding: 10px 8px; border: 1px solid #D2D2D2; border-top: 0; border-radius: 0 0 2px 2px; li { &:hover { background: #EEE; cursor: pointer; } } } } &.ng-invalid .tags { } &[disabled] { .host:focus { outline: none; } .tags { cursor: default; .tag-item { .remove-button { cursor: default; &:active { } } } .input { cursor: default; } } } } .tag-link { margin-right: 10px; &:last-of-type { &:after { content: ''; } } } .table__data { tags-input { .tags { margin-top: -10px; input { margin-left: 0; width: 50% !important; float: left; } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_pagination.scss0000644000000000000000000000047113056115004026351 0ustar 00000000000000@charset "UTF-8"; /** * MAAS pagination styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .pagination { margin: 10px 0; text-align: center; .inactive { color: #AEA79F; } a, span { margin: 0 5px; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_placeholders.scss0000644000000000000000000001235013056115004026664 0ustar 00000000000000@charset "UTF-8"; /** * MAAS placeholder styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .icon { margin-left: 5px; width: 16px; height: 16px; display: inline-block; text-indent: 999em; background-repeat: no-repeat; background-size: 16px 16px; vertical-align: middle; &.info { background-image: url('../img/icons/info.png'); background-image: url('../img/icons/info.svg'), none; } &.edit { background-image: url('../img/icons/edit.png'); background-image: url('../img/icons/edit.svg'), none; } &.delete { background-image: url('../img/icons/delete.png'); background-image: url('../img/icons/delete.svg'), none; } &.remove { background-image: url('../img/icons/filter-remove.svg'), none; } &.warning { background-image: url('../img/icons/warning.png'); background-image: url('../img/icons/warning.svg'), none; } &.debug { background-image: url('../img/icons/debug.png'); background-image: url('../img/icons/debug.svg'), none; } &.success, &.tick { background-image: url('../img/icons/success.png'); background-image: url('../img/icons/success.svg'), none; } &.error { background-image: url('../img/icons/error.png'); background-image: url('../img/icons/error.svg'), none; } &.partition { background-image: url('../img/icons/partition.svg'); } &.add { background-image: url('../img/icons/add.svg'); } &.tags { background-image: url('../img/icons/tags.svg'); } &.mount { 
background-image: url('../img/icons/mount.svg'); } &.unmount { background-image: url('../img/icons/unmount.svg'); } } .clear { clear: both; } .hidden { display: none; } .align-right { text-align: right; } .align-center { text-align: center; } .align-left { text-align: left; } .right { float: right !important; } .left { float: left !important; } .border { border-top: 1px dotted #B2B2B2; &.bottom { border-bottom: 1px dotted #B2B2B2; } &.solid { border-style: solid; } } %vertical-align, .vertical-align { position: relative; top: 50%; -webkit-transform: translateY(-50%); -ms-transform: translateY(-50%); transform: translateY(-50%); } %margin-top, .margin-top { margin-top: 20px; &--five { margin-top: 5px; } &--ten { margin-top: 10px; } } %margin-rght, .margin-right { margin-right: 20px !important; &--ten { margin-right: 10px !important; } } %margin-bottom, .margin-bottom { margin-bottom: 20px; } %margin-left, .margin-left { margin-left: 20px !important; &--ten { margin-left: 10px !important; } &--thirty { margin-left: 30px !important; } } %padding-top, .padding-top { padding-top: 20px; &--ten { padding-top: 10px !important; } } %padding-right, .padding-right { padding-right: 20px; } %padding-bottom, .padding-bottom { padding-bottom: 20px !important; &--ten { padding-bottom: 10px !important; } } %padding-left, .padding-left { padding-left: 20px !important; &--30, &--thirty { padding-left: 30px !important; } &--35 { padding-left: 35px !important; } &--45 { padding-left: 45px !important; } &--50 { padding-left: 50px !important; } } %border-top, .border-top { border-top: 1px dotted #888; } %border-bottom, .border-bottom { border-bottom: 1px dotted #888; } .no-margin { margin: 0; } .no-margin-top { margin-top: 0 !important; } .no-padding { padding: 0; } .no-padding-top { padding-top: 0 !important; } .no-padding-left { padding-left: 0; } .no-padding-bottom { padding-bottom: 0 !important; } .no-margin-bottom { margin-bottom: 0; } .width { &--auto { width: auto; } &--half { width: 50%; } &--full { width: 100%; } } .tooltip { position: relative; &::before { content: attr(data-tooltip) ; font-size: 13px; font-weight: 400; position:absolute; z-index: 999; white-space:nowrap; left: 50%; -webkit-transform: translateX(-50%); -ms-transform: translateX(-50%); transform: translateX(-50%); background:#333; color:#FFF; padding: 10px; text-indent: 0; opacity: 0; transition:opacity 0.4s ease-out; border-radius: 5px; box-shadow: 0px 1px 3px 0 rgba(51,51,51,0.2); top: -9999em; } &::after { position: absolute; left: 50%; -webkit-transform: translateX(-50%); -ms-transform: translateX(-50%); transform: translateX(-50%); content: ''; width: 0; height: 0; border-left: 5px solid transparent; border-right: 5px solid transparent; z-index: 1000; transition:opacity 0.4s ease-out; border-top: 5px solid #333; opacity: 0; top: -9999em; } &:hover::before { opacity: 1 !important; top: -47px; } &:hover::after { opacity: 1 !important; top: -10px; } } .link-cta-ubuntu, .cta-ubuntu { &.tooltip { &:hover::before { top: -50px; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_search.scss0000644000000000000000000000373613056115004025474 0ustar 00000000000000@charset "UTF-8"; //// /// MAAS search styles /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// .search { position: relative; padding-bottom: 20px; input[type='search'] { -webkit-appearance: textfield; } .search__input { @include box-sizing(); @include rounded-corners(4px); list-style: none; background:#FFF; 
box-shadow: 0 1px 1px rgba(0, 0, 0, .1); width: 100%; border: none; padding: 13px 20px; font-size: 1.25em; &::-webkit-input-placeholder { color: #000 !important; } &:-moz-placeholder { /* Firefox 18- */ color: #000 !important; } &::-moz-placeholder { /* Firefox 19+ */ color: #000 !important; } &:-ms-input-placeholder { color: #000 !important; } &[disabled="disabled"] { background-color: #fff; opacity: .5; pointer-events: none; & + .search__submit.close { pointer-events: none; opacity: .5; } } } .search__submit { position: absolute; top: 15px; right: 25px; background-color: transparent; background-image: url('../img/search-icon.svg'); background-repeat: no-repeat; text-indent: -999em; display: block; width: 21px; height: 20px; overflow: hidden; outline: none; padding: 0; border: none; &:hover { background-color: transparent; background-image: url('../img/search-icon.svg'); } &.close { background-image: url('../img/icons/cross.svg'); background-size: 21px; margin-top: 2px; &:hover { background-image: url('../img/icons/cross.svg'); } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_spinner.scss0000644000000000000000000000170113056115004025673 0ustar 00000000000000@charset "UTF-8"; /** * MAAS spinner styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .spinner-col { width: 10px; } .spinner { float: left; margin: 0 auto; text-indent: -9999em; &.spin { background: url('../img/in_progress.png') no-repeat; background-size: 16px 16px; width: 16px; height: 16px; -webkit-animation: spin 1s infinite linear; -moz-animation: spin 1s infinite linear; animation: spin 1s infinite linear; padding: 0; } } @-webkit-keyframes spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(360deg); transform: rotate(360deg); } } @keyframes spin { 0% { -webkit-transform: rotate(0deg); transform: rotate(0deg); } 100% { -webkit-transform: rotate(360deg); transform: rotate(360deg); } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_tables.scss0000644000000000000000000003061013056115004025470 0ustar 00000000000000@charset 'UTF-8'; /** * MAAS table styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ table { text-align: left; width: 100%; margin-bottom: 40px; border-collapse: separate; border-spacing: 0; font-size: 16px; th, td { background: transparent; text-align: left; padding: 13px 10px; .checkbox + .checkbox-label::before { top: -2px; } } thead { th { background: transparent; color: #888; border: none; border-bottom: 1px solid #B2B2B2; } } tbody { tr { &:hover .icon-controls a { opacity: 1; } } td { border: none; border-bottom: 1px dotted #B2B2B2; position: relative; &.icon-controls { text-align: right; a { opacity: 0; } } input[type="text"] { position: absolute; left: 10px; top: 6px; width: 80%; } } } } .table-listing { text-align: left; width: 100%; margin: 0 0 40px 0; border-collapse: separate; border-spacing: 0; .table-listing__row { &:hover .table-listing__cell { background-color: #FFF; } &.error { background-color: #F3E3E2; .status { color: #DD3832; } } &.progress { background-color: rgba(25, 182, 238, 0.10); } &.selected { background-color: #FFF; } } .table-listing__header { background: transparent; color: #888; border: none; border-bottom: 1px solid #B2B2B2; .table-listing__header-link { background-repeat: no-repeat; background-position: top 8px right 0; color: #888; &:hover { color: #333; text-decoration: none; border-bottom: 
1px solid #333; } &:focus, &:active, &:visited { text-decoration: none; } &.active { color: #333; } &.sort { border-bottom: 1px solid #333; } } .divide { width: 1px; display: inline-block; background: #D2D2D2 none repeat scroll 0% 0%; height: 10px; padding: 0px; margin: 0px 5px; } } .table-listing__cell { @include box-sizing(); border: none; border-bottom: 1px dotted #B2B2B2; position: relative; &.icon-controls { text-align: right; opacity: 0; } input, select { position: absolute; left: 10px; top: 5px; width: 80%; } } .fixed { max-width: 200px; min-width: 200px; width: 200px; padding-left: 0; .table-listing__header & { padding-left: 0; } } &.no-hover { .table-listing__row:hover { .table-listing__cell, td { background-color: transparent; } } } } /* TABLE GRID -------------------------------------------------------- --------------------------------------------------------------------*/ @mixin table-columns($table-columns) { $table-list: null; $max-width: 1440px; @for $i from 1 through $table-columns { .t#{$i} { width: percentage($i / 100); } } @for $i from 1 through $table-columns { .table__column--#{$i} { width: percentage($i / 100); } } } @include table-columns(100); #commissioning_scripts { .script-content.slider { overflow: hidden; pre { margin-top: 20px; } } } .table { display: table; border-color: #B2B2B2; border-collapse: collapse; border-spacing: 0; overflow-x: scroll; margin-bottom: 20px; margin: 0 0 2.5em; width: 100%; .table__row { float: left; width: 100%; display: table-row; border-bottom: 1px dotted #B2B2B2; &:hover { background-color: #FFF; .table__input { background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; &.invalid { border-color: #D90000; } &[disabled] { border-color: transparent; color: #333; } } .table__controls { z-index: 1; opacity: 1; &--secondary { z-index: 1; opacity: 1; } } } &.table__row--no-hover { &:hover { background-color: transparent; } } &.active { background-color: #FFF; .table__dropdown { .table__data, .table__input { color: #333; } } &:hover { .table__controls { opacity: 0; z-index: -1000; pointer-events: none; &--secondary { z-index: -1000; opacity: 0; pointer-events: none; } } .table__dropdown { .table__input { pointer-events: all; background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; } } } .table__input { background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; &.editible { pointer-events: all; background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; } &[disabled] { border-color: transparent; color: #333; } } .table__dropdown { max-height: 3000px; } } &.selected { .table__input { background-color: transparent; border-color: transparent; background-position: right -9999em top -9999em; pointer-events: none; } &:hover { .table__controls { opacity: 0; z-index: -1000; pointer-events: none; &--secondary { z-index: -1000; opacity: 0; pointer-events: none; } } .table__dropdown { .table__input { pointer-events: all; background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; } } } } &.disabled { & > .table__data { color: #888; pointer-events: none; & > .table__input { color: #888; pointer-events: none; } } .table__input { background-color: transparent; border-color: transparent; background-position: right -9999em top -9999em; pointer-events: none; } &:hover { .table__input { background-color: transparent; border-color: transparent; background-position: right -9999em top -9999em; pointer-events: none; 
} .table__controls { opacity: 0; z-index: -1000; pointer-events: none; &--secondary { z-index: -1000; opacity: 0; pointer-events: none; } } } } &.noEdit { .table__input { background-color: transparent; border-color: transparent; background-position: right -9999em top -9999em; pointer-events: none; } &:hover { .table__input { background-color: transparent; border-color: transparent; background-position: right -9999em top -9999em; pointer-events: none; } .table__controls { opacity: 0; z-index: -1000; pointer-events: none; &--secondary { z-index: -1000; opacity: 0; pointer-events: none; } } } } } .table__header, .table__data { font-size: 16px; display: table-cell; padding: 13px 10px; box-sizing: border-box; height: 100%; background: none; border: 0; text-align: left; float: left; a { color: $ubuntu-orange; &:hover { cursor: pointer; } } } .table__head { display: table-head; width: 100%; box-sizing: border-box; .table__row { border-bottom: 1px solid #B2B2B2; &:hover { background-color: transparent; } } .table__header { font-size: 13px; background: none; color: #888; } a, .table__header-link { color: #888; &:hover { color: #333; text-decoration: none; border-bottom: 1px solid #333; } &.active { color: #333; text-decoration: none; } &.sort { border-bottom: 1px solid inherit; } } .divide { width: 1px; display: inline-block; background: #888; height: 10px; margin: 0 5px; } } .table__body { display: table-row-group; } .table__footer { display: table-footer-group; } .table__label { clear: both; display: block; margin-top: 11px; color: #BCBCBC; a { color: #BCBCBC; &:hover { color: $ubuntu-orange; } } &.active { a { color: $ubuntu-orange; } } } .table__controls { width: 100%; text-align: right; opacity: 0; z-index: -1000; &--secondary { opacity: 0; z-index: -1000; width: auto; text-align: left; } } .table__tags { .table__tag { display: inline-block; margin-right: 5px; } } // Form styles .table__input { display: inline-block; margin: -7px 0 -8px -14px; background-color: transparent; border-color: transparent; background-position: -9999px -9999px; &.invalid { border-color: transparent; &:focus { border-color: #D90000; } } &[disabled] { border-color: transparent; color: #333; } &:focus { background-color: #FFF; border-color: #D2D2D2; } } label { font-size: 13px; color: #BCBCBC; } input, select { margin: 0 0 0 -14px; } input[type="radio"] { margin-left: 0; } ul { li { margin: 0; margin-top: 14px; &:first-of-type { margin-top: 0; } } } // Table dropdown .table__dropdown { width: 100%; max-height: 0; overflow-y: hidden; .table__dropdown-row { border-bottom: 0; position: relative; max-height: 0; transition: max-height 0.10s ease-out; overflow: hidden; &.active { max-height: 500px; transition: max-height 0.25s ease-in; } &:before { display: block; margin: 0 auto; width: calc(100% - 20px); border-top: 1px dotted #B2B2B2; position: absolute; height: 1px; content: ''; top: 0; left: 10px; } &.table__dropdown-row--head { border-bottom: 0; .table__header { color: #BCBCBC; font-size: 13px; } } &.no-border { border: 0; &:before { border: 0; } } .table__input { display: inline-block; margin: -7px 0 -8px -14px; background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; } } &--info { width: 100%; max-height: 0; background-color: transparent; // Info dropdown .table__dropdown-row--info { border-bottom: 0; position: relative; max-height: 0; transition: max-height 0.10s ease-out; overflow: hidden; &.active { max-height: 500px; transition: max-height 0.25s ease-in; } .table__data { color: 
#BCBCBC; } } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_typography.scss0000644000000000000000000000135513056115004026430 0ustar 00000000000000/** * MAAS typography styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ h1, h2, h3, h4, h5 { span { color: #888; font-size: 75%; padding-left: 20px; } } h4 { font-size: 20px; font-weight: 300; } pre { border: 0; background-color: #FFF; border-radius: 2px; code { counter-reset: line-numbering; .line { float: left; &::before { content: counter(line-numbering); counter-increment: line-numbering; padding-right: 1em; /* space after numbers */ width: 1.5em; text-align: right; opacity: 0.5; pointer-events: none; user-select: none; } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/components/_yui-modules.scss0000644000000000000000000000320613056115004026473 0ustar 00000000000000.yui3-node-add-widget { width: 65.9292%; margin-right: 2.21238%; @media screen and (max-width: 768px) { width: 100%; margin: 0; } .buttons { margin-top: 30px; } .add-link img.icon { margin-right: 6px; } } .yui3-overlay { @include rounded-corners(0 0 6px 6px); background-color: #fff; -webkit-box-shadow: 0 0 10px 0 rgba(0,0,0,0.5); box-shadow: 0 0 10px 0 rgba(0,0,0,0.5); ul { padding: 5px 0; } li { float: none; &:last-child a { border-bottom: none; } } a { display: block; padding: 6px 20px; color: #dd4814 !important; border-bottom: 1px solid #e5e2e0; &:focus, &:hover { background-color: #f2f2f2; } } } .yui3-overlay-hidden { display: none; } .yui3-widget-mask { background-color: #000; opacity: 0.3; } .yui3-panel { @include rounded-corners(0 0 6px 6px); background-color: #FFF; padding: 50px 80px 50px 80px; -webkit-box-shadow: 0 0 15px 0 rgba(0,0,0,1); box-shadow: 0 0 15px 0 rgba(0,0,0,1); .yui3-button { float: right; &.link-button { float: left; padding-left: 0; padding-right: 0; color: #dd4814; border: none; background: none; -webkit-box-shadow: none; box-shadow: none; font-size: 13px; } } } .yui3-widget-hd { margin-bottom: 30px; font-size: 24px; } .yui3-widget-ft { margin-top: 50px; } .yui3-widget-button-wrapper { width: 100%; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/layout/_footers.scss0000644000000000000000000000175413056115004025036 0ustar 00000000000000@charset "UTF-8"; /** * MAAS footer styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .footer-wrapper { border-top: 1px dotted #b2b2b2; footer.global { background-color: transparent; padding-top: 0px; padding-right: 20px; padding-left: 20px; background-image: url('../img/logos/logo-ubuntu-orange.png'); background-image: url('../img/logos/logo-ubuntu-orange.svg'), none; background-size: 107px 25px; background-position: top 20px right 20px; background-repeat: no-repeat; max-width: 1480px; p { font-size: 0.875em; } a { margin: 0 5px; color: $ubuntu_orange; } .version { font-weight: 500; margin-right: 5px; } .copy { margin-top: 10px; } .legal { max-width: 1440px; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/layout/_grids.scss0000644000000000000000000000205113056115004024454 0ustar 00000000000000//// /// MAAS grid /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// .wrapper { min-height: 100%; height: auto !important; /* This line and the next line are not necessary unless you need IE6 support */ height: 100%; margin: 0 auto -112px; /* the bottom margin is the negative value of the footer's height */ position: 
relative; background: rgba(255,255,255,0.6); padding-top: 172px; &:after { content: ''; position: absolute; display: block; top: 0; right: 0; bottom: 0; left: 0; background: url('../img/backgrounds/image-background-paper.png'); height: 100%; width: 100%; z-index: -1; } } .inner-wrapper { max-width: 1480px; padding: 0px 20px; margin: 0 auto; position: relative; } .push, .footer-wrapper { height: 112px; } .row { background-color: transparent; border-bottom: 1px dotted #CCC; } .row:last-child { border-bottom: none; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/layout/_headers.scss0000644000000000000000000002522113056115004024763 0ustar 00000000000000@charset "UTF-8"; //// /// MAAS header styles /// /// @project MAAS /// @author Web Team at Canonical Ltd /// @copyright 2015 Canonical Ltd /// //// // Main page title font size $title-size: 2.00em; // Sub action title font size $title-action-size: 0.60em; /// Global Header header.banner { overflow: visible; z-index: 20; position: fixed; top: 0; .logo { padding-left: 15px; } nav.nav-primary { border-bottom: none; } .nav-primary { &.nav-right { .logo-ubuntu { @include background-size(100px 30px); background-position: 5px 9px; background-image: url('../img/logos/logo.png'); background-image: url('../img/logos/logo.svg'), none; } } li:hover ul:after { display: none; } @media screen and (max-width: 768px) { ul { border-right: none; } } } #right-nav { float: right; margin-right: 20px; @media screen and (max-width: 768px) { margin-right: 0; } } #user-link { position: relative; @media screen and (max-width: 768px) { border-top: 1px solid #d4d7d4; width: 100%; } > a { padding-bottom: 12px; @media screen and (max-width: 768px) { display: none; } } .normal, .hover { margin-right: 7px; @media screen and (max-width: 768px) { display: none; } } .hover { display: none; } .nav { @include rounded-corners(0px 0px 4px 4px); background-color: #FFF; border: none; display: none; position: absolute; right: 0; top: 48px; box-shadow: 0px 2px 4px rgba(000,000,000,0.15); a { @media screen and (min-width: 769px) { &.active { background: none; border: none; } } &:hover { background-color: transparent; @media screen and (max-width: 768px) { background-color: #F8F8F8; color: #333; } } } @media screen and (max-width: 768px) { background: none; position: relative; top: inherit; width: 100%; box-shadow: none; float: left; padding: 0; li { border-bottom: 1px solid #d4d7d4; width: 100%; float: left; a { padding:10px 14px; width: 100%; } } } } &:hover { @media screen and (max-width: 768px) { > a { background-color: transparent; } } .normal { display: none; } .hover { display: inline-block; @media screen and (max-width: 768px) { display: none; } } .nav { display: block; } } } .nav-toggle { background-image: url('../img/icons/navigation-menu-plain.png'); background-image: url('../img/icons/navigation-menu-plain.svg'), none; top: 0; } @media screen and (max-width: 768px) { .nav-toggles { .open { display: block; } .close { display: none; } } #canonlist:target { ul { display: block; } } #canonlist:target + .nav-toggles { .open { display: none; } .close { display: block; } } } } /// page header .page-header { @include box-sizing(); background:#FFF; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); width: 100%; float: left; position: fixed; z-index: 10; top: 48px; .page-header__nav { position: absolute; top: 10px; z-index: 1000; a { font-weight: 300; } } // Main page title .page-header__title { font-size: $title-size; width: auto; padding: 31px 0; margin: 0; float: left; font-size: 
32px; [contenteditable="true"] { display: inline-block; padding: 8px 10px; width: auto; box-sizing: border-box; border: 1px solid transparent; margin: -10px 0 -10px -10px; border-radius: 2px; color: #333; cursor: default; font-size: 32px; &.editable:hover { border: 1px solid #D2D2D2; cursor: text; } &:active, &:focus { outline: none; background-color: #FFF; border: 1px solid #B2B2B2; } &.invalid, &.invalid:hover, &.invalid:active, &.invalid:focus { border-color: #d90000; } br { display:none; } } .icon { vertical-align: 3px; margin-right: 10px; } // Title identicator is used to show any sub pages and where // a user is currently. .page-header__title--identicator { font-size: $title-action-size; width: auto; display: inline-block; position: relative; top: 0px; padding-left: 20px; margin-left: 10px; a { color: #888; &:hover { text-decoration: none; border-bottom: 3px solid #888; } &:focus, &:active { text-decoration: none; } &.active { color: #333; border-bottom: 3px solid $ubuntu-orange; &:hover { text-decoration: none; cursor: default; } } } .divide { width: 1px; display: inline-block; background: #D2D2D2; height: 11px; padding: 0; margin: 0 5px; } .page-header__title-loadmore { font-size: 14px; margin-left: 10px; &:hover { border: 0; text-decoration: underline; } } } .link-cta-ubuntu, .alt { font-size: 16px; margin-left: 20px; position: relative; vertical-align: middle; margin-top: -5px; } } // Page actions container, used to hold any call to action buttons // and links .page-header__actions { float: right; padding: 34px 0; margin-bottom: 0; .page-header__cta { float: right; position: relative; height: auto; max-height: 36px; .cta-group { float: right; } // Feedback container for the cta, will contain positive or // negative feedback .page-header__cta-feedback { display: inline-block; position: relative; line-height: 36px; text-align: right; color: $ubuntu-orange; margin-right: 20px; cursor: pointer; &:hover { text-decoration: underline; } } } } // Header dropdown section, can contain secondary information i.e // flash messages or form controls / actions .page-header__dropdown { float: left; width: 100%; max-height: 1000px; transition: max-height 0.3s ease-in; overflow: hidden; border-color: #888 !important; &.ng-hide { display: block !important; max-height: 0; overflow: hidden; transition: max-height 0.3s ease-out; border-top: none; } // Feedback style, mainly used for actions feedback .page-header__feedback { border-top: 1px dotted #888; display: inline-block; width: 100%; padding: 20px 0; .page-header__feedback-message { @include box-sizing(); margin: 0; background-position: top 3px left 0px; background-repeat: no-repeat; padding: 6px 0 5px 25px; width: auto; display: inline-block; position: relative; &.info, &.error { background-image: url('../img/icons/error.png'); background-image: url('../img/icons/error.svg'), none; background-position: 0px 9px; &.progress { width: 100%; } } &.progress { padding-left: 0; .loader { position: relative; top: 1px; } } } } } } .title { .title__indicator { .title__link { color: #888; font-size: 20px; &:hover { color: #333; text-decoration: none; border-bottom: 1px solid #333; } &:focus, &:active { text-decoration: none; } &.active { color: #333; border-bottom: 1px $ubuntu-orange solid; } } .divide { width: 1px; display: inline-block; background: #D2D2D2; height: 11px; padding: 0; margin: 0 5px; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/_accounts.scss0000644000000000000000000000221213056115004024744 0ustar 00000000000000@charset 
'UTF-8'; /** * MAAS account styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .accounts { .logout { .divide { padding: 0 20px 0 30px; display: inline-block; } } .api { li { position: relative; input[type='text'] { @extend %input--styles; line-height: 30px; padding-right: 30px; width: 100%; &::-webkit-input-placeholder { color: #333; } &:-moz-placeholder { /* Firefox 18- */ color: #333; } &::-moz-placeholder { /* Firefox 19+ */ color: #333; } &:-ms-input-placeholder { color: #333; } } .delete-link { position: absolute; top: 7px; right: 7px; } } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/_dashboard.scss0000644000000000000000000000147313056115004025064 0ustar 00000000000000@charset 'UTF-8'; /** * MAAS dashboard styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ form.page-title-form { margin-bottom: 30px; input { @include rounded-corners(5px); border: 1px solid transparent; background-color: transparent; font-size: 36px; line-height: 26px; color: #333; margin: 6px 10px; padding: 4px; height: auto; box-shadow: none; &:hover { outline: none; background: #FFF; border-color: #D2D2D2; box-shadow: inset 0 1px 1px rgba(000,000,000,0.1); } &:focus { border: 1px solid #dd4814; background-color: #fff; outline: none; } } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/_images.scss0000644000000000000000000000260313056115004024376 0ustar 00000000000000@charset 'UTF-8'; /** * MAAS node styles * * @project MAAS * @author Web Team at Canonical Ltd * @copyright 2015 Canonical Ltd * */ .small-icon { width: 12px; } .images-info { text-align: center; padding: 10px; } .images-warning { @include box-sizing(); @include rounded-corners(2px); list-style: none; padding: 15px 20px 15px 45px; margin: 0; font-weight: 400; font-size: 0.875em; background:#FFF; background-position: top 50% left 15px; background-repeat: no-repeat; margin: 0 0 10px; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); border: 1px solid #EEE; background-image: url('../img/icons/warning.png'); background-image: url('../img/icons/warning.svg'), none; } #loader { width: 10px; margin: 16px auto 0 auto; } #importing { @include box-sizing(); @include rounded-corners(2px); list-style: none; padding: 15px 20px 15px 45px; margin: 0; font-weight: 400; font-size: 0.875em; background:#FFF; background-position: top 50% left 15px; background-repeat: no-repeat; margin: 0 0 10px; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); border: 1px solid #EEE; position: relative; .spinner { position: absolute; left: 15px; } } .importing-dot { opacity: 0; -webkit-animation: dot 1.3s infinite; animation: dot 1.3s infinite; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/_networks.scss0000644000000000000000000000513513056115004025010 0ustar 00000000000000.selector { @include box-sizing(); @include rounded-corners(2px); padding: 20px; background:#FFF; margin: 0 0 20px; box-shadow: 0 1px 1px rgba(0, 0, 0, .1); border: 1px solid #EEE; width: 100%; float: left; h2 { font-size: 1em; font-weight: 300; img { @include box-sizing(); display: inline-block; background: url('../img/icons/help.svg') no-repeat; width: 16px; height: 16px; padding-left: 16px; margin-left: 5px; } } .selector-available, .selector-chosen { width: 46%; float: left; margin: 0; text-align: left; @media screen and (max-width: 768px) { width: 100%; } h2 { background: none; border: none; } select { margin-bottom: 10px; } } ul.selector-chooser { width: 8%; float: left; margin: 20% 0 0; 
@media screen and (max-width: 768px) { width: 100%; margin: 0 0 10px; text-align: center; } li { width: 100%; text-align: center; @media screen and (max-width: 768px) { width: auto; display: inline-block; height: 16px; width: 16px; margin: 0 20px; } a { display: block; text-indent: 999em; width: 16px; height: 16px; overflow: hidden; margin: 0 auto; &.selector-add { background-image: url('../img/icons/chevron_right.svg'); @media screen and (max-width: 768px) { background-image: url('../img/icons/chevron_down.svg'); } } &.selector-remove { background-image: url('../img/icons/chevron_left.svg'); @media screen and (max-width: 768px) { background-image: url('../img/icons/chevron_up.svg'); } } } } } .selector-filter { img { display: none; } input { background-image: url('../img/search-orange.png'); background-repeat: no-repeat; background-position: top 7px right 8px; } } select#id_mac_addresses_to.filtered { height: 269px !important; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/maas/pages/_nodes.scss0000644000000000000000000002153113056115004024242 0ustar 00000000000000@charset 'UTF-8'; /* ------------------------------------------------------------------- ---------------------------------------------------------------------- MAAS node styles @project MAAS @author Web Team at Canonical Ltd @copyright 2015 Canonical Ltd ---------------------------------------------------------------------- ------------------------------------------------------------------- */ /* INTRODUCTION ------------------------------------------------------ ---------------------------------------------------------------------- 1. NODES -- Node listing styles 2. NODES SEARCH -- Button group styles 3. ADD MACHINE LIST 4. ACCORDION ---------------------------------------------------------------------- ------------------------------------------------------------------- */ /* 1. 
NODES ---------------------------------------------------------- --------------------------------------------------------------------*/ .nodes { position: relative; .search { position: absolute; right: 0; input[type='submit'] { position: absolute; top: 8px; right: 12px; background-color: transparent; background-image: url('../img/search-icon.svg'); background-repeat: no-repeat; text-indent: -999em; display: block; width: 21px; height: 20px; overflow: hidden; outline: none; padding: 0; &:hover { color: transparent; background-color: transparent; background-image: url('../img/search-icon.svg'); background-repeat: no-repeat; } } @media screen and (max-width: 768px) { position: relative; } } .actions { @media screen and (max-width: 768px) { select { width: 100%; margin-bottom: 20px; } input { position: absolute; right: 0; top: 0 } } } } .powerstates { width: 14px; height: 15px; display: inline-block; &.power-on { background: transparent url('../img/icons/power-on.svg') left top no-repeat; } &.power-off { background: transparent url('../img/icons/power-off.svg') left top no-repeat; } &.power-unknown { background: none; } &.power-error { background: transparent url('../img/icons/power-error.svg') left top no-repeat; } &.power-check-ok { @include rounded-corners(50%); width: 10px; height: 10px; color: #33CC00; } &.power-check-error { @include rounded-corners(50%); width: 10px; height: 10px; color: #FF0000; } } .node-actions { .link-cta-ubuntu, .cta-ubuntu { margin-bottom: 10px; float: left; font-size: 16px; } } .buttons { margin-top: 30px; } #network-interfaces li { list-style-type: none; } #content-discovery-data { padding-top: 20px; margin-top: 20px; border-top: 1px dotted #B2B2B2; .slider { height: 0; overflow: hidden; } } .slider { padding-top: 0 !important; .content { @include rounded-corners(0 0 4px 4px); box-shadow: 0 1px 1px rgba(0, 0, 0, .1); background: #FFF; border: 1px solid #EEE; border-top: none; padding: 20px; pre { margin: 0; } } .tabs { @include box-sizing(); @include rounded-corners(4px 4px 0 0); padding: 8px 20px; margin: 0; font-weight: 400; font-size: 0.875em; background:#FFF; background-position: top 50% left 15px; background-repeat: no-repeat; border: 1px solid #EEE; } } /* 3. 
ADD MACHINE LIST ----------------------------------------------- --------------------------------------------------------------------*/ .add-machine__list { padding: 13px 0 20px; border-top: 1px dotted #888; margin-bottom: 0; .add-machine__details { @extend %border-bottom; background: transparent url('../img/icons/accordion-open.svg') top 12px right 10px no-repeat; > div { margin-bottom: 0; } .add-machine__details-form { display: none; } &.active { background-image: url('../img/icons/accordion-close.svg'); .add-machine__details-form { display: block; } } } } .power-status { display: inline-block; font-size: 100%; padding-left: 0; &--power { display: inline-block; margin-left: 20px; position: relative; font-size: 100%; &.checking { color: #2AB7EC; background: url('../img/status_in_progress.svg'); padding-left: 20px; } &.on { padding-left: 20px; color: #38B44A; background: transparent url('../img/icons/power-on.svg') left top 4px no-repeat; } &.off { padding-left: 20px; color: #D2D2D2; background: transparent url('../img/icons/power-off.svg') left top 4px no-repeat; } &.error { padding-left: 20px; color: #DB3832; background: transparent url('../img/icons/power-error.svg') left top 4px no-repeat; } .power-check { font-size: 0.75em; color: #D2D2D2; display: inline-block; padding: 0; .power-check__link { color: #888; text-decoration: none; margin-left: 5px; &:hover { border-bottom: 0 !important; text-decoration: underline !important; } } } } } .loading, .loader { background: url('../img/in_progress.png') no-repeat; background-size: 16px 16px; width: 16px; height: 16px; -webkit-animation: spin 1s infinite linear; -moz-animation: spin 1s infinite linear; animation: spin 1s infinite linear; padding: 0; display: inline-block; } /// Storage /// @section storage .details { &__used { color: #BCBCBC; } .details__label { clear: both; display: block; margin-top: 11px; color: #BCBCBC; a { color: #BCBCBC; &:hover { color: $ubuntu-orange; } } &.active { a { color: $ubuntu-orange; } } } .details__controls { width: 100%; text-align: right; opacity: 0; z-index: -1000; &--secondary { opacity: 0; z-index: -1000; width: auto; text-align: left; } } .table-row { .details__input { display: inline-block; margin: -7px 0 -8px -14px; background-color: transparent; border-color: transparent; background-position: -9999px -9999px; } .details__text { line-height: 37px; } &:hover { .details__input { background-color: #FFF; border-color: #D2D2D2; background-position: right 10px top 16px; } .details__controls { z-index: 1; opacity: 1; &--secondary { z-index: 1; opacity: 1; } } } &.active { &:hover { .details__input { background-color: transparent; border-color: transparent; pointer-events: none; background-position: -9999px -9999px; } .details__controls { opacity: 0; z-index: -1000; pointer-events: none; &--secondary { z-index: -1000; opacity: 0; pointer-events: none; } } } } label { font-size: 13px; color: #BCBCBC; } input, select { margin: 0 0 0 -14px; } input[type="radio"] { margin-left: 0; } } .details__dropdown { .details__row { border-bottom: 0; position: relative; &:before { display: block; margin: 0 auto; width: calc(100% - 20px); border-top: 1px dotted #B2B2B2; position: relative; height: 1px; content: ''; } &.details__row--head { border-bottom: 0; .table-cell { color: #BCBCBC; font-size: 13px; } } &.no-border { border: 0; &:before { border: 0; } } } // Info dropdown &--info { .table-row { border-bottom: 0; } .table-cell { color: #BCBCBC; } } } } 
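/// The `.details__controls` rules above (and `.table__controls` in
/// _tables.scss) rely on a hover-reveal pattern: per-row controls stay in
/// the document but are hidden with zero opacity and a negative z-index,
/// then surfaced when their row is hovered. A minimal stand-alone sketch of
/// that pattern; `.demo-row` and `.demo-controls` are illustrative names,
/// not classes MAAS ships:
.demo-row {
  .demo-controls {
    opacity: 0;
    z-index: -1000; // stacked behind the page as well as invisible
  }
  &:hover .demo-controls {
    opacity: 1; // revealed only while the row is hovered
    z-index: 1;
  }
}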
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-constants.scss0000644000000000000000000000511213056115004025373 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Patterns Stylesheet * * The CSS file required by Ubuntu patterns page * * @project Ubuntu Patterns * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * * @see http://design.ubuntu.com */ /** * standard colors * * @colordef standard colors */ /* assets database path */ $asset-path: "//assets.ubuntu.com/sites/ubuntu/latest/u/img/"; /* usage: background: url(#{$asset-path}/backgrounds/background.jpg) no-repeat 0 0; */ $ubuntu-orange: #dd4814; /* ubuntu orange (used for text links also on any site except canonical) */ $light-orange: #fdf6f2; /* used as background on pre text */ $canonical-aubergine: #772953; /* canonical aubergine */ $light-aubergine: #77216f; /* light aubergine (consumer) */ $mid-aubergine: #5e2750; /* mid aubergine (both) */ $dark-aubergine: #2c001e; /* dark aubergine (enterprise) */ $warm-grey: #888888; /* warm grey */ $cool-grey: #333333; /* cool grey */ $light-grey: #f7f7f7; /* light grey */ /* notifications */ $error: #df382c; /* red */ $warning: #eca918; /* yellow */ $success: #38b44a; /* green */ $information: #19b6ee; /* cyan */ /* colour coded status - from negative to positive (Icon: canonical circle) */ $status-red: #df382c; /* red, for status that require immediate attention */ $status-grey: #888888; /* grey, for disabled status or ones that don’t require attention */ $status-yellow: #efb73e; /* yellow, for status that require attention */ $status-blue: #19b6ee; /* blue, for status that don’t require action */ $status-green: #38b44a; /* green, for positive status */ /* misc colours */ $box-solid-grey: #efefef; $link-color: $ubuntu-orange; /* This is the global link color, mainly used for links in content */ /* grid variables */ $base: 14; $gutter-width: 20px; $grid-gutter: 20px; $gutter: 2.12766%; $one-col: 6.38297%; $two-col: 14.89361%; $three-col: 23.40425%; $four-col: 31.91489%; $five-col: 40.42553%; $six-col: 48.93617%; $seven-col: 57.4468%; $eight-col: 65.95744%; $nine-col: 74.46808%; $ten-col: 82.97872%; $eleven-col: 91.48936%; $nav-bg: #f0f0f0; $nav-link-color: #333; $nav-border-dark: #d4d7d4; $nav-border-light: #f2f2f4; $nav-hover-bg: #d0d0d0; $nav-active-bg: #ddd; $breakpoint-medium: "only screen and (min-width: 768px)"; $breakpoint-large: "only screen and (min-width: 984px)"; @media only screen and (min-width : 768px) { $base: 15; } @media only screen and (min-width: 984px) { $base: 14; }maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-grid-desktop.scss0000644000000000000000000002562313056115004025764 0ustar 00000000000000@charset "UTF-8"; @media only screen and (min-width: 984px) { body { font-size: 16px; } .one-col, .two-col, .three-col, .four-col, .five-col, .six-col, .seven-col, .eight-col, .nine-col, .ten-col, .eleven-col, .twelve-col, .col { float: left; } .one-col { width: 6.38297%; } .two-col { width: 14.89361%; } .three-col { width: 23.40425%; } .four-col { width: 31.91489%; } .five-col { width: 40.42553%; } .six-col { width: 48.93617%; } .seven-col { width: 57.4468%; } .eight-col { width: 65.95744%; } .nine-col { width: 74.46808%; } .ten-col { width: 82.97872%; } .eleven-col { width: 91.48936%; } .three-col:nth-child(1):nth-last-child(4), .three-col:nth-child(2):nth-last-child(3), .three-col:nth-child(3):nth-last-child(2), .three-col:nth-child(4):nth-last-child(1) { width: 23.36%; } .three-col:nth-of-type(2) { margin-right: 2.21238%; } 
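/**
 * Where the column constants used in this file come from: the grid is 12
 * columns separated by 11 gutters that together fill 100% of the row, so
 * with $gutter: 2.12766% (seemingly 20px of a 940px row, given
 * $gutter-width: 20px), one column is (100% - 11 * 2.12766%) / 12, i.e.
 * about 6.38297%, and an n-column span is n columns plus (n - 1) gutters,
 * e.g. 3 * 6.38297% + 2 * 2.12766% ≈ 23.40425% for .three-col above.
 * A minimal sketch of that arithmetic in Sass; the $demo-* variables are
 * illustrative only and nothing in this stylesheet uses them:
 */
$demo-span: 3;
$demo-width: $demo-span * $one-col + ($demo-span - 1) * $gutter; // ≈ 23.40425%, matches .three-col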
.twelve-col { width: 100%; margin-right: 0; } .twelve-col .one-col { width: 6.3053%; margin-right: 2.21238%; } .twelve-col .two-col { width: 14.823%; margin-right: 2.21238%; } .twelve-col .three-col { width: 23.3407%; margin-right: 2.21238%; } .twelve-col .three-col:nth-child(1):nth-last-child(4), .twelve-col .three-col:nth-child(2):nth-last-child(3), .twelve-col .three-col:nth-child(3):nth-last-child(2), .twelve-col .three-col:nth-child(4):nth-last-child(1) { width: 23.3407%; } .twelve-col .three-col:nth-of-type(2) { margin-right: 2.21238%; } .twelve-col .four-col { width: 31.8584%; margin-right: 2.21238%; } .twelve-col .five-col { width: 40.3761%; margin-right: 2.21238%; } .twelve-col .six-col { width: 48.8938%; margin-right: 2.21238%; } .twelve-col .seven-col { width: 57.4115%; margin-right: 2.21238%; } .twelve-col .eight-col { width: 65.9292%; margin-right: 2.21238%; } .twelve-col .nine-col { width: 74.4469%; margin-right: 2.21238%; } .twelve-col .ten-col { width: 82.9646%; margin-right: 2.21238%; } .twelve-col .eleven-col { width: 91.4823%; margin-right: 2.21238%; } .twelve-col .twelve-col { width: 100%; margin-right: 0; } .eleven-col .one-col { width: 6.89238%; margin-right: 2.41837%; } .eleven-col .two-col { width: 16.20314%; margin-right: 2.41837%; } .eleven-col .three-col { width: 25.5139%; margin-right: 2.41837%; } .eleven-col .four-col { width: 34.82466%; margin-right: 2.41837%; } .eleven-col .five-col { width: 44.13542%; margin-right: 2.41837%; } .eleven-col .six-col { width: 53.44619%; margin-right: 2.41837%; } .eleven-col .seven-col { width: 62.75695%; margin-right: 2.41837%; } .eleven-col .eight-col { width: 72.06771%; margin-right: 2.41837%; } .eleven-col .nine-col { width: 81.37847%; margin-right: 2.41837%; } .eleven-col .ten-col { width: 90.68923%; margin-right: 2.41837%; } .eleven-col .eleven-col { width: 100%; margin-right: 0; } .ten-col .one-col { width: 7.6%; margin-right: 2.66666%; } .ten-col .two-col { width: 17.86666%; margin-right: 2.66666%; } .ten-col .three-col { width: 28.13333%; margin-right: 2.66666%; } .ten-col .four-col { width: 38.4%; margin-right: 2.66666%; } .ten-col .five-col { width: 48.66666%; margin-right: 2.66666%; } .ten-col .six-col { width: 58.93333%; margin-right: 2.66666%; } .ten-col .seven-col { width: 69.19999%; margin-right: 2.66666%; } .ten-col .eight-col { width: 79.46666%; margin-right: 2.66666%; } .ten-col .nine-col { width: 89.73333%; margin-right: 2.66666%; } .ten-col .ten-col { width: 100%; margin-right: 0; } .nine-col .one-col { width: 8.46953%; margin-right: 2.97176%; } .nine-col .two-col { width: 19.91084%; margin-right: 2.97176%; } .nine-col .three-col { width: 31.35215%; margin-right: 2.97176%; } .nine-col .four-col { width: 42.79346%; margin-right: 2.97176%; } .nine-col .five-col { width: 54.23476%; margin-right: 2.97176%; } .nine-col .six-col { width: 65.67607%; margin-right: 2.97176%; } .nine-col .seven-col { width: 77.11738%; margin-right: 2.97176%; } .nine-col .eight-col { width: 88.55869%; margin-right: 2.97176%; } .nine-col .nine-col { width: 100%; margin-right: 0; } .eight-col .one-col { width: 9.56375%; margin-right: 3.3557%; } .eight-col .two-col { width: 22.48322%; margin-right: 3.3557%; } .eight-col .three-col { width: 35.40268%; margin-right: 3.3557%; } .eight-col .four-col { width: 48.32214%; margin-right: 3.3557%; } .eight-col .five-col { width: 61.24161%; margin-right: 3.3557%; } .eight-col .six-col { width: 74.16107%; margin-right: 3.3557%; } .eight-col .seven-col { width: 87.08053%; margin-right: 3.3557%; } 
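/**
 * The nested widths in this block re-express the top-level spans relative
 * to their parent: nested percentage = absolute child width / absolute
 * parent width. For example, inside .eight-col (65.9292% of the row) a
 * four-col span is 31.8584% / 65.9292% ≈ 48.322%, and the shared gutter is
 * 2.21238% / 65.9292% ≈ 3.3557%, which is where the otherwise opaque
 * figures above and below come from.
 */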
.eight-col .eight-col { width: 100%; margin-right: 0; } .seven-col .one-col { width: 10.98265%; margin-right: 3.85356%; } .seven-col .two-col { width: 25.81888%; margin-right: 3.85356%; } .seven-col .three-col { width: 40.6551%; margin-right: 3.85356%; } .seven-col .four-col { width: 55.49132%; margin-right: 3.85356%; } .seven-col .five-col { width: 70.32755%; margin-right: 3.85356%; } .seven-col .six-col { width: 85.16377%; margin-right: 3.85356%; } .seven-col .seven-col { width: 100%; margin-right: 0; } .six-col .one-col { width: 12.89592%; margin-right: 4.52488%; } .six-col .two-col { width: 30.31674%; margin-right: 4.52488%; } .six-col .three-col { width: 47.73755%; margin-right: 4.52488%; } .six-col .four-col { width: 65.15837%; margin-right: 4.52488%; } .six-col .five-col { width: 82.57918%; margin-right: 4.52488%; } .six-col .six-col { width: 100%; margin-right: 0; } .five-col .one-col { width: 15.61643%; margin-right: 5.47945%; } .five-col .two-col { width: 36.71232%; margin-right: 5.47945%; } .five-col .three-col { width: 57.80821%; margin-right: 5.47945%; } .five-col .four-col { width: 78.9041%; margin-right: 5.47945%; } .five-col .five-col { width: 100%; margin-right: 0; } .four-col .one-col { width: 19.79166%; margin-right: 6.94444%; } .four-col .two-col { width: 46.52777%; margin-right: 6.94444%; } .four-col .three-col { width: 73.26388%; margin-right: 6.94444%; } .four-col .four-col { width: 100%; margin-right: 0; } .three-col .one-col { width: 27.01421%; margin-right: 9.47867%; } .three-col .two-col { width: 63.5071%; margin-right: 9.47867%; } .three-col .three-col { width: 100%; margin-right: 0; } .two-col .one-col { width: 42.53731%; margin-right: 14.92537%; } .two-col .two-col { width: 100%; margin-right: 0; } .one-col .one-col { width: 100%; margin-right: 0; } .twelve-col .last-col { margin-right: 0; } .eleven-col .last-col { margin-right: 0; } .ten-col .last-col { margin-right: 0; } .nine-col .last-col { margin-right: 0; } .eight-col .last-col { margin-right: 0; } .seven-col .last-col { margin-right: 0; } .six-col .last-col { margin-right: 0; } .five-col .last-col { margin-right: 0; } .four-col .last-col { margin-right: 0; } .three-col .last-col { margin-right: 0; } .two-col .last-col { margin-right: 0; } .one-col .last-col { margin-right: 0; } /** * Main containers * * @section containers */ .row, #context-footer { /** Clear entire row. 
*/ @include rounded-corners(0); margin: 0; padding: 40px 40px 20px; } .row:after { content: "."; visibility: hidden; display: block; height: 0; clear: both; } .row-feature { background: none; } /** * Columns * * @section columns */ /* inner grid */ .container { @include box-sizing; margin: $gutter-width $gutter-width 0; width: 100%; } /** * Empty columns * * Add these to a column to append or prepend * empty columns * * @section empty-columns */ .append-one { margin-right: $one-col + $gutter; } .append-two { margin-right: $two-col + $gutter; } .append-three { margin-right: $three-col + $gutter; } .append-four { margin-right: $four-col + $gutter; } .append-five { margin-right: $five-col + $gutter; } .append-six { margin-right: $six-col + $gutter; } .append-seven { margin-right: $seven-col + $gutter; } .append-eight { margin-right: $eight-col + $gutter; } .append-nine { margin-right: $nine-col + $gutter; } .append-ten { margin-right: $ten-col + $gutter; } .append-eleven { margin-right: $eleven-col + $gutter; } .prepend-one { margin-left: $one-col + 2.12766%; } .prepend-two { margin-left: $two-col + 2.12766%; } .prepend-three { margin-left: $three-col + 2.12766%; } .prepend-four { margin-left: $four-col + 2.12766%; } .prepend-five { margin-left: $five-col + 2.12766%; } .prepend-six { margin-left: $six-col + $gutter; } .prepend-seven { margin-left: $seven-col + $gutter; } .prepend-eight { margin-left: $eight-col + $gutter; } .prepend-nine { margin-left: $nine-col + $gutter; } .prepend-ten { margin-left: $ten-col + $gutter; } .prepend-eleven { margin-left: $eleven-col + $gutter; } /** * Push and pull * * Use these classes to push elements into the next * column and pull them into the previous column * * @section push-pull */ .pull-one, .pull-two, .pull-three, .pull-four, .pull-five, .pull-six, .pull-seven, .pull-eight, .pull-nine, .pull-ten, .pull-eleven { float: left; position: relative; } .pull-one { margin-left: -$one-col; } .pull-two { margin-left: -($two-col + $gutter); } .pull-three { margin-left: -($three-col + $gutter); } .pull-four { margin-left: -($four-col + $gutter); } .pull-five { margin-left: -($five-col + $gutter); } .pull-six { margin-left: -($six-col + $gutter); } .pull-seven { margin-left: -($seven-col + $gutter); } .pull-eight { margin-left: -($eight-col + $gutter); } .pull-nine { margin-left: -($nine-col + $gutter); } .pull-ten { margin-left: -($ten-col + $gutter); } .pull-eleven { margin-left: -($eleven-col + $gutter); } .push-one, .push-two, .push-three, .push-four, .push-five, .push-six, .push-seven, .push-eight, .push-nine, .push-ten, .push-eleven { float: left; position: relative; } .push-one { margin: 0 (-($one-col + $gutter)) 0 ($one-col + $gutter); } .push-two { margin: 0 (-($two-col + ($gutter * 2))) 0 ($two-col + ($gutter * 2)); } .push-three { margin: 0 (-($three-col + ($gutter * 2))) 0 ($three-col + ($gutter * 2)); } .push-four { margin: 0 (-($four-col + ($gutter * 2))) 0 ($four-col + ($gutter * 2)); } .push-five { margin: 0 (-($five-col + ($gutter * 2))) 0 ($five-col + ($gutter * 2)); } .push-six { margin: 0 (-($six-col + ($gutter * 2))) 0 ($six-col + ($gutter * 2)); } .push-seven { margin: 0 (-($seven-col + ($gutter * 2))) 0 ($seven-col + ($gutter * 2)); } .push-eight { margin: 0 (-($eight-col + ($gutter * 2))) 0 ($eight-col + ($gutter * 2)); } .push-nine { margin: 0 (-($nine-col + ($gutter * 2))) 0 ($nine-col + ($gutter * 2)); } .push-ten { margin: 0 (-($ten-col + ($gutter * 2))) 0 ($ten-col + ($gutter * 2)); } .push-eleven {
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-grid-tablet.scss0000644000000000000000000002465513056115004025572 0ustar 00000000000000@charset "UTF-8"; @media only screen and (min-width : 768px) { body { font-size: 15px; } .one-col, .two-col, .three-col, .four-col, .five-col, .six-col, .seven-col, .eight-col, .nine-col, .ten-col, .eleven-col, .twelve-col, .col { float: left; } .one-col { width: 6.38297%; } .two-col { width: 14.89361%; } .three-col { width: 23.40425%; } .four-col { width: 31.91489%; } .five-col { width: 40.42553%; } .six-col { width: 48.93617%; } .seven-col { width: 57.4468%; } .eight-col { width: 65.95744%; } .nine-col { width: 74.46808%; } .ten-col { width: 82.97872%; } .eleven-col { width: 91.48936%; } .twelve-col { width: 100%; margin-right: 0; } .twelve-col .one-col { width: 6.3053%; margin-right: 2.21238%; } .twelve-col .two-col { width: 14.823%; margin-right: 2.21238%; } .twelve-col .three-col { width: 23.3407%; margin-right: 2.21238%; } .twelve-col .four-col { width: 31.8584%; margin-right: 2.21238%; } .twelve-col .five-col { width: 40.3761%; margin-right: 2.21238%; } .twelve-col .six-col { width: 48.8938%; margin-right: 2.21238%; } .twelve-col .seven-col { width: 57.4115%; margin-right: 2.21238%; } .twelve-col .eight-col { width: 65.9292%; margin-right: 2.21238%; } .twelve-col .nine-col { width: 74.4469%; margin-right: 2.21238%; } .twelve-col .ten-col { width: 82.9646%; margin-right: 2.21238%; } .twelve-col .eleven-col { width: 91.4823%; margin-right: 2.21238%; } .twelve-col .twelve-col { width: 100%; margin-right: 0; } .eleven-col .one-col { width: 6.89238%; margin-right: 2.41837%; } .eleven-col .two-col { width: 16.20314%; margin-right: 2.41837%; } .eleven-col .three-col { width: 25.5139%; margin-right: 2.41837%; } .eleven-col .four-col { width: 34.82466%; margin-right: 2.41837%; } .eleven-col .five-col { width: 44.13542%; margin-right: 2.41837%; } .eleven-col .six-col { width: 53.44619%; margin-right: 2.41837%; } .eleven-col .seven-col { width: 62.75695%; margin-right: 2.41837%; } .eleven-col .eight-col { width: 72.06771%; margin-right: 2.41837%; } .eleven-col .nine-col { width: 81.37847%; margin-right: 2.41837%; } .eleven-col .ten-col { width: 90.68923%; margin-right: 2.41837%; } .eleven-col .eleven-col { width: 100%; margin-right: 0; } .ten-col .one-col { width: 7.6%; margin-right: 2.66666%; } .ten-col .two-col { width: 17.86666%; margin-right: 2.66666%; } .ten-col .three-col { width: 28.13333%; margin-right: 2.66666%; } .ten-col .four-col { width: 38.4%; margin-right: 2.66666%; } .ten-col .five-col { width: 48.66666%; margin-right: 2.66666%; } .ten-col .six-col { width: 58.93333%; margin-right: 2.66666%; } .ten-col .seven-col { width: 69.19999%; margin-right: 2.66666%; } .ten-col .eight-col { width: 79.46666%; margin-right: 2.66666%; } .ten-col .nine-col { width: 89.73333%; margin-right: 2.66666%; } .ten-col .ten-col { width: 100%; margin-right: 0; } .nine-col .one-col { width: 8.46953%; margin-right: 2.97176%; } .nine-col .two-col { width: 19.91084%; margin-right: 2.97176%; } .nine-col .three-col { width: 31.35215%; margin-right: 2.97176%; } .nine-col .four-col { width: 42.79346%; margin-right: 2.97176%; } .nine-col .five-col { width: 54.23476%; margin-right: 2.97176%; } .nine-col .six-col { width: 65.67607%; margin-right: 2.97176%; } .nine-col .seven-col { width: 77.11738%; margin-right:
2.97176%; } .nine-col .eight-col { width: 88.55869%; margin-right: 2.97176%; } .nine-col .nine-col { width: 100%; margin-right: 0; } .eight-col .one-col { width: 9.56375%; margin-right: 3.3557%; } .eight-col .two-col { width: 22.48322%; margin-right: 3.3557%; } .eight-col .three-col { width: 35.40268%; margin-right: 3.3557%; } .eight-col .four-col { width: 48.32214%; margin-right: 3.3557%; } .eight-col .five-col { width: 61.24161%; margin-right: 3.3557%; } .eight-col .six-col { width: 74.16107%; margin-right: 3.3557%; } .eight-col .seven-col { width: 87.08053%; margin-right: 3.3557%; } .eight-col .eight-col { width: 100%; margin-right: 0; } .seven-col .one-col { width: 10.98265%; margin-right: 3.85356%; } .seven-col .two-col { width: 25.81888%; margin-right: 3.85356%; } .seven-col .three-col { width: 40.6551%; margin-right: 3.85356%; } .seven-col .four-col { width: 55.49132%; margin-right: 3.85356%; } .seven-col .five-col { width: 70.32755%; margin-right: 3.85356%; } .seven-col .six-col { width: 85.16377%; margin-right: 3.85356%; } .seven-col .seven-col { width: 100%; margin-right: 0; } .six-col .one-col { width: 12.89592%; margin-right: 4.52488%; } .six-col .two-col { width: 30.31674%; margin-right: 4.52488%; } .six-col .three-col { width: 47.73755%; margin-right: 4.52488%; } .six-col .four-col { width: 65.15837%; margin-right: 4.52488%; } .six-col .five-col { width: 82.57918%; margin-right: 4.52488%; } .six-col .six-col { width: 100%; margin-right: 0; } .five-col .one-col { width: 15.61643%; margin-right: 5.47945%; } .five-col .two-col { width: 36.71232%; margin-right: 5.47945%; } .five-col .three-col { width: 57.80821%; margin-right: 5.47945%; } .five-col .four-col { width: 78.9041%; margin-right: 5.47945%; } .five-col .five-col { width: 100%; margin-right: 0; } .four-col .one-col { width: 19.79166%; margin-right: 6.94444%; } .four-col .two-col { width: 46.52777%; margin-right: 6.94444%; } .four-col .three-col { width: 73.26388%; margin-right: 6.94444%; } .four-col .four-col { width: 100%; margin-right: 0; } .three-col .one-col { width: 27.01421%; margin-right: 9.47867%; } .three-col .two-col { width: 63.5071%; margin-right: 9.47867%; } .three-col .three-col { width: 100%; margin-right: 0; } .two-col .one-col { width: 42.53731%; margin-right: 14.92537%; } .two-col .two-col { width: 100%; margin-right: 0; } .one-col .one-col { width: 100%; margin-right: 0; } .twelve-col .last-col { margin-right: 0; } .eleven-col .last-col { margin-right: 0; } .ten-col .last-col { margin-right: 0; } .nine-col .last-col { margin-right: 0; } .eight-col .last-col { margin-right: 0; } .seven-col .last-col { margin-right: 0; } .six-col .last-col { margin-right: 0; } .five-col .last-col { margin-right: 0; } .four-col .last-col { margin-right: 0; } .three-col .last-col { margin-right: 0; } .two-col .last-col { margin-right: 0; } .one-col .last-col { margin-right: 0; } /** * Main containers * * @section containers */ .row, #context-footer { /** Clear entire row. 
*/ @include rounded-corners(0); margin: 0; padding: 40px 40px 20px; } .row:after { content: "."; visibility: hidden; display: block; height: 0; clear: both; } .row-feature { background: none; } /** * Columns * * @section columns */ /* inner grid */ .container { @include box-sizing; margin: $gutter_width $gutter_width 0; width: 100%; } /** * Empty columns * * Add these to a column to append or prepend * empty columns * * @section empty-columns */ .append-one { margin-right: $one_col + $gutter; } .append-two { margin-right: $two_col + $gutter; } .append-three { margin-right: $three_col + $gutter; } .append-four { margin-right: $four_col + $gutter; } .append-five { margin-right: $five_col + $gutter; } .append-six { margin-right: $six_col + $gutter; } .append-seven { margin-right: $seven_col + $gutter; } .append-eight { margin-right: $eight_col + $gutter; } .append-nine { margin-right: $nine_col + $gutter; } .append-ten { margin-right: $ten_col + $gutter; } .append-eleven { margin-right: $eleven_col + $gutter; } .prepend-one { margin-left: $one_col + 2.12766%; } .prepend-two { margin-left: $two_col + 2.12766%; } .prepend-three { margin-left: $three_col + 2.12766%; } .prepend-four { margin-left: $four_col + 2.12766%; } .prepend-five { margin-left: $five_col + 2.12766%; } .prepend-six { margin-left: $six_col + $gutter; } .prepend-seven { margin-left: $seven_col + $gutter; } .prepend-eight { margin-left: $eight_col + $gutter; } .prepend-nine { margin-left: $nine_col + $gutter; } .prepend-ten { margin-left: $ten_col + $gutter; } .prepend-eleven { margin-left: $eleven_col + $gutter; } /** * Push and pull * * Use these classes to push elements into the next * column and pull them into the previous column * * @section push-pull */ .pull-one, .pull-two, .pull-three, .pull-four, .pull-five, .pull-six, .pull-seven, .pull-eight, .pull-nine, .pull-ten, .pull-eleven { float: left; position: relative; } .pull-one { margin-left: -$one_col; } .pull-two { margin-left: -($two_col + $gutter); } .pull-three { margin-left: -($three_col + $gutter); } .pull-four { margin-left: -($four_col + $gutter); } .pull-five { margin-left: -($five_col + $gutter); } .pull-six { margin-left: -($six_col + $gutter); } .pull-seven { margin-left: -($seven_col + $gutter); } .pull-eight { margin-left: -($eight_col + $gutter); } .pull-nine { margin-left: -($nine_col + $gutter); } .pull-ten { margin-left: -($ten_col + $gutter); } .pull-eleven { margin-left: -($eleven_col + $gutter); } .push-one, .push-two, .push-three, .push-four, .push-five, .push-six, .push-seven, .push-eight, .push-nine, .push-ten, .push-eleven { float: left; position: relative; } .push-one { margin: 0 (-($one_col + $gutter)) 0 ($one_col + $gutter); } .push-two { margin: 0 (-($two_col + ($gutter * 2))) 0 ($two_col + ($gutter * 2)); } .push-three { margin: 0 (-($three_col + ($gutter * 2))) 0 ($three_col + ($gutter * 2)); } .push-four { margin: 0 (-($four_col + ($gutter * 2))) 0 ($four_col + ($gutter * 2)); } .push-five { margin: 0 (-($five_col + ($gutter * 2))) 0 ($five_col + ($gutter * 2)); } .push-six { margin: 0 (-($six_col + ($gutter * 2))) 0 ($six_col + ($gutter * 2)); } .push-seven { margin: 0 (-($seven_col + ($gutter * 2))) 0 ($seven_col + ($gutter * 2)); } .push-eight { margin: 0 (-($eight_col + ($gutter * 2))) 0 ($eight_col + ($gutter * 2)); } .push-nine { margin: 0 (-($nine_col + ($gutter * 2))) 0 ($nine_col + ($gutter * 2)); } .push-ten { margin: 0 (-($ten_col + ($gutter * 2))) 0 ($ten_col + ($gutter * 2)); } .push-eleven { margin: 0 (-($eleven_col + ($gutter * 2))) 0 ($eleven_col + ($gutter * 2)); } }
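/**
 * Worked example (added for clarity): the percentages above come from
 * the 940px grid described in _core-grid.scss, built from 60px units
 * and 20px gutters and expressed relative to the full grid width:
 *
 *   .one-col = 60px / 940px  = 6.38297%
 *   .two-col = 140px / 940px = 14.89361%   (2 units + 1 gutter)
 *   gutter   = 20px / 940px  = 2.12766%
 *
 * The nested figures (e.g. .twelve-col .one-col at 6.3053%) differ
 * slightly because they are relative to the parent column rather than
 * to the page.
 */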
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-grid.scss0000644000000000000000000000343013056115004024305 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Core Front-End Framework * * Grid file part of the Ubuntu Core Front-End Framework * * This grid is composed of 14 columns (units) separated by 13 gutters (1/3 unit). * The first and last column are for padding purposes only. * The content fits in the middle 12 columns. * Possible divisions: 1 (12 units + 11 gutters), 2 (6 units + 5 gutters), * 3 (4 units + 3 gutters) and 4 (3 units + 2 gutters). * * When 1 unit = 60px, 1 gutter = 20px * * @project Ubuntu Core Front-End Framework * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * * @see http://gridinator.com/ */ /** * Table of contents * * Main containers * Columns * Empty columns * Borders * Push and pull * Vertical gutter * Last * Clearing one-col 60 two-col 140 three-col 220 four-col 300 five-col 380 six-col 460 seven-col 540 eight-col 620 nine-col 700 ten-col 780 eleven-col 860 twelve-col 940 */ .fake { display: none; } body { font-size: 14px; } .one-col, .two-col, .three-col, .four-col, .five-col, .six-col, .seven-col, .eight-col, .nine-col, .ten-col, .eleven-col, .twelve-col, .col { @include box-sizing; clear: none; display: inline-block; float: none; margin-right: $gutter; margin-bottom: 20px; position: relative; width: 100%; } .twelve-col { .one-col, .two-col, .three-col, .four-col, .five-col, .six-col, .seven-col, .eight-col, .nine-col, .ten-col, .eleven-col { width: 100%; } } .last-col, .last { margin-right: 0; } /** * Clearing * * Hard and soft clearing classes * * @section clearing */ .clearfix:after, .container:after { clear: both; content: "\0020"; display: block; height: 0; overflow:hidden; visibility: hidden; } .clear { clear: both; } .clearfix { display: block; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-mixins.scss0000644000000000000000000000370113056115004024670 0ustar 00000000000000/** * mixins * * @section mixins */ @mixin font-size ($size: 16) { font-size: ($size / $base)+em; margin-bottom: (12 / $size)+em; } @mixin box-sizing ($type: border-box) { -webkit-box-sizing: $type; -moz-box-sizing: $type; box-sizing: $type; } @mixin rounded-corners($radius: 4px 4px 4px 4px) { -webkit-border-radius: $radius; -moz-border-radius: $radius; border-radius: $radius; } @mixin box-shadow($shadow...) { -moz-box-shadow: $shadow; -webkit-box-shadow: $shadow; box-shadow: $shadow; }
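// Usage sketch (added for clarity; .promo-box is an illustrative
// selector, not part of the framework): each mixin above expands to
// the vendor-prefixed declarations for a single property, so
//
//   .promo-box {
//     @include rounded-corners(4px);
//     @include box-shadow(0 1px 2px rgba(0, 0, 0, .2));
//   }
//
// compiles to border-radius and box-shadow plus their -webkit- and
// -moz- prefixed equivalents.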
@mixin gradient($from, $to) { background-color: $to; background-image: -moz-linear-gradient($from, $to); background-image: -webkit-gradient(linear, 0% 0%, 0% 100%, from($from), to($to)); background-image: -webkit-linear-gradient($from, $to); background-image: -o-linear-gradient($from, $to); } @mixin footer($background) { padding: $gutter-width $two-col $gutter-width $four-col; margin-bottom: 0; background: url($background) no-repeat scroll $one-col center #F7F7F7; } @mixin clearfix() { *zoom:1; &:before, &:after { content:""; display:table; } &:after { clear:both; } } // CSS3 columns @mixin columns($num: 3, $gap: 20) { -moz-column-count: $num; -moz-column-gap: ($gap / $base)+em; -webkit-column-count: $num; -webkit-column-gap: ($gap / $base)+em; column-count: $num; column-gap: ($gap / $base)+em; } // background-size @mixin background-size($size: 100% 100%) { -moz-background-size: $size; -webkit-background-size: $size; -o-background-size: $size; background-size: $size; } // transitions @mixin transition($properties: all, $duration: .5s, $method: ease-out) { -webkit-transition: $properties $duration $method; -moz-transition: $properties $duration $method; -ms-transition: $properties $duration $method; -o-transition: $properties $duration $method; transition: $properties $duration $method; } // usage: @include transition(all, 0.3s, ease-in-out); maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core-templates.scss0000644000000000000000000000704613056115004025365 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Super Patterns Stylesheet * * Contains audience related themes for site-wide application * * @project Ubuntu Patterns * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * */ /** * Audience specific * * @section audience */ /* Voice */ .voice-community { } .voice-canonical { } .voice-community.voice-canonical { } /* * Consumer */ .audience-consumer{ color: $cool-grey; .row-box, .main-content { color: $cool-grey; } .inner-wrapper { background: #fff; } .quote-right-top { padding: 60px 60px 0 40px; background: url("/sites/ubuntu/latest/u/img/patterns/quote-orange-br-287x287.png") no-repeat; height: 287px; position: absolute; right: -($gutter-width * 2); text-align: left; top: -($gutter-width * 4.5); width: $four-col; p { @include font-size (16); margin: (1.538em / 2); padding-bottom: 0; color: #fff; cite { @include font-size (12); color: #fff; padding: 0; } } } .quote-right-top p a, .quote-right p a { color: #fff; } .quote-right { @include font-size (18); color: #fff; padding: 50px 100px 0 50px; text-indent: -6px; background: url("/sites/ubuntu/latest/u/img/patterns/quote-orange-bl-287x287.png") no-repeat; min-height: 287px; position: absolute; right: -$gutter-width; text-align: left; top: -($gutter-width * 4.5); width: $four-col -(150/$base)em; cite { font-style: normal; margin-left: 6px; } } .quote-right-alt { background: url(/sites/ubuntu/latest/u/img/patterns/quote-white-br-360x360.png) 0 -100px no-repeat; color: $ubuntu-orange; padding: 50px 50px 0 50px; } .quote-right-right { background: url("/sites/ubuntu/latest/u/img/patterns/quote-orange-br-287x287.png") no-repeat; } } /* * Enterprise */ .audience-enterprise { h1 { margin: 0 0 18px 0; } td{ background: #fff; } th, td { padding: 6px 10px; background: #fff; } th[scope="col"] { background: #E2D4DC; color: $canonical-aubergine; } tbody th[rowspan] { background: #F7F2F6; } tfoot th[rowspan] { background: #dfdcd9; } tfoot td, tfoot th
{ font-weight: normal; background: #dfdcd9; } .inner-wrapper { background: $dark-aubergine; color: #fff; } .row-box { background: #fff; color: $cool-grey; } /*.row-quote { background: none repeat scroll 0 0 #E2D4DC; color: #772953; margin-left: -1.125em; padding-top: 1.25em; width: 58.75em; } .row-quote blockquote, .row-quote blockquote p { color: inherit; font-size: 1.313em; margin: 0 0.563em; padding: 0; top: auto; width: auto; } .row-quote blockquote p { line-height: 1.3; text-indent: -12px; } .row-quote blockquote cite { @include font-size (12); color: #656565; font-style: normal; margin-left: 12px; text-indent: 0; }*/ } .row-enterprise { background: $canonical-aubergine; color:#fff; @include rounded-corners(0); .box, div { background: $canonical-aubergine; color:#fff; } a { color:#fff; } } /* .audience-consumer.audience-enterprise{ .inner-wrapper { background: $mid-aubergine; color: #fff; } .row-box, .main-content { background: #fff; color: $cool-grey; } } */ .enterprise-dot-pattern { background:url('/sites/ubuntu/latest/u/img/patterns/enterprise-dot-pattern.png') } .developer-dot-pattern { background:url('/sites/ubuntu/latest/u/img/patterns/developer-dot-pattern.png') } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/_core.scss0000644000000000000000000003034613056115004023370 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Core Front-End Framework * * Main CSS file part of the Ubuntu Core Front-End Framework * * @project Ubuntu Core Front-End Framework * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * * @colordef $cool-grey (#333); main text * @colordef $ubuntu-orange (#dd4814); Ubuntu orange */ /** * Dependencies * * Importing reset file: core-reset.css * Importing grid file: core-grid.css */ /** * Font sizes * * 45px * 32px * 23px * 19.5px * 16px - bold * 13px - bold - uppercase */ /** * Table of contents * General * Links * Lists * Images * Base typography * Global elements * Forms * Tables */ /** * General * * @section links */ html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, blockquote, pre, a, acronym, address, big, cite, code, del, dfn, em, img, ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, b, u, i, center, dl, ol, ul, li, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, article, aside, canvas, details, embed, figure, figcaption, footer, header, menu, nav, output, ruby, section, summary, time, mark, audio, video { border: 0; margin: 0; padding: 0; vertical-align: baseline; } article, aside, details, figcaption, figure, footer, header, nav, section { display: block; } audio, canvas, video { display: inline-block; *display: inline; *zoom: 1; } audio:not([controls]) { display: none; } [hidden] { display: none; } @font-face { font-family: 'Ubuntu'; font-style: normal; font-weight: 300; src: url('../fonts/ubuntu-l-webfont.eot'); src: url('../fonts/ubuntu-l-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-l-webfont.woff') format('woff'), url('../fonts/ubuntu-l-webfont.ttf') format('truetype'), url('../fonts/ubuntu-l-webfont.svg#ubuntulight') format('svg'); } @font-face { font-family: 'Ubuntu'; font-style: normal; font-weight: 400; src: local('Ubuntu'); src: url('../fonts/ubuntu-r-webfont.eot'); src: url('../fonts/ubuntu-r-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-r-webfont.woff') format('woff'), url('../fonts/ubuntu-r-webfont.ttf') format('truetype'), url('../fonts/ubuntu-r-webfont.svg#ubunturegular') format('svg'); } @font-face { 
font-family: 'Ubuntu'; font-style: normal; font-weight: 500; src: url('../fonts/ubuntu-m-webfont.eot'); src: local('Ubuntu Medium'), local('Ubuntu-Medium'), url('../fonts/ubuntu-m-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-m-webfont.woff') format('woff'), url('../fonts/ubuntu-m-webfont.ttf') format('truetype'), url('../fonts/ubuntu-m-webfont.svg#ubuntumedium') format('svg'); } @font-face { font-family: 'Ubuntu'; font-style: normal; font-weight: 700; src: url('../fonts/ubuntu-b-webfont.eot'); src: local('Ubuntu Bold'), local('Ubuntu-Bold'), url('../fonts/ubuntu-b-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-b-webfont.woff') format('woff'), url('../fonts/ubuntu-b-webfont.ttf') format('truetype'), url('../fonts/ubuntu-b-webfont.svg#ubuntubold') format('svg'); } @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 300; src: url('../fonts/ubuntu-li-webfont.eot'); src: local('Ubuntu Light Italic'), local('Ubuntu-LightItalic'), url('../fonts/ubuntu-li-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-li-webfont.woff') format('woff'), url('../fonts/ubuntu-li-webfont.ttf') format('truetype'), url('../fonts/ubuntu-li-webfont.svg#ubuntulight_italic') format('svg'); } @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 400; src: local('Ubuntu Italic'), local('Ubuntu-Italic'), url('https://themes.googleusercontent.com/static/fonts/ubuntu/v5/GZMdC02DTXXx8AdUvU2etw.woff') format('woff'); } @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 500; src: url('../fonts/ubuntu-mi-webfont.eot'); src: local('Ubuntu Medium Italic'), local('Ubuntu-MediumItalic'), url('../fonts/ubuntu-mi-webfont.eot?#iefix') format('embedded-opentype'), url('../fonts/ubuntu-mi-webfont.woff') format('woff'), url('../fonts/ubuntu-mi-webfont.ttf') format('truetype'), url('../fonts/ubuntu-mi-webfont.svg#ubuntumedium_italic') format('svg'); } @font-face { font-family: 'Ubuntu'; font-style: italic; font-weight: 700; src: local('Ubuntu Bold Italic'), local('Ubuntu-BoldItalic'), url('https://themes.googleusercontent.com/static/fonts/ubuntu/v5/pqisLQoeO9YTDCNnlQ9bfz8E0i7KZn-EPnyo3HZu7kw.woff') format('woff'); } html { font-size: 100%; } body { color: #333; font-family: Ubuntu, Arial, "libra sans", sans-serif; font-weight: 300; } blockquote, q { quotes: none; } blockquote { margin: 28px 20px; } blockquote:before, blockquote:after, q:before, q:after { content: ""; content: none; } legend { border: 0; *margin-left: -7px; } figure { margin: 0; } abbr, acronym { cursor: help; } /** * Links * * @section links */ a:focus { outline: thin dotted; } a:hover, a:active { outline: 0; } a:link, a:visited { color: $link-color; text-decoration: none; } a:hover, a:active, a:focus { text-decoration: underline; } a.link-arrow:after { content: "\0000a0›"; } nav ul li h2 a:after { content: "\0000a0›"; } nav ul li a:after, .carousel ul li a:after, ul li p a:after { content: ""; } /** * Lists */ ol, ul { margin-left: 20px; margin-bottom: 20px; } ol ol, ul ul, ol ul, ul ol { margin-bottom: 0; } nav ul, nav ol { list-style: none; list-style-image: none; } /** * Images * * @section images */ svg:not(:root) { overflow: hidden; } img { border: 0; height: auto; max-width: 100%; } img.left { margin-right: $gutter-width; } img.right { margin-left: $gutter-width; } .middle img { vertical-align: middle; margin-top: 4em; } /** * Base typography * * @section type */ h1, h2, h3, h4, h5, h6 { font-weight: 300; line-height: 1.3; } h1 { font-size: 1.625em; 
margin-bottom: .5em; } h2 { font-size: 1.438em; margin-bottom: .5em; } h3 { font-size: 1.219em; margin-bottom: .522em; } h4 { font-size: 1.25em; font-weight: 400; margin-bottom: .615em; } h5 { font-size: 1em; font-weight: 700; margin-bottom: 1em; } h6 { font-size: .723em; font-weight: 400; margin-bottom: 1em; letter-spacing: .1em; text-transform: uppercase; } p, li { font-size: 1em; line-height: 1.5; margin: 0; margin-bottom: .75em; padding: 0; } h2 span, h1 span { display: block; } p + h2, ul + h2, ol + h2, pre + h2 { margin-top: (18 / 32)+em; } header nav a:link { font-weight: normal; } p + h3, ul + h3, ol + h3, pre + h3 { margin-top: (18 / 23)+em; } p + h4, ul + h4, ol + h4, pre + h4 { margin-top: (19.5 / $base)+em; } ol+h2, p+h2, pre+h2, ul+h2 { margin-top: .563em; } ol+h3, p+h3, pre+h3, ul+h3 { margin-top: .783em; } ol+h4, p+h4, pre+h4, ul+h4 { margin-top: 1.219em; } /* p + ol, p + dl { margin-top: 1.5em; margin-bottom: 1.5em; }*/ li { margin-bottom: .4em; } li:last-of-type { margin-bottom: 0; } ins { background: #fffbeb; text-decoration: none; } small, .smaller { font-size: 13px; } sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } sup { vertical-align: text-top; } sub { vertical-align: text-bottom; } dfn { font-style: italic; } mark { background: #ff0; color: #000; } code, pre { font-family: "Ubuntu Mono", "Consolas", "Monaco", "Lucida Console", "Courier New", Courier, monospace; } pre { @include rounded-corners(4px); background: $light-orange; padding: .6em 1em; white-space: pre-wrap; word-wrap: break-word; } blockquote { margin: 0; } blockquote > p { @include font-size (13); font-weight:100; margin:0 0 .4em 0; } blockquote small { font-size:.813em; line-height:1.4; } /** * Forms * * Global form element styles * * @section forms */ button, input, select, textarea { font-family: Ubuntu,Arial,"libra sans",sans-serif; margin: 0; vertical-align: baseline; *vertical-align: middle; } select { font-size: 1em; font-weight: 300; } button, input { line-height: normal; } button, input[type="button"], input[type="reset"], input[type="submit"] { cursor: pointer; -webkit-appearance: button; *overflow: visible; } input[type="checkbox"], input[type="radio"] { box-sizing: border-box; padding: 0; } input[type="search"] { @include rounded-corners(2px); -moz-box-sizing: content-box; -webkit-appearance: none; -webkit-box-sizing: content-box; box-sizing: content-box; font-family: Ubuntu,Arial,"libra sans",sans-serif; font-weight: 300; outline: none; padding: 0.6956522em 0.869565em; } input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } textarea { overflow: auto; vertical-align: top; } form fieldset { @include rounded-corners(4px); background-repeat: no-repeat; background-color: #EFEEEC; background-position: -15px -15px; border: 0; margin-bottom: 8px; padding: 15px 20px; } form fieldset h3 { border-bottom: 1px dotted #dfdcd9; margin-bottom: 9px; padding-bottom: 10px; } form fieldset li:first-child { margin-top: 0; } form input[type="text"], form input[type="email"], form input[type="tel"], form textarea { -webkit-appearance: none; @include rounded-corners(2px); background: #fff; border: 1px solid #999; display: block; font-family: Ubuntu,Arial,"libra sans",sans-serif; font-size: 1em; font-weight: 300; padding: 0.6956522em 0.869565em; } form input:focus, form textarea:focus { border: 1px solid #dd4814; } form textarea[readonly='readonly'] { color: #999; } form 
input[type="checkbox"], form input[type="radio"] { margin: 0; width:auto; } form input[type="checkbox"] + label, form input[type="radio"] + label{ display: inline; margin-left: 5px; vertical-align: middle; width: auto; } form input[type="submit"] { @include font-size (16); @include rounded-corners(4px); @include gradient(#f26120, $ubuntu-orange); @include box-shadow(none); border: 0; color: #fff; display: block; padding: 10px 14px; text-shadow: none; width: auto; margin-bottom: 0; } form input[type="submit"]:hover { background: $ubuntu-orange; } form label { cursor: pointer; display: block; margin-bottom: 4px; } form label span { color: $error; } form ul { margin-left:0; } form li { list-style: none outside none; margin-top: 14px; } form button[type="submit"] { border: 0; display: inline-block; font-family: Ubuntu, Arial, "libra sans", sans-serif; text-decoration:none; font-weight: 300; } form input[type="reset"] { display: none; } /** * Tables * * @section tables */ table { border-collapse: collapse; border-spacing: 0; overflow-x: scroll; margin-bottom: $gutter-width; margin: 0 0 (40/$base) + em 0; width: 100%; th, td { padding: 15px 10px; background: #f0edea; border: 1px dotted $warm-grey; } td { text-align: center; vertical-align: middle; } thead th { border-collapse: separate; border-spacing: 0 10px; background: #fee3d2; color: #333333; font-weight: normal; } tbody th { text-align: left; font-weight: normal; font-weight: 300; } th[scope="col"] { text-align: center; } thead th:first-of-type { text-align: left; } } /* Responsive typo h1 { @include font-size (26); margin-bottom: .5rem; } h2 { font-size: 1.438rem; margin-bottom: .5rem; } h3 { font-size: 1.219rem; margin-bottom: .522rem; } h4 { font-size: 1rem; font-weight: 400; margin-bottom: .615rem; } h5 { font-size: .813rem; font-weight: 700; margin-bottom: 1rem; } h6 { font-size: .723rem; font-weight: 400; margin-bottom: 1rem; letter-spacing: .1rem; text-transform: uppercase; } p { @include font-size (14); line-height: 1.6; margin: 0; margin-bottom: .75rem; padding: 0; }*/ @media only screen and (max-width : 768px) { table { display: block; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width: 984px) { form fieldset { padding: 15px 20px; } img { max-width: none; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/core-print.scss0000644000000000000000000000112613056115004024355 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu print Stylesheet * * * @project Ubuntu Patterns * @author Web Team at Canonical Ltd * @copyright 2012 Canonical Ltd * */ * { background: #fff; color: #000; } body { background: white; font-size: 16pt; line-height: 1.5; } a:link, a:visited { color: #898989; background: transparent; font-weight: bold; text-decoration: underline; } nav, #box-search, .cookie-policy, .link-top, footer { display: none; } nav.nav-secondary { display: block; } .wrapper { width: auto; margin: 0 5%; padding: 0; padding-top: 1em; float: none !important; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/0000755000000000000000000000000013056115004023236 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/ubuntu-styles.scss0000644000000000000000000000036613056115004025143 0ustar 00000000000000@charset 'UTF-8'; // import required files @import 'core-constants'; @import 'core-mixins'; @import 'core-grid'; @import 'core-grid-tablet'; @import 'core-grid-desktop'; @import 'core'; @import 'core-templates'; @import 'patterns/patterns'; 
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_arrows.scss0000644000000000000000000000210113056115004025601 0ustar 00000000000000@charset 'UTF-8'; %arrow { height: 11px; position: absolute; width: 18px; } .arrow-up { @extend %arrow; background: url('#{$asset-path}patterns/arrow-up.png') 0 0 no-repeat; left: 20px; top: -11px; } .arrow-down { @extend %arrow; background: url('#{$asset-path}patterns/arrow-down.png') 0 0 no-repeat; bottom: -11px; right: 20px; } .arrow-right { @extend %arrow; background: url('#{$asset-path}patterns/arrow-right.png') 0 0 no-repeat; height: 18px; right: -11px; top: 20px; width: 11px; } .arrow-left { @extend %arrow; background: url('#{$asset-path}patterns/arrow-left.png') 0 0 no-repeat; bottom: 20px; height: 18px; left: -11px; width: 11px; } div > .arrow-left { left: -10px; } @media only screen and (max-width : 768px) { } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { html.yui3-js-enabled .arrow { visibility: visible; } } // @media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_blockquotes.scss0000644000000000000000000001174113056115004026631 0ustar 00000000000000@charset 'UTF-8'; blockquote { &.pull-quote { text-indent: 0; p { color: $cool-grey; padding-left: 10px; padding-right: 10px; @include font-size (24.833); text-indent: -.4em; margin-left: .4em; line-height: 1.3; span { font-weight: bold; color: $ubuntu-orange; line-height: 0; position: relative; left: -5px; & + span { left: 5px; } // the second span } cite { margin: 10px 0 0; font-weight: 300; display: block; font-size: .75em; text-indent: 0; } } &.js { padding-left: $gutter-width * 3; display: table-cell; } } &.quote-canonical, &.quote-canonical-white { @include font-size (16); background: url("#{$asset-path}patterns/quote-white-360x360.png") no-repeat 20px -130px; color: $canonical-aubergine; float: right; font-size: 1em; height: 215px; margin-top: 0; padding: 20px 60px 0; position: relative; width: 236px; } &.quote-canonical-white { background: url("#{$asset-path}patterns/quote-aubergine-345x345.png") no-repeat 0 0; color: #fff; padding: 80px 60px 0; height: 265px; } &.quote p:first-child { @include font-size (18); // (16) desired value in px, this is divided by the baseline font-size value to reach the em value line-height: 1.3; text-indent: -7px; } &.quote-right-bottom { background-image: url("#{$asset-path}pictograms/picto-pack/picto-quote-orange.svg"); background-repeat: no-repeat; background-size: 287px 286px; color: #fff; height: 167px; padding: 60px 40px; position: static; right: -($gutter-width * 2); top: -($gutter-width * 4.5); width: 207px; & p { color: #fff; } } &.quote-grey { @include font-size (36); background: url("#{$asset-path}patterns/quote-grey-br-211x211.png") no-repeat scroll 0 0 transparent; color: #fff; height: 152px; line-height: 40px; margin-left: 20px; padding: 60px 0 0; text-align: center; width: 211px; } &.quote-bottom-left { background: url("#{$asset-path}patterns/quote-orange-bl-287x287.png") no-repeat; color: #fff; height: 167px; padding: 55px 40px 70px 45px; width: 225px; } } //.pull-quote { //@include font-size (30); //color: $warm-grey; //line-height: 1.4; //right: 0; //} html.no-svg, .opera-mini { blockquote.quote-right-bottom { background-image: url("#{$asset-path}pictograms/picto-pack/picto-quote-orange.png"); } } // End blockquote // row-quote 
.row-quote { @include rounded-corners(0); blockquote { @include rounded-corners(4px); margin: 0; padding: 0; p { margin-bottom: .75em; line-height: 1.3; color: $cool-grey; padding-left: 10px; padding-right: 10px; //@include font-size (23); //font-size: 1em; text-indent: 0; } span { font-weight: bold; color: $ubuntu-orange; line-height: 0; position: relative; left: -5px; & + span { left: 5px; } // the second span } cite { color: $cool-grey; font-style: normal; margin-bottom: 0; font-size: .75em; text-indent: -14px; text-indent: 0; } } .quote-twitter { background: #fcece7 url('#{$asset-path}pictograms/pictogram-twitter-115x139.png') $gutter-width bottom no-repeat; padding: $gutter-width $gutter-width $gutter-width $three-col; } .quote-twitter-small { background: #fcece7 url('#{$asset-path}pictograms/pictogram-twitter-54x63.png') 99% bottom no-repeat; padding: $gutter-width $gutter-width $gutter-width 80px; p { margin: 0; padding: 0; } } } blockquote.quote-canonical, blockquote.quote-canonical-white { background: none; color: $cool-grey; width: auto; height: auto; padding: 0 30px; margin-top: 20px; } @media only screen and (max-width : 768px) { .row-quote blockquote p { } } @media only screen and (min-width : 768px) { .row-quote blockquote { text-indent: -7px; } .pull-quote { text-indent: -.4em; } .row-quote blockquote p { @include font-size (24.83); } blockquote.pull-quote p, .row-quote blockquote p { padding-left: 0; padding-right: 0; text-indent: -.7em; span { font-size: 1.391304348em; } cite { margin-left: 0; text-indent: 0; } } blockquote.pull-quote p span, .row-quote blockquote p span { top: 5px; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { .row-quote blockquote { p { @include font-size (24.83); text-indent: -.4em; } } } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { .row-quote blockquote { padding: 0 80px 20px; text-indent: -10px; } blockquote.pull-quote p span, .row-quote blockquote p span { top: 10px; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_boxes.scss0000644000000000000000000001117213056115004025414 0ustar 00000000000000@charset 'UTF-8'; %box { @include rounded-corners(4px); padding: 1.333em $gutter-width; } .box { @extend %box; background: #fff; border: 1px solid #dfdcd9; } .box-grey { @extend %box; background: $light-grey; color: $cool-grey; } .box-orange { background: $ubuntu-orange; color: #fff; } .box-highlight { @include box-shadow(0 2px 2px 0 #c2c2c2); border: 1px solid $light-grey; } .box-textured { @include box-shadow(0 2px 2px 0 #c2c2c2); background: url("#{$asset-path}patterns/grey-textured-background.jpg"); border: 0; } .box-padded { @include rounded-corners(4px); background: $box-solid-grey; border: 0; margin-bottom: 20px; padding: 6px 5px; h3 { @include font-size(19.5); margin-left: ($gutter-width / 4); margin-top: 5px; } li h3 { // this happens in 'Further reading' /cloud/insights @include font-size(19.5); margin: 0; } div { @include rounded-corners(4px); background: #fff; overflow: hidden; padding: 8px 8px 2px; } } .box-padded-feature { @include rounded-corners(4px); background: url("#{$asset-path}patterns/soft-centre-bkg.gif") repeat scroll 0 0 #a09f9f; border: 0; margin-bottom: 20px; padding: 11px 5px 6px; h3 { color: #fff; margin-left: ($gutter-width / 4); @include font-size(19.5); } h4 { @include font-size(16); font-weight: normal; } > div { @include rounded-corners(4px); background: #fff; 
overflow: hidden; padding: 20px 8px; } div div { margin-bottom: 0; } .inline-icons { display: table; width: 100%; margin: 0; text-align: center; li { display: table-cell; text-align: left; float: none; } } .one-col { width: 48px; float: left; } } .resource { cursor: pointer; padding-bottom: 40px; position: relative; -moz-transition: background .2s ease-out; -webkit-transition: background .2s ease-out; transition: background .2s ease-out; & h2 { padding-right: 20px; } &.five-col h2, &.four-col h2 { a:link, a:visited { font-size: inherit !important; } } &.four-col { h2 a:link, h2 a:visited { font-size: 1.125em; } } &.twelve-col { h2 a:link, h2 a:visited { font-size: 1.40625em; } } &:hover { background-color: #fafafa; } &:after { -moz-box-shadow: 0 -1px 2px 0 #ddd; -webkit-box-shadow: 0 -1px 2px 0 #ddd; box-shadow: 0 -1px 2px 0 #ddd; content: ''; height: 1px; position: absolute; right: -6px; top: 14px; -ms-transform: rotate(45deg); -webkit-transform: rotate(45deg); transform: rotate(45deg); -moz-transition: all .2s ease-out; -webkit-transition: all .2s ease-out; transition: all .2s ease-out; width: 41px; z-index: 2; } &:hover:after { right: -9px; top: 18px; width: 48px; } &:before { content: ''; position: absolute; -moz-transition: border-width .2s ease-out; -webkit-transition: border-width .2s ease-out; transition: border-width .2s ease-out; top: -2px; right: -3px; width: 0; height: 0; border-bottom: 30px solid #fdfdfd; border-right: 30px solid #fff; -webkit-box-shadow: -2px 2px 2px rgba(176, 176, 176, .4); -moz-box-shadow: -2px 2px 2px rgba(176, 176, 176, .4); box-shadow: -2px 2px 2px rgba(176, 176, 176, .4); z-index: 2; @include rounded-corners(0 0 0 0); } &:hover:before { border-bottom-width: 35px; border-right-width: 35px; } &:last-of-type { margin-bottom: 30px; } .content-cat { background: url("#{$asset-path}icons/icon-resource-hub-icon-document.png") left center no-repeat; color: #aea79f; font-size: 14px; letter-spacing: 1px; margin: 0; padding-left: 20px; padding: 0; position: absolute; text-transform: uppercase; } .content-cat-webinar { background: url("#{$asset-path}icons/icon-resource-hub-webinar.png") left center no-repeat; } &.box-image-centered div + span img { margin-top: 40px; } } html.yui3-js-enabled .resource:hover a { text-decoration: underline; } .row-grey .resource:before { border-right-color: #f7f7f7; } @media only screen and (max-width : 768px) { .box-padded-feature .inline-icons li { float: left; display: block; } .box-padded-feature { .one-col { width: 48px; float: left; } } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_buttons.scss0000644000000000000000000000476313056115004026002 0ustar 00000000000000@charset 'UTF-8'; a.link-cta-ubuntu, a.link-cta-canonical, a.link-cta-inverted, button.cta-ubuntu, button.cta-canonical, form button[type="submit"], form input[type="submit"] { @include box-sizing(); @include font-size (16); @include rounded-corners(3px); background: $ubuntu-orange; color: #fff; text-decoration: none; display: inline-block; margin: 0; font-family: Ubuntu, Arial, 'libra sans', sans-serif; font-weight: 300; -webkit-font-smoothing: subpixel-antialiased; -moz-font-smoothing: subpixel-antialiased; -o-font-smoothing: subpixel-antialiased; font-smoothing: subpixel-antialiased; padding: 8px 14px; width: 
100%; text-align: center; } a.cta-large, button.cta-large { @include font-size (18); padding: 10px 20px; } a.link-cta-canonical, button.cta-canonical, form button.cta-canonical[type="submit"], form input.cta-canonical[type="submit"] { background: $canonical-aubergine; color: #fff; } a.link-cta-inverted, button.cta-inverted { background: #fff; color: $cool-grey; } .row-enterprise a.link-cta-canonical, .row-enterprise button.link-cta-canonical { background: #fff; color: $canonical-aubergine; } a.link-cta-ubuntu:hover, button.cta-ubuntu:hover, form button[type="submit"]:hover, form input[type="submit"]:hover { background: darken($ubuntu-orange, 6.2%); // #c03f11 text-decoration: none; } a.link-cta-canonical:hover, button.cta-canonical:hover { background: darken($canonical-aubergine, 6.2%); // #5f193e text-decoration: none; } a.link-cta-inverted:hover, .row-enterprise a.link-cta-canonical:hover, button.cta-inverted:hover, .row-enterprise button.cta-canonical:hover { background: #fff; text-decoration: underline; } a.cta-deactivated, a.cta-deactivated:hover, button.cta-deactivated, button.cta-deactivated:hover { background: $box-solid-grey; color: #fff; cursor: not-allowed; } @media only screen and (min-width : 768px) { a.link-cta-ubuntu, a.link-cta-canonical, a.link-cta-inverted, button.cta-ubuntu, button.cta-canonical, form button[type="submit"], form input[type="submit"] { width: auto; } } // end @media only screen and (min-width : 768px) @media only screen and (min-width : 769px) { } // @media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { a.link-cta-ubuntu, a.link-cta-canonical, a.link-cta-inverted, button.cta-ubuntu, button.cta-canonical, form button[type="submit"], form input[type="submit"] { width: auto; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_contextual-footer.scss0000644000000000000000000000343313056115004027757 0ustar 00000000000000@charset 'UTF-8'; #context-footer { @include box-sizing; @include font-size (14); border-bottom: 0; clear: both; padding-bottom: 1px; padding-top: 0; position: relative; margin-bottom: 0; margin-left: 0; margin-right: 0; width: 100%; hr { @include box-shadow(inset 0 2px 2px -2px #333); background: $ubuntu-orange; height: 14px; margin: 0 0 10px; border: 0; clear: both; } div.twelve-col { display: table; float: none; margin-bottom: 7px; } div div { display: block; padding-left: 0; margin-bottom: 20px; div { display: block; padding-left: 0; margin-bottom: 0; } &.feature-one { padding-left: 0; } &.feature-four { margin-bottom: 0; margin-right: 0; } } > div { padding-left: 10px; padding-right: 10px; } ul { margin-bottom: 5px; } li.active { display: none; } h3 { @include font-size (16); font-weight: normal; } .list a:after, a.link-arrow:after, nav ul li h2 a:after { content: ' \203A'; } } @media only screen and (min-width : 768px) { #context-footer { margin-bottom: 12px; padding-left: 30px; padding-right: 30px; div + div { width: 31%; } div div.feature-four { padding-bottom: 20px; } hr { margin: 0 -30px 40px; } & > div { padding-left: 0; padding-right: 0; } } } // end @media only screen and (min-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { #context-footer { padding: 0 40px 10px; } #context-footer div div { display: table-cell; float: none; padding-left: 20px; margin-bottom: 0; } #context-footer hr { margin: 0 -40px 40px; }
} // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_footer.scss0000644000000000000000000000720613056115004025575 0ustar 00000000000000@charset "UTF-8"; body footer.global #nav-global li:first-of-type a { margin-left: 0; } footer.global { @include box-sizing; @include box-shadow(inset 0 2px 2px -1px #d3d3d3); background: none; border-top: 0; clear: both; display: block; padding: 30px 10px 20px; position: relative; width: 100%; .legal { /* Can be removed once live */ margin: 0 auto; width: 100%; /* 980px / 15px (baseline font); 980px + (60px x 2) = 1100px */ } .legal { background-image: none; position: relative; clear: both; min-height: 40px; p, ul { padding-left: 0; } } // legal h2 { font-size: 0.75em; line-height: 1.4; margin-bottom: 0; padding-bottom: 0.5em; } h2, h2 a:link, h2 a:visited { color: $cool-grey; font-weight: normal; } nav ul li h2 a:after { content: ""; } ul { margin: 0; } nav ul li.two-col { display: inline-block; min-height: 10em; vertical-align: top; } nav ul li li { @include font-size (12); font-size: 0.75em; margin-bottom: 0; } ul li li a:link, ul li li a:visited { color: $cool-grey; margin-bottom: 0; } ul li li a:hover, ul li li a:active, h2 a:hover, h2 a:active { color: $ubuntu-orange; //text-decoration: underline; } .inline li { display: inline; } p, ul.inline li a { color: $cool-grey; font-size: 12px; margin-bottom: 0; } ul.inline li a:hover { color: $ubuntu-orange; } ul.inline li:after { color: $warm-grey; content: "\00b7"; vertical-align: middle; margin: 0 5px; } ul.inline li:last-child { width: 120px; } ul.inline li:last-child:after { content: ""; } .inline li { float: none; margin-bottom: 0; } .top-link { @include box-shadow(0 -4px 4px -4px rgba(0, 0, 0, 0.3) inset); background: none repeat scroll 0 0 rgba(0, 0, 0, 0); border: 0 none; float: left; font-size: 0.75em; letter-spacing: 0.05em; margin: 0 0 0 -10px; padding-right: 20px; text-transform: uppercase; width: 100%; a { @include box-sizing(); background-image: url("#{$asset-path}pictograms/picto-pack/picto-upload-warmgrey.svg"); background-position: 10px center; background-repeat: no-repeat; background-size: 14px 14px; border-bottom: 0 none; color: #888888; display: block; float: none; font-weight: 400; padding: 12px 0 12px 28px; } } } /* End footer.global */ html.no-svg, .opera-mini { footer.global .top-link { a { background-image: url("#{$asset-path}pictograms/picto-pack/picto-upload-warmgrey.png"); } } } @media only screen and (max-width : 768px) { footer.no-global .legal { @include box-sizing(content-box); box-shadow: 0 2px 2px -1px #D3D3D3 inset; padding-top: 10px; margin-left: -10px; padding-left: 10px; padding-right: 10px; } #livechat-eye-catcher { display: block; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 768px) { footer.global .inline li { display: inline; float: left; } } // @media only screen and (min-width : 768px) @media only screen and (min-width : 769px) { footer.global .top-link { display: none; } footer.global .footer-b h2 a i { font-style: normal; display: inline; } } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { footer.global .legal { width: 984px; /* 980px / 15px (baseline font); 980px + (60px x 2) = 1100px */ } footer.global { padding: 30px 0 20px; .legal { background: url("#{$asset-path}logos/logo-ubuntu-grey.png") 100% 0 no-repeat; } .footer-a { display: block; } } } // end @media only screen and (min-width: 984px) 
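/**
 * Pattern note (added for clarity): as in the footer rules above,
 * image assets are referenced through url("#{$asset-path}...") so the
 * base path stays configurable, and each SVG background gets a .png
 * fallback under the html.no-svg and .opera-mini scopes, e.g.
 * (illustrative selector, not a framework class):
 *
 *   .my-icon { background-image: url("#{$asset-path}icons/example.svg"); }
 *   html.no-svg .my-icon,
 *   .opera-mini .my-icon {
 *     background-image: url("#{$asset-path}icons/example.png");
 *   }
 */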
maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_forms.scss0000644000000000000000000000107113056115004025417 0ustar 00000000000000@charset 'UTF-8'; form { input, select, textarea { @include box-sizing(); width: 100%; } .fieldset-submit ul { margin-bottom: 0; } fieldset { .mktError, .errMsg, .reqMark { color: $error; } .mktFormMsg { clear: both; display: block; } } } @media only screen and (max-width : 768px) { } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } // @media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_header.scss0000644000000000000000000004637513056115004025541 0ustar 00000000000000@charset "UTF-8"; header.banner { border-top: 0; min-width: 100%; width: auto; background: $ubuntu-orange; display: block; position: relative; z-index: 2; .nav-primary { border: 0; margin: 0 auto; overflow: hidden; ul { border-right: 1px solid lighten($ubuntu-orange, 10%); float: left; margin: 0; position: relative; li { border-left: 1px solid darken($ubuntu-orange, 5%); float: left; list-style-image: none; margin: 0; text-indent: 0; vertical-align: bottom; } li:last-child { border-right: 1px solid darken($ubuntu-orange, 5%); } li a:link, li a:visited { font-size: 14px; border-left: 1px solid lighten($ubuntu-orange, 7%); color: #fff; display: block; margin-bottom: 0; padding: 14px 14px 13px; position: relative; text-align: center; text-decoration: none; -webkit-font-smoothing: subpixel-antialiased; -moz-font-smoothing: subpixel-antialiased; -o-font-smoothing: subpixel-antialiased; font-smoothing: subpixel-antialiased; } a.active { background: #B83A10; border-left: 1px solid lighten($ubuntu-orange, 7%); } li a:hover { background: #e1662f; border-top: 0; @include box-shadow(inset 0 2px 2px -2px #777); } } // end ul } // nav-primary } // end header.banner // Responsive header navigation #main-navigation-link { display: none; } header.banner .nav-toggle { position:absolute; right: 0; display: block; width: 48px; height: 48px; text-indent: -99999px; background-image: url(#{$asset-path}icons/navigation-menu-plain.svg); @include background-size(25px auto); background-repeat: no-repeat; background-position: center center; cursor: pointer; } header.banner .no-script { display: none; } .opera-mini header.banner .nav-toggle, .no-svg header.banner .nav-toggle { background-image: url(#{$asset-path}icons/navigation-menu-plain.png); } header.banner nav ul { background-color: $nav-bg; display: none; float: left; } header.banner .nav-primary.active { @include box-shadow(0 1px 2px 1px rgba(120, 120, 120, 0.2)); padding: 0; border-bottom: 1px solid $nav-border-dark; } header nav ul.active { display: block; } header.banner .nav-primary ul li, header.banner .nav-primary ul li a:link, header.banner .nav-primary ul li a:visited, header.banner .nav-primary ul li a:active { display: block; padding: 0; margin: 0; border: none; } header.banner .nav-primary ul li a:hover { @include box-shadow(none); background-color: $nav-hover-bg; } header.banner .nav-primary ul li a.active { background-color: $nav-active-bg; } header.banner .nav-primary ul li { border-bottom: 1px solid #F2F2F4; font-size: 16px; } header.banner .nav-primary ul li:last-child { border: 0; } header.banner nav.nav-primary ul li a:link, header.banner .nav-primary ul li a:visited, header.banner .nav-primary ul li a:hover, header.banner .nav-primary 
ul li a:active { padding: 14px 14px 13px; text-align: left; } header.banner nav.nav-primary ul.active li ul { display: none; } #menu.active:after { background-image: url(#{$asset-path}patterns/nav-arrow.svg); background-repeat: no-repeat; background-position: 50% 26px; content: ""; display: block; height: 23px; margin-left: 0; padding-bottom: 17px; position: relative; top: -3px; width: 48px; z-index: 999; } html.no-svg, .opera-mini { #menu.active:after { background-image: url(#{$asset-path}patterns/nav-arrow.png); } } // End responsive navigation /* nav-secondary */ .nav-secondary { border-bottom: 1px solid #dfdcd9; margin-bottom: 0; ul { float: left; margin-bottom: 10px; margin-left: 2px; li { float: left; margin-top: 16px; font-size: 14px; margin-right: 15px; a:link, a:visited { color: #333; font-size: 14px; float: left; } a:hover, a:active { color: $ubuntu-orange; text-decoration: none; } } li, li.active a:link, li.active a:visited { color: $ubuntu-orange; text-decoration: none; } } ul.breadcrumb { margin-left: 20px; li, li a:link, li a:visited { color: $warm-grey; margin-right: 8px; } li.active a:link, li.active a:visited { color: $ubuntu-orange; } } } header.banner h2 { @include font-size (25); display: block; left: 4px; margin-bottom:0; position: relative; text-transform: lowercase; top: 14px; } header.banner h2 a:link, header.banner h2 a:visited, header.banner a { color: #fff; float: left; text-decoration:none; } header.banner { .logo { border-left: 0; float: left; height: 48px; overflow: hidden; } .logo-ubuntu { background: url("#{$asset-path}ubuntu-logo.png") no-repeat scroll 0 10px transparent; font-size: 18px; margin-bottom: 0; position: relative; text-transform: lowercase; float: left; margin: 0; display: inline-block; height: 32px; min-width: 128px; margin-right: -20px; margin-left: 10px; padding: 7px 14px 9px 0; img { margin-right: 8px; position: absolute; left: -999em; } span { float: left; font-size: 23px; font-weight: 300; padding-left: 122px; padding-right: 20px; position: relative; top: 5px; } } .nav-primary.nav-left { .logo-ubuntu { float: right; } } .nav-primary.nav-right { .logo-ubuntu { background-image: url("#{$asset-path}logos/logo-ubuntu-white.svg"); background-size: 107px 25px; float: left; } } } html.no-svg, .opera-mini { header.banner .nav-primary.nav-right .logo-ubuntu { background-image: url("#{$asset-path}logos/logo-ubuntu-white.png"); } } @media only screen and (max-width: 295px) { // this changes the logo to the circle of friends on screens below 295px header.banner { .nav-primary.nav-right .logo-ubuntu, .logo-ubuntu { @include background-size(20px 20px); background: url('#{$asset-path}logos/logo-ubuntu_cof-white_orange-hex.svg') 0 50% no-repeat; min-width: 0; width: 38px; } } header.banner .logo-ubuntu span { padding-left: 38px; } } // end @@media only screen and (max-width: 295px) html.no-svg, .opera-mini { header.banner .logo-ubuntu { background-image: url('#{$asset-path}logos/logo-ubuntu_cof-white_orange-hex.png'); } } @media only screen and (max-width : 768px) { header.banner .nav-primary { @include box-shadow(0 1px 2px 1px rgba(0, 0, 0, 0.2)); } header.banner .nav-primary.active { @include box-shadow(none); padding: 0; } header nav ul.active { float: left; } header nav ul.active li:last-child a:link, header nav ul.active li:last-child a:visited { border-bottom: 0; } header.banner .nav-primary ul { position: relative; width: 100%; } header.banner .nav-primary ul li.active { a:link, a:visited { color: #333; font-weight: 700; } } header.banner 
.nav-primary ul li, header.banner .nav-primary ul li a:link, header.banner .nav-primary ul li a:visited, header.banner .nav-primary ul li a:active { @include box-sizing(); background: transparent; border: none; display: block; font-weight: 300; margin: 0; padding: 0; width: 100%; } header.banner .nav-primary ul li a:link, header.banner .nav-primary ul li a:visited, header.banner .nav-primary ul li a:hover, header.banner .nav-primary ul li a:active { background-color: $nav-bg; border-bottom: 1px solid $nav-border-dark; color: #333333; font-size: 1em; } header.banner .nav-primary ul li:nth-last-child(-n+2) a:link, header.banner .nav-primary ul li:nth-last-child(-n+2) a:visited { border: 0; } header.banner .nav-primary ul li a:hover { @include box-shadow(none); background: lighten($nav-bg, 3%); } header.banner .nav-primary ul li a.active { background-color: $nav-active-bg; } header.banner nav.nav-primary ul li a:link, header.banner .nav-primary ul li a:visited, header.banner .nav-primary ul li a:hover, header.banner .nav-primary ul li a:active { padding: 8px 10px; text-align: left; } header.banner .nav-primary ul li { @include box-sizing(); background: transparent; border-bottom: 0; border-right: 1px solid $nav-border-dark; float: left; width: 50%; } .nav-secondary { background: #fff; ul.second-level-nav { border-top: 1px solid $nav-border-dark; display: none; margin-bottom: 0; margin-left: 0; padding-bottom: 10px; padding-top: 10px; width: 100%; li { @include box-sizing(); width: 50%; margin: 0; float: left; a, a:link, a:visited { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; display: block; height: 100%; padding: 10px 10px 10px 20px; width: 100%; } &.active { a, a:link, a:visited { color: #333; font-weight: 700; } } } } /* end .second-level-nav */ ul.third-level-nav { display: none; margin-bottom: 0; width: 100%; padding-bottom: 20px; li { @include box-sizing(); width: 50%; margin: 0; float: left; padding-left: 30px; a, a:link, a:visited { @include box-sizing(); padding: 10px 10px 10px 0; display: block; width: 100%; height: 100%; } &.active { a, a:link, a:visited { color: #333; font-weight: 700; } } &.single-link { width: 100%; } } /* end li */ li:only-child { width: 100%; } } /* end third-level-nav */ ul.breadcrumb { @include box-sizing(); width: 100%; margin-left: 0; margin-bottom: 0; li:first-of-type { border-bottom: 1px solid $nav-border-dark; margin-bottom: -1px; } li { @include box-sizing(); color: #fff; width: 100%; display: block; height: 40px; margin: 0; a, a:link, a:visited { @include box-sizing(); font-size: 16px; width: 100%; color: #333333; display: block; margin-right: 0; text-decoration: none; padding: 8px 10px 0 10px; } &.active { margin-top: 12px; a, a:link, a:visited { color: #333; font-weight: 700; } } &:nth-of-type(2n) { margin-top: 12px; } .after { background-image: url(#{$asset-path}patterns/nav-down-arrow.svg); background-position: center center; background-repeat: no-repeat; background-size: 18px; float: right; height: 18px; margin-right: -5px; margin-top: -6px; padding: 10px; position: relative; right: 0; top: 0; width: 18px; } } li + li { display: none; a:link, a:active, a:visited { padding-left: 20px; } a.after { background-image: none; } } } &.open { ul.breadcrumb li a:after, ul.breadcrumb li a:link:after, ul.breadcrumb li a:visited:after { background-image: url(#{$asset-path}patterns/nav-up-arrow.svg); } ul.breadcrumb li + li a.after { background-image: none; } ul.breadcrumb li .after { background-image: 
url(#{$asset-path}patterns/nav-up-arrow.svg); margin-top: -7px; } ul.second-level-nav, ul.third-level-nav, ul.breadcrumb li + li { display: block; } } }/* end nav-secondary */ .no-js { .nav-secondary ul.second-level-nav { display: block; } #main-navigation-link { position: absolute; right: 10px; top: 12px; width: 20px; height: 28px; z-index: 999; text-indent: -999em; display: block; a { background-image: url("#{$asset-path}icons/navigation-menu-plain.svg"); background-position: center center; background-repeat: no-repeat; background-size: 25px auto; display: block; width: 28px; height: 28px; position: absolute; } } } html.no-svg, .opera-mini { .nav-secondary ul.breadcrumb { li .after { background-image: url(#{$asset-path}patterns/nav-down-arrow.png); } &.open { ul.breadcrumb li a:after, ul.breadcrumb li a:link:after, ul.breadcrumb li a:visited:after { background-image: url(#{$asset-path}patterns/nav-up-arrow.png); } ul.breadcrumb li .after{ background-image: url(#{$asset-path}patterns/nav-up-arrow.png); } } } header.banner .nav-primary #google-appliance-search-form button[type="submit"] { background-image: url("#{$asset-path}search-black.png"); } } header.banner { .nav-toggle { background-image: url("#{$asset-path}icons/navigation-menu-plain.svg"); background-position: center center; background-repeat: no-repeat; background-size: 25px auto; cursor: pointer; display: block; height: 48px; position: absolute; right: 0; text-indent: -99999px; width: 48px; } } html.no-svg, .opera-mini { header.banner .nav-toggle { background-image: url("#{$asset-path}icons/navigation-menu-plain.png"); } } } @media only screen and (min-width : 768px) { header.banner .nav-primary ul li { border-bottom: 0; } } @media only screen and (min-width : 769px) { header.banner { -moz-box-shadow: 0 2px 2px -2px #777777 inset, 2px 1px #FFFFFF; -webkit-box-shadow: 0 2px 2px -2px #777777 inset, 2px 1px #FFFFFF; box-shadow: 0 2px 2px -2px #777777 inset, 2px 1px #FFFFFF; nav.nav-primary { -moz-box-shadow: none; -webkit-box-shadow: none; box-shadow: none; border-bottom: 0; } } header.banner .nav-toggle { display: none; } header.banner nav ul { background-color: transparent; display: block; } header.banner .nav-primary ul li { border-left: 1px solid #C64012; } header.banner .nav-primary ul li a:active, header.banner .nav-primary ul li a:hover, header.banner .nav-primary ul li a:visited, header.banner nav.nav-primary ul li a:link { border-left: 1px solid #EC5B29; } header.banner .nav-primary ul li:last-child { border-right: 1px solid #C64012; border-left: 1px solid #C64012; } header.banner .nav-primary ul li a.active { background-color: #B83A10; } header.banner .nav-primary ul li a:hover { background-color: #E1662F; } .nav-secondary { ul:last-child li:last-child { padding-bottom: 10px; } ul.breadcrumb, ul.second-level-nav, ul.third-level-nav { li { margin-right: 15px; } } ul.breadcrumb { float: left; li { margin-bottom: 10px; } } ul { float: none; margin-bottom: 0; } ul li { margin-bottom: 5px; } } }
@media only screen and (min-width: 984px) { header.banner { margin-bottom: 20px; } header.banner nav.nav-primary ul { display: block; } header.banner .nav-primary, #nav-global .nav-global-wrapper { width: 984px; /* 980px / 15px (baseline font); 980px + (60px x 2) = 1100px */ } header.banner .nav-primary.nav-right .logo-ubuntu { margin-left: 0; } } /* * Main navigation dropdown styles */ header.banner .nav-primary ul { position: static; } header.banner .nav-primary li ul { @include box-shadow(0 2px 2px -1px #777777); @include rounded-corners(10px); background: #f7f7f7; border: 1px solid #d5d5d5; display: none; float: none; margin: 0; padding: 5px 0; position: absolute; top: 51px; width: 200px; } // the new arrow that appears if there is secondary nav when you hover over the main nav header.banner .nav-primary li:hover ul:after { background: url("#{$asset-path}patterns/arrow-up-smaller.png") no-repeat; content: ''; display: block; height: 8px; left: 20px; position: relative; top: -13px; width: 200px; z-index: 999; } // show secondary nav differently if :after isn't supported, remove arrow and move secondary nav up to meet the bottom of the navbar .no-generatedcontent header.banner .nav-primary li ul { @include rounded-corners(0 0 10px 10px); top: 48px; } // the old arrow that appears if there is secondary nav when you hover over the main nav header.banner .nav-primary li ul .arrow-up { display: none; } header.banner .nav-primary li ul li { border: 0; float: none; } header.banner .nav-primary li ul li a:link, header.banner .nav-primary li ul li a:visited { border: 0; color: #333333; padding: 0 0 11px 14px; text-align: left; width: 170px; } header.banner .nav-primary li ul li a:hover { background: none repeat scroll 0 0 transparent; @include box-shadow(none); color: #DD4814; } header.banner .nav-primary li ul li.first a:link, header.banner .nav-primary li ul li.first a:visited, header.banner .nav-primary li ul li:first-of-type a:link { padding: 10px 14px; } header.banner .nav-primary li ul li.active a:link, header.banner .nav-primary li ul li.active a:visited { background: none repeat scroll 0 0 transparent !important; } header.banner .nav-primary li ul .promo { border-top: 1px solid #D5D5D5; float: left; margin-top: 5px; padding: 15px 0 0; } header.banner .nav-primary li ul .promo a:link, header.banner .nav-primary li ul .promo a:visited { background: none repeat scroll 0 0 transparent; border-left: 0 none; color: #333333; height: auto; padding: 0; text-align: left; } header.banner .nav-primary li ul .promo p { margin: 0 10px; } header.banner .nav-primary li ul .promo a:hover { box-shadow: none; color: #DD4814; } header.banner .nav-primary li ul .promo img { margin-top: 14px; margin-bottom: -6px; @include rounded-corners(0 0 10px 10px); position:relative; top:1px; } header.banner .nav-primary li ul .promo .category { color: $warm_grey; font-size: 11px; margin: 0 10px; text-transform: uppercase; } header.banner .nav-primary li:hover ul { display: block; } html.lt-ie8 header.banner .nav-primary li:hover ul { display: none; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_helpers.scss0000644000000000000000000000666413056115004025750 0ustar 00000000000000@charset 'UTF-8'; .left { float: left; } .right { float: right; } // additional helper class for shouty text .caps { text-transform: uppercase; } img { border: 0 none; height: auto; max-width: 100%; &.left { margin-right: 0; } &.touch-border { margin-bottom: -3px; } } .accessibility-aid, .off-left { position: absolute; left: 
-999em; } a.external { @include background-size(.7em .7em); padding-right: .9em; background-image: url('#{$asset-path}external-link-orange.svg'); background-position: right 1px; background-repeat: no-repeat; } .opera-mini a.external, .no-svg a.external { background-image: url('#{$asset-path}external-link-orange.png'); } .text-center, .align-center { text-align: center; } .no-margin { margin: 0; } .no-margin-bottom { margin-bottom: 0; } .no-padding-bottom { padding-bottom: 0; } .pull-left-20 { margin-left: -20px; } .pull-right-20 { margin-right: -20px; } .pull-left-40 { margin-left: -40px; } .pull-right-40 { margin-right: -41px; } .no-border { border: 0; } .link-top { @include font-size (14); clear: both; margin-bottom: 40px; margin-top: -40px; a { background: #fff; margin-right: 10px; margin-top: -17px; padding: 5px; float: right; } } .pull-bottom-right { position: absolute; right: 0; bottom: 0; left: auto; } .box .pull-bottom-right { @include rounded-corners(0 0 4px 0); } .pull-bottom-left { margin-left: -20px; margin-bottom: -21px; } .pull-top-right { margin-left: -20px; margin-top: -21px; } div.box-image-centered span img, div.row-image-centered span img, div.row.row-image-centered span img, img { &.priority-0 { position: absolute; left: -999em; } } .priority-0, .not-for-small { position: absolute; left: -999em; } // responsive video solution .video-container { position: relative; padding-bottom: 56.25%; padding-top: 30px; height: 0; overflow: hidden; iframe { position: absolute; top: 0; left: 0; width: 100%; height: 100%; } & + h3, & + .video-title { margin-top: 20px; } } @media only screen and (max-width : 768px) { .pull-right-40 { margin-right: -30px; } .pull-bottom-right, .pull-bottom-left { position: static; } img.pull-bottom-left { margin-bottom: 0; margin-left: 0; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 768px) { div.box-image-centered span img, div.row-image-centered span img, div.row.row-image-centered span img, img { &.priority-0 { position: relative; left: auto; } } .priority-0, .not-for-small { position: relative; left: auto; } .for-mobile, .for-small { position: absolute; left: -999em; } .pull-right { float: right; margin-right: -30px; } img.pull-left { margin-left: -30px; } img.touch-border { float: left; margin-bottom: -30px; } } // end @media only screen and (min-width : 768px) @media only screen and (min-width : 769px) { img.left { margin-right: 20px; } } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { img.touch-border { float: left; margin-bottom: -40px; } img.pull-left { margin-left: -40px; } .pull-right { float: right; margin-right: -40px; } .for-tablet, .for-medium { display: none; } .no-border { border: 0; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_image-centered.scss0000644000000000000000000000626313056115004027152 0ustar 00000000000000@charset 'UTF-8'; .row.row-image-centered, div.box-image-centered, div.row-image-centered, div.row.row-image-centered { padding: 20px 10px 0; } .row-box.row-image-centered { padding-top: 20px; padding-bottom: 20px; } .row.row-image-centered { padding-top: 40px; padding-bottom: 40px; } .row-hero.row-image-centered { padding-top: 0; } div.row-image-centered, div.box-image-centered, div.row.row-image-centered { //display: block; div, span { //display: block; float: none; } span { width: 100%; img { height: auto; max-width: 100%; display: block; padding: 0; margin: 0 
auto; margin-bottom: 20px; } } p, h2, h3 { float: none; } } @media only screen and (min-width : 768px) { div.row-image-centered, div.row.row-image-centered, div.box-image-centered { padding-bottom: 20px; display: table; div { float: none; display: table-cell; position: relative; p, h2, h3 { display: block; width: 100%; float: left; } + span img { // if image is on the right hand side padding-right: 0; margin-bottom: 20px; } } span { display: table-cell; float: none; position: relative; text-align: center; top: 0; vertical-align: middle; width: auto; img { padding-right: 20px; // if image is on the left hand side } } } /* alternative to row-image-centered requires equal-height class on row add align-vertically to the div containing the image http://caniuse.com/transforms2d */ .js .align-vertically { -moz-transform-style: preserve-3d; -webkit-transform-style: preserve-3d; transform-style: preserve-3d; img, div { -ms-transform: translateY(-50%); // for IE9 -webkit-transform: translateY(-50%); position: relative; top: 50%; transform: translateY(-50%); } } } //@media only screen and (min-width : 768px) div.box-image-centered { padding-top: 20px; } @media only screen and (min-width : 768px) { .row.row-image-centered, div.box-image-centered, div.row-image-centered, div.row.row-image-centered { padding: 30px; } div.box-image-centered div + span img, div.row-image-centered div + span img, div.row.row-image-centered div + span img, div.box-image-centered span img.priority-0, div.row-image-centered span img.priority-0, div.row.row-image-centered span img.priority-0 { margin-right: auto; display: table-cell; margin-bottom: 0; } } // @media only screen and (min-width : 768px) @media only screen and (min-width: 984px) { .row.row-image-centered, div.row-image-centered, div.row.row-image-centered, div.box-image-centered { padding: 60px 40px 60px; display: table; div { float: none; display: table-cell; position: relative; p, h2, h3 { display: block; width: 100%; float: left; } + span img { // if image is on the right hand side padding-right: 0; //margin-bottom: 20px; } } span { display: table-cell; float: none; position: relative; text-align: center; top: 0; vertical-align: middle; width: auto; img { padding-right: 20px; // if image is on the left hand side } } } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_inline-logos.scss0000644000000000000000000000256613056115004026702 0ustar 00000000000000@charset 'UTF-8'; ul.inline-logos { float: left; margin-left: 0; padding: 0; text-align: center; width: 100%; li { clear: none; display: inline-block; float: none; margin: 10px 20px; padding: 0; &.clear-row { clear: left; } &.last-item { border: 0; } } img { @include transition; vertical-align: middle; max-width: 115px; max-height: 32px; } } .inline-icons { margin: 0 0 $gutter-width; li { margin-right: 20px; margin-bottom: 20px; text-align: left; display: inline-block; &.last-item { margin-right: 0; } } &.no-margin-bottom li { margin-bottom: 0; } img { vertical-align: middle; max-width: 115px; max-height: 32px; } } @media only screen and (max-width : 768px) { ul.inline-logos { img { max-width: 172px; max-height: 48px; } } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { ul.inline-logos { li { clear: none; display: inline-block; height: auto; margin: 20px 0; line-height: 60px; padding: 0 40px; img { float: none; vertical-align: middle; max-width: 200px; max-height: 45px; } } } } //@media only 
screen and (min-width : 769px) @media only screen and (min-width: 984px) { .inline-icons { text-align: left; margin-bottom: 20px; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_lists.scss0000644000000000000000000000450713056115004025436 0ustar 00000000000000@charset 'UTF-8'; .list, .list-ubuntu, .list-canonical { list-style: none; margin-left: 0; li { border-bottom: 1px dotted $warm-grey; margin-bottom: 0; padding: 10px 0; } li:last-of-type, li.last-item { border: 0; padding-bottom: 0; } } .list article { border-bottom: 1px dotted $warm-grey; margin-bottom: 0; padding: 10px 0; } .list-spaced article, .list-spaced li { padding: 30px 0; } nav .list a { display: block; } .list-ubuntu li, .list-canonical li { background-repeat: no-repeat; background-position: 0 1em; padding-left: 25px; } .list-ubuntu li { background-image: url('#{$asset-path}patterns/tick-orange.svg'); } .list-canonical li { background-image: url('#{$asset-path}patterns/tick-midaubergine.svg'); } .list-warm li { background-image: url('#{$asset-path}patterns/tick-warmgrey.svg'); } .list-dark li { background-image: url('#{$asset-path}patterns/tick-darkaubergine.svg'); } .vertical-divider .list-canonical li, .vertical-divider .list-ubuntu li { padding-left: 25px; } html.no-svg, .opera-mini { .list-ubuntu li { background-image: url('#{$asset-path}patterns/tick-orange.png'); } .list-canonical li { background-image: url('#{$asset-path}patterns/tick-midaubergine.png'); } .list-warm li { background-image: url('#{$asset-path}patterns/tick-warmgrey.png'); } .list-dark li { background-image: url('#{$asset-path}patterns/tick-darkaubergine.png'); } } .no-bullets { list-style: none; margin-left: 0; } .row .combined-list { ul, div { margin-bottom: 0; } li.last-item { border-bottom: 1px dotted $warm-grey; padding-bottom: 10px; } div.last-col, ul.last-col { margin-bottom: 20px; li.last-item { border-bottom: 0; padding-bottom: 0; } } } .inline { margin-left: 0; li { display: inline; list-style: none; margin-left: 0; float: left; } } @media only screen and (min-width : 768px) { .row .combined-list { ul, div { margin-bottom: 20px; } li.last-item { border-bottom: 0; padding-bottom: 0; } } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_notifications.scss0000644000000000000000000000067513056115004027153 0ustar 00000000000000@charset 'UTF-8'; div.warning { @include rounded-corners(4px); background-color: #fdffdc; color: $cool-grey; p { padding: 0; margin: 0; } } @media only screen and (max-width : 768px) { } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_rows.scss0000644000000000000000000000617213056115004025272 0ustar 00000000000000@charset 'UTF-8'; // rows sass // contents: // row // row-hero // row-background // strips // row-aux // row-step // row // ---------------------------------------------------------- .row { @include box-sizing; border-bottom: 1px dotted $warm-grey; clear: both; padding: 20px 10px 0; position: relative; br { display: none; } 
&.no-padding-bottom { padding-bottom: 0 !important; } } .row-grey { background: $light-grey; } .no-border { border: 0; } // row-hero // ---------------------------------------------------------- #main-content .row-hero { margin-top: 20px; padding-top: 0; } // row-background // ---------------------------------------------------------- .row-background { color: #fff; background: url("#{$asset-path}backgrounds/image-background-wallpaper.jpg") no-repeat scroll 50% 50% #4b1827; a.alternate { color: #fff; text-decoration: underline; } a.alternate:hover { color: rgba(255, 255, 255, .6); } @media only screen and (min-width : 768px) { background-position: center 50%; background-size: 100% auto; } } // strips // ---------------------------------------------------------- .strip { width: 100%; display: block; } .strip-dark { background-color: $dark-aubergine; background-image: url("#{$asset-path}backgrounds/background-grid.png"); background-repeat: repeat; color: #fff; .list-ubuntu li { border: 0; } .resource { color: #333; -moz-box-shadow: none; -webkit-box-shadow: none; box-shadow: none; } .resource:before { border-right-color: #2c001e; } } #main-content .strip-dark { .resource:before { border-bottom-width: 29px; right: -2px; top: -1px; } .resource:hover:before { border-bottom-width: 34px; } .resource h2 { padding-right: 20px; } } // row-aux // ---------------------------------------------------------- .row-aux { background-color: rgba(255, 255, 255, .6); text-align: center; h2, p { text-align: left; } a p { color: #333; margin-bottom: 30px; } } // row-step // ---------------------------------------------------------- .row-step { h2 { position: relative; top: 5px; } .step { position: relative; top: -5px; height: 32px; width: 32px; border-radius: 50%; border: 3px solid $ubuntu-orange; color: $ubuntu-orange; line-height: 32px; text-align: center; background-color: #fff; font-size: 23px; font-weight: 400; } } @media only screen and (max-width : 768px) { } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 768px) { .row { padding: 30px; } #main-content .row-hero { margin-top: 40px; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { .row-step .step { height: 42px; width: 42px; line-height: 42px; } .row br { display: block; } } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { .row br { display: block; } .row { padding: 60px 40px 40px; } .no-border { border: 0; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_search.scss0000644000000000000000000002167713056115004025554 0ustar 00000000000000@charset "UTF-8"; .header-search, #box-search { padding: 7px 0 7px 14px; overflow: hidden; input[type="search"], input[type="text"] { -webkit-appearance: none; @include box-shadow(inset 0 1px 4px rgba(0,0,0,0.2)); @include box-sizing(); @include rounded-corners(4px); @include transition; background-color: #be3d00; border: none; color: #fff; display: block; float: left; font-size: 16px; height: 2.1em; margin-bottom: 0; padding: 0.5em 2.5em 0.5em 0.5em; width: 100%; } // User agents are required to ignore a rule with an unknown selector. i.e: a group of selectors containing an invalid selector is invalid. // So we need separate placeholder rules for each browser. Otherwise the whole group would be ignored by all browsers. 
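// For illustration (this sketch is not part of the original stylesheet): a single grouped rule such as
//   ::-webkit-input-placeholder, ::-moz-placeholder { color: white; opacity: 0.4; }
// would be dropped in its entirety by both engines, because each treats the other's
// pseudo-element as an unknown selector and must discard the whole rule set.
// Hence the one-browser-per-rule repetition below.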
::-webkit-input-placeholder { color: white; opacity: 0.4; } ::-moz-placeholder { color: white; opacity: 0.4; } :-ms-input-placeholder { color: white; opacity: 0.4; } input:-moz-placeholder { color: white; opacity: 0.4; } ::placeholder { color: white; opacity: 0.4; } input[type="search"]:focus { background-color: #a63603; } button[type=submit] { padding: 3px 2px; line-height: 0; float: left; margin-left: -40px; display: block; background: none; overflow: visible; &:hover { background: none; } img { height: 28px; width: 28px; } } } header.banner .search-toggle { @include background-size(20px 20px); background-image: url('#{$asset-path}search_icon_white_64.png'); background-image: url('#{$asset-path}search.svg'); background-position: center center; background-repeat: no-repeat; display: block; height: 48px; outline: none; overflow: hidden; position: absolute; right: 58px; text-indent: -999em; top: 0; width: 24px; } .search-toggle:link, .search-toggle:active { outline: none; } #box-search, .header-search { background: #f0f0f0; border: 0; display: none; float: left; margin-bottom: 0; position: relative; margin: 0 0 -1px 0; padding: 0; width: 100%; z-index: 3; } #box-search.active, .header-search.active, .header-search.open { display: block; } #box-search div, .header-search div { @include box-shadow( inset 0 -4px 4px -4px rgba(0, 0, 0, .3), inset 0 5px 5px -5px rgba(0, 0, 0, 0.3)); background: $nav-bg; margin: 10px; position: relative; z-index: 1; } #box-search form input[type="search"], .header-search form input[type="search"] { @include font-size (16); @include rounded-corners(); @include box-shadow(0 2px 2px rgba(0, 0, 0, 0.3) inset, 0 -1px 3px rgba(0, 0, 0, 0.2) inset, 0 2px 0 rgba(255, 255, 255, 0.4)); @include box-sizing(); background: #fff; border: 0; color: #333; font-size: 16px; height: auto; margin: 0; float: left; padding: 9px 10px; width: 100%; } .yes-js .header-inner #box-search, .yes-js .header-inner .header-search { display: none; form { @include box-sizing(); margin-left: 0; margin-right: 0; overflow: hidden; padding: 10px; top: 0; z-index: 999; position: relative; width: 100%; } } @media only screen and (max-width : 768px) { header.banner { .search-toggle { right: 48px; } } html.no-svg, .opera-mini { .search-toggle { background-image: url("#{$asset-path}search-white.png"); } } } // @media only screen and (max-width : 768px) @media only screen and (min-width : 768px) { header.banner .search-toggle { display: none; } } // @media only screen and (min-width : 768px) @media only screen and (min-width : 960px) { #box-search, .header-search { background: none; overflow:hidden; padding: 7px 0 7px 14px; border-right: 0 none; float: right; margin-bottom: 0; padding-bottom: 5px; padding-right: 0; padding-top: 7px; max-width: 220px; form input[type="text"], form input[type="search"] { @include box-shadow(0 2px 4px rgba(0, 0, 0, 0.4) inset); @include box-sizing(content-box); background: url("#{$asset-path}icons/icon-search.png") no-repeat scroll 5px center, none repeat scroll 0 0 #BE3D00; border: 6px solid #DE6532; border-width: 0 0 1px; color: #fff; font-size: 0.813em; height: 24px; margin-bottom: 0; padding: 4px 4px 4px 30px; transition: all 0.5s ease 0s; width: 86px; } } } // @media only screen and (min-width : 769px) @media only screen and (max-width : 960px) { header.banner nav.nav-primary { .header-search { padding: 0; position: relative; top: 0; width: 100%; input[type="search"] { border-radius: 0; background: $light_grey;
color: #333; } button[type="submit"] { width: 32px; height: 38px; background: url('#{$asset-path}search-black.svg') no-repeat scroll center center transparent; background-size: 28px 28px; img { max-width: none; display: none; } } } .header-search.open { display: block; } } header.banner { .search-toggle { background-image: url("#{$asset-path}search-white.svg"); background-position: center center; background-repeat: no-repeat; background-size: 25px auto; cursor: pointer; right: 0; display: block; height: 48px; position: absolute; text-indent: -99999px; width: 48px; } } html.no-svg, .opera-mini { header.banner .search-toggle { background-image: url("#{$asset-path}img/search-white.png"); } } .opera-mini { x:-o-prefocus, header.banner .search-toggle { -o-background-size: 25px auto; background-size: 25px auto; } } } @media only screen and (min-width: 984px) { #box-search, .header-search { display: block; margin-right: 0; input[type="search"], input[type="text"] { } form input[type="text"]:focus { width: 160px; } } } // @media only screen and (min-width: 984px) @media only screen and (max-width : 768px) { header.banner .search-toggle { right: 48px; } } // @media only screen and (min-width : 768px) // search results body.ubuntu-search, body.search-results, body.search-no-results { .nav-secondary { display: none; } section > h1, section article h1 { padding-bottom: 10px; font-size: 1.438em; margin-bottom: 0; } section > h1 { border-bottom: 1px dotted #dfdcd9; } .main-search { padding: 20px 0; margin: 0 0 20px 0; background-color: transparent; input[type="search"] { float: left; width: 100%; font-size: 2em; border: 1px solid #999; -moz-box-sizing: border-box; box-sizing: border-box; padding: 0.2em 65px 0.2em 0.2em; } button[type=submit] { padding: 4px; line-height: 0; float: left; margin-left: -53px; display: block; background: none; overflow: visible; width: auto; margin-top: -4px; &:hover { background: none; } img { height: 45px; width: 45px; } } } .search-result h1 .title-main { margin-right: 20px; } .search-result h1 .result-url { color: #999; overflow: hidden; text-overflow: ellipsis; display: block; vertical-align: bottom; padding-bottom: 2px; } .search-result h1 .result-url a { color: #999; } .search-result p { margin-bottom: 0; } .num-results { display: inline-block; margin-left: 20px; } .bottom-results-total { text-align: center; width: 100%; overflow: visible; padding-top: 20px; margin: 0; } .bottom-nav { overflow: hidden; margin-top: -26px; } .bottom-nav ul { margin-bottom: 0; margin-left: 0; padding: 0; overflow: hidden; } .bottom-nav li { float: left; margin-left: 15px; } .bottom-nav li:first-child { margin-left: 0; } .nav-back { float: left; } .nav-back li:before { content: "\2039"; /* left chevron ‹ */ color: $ubuntu_orange; margin-right: 5px; } .nav-back li.item-extreme:before { content: "\2039\2039"; /* double left chevron ‹ */ } .nav-forward { float: right; } .nav-forward li:after { content: "\203A"; /* right chevron › */ color: $ubuntu_orange; margin-left: 5px; } .nav-forward li.item-extreme:after { content: "\203A\203A"; /* double right chevron › */ } .error-notification { background-color: #fdffdc; color: #333; padding: 20px; -moz-box-sizing: border-box; box-sizing: border-box; width: 100%; margin-top: 20px; display: block; } .result-line { color: #ada69e; } .results-top { border-bottom: 1px dotted #dfdcd9; padding-bottom: 0.5em; } .search-container { padding-bottom: 0; } } @media only screen and (min-width : 768px) { .ubuntu-search { .main-search { button[type=submit] { 
margin-left: -60px; margin-top: 0; } } } } /* end @media only screen and (min-width : 768px) */ maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_slider.scss0000644000000000000000000000311013056115004025547 0ustar 00000000000000@charset 'UTF-8'; .slider { @include rounded-corners(4px); background: $light-grey; padding-top: $gutter-width * 2; .slide-window { overflow: hidden; position: relative; height: 450px; -moz-transition: left 1s; -webkit-transition: left 1s; -o-transition: left 1s; transition: left 1s; } .slide-container { position: absolute; width: 700px * 4; -moz-transition: left 1s; -webkit-transition: left 1s; -o-transition: left 1s; transition: left 1s; left: 0; } .slider-dots { ul { position: absolute; top: 550px; left: 220px; z-index: 5; } li { background: url('#{$asset-path}patterns/sprite-pager.png') no-repeat; background-position: 0 -8px; float: left; height: 7px; list-style-type: none; margin-right: .75em; text-indent: -9999em; width: 7px; &.active { background-position: 0 0; } } a { display: block; outline: 0; } } .slide { float: right; width: 700px; h3 { margin-top: 65px; display: inline-block; } p { width: 350px; } } .arrow-prev, .arrow-next { font-size: 5em; margin-top: 150px; display: block; color: $warm-grey; outline: 0; } .arrow-prev:hover, .arrow-next:hover { text-decoration: none; color: $cool-grey; } .arrow-prev:active, .arrow-next:active { padding-top: 1px; text-decoration: none; } .arrow-prev:focus, .arrow-next:focus { text-decoration: none; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_structure.scss0000644000000000000000000000240613056115004026334 0ustar 00000000000000@charset 'UTF-8'; .wrapper, header.banner .nav-primary, nav div.footer-a div, .inline-lists ul, .legal { @include box-sizing(); width: auto; } .inner-wrapper { @include box-sizing(); background: #fff; clear: both; display: block; float: left; width: 100%; margin: 0; padding-bottom: 20px; position: relative; z-index: 1; } @media only screen and (min-width : 768px) { .med-six-col { .three-col { width: 48%; } .three-col:nth-of-type(2n) { margin-right: 0; } } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { .inner-wrapper { border-radius: 4px; padding-bottom: 20px; } } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { .wrapper { @include box-sizing; background: #fff; margin: 0 auto; position: relative; text-align: left; width: 984px; } .inner-wrapper { @include box-shadow(0 0 3px #c9c9c9); margin: 10px 0 30px; } .three-col, .med-six-col .three-col { width: 23.30%; } .three-col.last-col:nth-of-type(2n) { margin-right: 0; } .med-six-col { .three-col:nth-of-type(2n) { margin-right: 20px; } .three-col.last-col { margin-right: 0; } } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_tabbed-content.scss0000644000000000000000000000722213056115004027166 0ustar 00000000000000@charset 'UTF-8'; html.js .tabbed-content .accordion-button { @include box-sizing; height: auto; padding-bottom: .6em; padding-right: 20px; } html.yui3-js-enabled { .tabbed-menu { display: none; padding-bottom: 20px; padding-top: 20px; } .arrow { display: none; position: absolute; visibility: hidden; } .tabbed-content { @include rounded-corners(4px); padding: 8px 8px 0; background: $light-grey; margin-bottom: 8px; &.hide { display: block; opacity: 1 !important; } .title { display: none; } div { display: none; } .accordion-button { @include
box-sizing; background: url("#{$asset-path}icons/icon-arrow-down.svg") no-repeat scroll right 3px $light-grey; color: $cool-grey; display: block; font-size: 16px; padding-bottom: .6em; padding-right: 20px; width: 100%; } &.open { .accordion-button { background-image: url("#{$asset-path}icons/icon-arrow-up.svg"); margin-bottom: 10px; } div { display: block; } } } html.yui3-js-enabled.opera-mini .tabbed-content, html.yui3-js-enabled.no-svg .tabbed-content { .accordion-button { background-image: url("#{$asset-path}icons/icon-arrow-right.png"); } &.open { .accordion-button { background-image: url("#{$asset-path}icons/icon-arrow-up.png"); } } } html.yui3-js-enabled.opera-mini.tabbed-content { .accordion-button { background-image: none; margin-bottom: 10px; } div { display: block; } } @media only screen and (min-width : 768px) { .tabbed-menu { display: block; } .tabbed-content { margin-bottom: 20px; padding: 40px; &.hide { display: none; opacity: 0 !important; } .title { display: block; } div { display: block; } .vertical-divider div { display: table-cell; } .accordion-button { display: none; } } } .accordion-button.active { background-color: transparent; } } @media only screen and (min-width : 768px) { .tabbed-menu { @include box-shadow(0 -1px 10px #cfcfcf inset); @include box-sizing; @include rounded-corners(4px 4px 0 0); background: none repeat scroll 0 0 $light-grey; padding-bottom: $gutter-width; padding-top: $gutter-width; position: relative; ul { @include box-sizing; display: table; margin-bottom: 0; padding: 0; position: relative; table-layout: fixed; width: 100%; } li { text-align: center; display: table-cell; } a { color: #666; display: block; outline: none; .active { color: $canonical-aubergine; text-decoration: none; } } a:hover { text-decoration: none; } .arrow { bottom: 0; position: absolute; } } .tabbed-content { @include clearfix(); padding: $gutter-width $gutter-width*2 0; .row { padding-left: 0; padding-right: 0; } } .tabbed-content .main-content { padding-bottom: 40px; } html.yui3-js-enabled .tabbed-content.hide { display: none; opacity: 0; } .tabbed-content-bg { @include box-sizing; background: #fff; margin-left: 20px; margin-right: 20px; } .tabbed-content-bg .row-box { padding-left: 0; padding-right: 0; } html.yui3-js-enabled .arrow { visibility: visible; } } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_tooltips.scss0000644000000000000000000000341313056115004026150 0ustar 00000000000000@charset 'UTF-8'; .yui3-tooltip-hidden { display: none; } .yui3-tooltip-content { @include box-shadow(0 2px 8px hsla(0, 0%, 0%, .20)); background: url("#{$asset-path}patterns/grey-textured-background.jpg") repeat scroll 0 0 transparent; -moz-border-radius: 4px; -webkit-border-radius: 4px; border-radius: 4px; border: 1px solid #e3e3e3; color: $cool-grey; margin-top: -30px; max-width: 520px; position: relative; } .yui3-tooltip .yui3-widget-bd { padding: 20px; width: 320px; * { max-width: 100%; } h5 { margin-bottom: 10px; font-size: 22px; font-weight: 300; } img { float: left; margin-right: 10px; } q { border-bottom: 1px dotted $warm-grey; border-top: 1px dotted $warm-grey; display: block; font-size: 16px; font-style: italic; margin-bottom: 0; margin-top: 20px; padding: 10px 0; } p:last-child { margin-bottom: 0; } } .yui3-tooltip .yui3-widget-ft, .yui3-tooltip .yui3-widget-ft div { position: absolute; width: 0; height: 0; border-style: solid; line-height: 0; font-size: 0; } .yui3-tooltip .yui3-tooltip-align-bottom .yui3-widget-ft, .yui3-tooltip .yui3-tooltip-align-bottom 
.yui3-widget-ft div { top: -10px; left: 50%; margin: 0 0 0 -10px; border-width: 0 10px 10px; border-color: $box-solid-grey transparent; } .yui3-tooltip .yui3-tooltip-align-bottom .yui3-widget-ft div { top: 0; border-color: $box-solid-grey transparent; } .tooltip-label { @include box-shadow(3px 3px 6px rgba(0, 0, 0, .3)); @include rounded-corners(4px); background: #fff; border: 1px solid #dfdcd9; display: none; font-size: 13px; line-height: 1; margin: 0; padding: 6px 5px; position: absolute; top: -20px; white-space: nowrap; z-index: 1000; } maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_typography.scss0000644000000000000000000000211513056115004026477 0ustar 00000000000000@charset 'UTF-8'; .caps-centered, .muted-heading { font-size: .875em; margin-bottom: 20px; text-align: center; text-transform: uppercase; } p.intro { @include font-size (16); line-height: 1.4; } .row div { p:last-child, ul:last-child { margin-bottom: 0; } } .four-col p:last-child { margin-bottom: 0; } .note { color: $warm-grey; font-size: .813em; } @media only screen and (min-width : 768px) { p.intro { font-size: 1.13333em; } } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { h1 { font-size: 2.8125em; } h2 { font-size: 2em; margin-bottom: .375em; } h3 { font-size: 1.438em; margin-bottom: .522em; } h4 { font-size: 1em; margin-bottom: .75em; } h5 { font-size: 1em; } p, li, code, pre { font-size: 16px; line-height: 1.5; margin-bottom: .75em; } p.intro { font-size: 1.25em; } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/_vertical-divider.scss0000644000000000000000000000331613056115004027532 0ustar 00000000000000@charset 'UTF-8'; .row.vertical-divider { padding-bottom: 40px; } .vertical-divider div, .vertical-divider li { border-right: 0; display: block; padding-left: 0; padding-right: 0; } .vertical-divider-full { padding-bottom: 0; > div { padding-bottom: 40px; } } .row.vertical-divider-full { padding-bottom: 0; } @media only screen and (max-width : 767px) { .vertical-divider > div, .vertical-divider > li { border-bottom: 1px dotted $warm-grey; padding-bottom: 20px; } .vertical-divider div:last-of-type, .vertical-divider li:last-of-type, .inline-icons li:last-of-type { border-bottom: 0; padding-bottom: 5px; } .row.vertical-divider { padding-bottom: 0; } .equal-height div, .equal-height li { height: auto !important; } } @media only screen and (max-width : 768px) { } // end @media only screen and (max-width : 768px) @media only screen and (min-width : 769px) { } //@media only screen and (min-width : 769px) @media only screen and (min-width: 984px) { .row.vertical-divider { padding-bottom: 60px; } .vertical-divider { > div, > li { border-right: 1px dotted $warm-grey; display: table-cell; float: none; margin-right: 0; padding-left: 20px; padding-right: 20px; vertical-align: top; } > div:last-child, > li:last-child, > div.last-col, > li.last-col, > div:last-of-type, > li:last-of-type { border-right: 0; padding-right: 0; } > div:first-child, > li:first-child, > div.first-col, > li.first-col, > div:first-of-type, > li:first-of-type { padding-left: 0; } } } // end @media only screen and (min-width: 984px) maas-1.9.5+bzr4599.orig/src/maasserver/static/scss/ubuntu/patterns/patterns.scss0000644000000000000000000000122013056115004025766 0ustar 00000000000000@charset "UTF-8"; /** * Ubuntu Patterns Stylesheet * * 
The CSS file required by Ubuntu patterns page * * @project Ubuntu Patterns * @author Web Team at Canonical Ltd * @copyright 2014 Canonical Ltd * */ @import '_structure'; @import '_helpers'; @import '_typography'; @import '_header'; @import '_search'; @import '_footer'; @import '_contextual-footer'; @import '_buttons'; @import '_forms'; @import '_rows'; /* @import '_image-centered'; */ @import '_boxes'; @import '_arrows'; @import '_lists'; @import '_inline-logos'; @import '_blockquotes'; @import '_tabbed-content'; @import '_vertical-divider'; @import '_slider'; @import '_tooltips'; maas-1.9.5+bzr4599.orig/src/maasserver/support/__init__.py0000644000000000000000000000000013056115004021425 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/0000755000000000000000000000000013056115004021326 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/__init__.py0000644000000000000000000000000013056115004023425 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/migration.py0000644000000000000000000001316313056115004023675 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Shared namespace --> per-tenant namespace migration. Perform the following steps to migrate: 1. When no files exist (i.e. no Juju environments exist): do nothing. 1a. When no *unowned* files exist: do nothing. 2. When there's only one user: assign ownership of all files to that user. 3. When there are multiple users and a `provider-state` file: parse that file to extract the instance id of the bootstrap node. From that instance id, get the identity of the user who deployed this environment (that's the owner of the bootstrap node). Then proceed as in 4, using that user as the "legacy" user. 4. When there are multiple users: create a new "legacy" user, assign ownership of all files and allocated/owned nodes to this user, copy all public SSH keys to this user, and move all API credentials to this user. There's not a lot we can do about SSH keys authorised to connect to the already deployed nodes in #3, but this set will only ever decrease: nodes allocated after this migration will permit access from any of the users with SSH keys prior to the migration. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "migrate", ] from django.contrib.auth.models import User from maasserver.models import ( FileStorage, Node, SSHKey, ) from maasserver.models.user import ( get_auth_tokens, SYSTEM_USERS, ) from maasserver.support.pertenant.utils import get_bootstrap_node_owner from maasserver.utils.orm import get_one legacy_user_name = "shared-environment" def get_legacy_user(): """Return the legacy namespace user, creating it if need be.""" try: legacy_user = User.objects.get(username=legacy_user_name) except User.DoesNotExist: # Create the legacy user with a local, probably non-working, email # address, and an unusable password. legacy_user = User.objects.create_user( email="%s@localhost" % legacy_user_name, username=legacy_user_name) legacy_user.first_name = "Shared" legacy_user.last_name = "Environment" legacy_user.is_active = True legacy_user.save() # Persist the attributes set above. return legacy_user def get_unowned_files(): """Returns a `QuerySet` of unowned files.""" return FileStorage.objects.filter(owner=None) def get_real_users(): """Returns a `QuerySet` of real,
not system, users.""" users = User.objects.exclude(username__in=SYSTEM_USERS) users = users.exclude(username=legacy_user_name) return users def get_owned_nodes(): """Returns a `QuerySet` of nodes owned by real users.""" return Node.objects.filter(owner__in=get_real_users()) def get_owned_nodes_owners(): """Returns a `QuerySet` of the owners of nodes owned by real users.""" owner_ids = get_owned_nodes().values_list("owner", flat=True) return User.objects.filter(id__in=owner_ids.distinct()) def get_destination_user(): """Return the user to which resources should be assigned.""" real_users = get_real_users() if real_users.count() == 1: return get_one(real_users) else: bootstrap_user = get_bootstrap_node_owner() if bootstrap_user is None: return get_legacy_user() else: return bootstrap_user def get_ssh_keys(user): """Return the SSH key strings belonging to the specified user.""" return SSHKey.objects.filter(user=user).values_list("key", flat=True) def copy_ssh_keys(user_from, user_dest): """Copies SSH keys from one user to another. This is idempotent, and does not clobber the destination user's existing keys. """ user_from_keys = get_ssh_keys(user_from) user_dest_keys = get_ssh_keys(user_dest) for key in set(user_from_keys).difference(user_dest_keys): ssh_key = SSHKey(user=user_dest, key=key) ssh_key.save() def give_file_to_user(file, user): """Give a file to a user.""" file.owner = user file.save() def give_api_credentials_to_user(user_from, user_dest): """Gives one user's API credentials to another. This ensures that users of the shared namespace environment continue to operate within the legacy shared namespace environment by default via the API (e.g. from the command-line client, or from Juju). """ for token in get_auth_tokens(user_from): consumer = token.consumer consumer.user = user_dest consumer.save() token.user = user_dest token.save() def give_node_to_user(node, user): """Changes a node's ownership for the legacy shared environment.""" node.owner = user node.save() def migrate_to_user(user): """Migrate files and nodes to the specified user. This also copies, to the destination user, the public SSH keys of any owned nodes' owners. This is so that those users who had allocated nodes (i.e. active users of a shared-namespace environment) can access newly created nodes in the legacy shared-namespace environment. """ for unowned_file in get_unowned_files(): give_file_to_user(unowned_file, user) for node_owner in get_owned_nodes_owners(): copy_ssh_keys(node_owner, user) give_api_credentials_to_user(node_owner, user) for owned_node in get_owned_nodes(): give_node_to_user(owned_node, user) def migrate(): """Migrate files to a per-tenant namespace.""" if get_unowned_files().exists(): # 2, 3, and 4 user = get_destination_user() migrate_to_user(user) else: # 1 and 1a pass maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/tests/0000755000000000000000000000000013056115004022470 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/utils.py0000644000000000000000000000333213056115004023041 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
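# A minimal sketch of the 'provider-state' content this module parses; the
# shape is inferred from tests/test_utils.py, and the URL prefix and
# system_id below are illustrative assumptions, not values from the source:
#
#     zookeeper-instances: [/MAAS/api/1.0/nodes/node-1a2b3c/]
#
# extract_bootstrap_node_system_id() yaml-loads that text, splits the first
# instance reference on '/', and returns its last non-empty segment
# ('node-1a2b3c' here) as the bootstrap node's system_id;
# get_bootstrap_node_owner() then looks the node up and returns its owner.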
"""Utilities for the per-tenant file storage work.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "get_bootstrap_node_owner", ] from maasserver.models import ( FileStorage, Node, ) import yaml PROVIDER_STATE_FILENAME = 'provider-state' def get_bootstrap_node_owner(): """Return the owner of the bootstrap node or None if it cannot be found. This method uses the unowned 'provider-state' file to extract the system_id of the bootstrap node. """ try: provider_file = FileStorage.objects.get( filename=PROVIDER_STATE_FILENAME, owner=None) except FileStorage.DoesNotExist: return None system_id = extract_bootstrap_node_system_id(provider_file.content) if system_id is None: return None try: return Node.objects.get(system_id=system_id).owner except Node.DoesNotExist: return None def extract_bootstrap_node_system_id(content): """Extract the system_id of the node referenced in the given provider-state file. This method implements a very defensive strategy; if the given content is not in yaml format or if the owner of the bootstrap node cannot be found, it returns None. """ try: state = yaml.load(content) except yaml.YAMLError: return None try: parts = state['zookeeper-instances'][0].split('/') except (IndexError, TypeError): return None system_id = [part for part in parts if part != ''][-1] return system_id maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/tests/__init__.py0000644000000000000000000000000013056115004024567 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/tests/test_migration.py0000644000000000000000000003527113056115004026102 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test `maasserver.support.pertenant.migration.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib.auth.models import User from maasserver.models import ( Node, SSHKey, ) from maasserver.support.pertenant import migration from maasserver.support.pertenant.migration import ( copy_ssh_keys, get_destination_user, get_legacy_user, get_owned_nodes, get_owned_nodes_owners, get_real_users, get_ssh_keys, get_unowned_files, give_api_credentials_to_user, give_file_to_user, give_node_to_user, legacy_user_name, migrate, migrate_to_user, ) from maasserver.support.pertenant.tests.test_utils import ( make_provider_state_file, ) from maasserver.testing import get_data from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith from mock import ( call, sentinel, ) from testtools.matchers import MatchesStructure def get_ssh_key_string(num=0): return get_data('data/test_rsa%d.pub' % num) class TestFunctions(MAASServerTestCase): def find_legacy_user(self): return User.objects.filter(username=legacy_user_name) def test_get_legacy_user_creates_user(self): self.assertEqual([], list(self.find_legacy_user())) legacy_user = get_legacy_user() self.assertEqual([legacy_user], list(self.find_legacy_user())) self.assertThat( legacy_user, MatchesStructure.byEquality( first_name="Shared", last_name="Environment", email=legacy_user_name + "@localhost", is_active=True)) def test_get_legacy_user_creates_user_only_once(self): legacy_user1 = get_legacy_user() self.assertEqual([legacy_user1], list(self.find_legacy_user())) legacy_user2 = get_legacy_user() self.assertEqual([legacy_user2], list(self.find_legacy_user())) self.assertEqual(legacy_user1, legacy_user2) def test_get_unowned_files_no_files(self): self.assertEqual([], list(get_unowned_files())) def test_get_unowned_files(self): user = factory.make_User() files = [ factory.make_FileStorage(owner=None), factory.make_FileStorage(owner=user), factory.make_FileStorage(owner=None), ] self.assertSetEqual( {files[0], files[2]}, set(get_unowned_files())) def test_get_real_users_no_users(self): get_legacy_user() # Ensure at least the legacy user exists. self.assertEqual([], list(get_real_users())) def test_get_real_users(self): get_legacy_user() # Ensure at least the legacy user exists. 
users = [ factory.make_User(), factory.make_User(), ] self.assertSetEqual(set(users), set(get_real_users())) def test_get_owned_nodes_no_nodes(self): self.assertEqual([], list(get_owned_nodes())) def test_get_owned_nodes_no_owned_nodes(self): factory.make_Node() self.assertEqual([], list(get_owned_nodes())) def test_get_owned_nodes_with_owned_nodes(self): nodes = { factory.make_Node(owner=factory.make_User()), factory.make_Node(owner=factory.make_User()), } self.assertSetEqual(nodes, set(get_owned_nodes())) def test_get_owned_nodes_with_nodes_owned_by_system_users(self): factory.make_Node(owner=get_legacy_user()), self.assertEqual([], list(get_owned_nodes())) def test_get_owned_nodes_owners_no_users(self): self.assertEqual([], list(get_owned_nodes_owners())) def test_get_owned_nodes_owners_no_nodes(self): factory.make_User() self.assertEqual([], list(get_owned_nodes_owners())) def test_get_owned_nodes_owners_no_owned_nodes(self): factory.make_User() factory.make_Node(owner=None) self.assertEqual([], list(get_owned_nodes_owners())) def test_get_owned_nodes_owners(self): user1 = factory.make_User() user2 = factory.make_User() factory.make_User() factory.make_Node(owner=user1) factory.make_Node(owner=user2) factory.make_Node(owner=None) self.assertSetEqual({user1, user2}, set(get_owned_nodes_owners())) def test_get_destination_user_one_real_user(self): user = factory.make_User() self.assertEqual(user, get_destination_user()) def test_get_destination_user_two_real_users(self): factory.make_User() factory.make_User() self.assertEqual(get_legacy_user(), get_destination_user()) def test_get_destination_user_no_real_users(self): self.assertEqual(get_legacy_user(), get_destination_user()) def test_get_destination_user_with_user_from_juju_state(self): user = factory.make_User() # Also create another user. factory.make_User() node = factory.make_Node(owner=user) make_provider_state_file(node) self.assertEqual(user, get_destination_user()) def test_get_destination_user_with_orphaned_juju_state(self): user = factory.make_User() # Also create another user. factory.make_User() node = factory.make_Node(owner=user) make_provider_state_file(node) node.delete() # Orphan the state. self.assertEqual(get_legacy_user(), get_destination_user()) class TestCopySSHKeys(MAASServerTestCase): """Tests for copy_ssh_keys().""" def test_noop_when_there_are_no_keys(self): user1 = factory.make_User() user2 = factory.make_User() copy_ssh_keys(user1, user2) ssh_keys = SSHKey.objects.filter(user__in={user1, user2}) self.assertEqual([], list(ssh_keys)) def test_copy(self): user1 = factory.make_User() key1 = factory.make_SSHKey(user1) user2 = factory.make_User() copy_ssh_keys(user1, user2) user2s_ssh_keys = SSHKey.objects.filter(user=user2) self.assertSetEqual( {key1.key}, {ssh_key.key for ssh_key in user2s_ssh_keys}) def test_copy_is_idempotent(self): # When the destination user already has a key, copy_ssh_keys() is a # noop for that key. user1 = factory.make_User() key1 = factory.make_SSHKey(user1) user2 = factory.make_User() key2 = factory.make_SSHKey(user2, key1.key) copy_ssh_keys(user1, user2) user2s_ssh_keys = SSHKey.objects.filter(user=user2) self.assertSetEqual( {key2.key}, {ssh_key.key for ssh_key in user2s_ssh_keys}) def test_copy_does_not_clobber(self): # When the destination user already has some keys, copy_ssh_keys() # adds to them; it does not remove them. 
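# A sketch of the underlying set logic (the key strings are assumed
# placeholders, not data from these tests):
#   from_keys = {'ssh-rsa AAAA... one', 'ssh-rsa BBBB... two'}
#   dest_keys = {'ssh-rsa BBBB... two'}
# copy_ssh_keys() saves new SSHKey rows only for
# set(from_keys).difference(dest_keys), i.e. just the first key above,
# so the destination user's existing keys are neither duplicated nor removed.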
user1 = factory.make_User() key1 = factory.make_SSHKey(user1, get_ssh_key_string(1)) user2 = factory.make_User() key2 = factory.make_SSHKey(user2, get_ssh_key_string(2)) copy_ssh_keys(user1, user2) user2s_ssh_keys = SSHKey.objects.filter(user=user2) self.assertSetEqual( {key1.key, key2.key}, {ssh_key.key for ssh_key in user2s_ssh_keys}) class TestGiveFileToUser(MAASServerTestCase): def test_give_unowned_file(self): user = factory.make_User() file = factory.make_FileStorage(owner=None) give_file_to_user(file, user) self.assertEqual(user, file.owner) def test_give_owned_file(self): user1 = factory.make_User() user2 = factory.make_User() file = factory.make_FileStorage(owner=user1) give_file_to_user(file, user2) self.assertEqual(user2, file.owner) def test_file_saved(self): user = factory.make_User() file = factory.make_FileStorage(owner=None) save = self.patch(file, "save") give_file_to_user(file, user) self.assertThat(save, MockCalledOnceWith()) class TestGiveCredentialsToUser(MAASServerTestCase): def test_give(self): user1 = factory.make_User() user2 = factory.make_User() profile = user1.userprofile consumer, token = profile.create_authorisation_token() give_api_credentials_to_user(user1, user2) self.assertEqual(user2, reload_object(consumer).user) self.assertEqual(user2, reload_object(token).user) class TestGiveNodeToUser(MAASServerTestCase): def test_give(self): user1 = factory.make_User() user2 = factory.make_User() node = factory.make_Node(owner=user1) give_node_to_user(node, user2) self.assertEqual(user2, reload_object(node).owner) class TestMigrateToUser(MAASServerTestCase): def test_migrate(self): # This is a mechanical test, to demonstrate that migrate_to_user() is # wired up correctly: it should not really contain much logic because # it is meant only as a convenient wrapper around other functions. # Those functions are unit tested individually, and the overall # behaviour of migrate() is tested too; this is another layer of # verification. It's also a reminder not to stuff logic into # migrate_to_user(); extract it into functions instead and unit test # those. # migrate_to_user() will give all unowned files to a specified user. get_unowned_files = self.patch(migration, "get_unowned_files") get_unowned_files.return_value = [sentinel.file1, sentinel.file2] give_file_to_user = self.patch(migration, "give_file_to_user") # migrate_to_user() will copy all SSH keys and give all API # credentials belonging to node owners over to a specified user. get_owned_nodes_owners = self.patch( migration, "get_owned_nodes_owners") get_owned_nodes_owners.return_value = [ sentinel.node_owner1, sentinel.node_owner2] copy_ssh_keys = self.patch(migration, "copy_ssh_keys") give_api_credentials_to_user = self.patch( migration, "give_api_credentials_to_user") # migrate_to_user() will give all owned nodes to a specified user. get_owned_nodes = self.patch(migration, "get_owned_nodes") get_owned_nodes.return_value = [sentinel.node1, sentinel.node2] give_node_to_user = self.patch(migration, "give_node_to_user") migrate_to_user(sentinel.user) # Each unowned file is given to the destination user one at a time. self.assertThat(get_unowned_files, MockCalledOnceWith()) self.assertEqual( [call(sentinel.file1, sentinel.user), call(sentinel.file2, sentinel.user)], give_file_to_user.call_args_list) # The SSH keys of each node owner are copied to the destination user, # one at a time, and the credentials of these users are given to the # destination user. 
self.assertThat(get_owned_nodes_owners, MockCalledOnceWith()) self.assertEqual( [call(sentinel.node_owner1, sentinel.user), call(sentinel.node_owner2, sentinel.user)], copy_ssh_keys.call_args_list) self.assertEqual( [call(sentinel.node_owner1, sentinel.user), call(sentinel.node_owner2, sentinel.user)], give_api_credentials_to_user.call_args_list) # Each owned node is given to the destination user one at a time. self.assertThat(get_owned_nodes, MockCalledOnceWith()) self.assertEqual( [call(sentinel.node1, sentinel.user), call(sentinel.node2, sentinel.user)], give_node_to_user.call_args_list) class TestMigrate(MAASServerTestCase): def test_migrate_runs_when_no_files_exist(self): migrate() def test_migrate_runs_when_no_unowned_files_exist(self): factory.make_FileStorage(owner=factory.make_User()) migrate() def test_migrate_all_files_to_single_user_when_only_one_user(self): user = factory.make_User() stored = factory.make_FileStorage(owner=None) migrate() self.assertEqual(user, reload_object(stored).owner) def test_migrate_all_files_to_new_legacy_user_when_multiple_users(self): stored = factory.make_FileStorage(owner=None) user1 = factory.make_User() user2 = factory.make_User() migrate() stored = reload_object(stored) self.assertNotIn(stored.owner, {user1, user2, None}) def test_migrate_all_nodes_to_new_legacy_user_when_multiple_users(self): factory.make_FileStorage(owner=None) user1 = factory.make_User() node1 = factory.make_Node(owner=user1) user2 = factory.make_User() node2 = factory.make_Node(owner=user2) migrate() self.assertNotIn(reload_object(node1).owner, {user1, user2, None}) self.assertNotIn(reload_object(node2).owner, {user1, user2, None}) def test_migrate_all_nodes_to_bootstrap_owner_when_multiple_users(self): user1 = factory.make_User() node1 = factory.make_Node(owner=user1) user2 = factory.make_User() node2 = factory.make_Node(owner=user2) make_provider_state_file(node1) migrate() self.assertEqual( (user1, user1), (reload_object(node1).owner, reload_object(node2).owner)) def test_migrate_ancillary_data_to_legacy_user_when_multiple_users(self): factory.make_FileStorage(owner=None) # Create two users, both with API credentials, an SSH key and a node. user1 = factory.make_User() consumer1, token1 = user1.userprofile.create_authorisation_token() key1 = factory.make_SSHKey(user1, get_ssh_key_string(1)) node1 = factory.make_Node(owner=user1) user2 = factory.make_User() consumer2, token2 = user2.userprofile.create_authorisation_token() key2 = factory.make_SSHKey(user2, get_ssh_key_string(2)) node2 = factory.make_Node(owner=user2) migrate() # The SSH keys have been copied to the legacy user. legacy_user = get_legacy_user() legacy_users_ssh_keys = get_ssh_keys(legacy_user) self.assertSetEqual({key1.key, key2.key}, set(legacy_users_ssh_keys)) # The API credentials have been moved to the legacy user. legacy_users_nodes = Node.objects.filter(owner=legacy_user) self.assertSetEqual({node1, node2}, set(legacy_users_nodes)) self.assertEqual( (legacy_user, legacy_user, legacy_user, legacy_user), (reload_object(consumer1).user, reload_object(token1).user, reload_object(consumer2).user, reload_object(token2).user)) maas-1.9.5+bzr4599.orig/src/maasserver/support/pertenant/tests/test_utils.py0000644000000000000000000000547513056115004025254 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test the utilities of the per-tenant file storage work.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.urlresolvers import reverse from maasserver.support.pertenant.utils import ( extract_bootstrap_node_system_id, get_bootstrap_node_owner, PROVIDER_STATE_FILENAME, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.utils import sample_binary_data def make_provider_state_file(node=None): """Create a 'provider-state' file with a reference (zookeeper-instances) to a node. """ if node is None: node = factory.make_Node() node_link = reverse('node_handler', args=[node.system_id]) content = 'zookeeper-instances: [%s]\n' % node_link content_data = content.encode('ascii') return factory.make_FileStorage( filename=PROVIDER_STATE_FILENAME, content=content_data, owner=None) class TestExtractBootstrapNodeSystemId(MAASServerTestCase): def test_parses_valid_provider_state_file(self): node = factory.make_Node() provider_state_file = make_provider_state_file(node=node) system_id = extract_bootstrap_node_system_id( provider_state_file.content) self.assertEqual(system_id, node.system_id) def test_returns_None_if_parsing_fails(self): invalid_contents = [ '%', # invalid yaml sample_binary_data, # binary content (invalid yaml) 'invalid content', # invalid provider-state content 'zookeeper-instances: []', # no instances listed ] for invalid_content in invalid_contents: self.assertIsNone( extract_bootstrap_node_system_id(invalid_content)) class TestGetBootstrapNodeOwner(MAASServerTestCase): def test_returns_None_if_no_provider_state_file(self): self.assertIsNone(get_bootstrap_node_owner()) def test_returns_owner_if_node_found(self): node = factory.make_Node(owner=factory.make_User()) make_provider_state_file(node=node) self.assertEqual(node.owner, get_bootstrap_node_owner()) def test_returns_None_if_node_does_not_exist(self): node = factory.make_Node(owner=factory.make_User()) make_provider_state_file(node=node) node.delete() self.assertIsNone(get_bootstrap_node_owner()) def test_returns_None_if_invalid_yaml(self): invalid_content = '%'.encode('ascii') factory.make_FileStorage( filename=PROVIDER_STATE_FILENAME, content=invalid_content) self.assertIsNone(get_bootstrap_node_owner()) maas-1.9.5+bzr4599.orig/src/maasserver/templates/404.html0000644000000000000000000000041713056115004021007 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Error: Page not found{% endblock %} {% block page-title %}Error: Page not found{% endblock %} {% block content %}

    The requested URL {{ request.path }} was not found on this server.

    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/409.html0000644000000000000000000000056613056115004021021 0ustar 00000000000000 Error: Conflict Error

    Conflict error. Try your request again, as it will most likely succeed.

    maas-1.9.5+bzr4599.orig/src/maasserver/templates/500.html0000644000000000000000000000051413056115004021002 0ustar 00000000000000 Error: Internal server error

    Internal server error.

    maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/0000755000000000000000000000000013056115004021760 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/templates/metadataserver/0000755000000000000000000000000013056115004022617 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/templates/registration/0000755000000000000000000000000013056115004022322 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/api_doc.html0000644000000000000000000000031613056115004024244 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}MAAS API documentation{% endblock %} {% block page-title %}MAAS API documentation{% endblock %} {% block content %} {{ doc|safe }} {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/base.html0000644000000000000000000001424713056115004023570 0ustar 00000000000000 {% block title %}{% endblock %} | {% include "maasserver/site_title.html" %} {% block pre_head %} {% endblock %} {% block css-conf %} {% include "maasserver/css-conf.html" %} {% endblock %} {% block js-conf %} {% include "maasserver/js-conf.html" %} {% endblock %} {% block head %} {% endblock %}
    {% block html_includes %}{% endblock %}
    {% block page-title-block %} {% endblock %}
    {% if user.is_authenticated %}
      {% for persistent_error in persistent_errors %}
    • {{ persistent_error }}
    • {% endfor %} {% if messages %} {% for message in messages %} {{ message }} {% endfor %} {% endif %}
    {% endif %}
    {% block content %} {% endblock %}
    maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/cluster_listing.html0000644000000000000000000000145613056115004026066 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-cluster-list %}active{% endblock %} {% block title %}Clusters{% endblock %} {% block page-title %}{{ current_count }} cluster{{ current_count|pluralize }} in {% include "maasserver/site_title.html" %}{% endblock %} {% block header-search %}{% endblock %} {% block content %}

    Clusters

    {% include "maasserver/cluster_listing_head.html" %} {% for cluster in cluster_list %} {% cycle 'even' 'odd' as cycle silent %} {% include "maasserver/cluster_listing_row.html" with cycle=cycle display_warnings=display_warnings %} {% endfor %}
    {% include "maasserver/pagination.html" %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/cluster_listing_head.html0000644000000000000000000000050613056115004027042 0ustar 00000000000000 Name Connected Status Managed interfaces Nodes Images {% comment %} Action buttons {% endcomment %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/cluster_listing_row.html0000644000000000000000000000412113056115004026745 0ustar 00000000000000 {% with state=cluster.get_state %} {{ cluster.cluster_name }} {% if display_warnings %} {% if not region_has_images or state == "Disconnected" or state == "Out-of-sync" %}   {% endif %} {% endif %} {% if state == "Disconnected" %} {% else %} {% endif %} {{ cluster.get_status_display }} {{ cluster.get_managed_interfaces|length }} {{ cluster.node_set.count }} {% if not region_has_images %} No images available {% else %} {% if state == "Disconnected" %} - {% else %} {% if state == "Syncing" %}
      {% endif %} {{ state }} {% endif %} {% endif %} {% endwith %} edit delete maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/css-conf.html0000644000000000000000000000025313056115004024361 0ustar 00000000000000 maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/debug_rpc_toolbar.html0000644000000000000000000000653513056115004026333 0ustar 00000000000000

    Summary

    Total
    Total Clusters {{ total_clusters }}
    Total getClientFor Calls {{ total_getClientFor_calls }}
    Total getClientFor Errors {{ total_getClientFor_errors }}
    Total RPC Calls {{ total_rpc_calls }}
Total RPC Succeeded {{ total_rpc_succeed }}
Total RPC Failed {{ total_rpc_fail }}
    Total Time {{ total_time }}ms
    {% if getClientFor_errors|length %}

    getClientFor - Errors

Cluster UUID Cluster Name Error Traceback
{% for error in getClientFor_errors %}
{{ error.uuid }} {{ error.cluster }} {{ error.error }}
{{ error.traceback }}
{% endfor %}
    {% endif %} {% if fail_rpc_calls|length %}

    Failed RPC Calls

Cluster UUID Cluster Name Exec Time Command Arguments Error Traceback
{% for call in fail_rpc_calls %}
{{ call.uuid }} {{ call.cluster }} {{ call.time }}ms {{ call.command }} {{ call.arguments }} {{ call.error }}
{{ call.traceback }}
{% endfor %}
    {% endif %} {% if succeed_rpc_calls|length %}

    Successful RPC Calls

Cluster UUID Cluster Name Exec Time Command Arguments Result
{% for call in succeed_rpc_calls %}
{{ call.uuid }} {{ call.cluster }} {{ call.time }}ms {{ call.command }} {{ call.arguments }}
{{ call.result }}
{% endfor %}
    {% endif %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/enlist_preseed.html0000644000000000000000000000043713056115004025657 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Enlistment preseed{% endblock %} {% block page-title %}Enlistment preseed{% endblock %} {% block content %}

    {{ warning_message }}

    {{ preseed }}
    
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/form_field.html0000644000000000000000000000144613056115004024761 0ustar 00000000000000{% load field_type %}
  • {% if not field.is_hidden %} {% endif %} {% if field.errors %} {{ field.errors }} {% endif %} {% ifnotequal field|field_type "CheckboxInput" %} {{ field }} {% endifnotequal %} {% if field.help_text %}{{ field.help_text }}{% endif %}
  • maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/image_confirm_delete.html0000644000000000000000000000133213056115004026766 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-images %}active{% endblock %} {% block title %}Delete custom image{% endblock %} {% block page-title %}Delete custom image{% endblock %} {% block content %}

    Are you sure you want to delete image "{{ image_to_delete.title }} ({{ image_to_delete.architecture }})"?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/images.html0000644000000000000000000003010113056115004024106 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-images %}active{% endblock %} {% block title %}Boot Images{% endblock %} {% block page-title %}Boot Images in {% include "maasserver/site_title.html" %}{% endblock %} {% block head %} {% endblock %} {% block content %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/index.html0000644000000000000000000001245413056115004023763 0ustar 00000000000000 {% include "maasserver/css-conf.html" %} {% include "maasserver/js-conf.html" %}
    {% if user.is_authenticated %} {% endif %}
    maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/js-conf.html0000644000000000000000000000260513056115004024210 0ustar 00000000000000 maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/logout_confirm.html0000644000000000000000000000120413056115004025671 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Logout{% endblock %} {% block page-title %}Logout{% endblock %} {% block content %}

    Are you sure you want to log out?

    {% csrf_token %}
    | Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/mac_confirm_delete.html0000644000000000000000000000150113056115004026442 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Delete network interface: {{ mac_to_delete.mac_address }}{% endblock %} {% block page-title %}Delete network interface: {{ mac_to_delete.mac_address }}{% endblock %} {% block content %}

    Are you sure you want to delete network interface "{{ mac_to_delete.mac_address }}"?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/multiselect_widget_include.html0000644000000000000000000000160613056115004030251 0ustar 00000000000000{% comment %} This file should be included in the 'pre_head' block in order to transform Django's basic multiselect widget into a Javascripty widget. Then this javascript must be included after the widget is declared: {% endcomment %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/nodegroup_confirm_delete.html0000644000000000000000000000141513056115004027710 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Delete Cluster Controller{% endblock %} {% block page-title %}Delete Cluster Controller{% endblock %} {% block content %}

    Are you sure you want to delete the cluster controller "{{ cluster_to_delete.cluster_name }}"? This action will also delete all the nodes inside this cluster controller.

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/nodegroup_edit.html0000644000000000000000000000530613056115004025661 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-cluster-list %}active{% endblock %} {% block title %}Edit Cluster Controller{% endblock %} {% block page-title %}Edit Cluster Controller{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel

    Interfaces

    {% with nb_interfaces=cluster.nodegroupinterface_set.count %}

    This cluster controller has {{ nb_interfaces }} interface{{ nb_interfaces|pluralize }}.

{% endwith %}
Name Interface Network Manage... Non-MAAS DHCP Server
{% for interface in interfaces %}
{{ interface.name }} {{ interface.interface }} {{ interface.network|default_if_none:"Not configured" }} {{ interface.display_management }} {{ interface.foreign_dhcp_ip|default_if_none:"" }} {% if interface.foreign_dhcp_ip and interface.display_management != "Unmanaged" %} {% endif %} edit delete
{% endfor %}
    Add interface
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/nodegroupinterface_confirm_delete.html0000644000000000000000000000127213056115004031572 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Delete Interface{% endblock %} {% block page-title %}Delete Interface{% endblock %} {% block content %}

    Are you sure you want to delete the interface "{{ interface_to_delete.name }}"?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/nodegroupinterface_edit.html0000644000000000000000000000126013056115004027535 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-cluster-list %}active{% endblock %} {% block title %}Edit Cluster Interface{% endblock %} {% block page-title %}Edit Cluster Interface{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/nodegroupinterface_new.html0000644000000000000000000000123613056115004027404 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-cluster-list %}active{% endblock %} {% block title %}Create Cluster Interface{% endblock %} {% block page-title %}Create Cluster Interface{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/pagination.html0000644000000000000000000000115513056115004025001 0ustar 00000000000000{% if is_paginated %} {% endif %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/prefs.html0000644000000000000000000001031013056115004023760 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-prefs %}active{% endblock %} {% block title %}User preferences for {{ user.username }}{% endblock %} {% block page-title %}User preferences for {{ user.username }}{% endblock %} {% block head %} {% endblock %} {% block content %}

    Keys

    MAAS keys

    You'll need a separate API key for each Juju environment.

      {% for token in user.userprofile.get_authorisation_tokens %}
    • {% endfor %}

    SSH keys

      {% for key in user.sshkey_set.all %}
    • {{ key.display_html }}
    • {% empty %} No SSH key configured. {% endfor %}
    + Add SSH key

    SSL keys

      {% for key in user.sslkey_set.all %}
    • {{ key.display_html }}
    • {% empty %} No SSL key configured. {% endfor %}
    + Add SSL key

    User details

    {% csrf_token %}
      {% for field in profile_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Password

    {% csrf_token %}
      {% for field in password_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/prefs_add_sshkey.html0000644000000000000000000000101113056115004026154 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Add SSH key{% endblock %} {% block page-title %}Add SSH key{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/prefs_add_sslkey.html0000644000000000000000000000021513056115004026165 0ustar 00000000000000{% extends "maasserver/prefs_add_sshkey.html" %} {% block title %}Add SSL key{% endblock %} {% block page-title %}Add SSL key{% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/prefs_confirm_delete_sshkey.html0000644000000000000000000000117513056115004030416 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Delete SSH key{% endblock %} {% block page-title %}Delete SSH key{% endblock %} {% block content %}

    Are you sure you want to delete the following key?

    {{ key }}

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/prefs_confirm_delete_sslkey.html0000644000000000000000000000122713056115004030420 0ustar 00000000000000{% extends "maasserver/prefs_confirm_delete_sshkey.html" %} {% block title %}Delete SSL key{% endblock %} {% block page-title %}Delete SSL key{% endblock %} {% block content %}

    Are you sure you want to delete the following key?

    {{ sslkey }}

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/robots.txt0000644000000000000000000000003213056115004024024 0ustar 00000000000000User-agent: * Disallow: / maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings.html0000644000000000000000000002067213056115004024515 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Settings{% endblock %} {% block page-title %}Settings{% endblock %} {% block head %} {% endblock %} {% block content %}

    Users and Keys

ID Number of nodes in use Last seen MAAS Admin
{% for user_item in user_list %}
{{ user_item.username }} {{ user_item.node_set.count }} {{ user_item.last_login }} {% if user_item.is_superuser %} Yes {% endif %} edit {% if user != user_item %} delete
{% csrf_token %}
{% endif %}
{% endfor %}
    Add user
    {% include "maasserver/settings_commissioning_scripts.html" %}
    {% if show_license_keys %}
    {% include "maasserver/settings_license_keys.html" %}
    {% endif %}

    Commissioning

    {% csrf_token %}
      {% for field in commissioning_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Deploy

    {% csrf_token %}
      {% for field in deploy_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Ubuntu

    {% csrf_token %}
      {% for field in ubuntu_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Windows

    {% csrf_token %}
      {% for field in windows_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Global Kernel Parameters

    {% csrf_token %}
      {% with field=kernelopts_form.kernel_opts %} {% include "maasserver/form_field.html" %} {% endwith %}

    Network Configuration

    {% csrf_token %}
      {% for field in maas_and_network_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    {% if show_boot_source %}

    Boot Images

    {% csrf_token %}
      {% for field in boot_source_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    {% endif %}

    Third Party Drivers Configuration

    {% csrf_token %}
      {% for field in third_party_drivers_form %} {% include "maasserver/form_field.html" %} {% endfor %}

    Storage

    {% csrf_token %}
      {% for field in storage_settings_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_add_archive.html0000644000000000000000000000110213056115004027011 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Add archive{% endblock %} {% block page-title %}Add archive{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_add_commissioning_script.html0000644000000000000000000000121313056115004031635 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Add commissioning script{% endblock %} {% block page-title %}Add commissioning script{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_add_license_key.html0000644000000000000000000000117113056115004027670 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Add license key{% endblock %} {% block page-title %}Add license key{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_commissioning_scripts.html0000644000000000000000000000253313056115004031216 0ustar 00000000000000

    Commissioning scripts

{% for script in commissioning_scripts %}
{{ script.name }}
{{ script.content }}
delete
{% empty %}
No commissioning scripts.
{% endfor %}
    Upload script ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_confirm_delete_commissioning_script.htmlmaas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_confirm_delete_commissioning_sc0000644000000000000000000000131213056115004032222 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Delete commissioning script{% endblock %} {% block page-title %}Delete commissioning script{% endblock %} {% block content %}

    Are you sure you want to delete the commissioning script '{{ script_to_delete.name }}'?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_confirm_delete_license_key.html0000644000000000000000000000132113056115004032114 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Delete license key{% endblock %} {% block page-title %}Delete license key{% endblock %} {% block content %}

    Are you sure you want to delete the license key '{{ license_key_to_delete.osystem }} / {{ license_key_to_delete.distro_series }}'?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_edit_license_key.html0000644000000000000000000000120413056115004030062 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Edit license key{% endblock %} {% block page-title %}Edit license key{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/settings_license_keys.html0000644000000000000000000000157413056115004027252 0ustar 00000000000000

    License Keys

{% for key in license_keys %}
{{ key.osystem_title }} {{ key.distro_series_title }} edit delete
{% empty %}
No license keys.
{% endfor %}
    Add license key maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/site_title.html0000644000000000000000000000004413056115004025011 0ustar 00000000000000{{ global_options.site_name }} MAAS maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/sshkeys.txt0000644000000000000000000000006013056115004024206 0ustar 00000000000000{% for key in keys %}{{ key|safe }} {% endfor %}maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/user_add.html0000644000000000000000000000107313056115004024435 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Add user{% endblock %} {% block page-title %}Add user{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/user_confirm_delete.html0000644000000000000000000000136113056115004026664 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Delete user: {{ user_to_delete.user.username }}{% endblock %} {% block page-title %}Delete user: {{ user_to_delete.user.username }}{% endblock %} {% block content %}

    Are you sure you want to delete the user "{{ user_to_delete.user.username }}"?

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/user_edit.html0000644000000000000000000000251613056115004024635 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Edit User{% endblock %} {% block page-title %}Edit User{% endblock %} {% block content %}

    Settings

    {% csrf_token %}
      {% for field in profile_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel

    Change password

    {% csrf_token %}
      {% for field in password_form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/user_view.html0000644000000000000000000000311313056115004024654 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}View user{% endblock %} {% block page-title %}View user: {{ view_user.username }}{% endblock %} {% block layout-modifiers %}sidebar{% endblock %} {% block sidebar %}
    {% endblock %} {% block content %}
    • Username

      {{ view_user.username }}
    • Full name

      {{ view_user.last_name }}
    • Email address

      {{ view_user.email }}
    • MAAS Keys

      {{ user.userprofile.get_authorisation_tokens.count }} key{{ user.userprofile.get_authorisation_tokens.count|pluralize }}
    • MAAS Administrator

      {{ view_user.is_superuser }}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/zone_add.html0000644000000000000000000000111713056115004024431 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-zone-list %}active{% endblock %} {% block title %}Add physical zone{% endblock %} {% block page-title %}Add physical zone{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/zone_confirm_delete.html0000644000000000000000000000144213056115004026661 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Delete zone{% endblock %} {% block page-title %}Delete zone{% endblock %} {% block content %}

Are you sure you want to delete zone "{{ zone_to_delete }}"? If any nodes are still in this zone, deleting it will remove them from the zone but not otherwise affect them.

This action is permanent and cannot be undone.

    {% csrf_token %} Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/zone_detail.html0000644000000000000000000000350413056115004025145 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-zone-list %}active{% endblock %} {% block title %}Physical zone {{ zone.name }}{% endblock %} {% block page-title %} Physical zone: {{ zone.name }} {% endblock %} {% block layout-modifiers %}sidebar{% endblock %} {% block content %}
      • Name

        {{ zone.name }}
      • Nodes

        {{ zone.node_set.count }}
      • {% if zone.is_default %}
      • Default

        This zone is the default zone. Its name cannot be changed and it cannot be deleted.
      • {% endif %}
    • {% if zone.description %}

      Description

      {{ zone.description }}
      {% endif %}
    {% endblock %} {% block sidebar %}

    Actions

      {% if user.is_superuser %}
    • Edit zone
    • {% if not zone.is_default %}
    • Delete zone
    • {% endif %} {% endif %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/zone_edit.html0000644000000000000000000000116513056115004024631 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-settings %}active{% endblock %} {% block title %}Edit Physical zone{% endblock %} {% block page-title %}Edit Physical zone{% endblock %} {% block content %}
    {% csrf_token %}
      {% for field in form %} {% include "maasserver/form_field.html" %} {% endfor %}
    Cancel
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/maasserver/zone_list.html0000644000000000000000000000467313056115004024666 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block nav-active-zone-list %}active{% endblock %} {% block title %}Physical zones{% endblock %} {% block page-title %}{{ paginator.count }}{% if input_query %} matching{% endif %} zone{{ paginator.count|pluralize }} in {% include "maasserver/site_title.html" %}{% endblock %} {% block content %}

    Physical zones

    {% if user.is_superuser %} {% endif %} {% for zone_item in zone_list %} {% if user.is_superuser %} {% endif %} {% endfor %}
    Name Description Nodes Devices
    {{ zone_item.name }} {{ zone_item.description|truncatechars:40 }} {{ zone_item.node_only_set.count }} {{ zone_item.device_only_set.count }} {% if not zone_item.is_default %} edit delete
    {% csrf_token %}
    {% endif %}
    {% include "maasserver/pagination.html" %} {% if user.is_superuser %} Add zone {% endif %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templates/registration/login.html0000644000000000000000000000422613056115004024324 0ustar 00000000000000{% extends "maasserver/base.html" %} {% block title %}Login{% endblock %} {% block layout-modifiers %} modal-content login {% if no_users %} no-users {% endif %} {% endblock %} {% block head %} {% if not no_users %} {% endif %} {% endblock %} {% block content %} {% block page-title-block %} {% endblock %} {% if no_users %}
    No users pictogram

    No admin user has been created yet

    Use the "createadmin" administration command to create one:

    {{ create_command }} createadmin
    login
    {% else %}

Log in to {% include "maasserver/site_title.html" %}

    {% if form.errors %}

    Your username and password didn't match. Please try again.

    {% endif %} {% comment %} We turn off autocompletion of the login form in production environments. Autocompletion, in combination with cross-site scripting attacks, can potentially allow remote attackers to steal credentials. {% endcomment %} {% endif %}
    {% endblock %} maas-1.9.5+bzr4599.orig/src/maasserver/templatetags/__init__.py0000644000000000000000000000000013056115004022403 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/templatetags/field_type.py0000644000000000000000000000076713056115004023014 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Field type template tag.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "field_type", ] from django import template register = template.Library() @register.filter('field_type') def field_type(field): return field.field.widget.__class__.__name__ maas-1.9.5+bzr4599.orig/src/maasserver/testing/__init__.py0000644000000000000000000001054013056115004021400 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) """Tests for `maasserver`.""" str = None __metaclass__ = type __all__ = [ "extract_redirect", "get_content_links", "get_data", "get_prefixed_form_data", "NoReceivers", ] import collections from contextlib import contextmanager import httplib from itertools import chain import os from urlparse import urlparse from lxml.html import fromstring def extract_redirect(http_response): """Extract redirect target from an http response object. Only the http path part of the redirect is ignored; protocol and host name, if present, are not included in the result. If the response is not a redirect, this raises :class:`ValueError` with a descriptive error message. :param http_response: A response returned from an http request. :type http_response: :class:`HttpResponse` :return: The "path" part of the target that `http_response` redirects to. :raises: ValueError """ if http_response.status_code != httplib.FOUND: raise ValueError( "Not a redirect: http status %d. Content: %s" % (http_response.status_code, http_response.content[:80])) target_url = http_response['Location'] parsed_url = urlparse(target_url) return parsed_url.path def get_data(filename): """Read the content of a file in `src/maasserver/tests`. Some tests use this to read fixed data stored in files in `src/maasserver/tests/data/`. Where possible, provide data in-line in tests, or use fakes, to keep the information close to the tests that rely on it. :param filename: A file path relative to `src/maasserver/tests` in this branch. :return: Binary contents of the file, as `bytes`. """ path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', 'tests', filename) return file(path).read() def get_prefixed_form_data(prefix, data): """Prefix entries in a dict of form parameters with a form prefix. Also, add a parameter "_submit" to indicate that the form with the given prefix is being submitted. Use this to construct a form submission if the form uses a prefix (as it would if there are multiple forms on the page). :param prefix: Form prefix string. :param data: A dict of form parameters. :return: A new dict of prefixed form parameters. """ result = {'%s-%s' % (prefix, key): value for key, value in data.items()} result.update({'%s_submit' % prefix: 1}) return result def get_content_links(response, element='#content'): """Extract links from :class:`HttpResponse` content. :param response: An HTTP response object. Only its `content` attribute is used. 
:param element: Optional CSS selector for the node(s) in the content whose links should be extracted. Only links inside the part of the content that matches this selector will be extracted; any other links will be ignored. Defaults to `#content`, which is the main document. :return: List of link targets found in any matching parts of the document, including their nested tags. If a link is in a DOM subtree that matches `element` at multiple levels, it may be counted more than once. Otherwise, links are returned in the same order in which they are found in the document. """ doc = fromstring(response.content) links_per_matching_node = chain.from_iterable( [elem.get('href') for elem in matching_node.cssselect('a')] for matching_node in doc.cssselect(element) ) return list(links_per_matching_node) @contextmanager def NoReceivers(signals): """Disconnect signal receivers from the supplied signals. :param signals: A signal (or iterable of signals) for which to disable signal receivers while in the context manager. :type signals: django.dispatch.Signal """ saved = dict() if not isinstance(signals, collections.Iterable): signals = [signals] for signal in signals: saved[signal] = signal.receivers signal.receivers = [] try: yield finally: for signal in signals: signal.receivers = saved[signal] maas-1.9.5+bzr4599.orig/src/maasserver/testing/api.py0000644000000000000000000001011313056115004020410 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for API testing.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'APITestCase', 'APITransactionTestCase', 'explain_unexpected_response', 'log_in_as_normal_user', 'make_worker_client', 'MultipleUsersScenarios', ] from abc import ( ABCMeta, abstractproperty, ) from maasserver.testing.factory import factory from maasserver.testing.oauthclient import OAuthAuthenticatedClient from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils.orm import transactional from maasserver.worker_user import get_worker_user from maastesting.testcase import MAASTestCase class MultipleUsersScenarios: """A mixin that uses testscenarios to repeat a testcase as different users. The scenarios should inject a `userfactory` variable that will be called to produce the user used in the tests e.g.: class ExampleTest(MultipleUsersScenarios, MAASServerTestCase): scenarios = [ ('anon', dict(userfactory=lambda: AnonymousUser())), ('user', dict(userfactory=factory.make_User)), ('admin', dict(userfactory=factory.make_admin)), ] def test_something(self): pass The test `test_something` will be run 3 times: once with an anonymous user logged in, once with a simple (non-admin) user logged in, and once with an admin user logged in. """ __metaclass__ = ABCMeta scenarios = abstractproperty( "The scenarios as defined by testscenarios.") def setUp(self): super(MultipleUsersScenarios, self).setUp() user = self.userfactory() if not user.is_anonymous(): password = factory.make_string() user.set_password(password) user.save() self.logged_in_user = user self.client.login( username=self.logged_in_user.username, password=password) class APITestCaseBase(MAASTestCase): """Base class for logged-in API tests. :ivar logged_in_user: A user who is currently logged in and can access the API.
:ivar client: Authenticated API client (unsurprisingly, logged in as `logged_in_user`). """ @transactional def setUp(self): super(APITestCaseBase, self).setUp() self.logged_in_user = factory.make_User( username='test', password='test') self.client = OAuthAuthenticatedClient(self.logged_in_user) @transactional def become_admin(self): """Promote the logged-in user to admin.""" self.logged_in_user.is_superuser = True self.logged_in_user.save() def assertResponseCode(self, expected_code, response): if response.status_code != expected_code: self.fail("Expected %s response, got %s:\n%s" % ( expected_code, response.status_code, response.content)) class APITestCase(APITestCaseBase, MAASServerTestCase): """Class for logged-in API tests within a single transaction.""" class APITransactionTestCase(APITestCaseBase, MAASTransactionServerTestCase): """Class for logged-in API tests with the ability to use transactions.""" def log_in_as_normal_user(client): """Log `client` in as a normal user.""" password = factory.make_string() user = factory.make_User(password=password) client.login(username=user.username, password=password) return user def make_worker_client(nodegroup): """Create a test client logged in as if it were `nodegroup`.""" return OAuthAuthenticatedClient( get_worker_user(), token=nodegroup.api_token) def explain_unexpected_response(expected_status, response): """Return human-readable failure message: unexpected http response.""" return "Unexpected http status (expected %s): %s - %s" % ( expected_status, response.status_code, response.content, ) maas-1.9.5+bzr4599.orig/src/maasserver/testing/architecture.py0000644000000000000000000000516313056115004022330 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for architectures in testing.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'make_usable_architecture', 'patch_usable_architectures', ] from random import randint from maasserver import forms from maasserver.testing.factory import factory def make_arch(with_subarch=True, arch_name=None, subarch_name=None): """Generate an arbitrary architecture name. :param with_subarch: Should the architecture include a slash and a sub-architecture name? Defaults to `True`. """ if arch_name is None: arch_name = factory.make_name('arch') if with_subarch: if subarch_name is None: subarch_name = factory.make_name('sub') return '%s/%s' % (arch_name, subarch_name) else: return arch_name def patch_usable_architectures(testcase, architectures=None): """Set a fixed list of usable architecture names. A usable architecture is one for which boot images are available. :param testcase: A `TestCase` whose `patch` this function can use. :param architectures: Optional list of architecture names. If omitted, defaults to a list (which may be empty) of random architecture names. """ if architectures is None: architectures = [ "%s/%s" % (factory.make_name('arch'), factory.make_name('sub')) for _ in range(randint(0, 2)) ] patch = testcase.patch(forms, 'list_all_usable_architectures') patch.return_value = architectures def make_usable_architecture( testcase, with_subarch=True, arch_name=None, subarch_name=None): """Return arbitrary architecture name, and make it "usable." A usable architecture is one for which boot images are available. :param testcase: A `TestCase` whose `patch` this function can pass to `patch_usable_architectures`. 
:param with_subarch: Should the architecture include a slash and a sub-architecture name? Defaults to `True`. :param arch_name: The architecture name. Useful in cases where we need to test that not supplying an arch works correctly. :param subarch_name: The subarchitecture name. Useful in cases where we need to test that not supplying a subarch works correctly. """ arch = make_arch( with_subarch=with_subarch, arch_name=arch_name, subarch_name=subarch_name) patch_usable_architectures(testcase, [arch]) return arch maas-1.9.5+bzr4599.orig/src/maasserver/testing/config.py0000644000000000000000000000124413056115004021107 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Fixtures for working with local configuration in the region.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'RegionConfigurationFixture', ] from maasserver.config import RegionConfiguration from provisioningserver.testing.config import ConfigurationFixtureBase class RegionConfigurationFixture(ConfigurationFixtureBase): """Fixture to configure local region settings in tests.""" configuration = RegionConfiguration maas-1.9.5+bzr4599.orig/src/maasserver/testing/database.py0000644000000000000000000000226013056115004021405 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """MAAS database cluster fixture.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "MAASClusterFixture", ] from django.db import ( connections, DEFAULT_DB_ALIAS, ) from postgresfixture import ClusterFixture class MAASClusterFixture(ClusterFixture): def __init__(self, database=None): """ @param database: The name of the database to use. Must correspond to a database defined in `django.db.connections`. If ``None``, then `DEFAULT_DB_ALIAS` is used. """ self.connection = connections[ DEFAULT_DB_ALIAS if database is None else database] super(MAASClusterFixture, self).__init__( datadir=self.connection.settings_dict["HOST"], preserve=True) @property def dbname(self): return self.connection.settings_dict["NAME"] def setUp(self): super(MAASClusterFixture, self).setUp() self.createdb(self.dbname) maas-1.9.5+bzr4599.orig/src/maasserver/testing/db_migrations.py0000644000000000000000000000367313056115004022473 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for testing South database migrations. Each Django application in MAAS tests the basic sanity of its own South database migrations. To minimize repetition, this single module provides all the code those tests need. 
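For example, a sketch of what a clash report might look like (the app and migration names here are hypothetical): detect_sequence_clashes('maasserver') could return [(2, '0002_add_foo'), (2, '0002_add_bar')], i.e. two migration modules sharing the numeric prefix 2, sorted by that number.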
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'detect_sequence_clashes', ] from collections import Counter import re from south.migration.base import Migrations from south.utils import ask_for_it_by_name def extract_number(migration_name): """Extract the sequence number from a migration module name.""" return int(re.match('([0-9]+)_', migration_name).group(1)) def get_duplicates(numbers): """Return set of those items that occur more than once.""" return { numbers for numbers, count in Counter(numbers).items() if count > 1 } def list_migrations(app_name): """List schema migrations in the given app.""" app = ask_for_it_by_name(app_name) return [migration.name() for migration in Migrations(app)] def detect_sequence_clashes(app_name): """List numbering clashes among database migrations in given app. :param app_name: Name of a MAAS Django application, e.g. "metadataserver" :return: A sorted `list` of tuples `(number, name)` representing all migration modules in the app that have clashing sequence numbers. The `number` is as found in `name`, but in `int` form. """ migrations = list_migrations(app_name) numbers_and_names = [(extract_number(name), name) for name in migrations] duplicates = get_duplicates(number for number, name in numbers_and_names) return sorted( (number, name) for number, name in numbers_and_names if number in duplicates ) maas-1.9.5+bzr4599.orig/src/maasserver/testing/dblocks.py0000644000000000000000000000162113056115004021262 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for testing database locks and related.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "lock_held_in_other_thread", ] from contextlib import contextmanager import threading from maasserver.utils.orm import transactional @contextmanager def lock_held_in_other_thread(lock, timeout=10): """Hold `lock` in another thread.""" held = threading.Event() done = threading.Event() @transactional def hold(): with lock: held.set() done.wait(timeout) thread = threading.Thread(target=hold) thread.start() held.wait(timeout) try: yield finally: done.set() thread.join() maas-1.9.5+bzr4599.orig/src/maasserver/testing/eventloop.py0000644000000000000000000000715013056115004021657 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing utilities for the region event-loop.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "RegionEventLoopFixture", "RunningEventLoopFixture", ] from crochet import wait_for_reactor from fixtures import Fixture from maasserver import eventloop from maasserver.eventloop import loop from twisted.application.service import Service class RegionEventLoopFixture(Fixture): """Stubs-out services in the event-loop to avoid side-effects. Sometimes we need only a single service, or no services, running when starting the event-loop. This fixture, by default, will stub- out all services by switching their factory callable out. This means that the services will be created, started, and stopped, but they won't do anything. 
""" def __init__(self, *services): super(RegionEventLoopFixture, self).__init__() self.services = services def checkEventLoopClean(self): # Don't proceed if the event-loop is running. if loop.services.running: raise RuntimeError( "The event-loop has been left running; this fixture cannot " "make a reasonable decision about what to do next.") # Don't proceed if any services are registered. services = list(loop.services) if services != []: raise RuntimeError( "One or more services are registered; this fixture cannot " "make a reasonable decision about what to do next. " "The services are: %s." % ', '.join(service.name for service in services)) def setUp(self): super(RegionEventLoopFixture, self).setUp() # Check that the event-loop is dormant and clean. self.checkEventLoopClean() # Ensure the event-loop will be left in a consistent state. self.addCleanup(self.checkEventLoopClean) # Restore the current `factories` tuple on exit. self.addCleanup(setattr, loop, "factories", loop.factories) # Set the new `factories` tuple, with all factories stubbed-out # except those in `self.services`. loop.factories = tuple( (name, (factory if name in self.services else Service)) for name, factory in loop.factories) class RunningEventLoopFixture(Fixture): """Starts and stops the region's event-loop. Note that this does *not* start and stop the Twisted reactor. Typically in region tests you'll find that the reactor is always running as a side-effect of importing :py:mod:`maasserver.eventloop`. """ @wait_for_reactor def start(self): return eventloop.start() @wait_for_reactor def stop(self): return eventloop.reset() def checkEventLoopClean(self): # Don't proceed if the event-loop is running. if loop.services.running: raise RuntimeError( "The event-loop has been left running; this fixture cannot " "make a reasonable decision about what to do next.") def setUp(self): super(RunningEventLoopFixture, self).setUp() # Check that the event-loop is dormant and clean. self.checkEventLoopClean() # Check that the event-loop will be left dormant and clean. self.addCleanup(self.checkEventLoopClean) # Stop the event-loop on exit. self.addCleanup(self.stop) # Start the event-loop. self.start() maas-1.9.5+bzr4599.orig/src/maasserver/testing/factory.py0000644000000000000000000017054113056115004021320 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test object factories.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) from maasserver.models.subnet import create_cidr str = None __metaclass__ = type __all__ = [ "factory", "Messages", ] from datetime import timedelta import hashlib from io import BytesIO import logging import random import time from distro_info import UbuntuDistroInfo from django.contrib.auth.models import User from django.test.client import RequestFactory from django.utils import timezone from maasserver.clusterrpc.power_parameters import get_power_types from maasserver.enum import ( ALLOCATED_NODE_STATUSES, BOOT_RESOURCE_FILE_TYPE, BOOT_RESOURCE_TYPE, CACHE_MODE_TYPE, FILESYSTEM_FORMAT_TYPE_CHOICES, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, INTERFACE_TYPE, IPADDRESS_TYPE, NODE_BOOT, NODE_STATUS, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, PARTITION_TABLE_TYPE, POWER_STATE, ) from maasserver.fields import ( LargeObjectFile, MAC, ) from maasserver.models import ( BlockDevice, BootResource, BootResourceFile, BootResourceSet, BootSource, BootSourceCache, BootSourceSelection, CacheSet, Device, DownloadProgress, Event, EventType, Fabric, FanNetwork, FileStorage, Filesystem, FilesystemGroup, LargeFile, LicenseKey, Node, NodeGroup, NodeGroupInterface, Partition, PartitionTable, PhysicalBlockDevice, Space, SSHKey, SSLKey, StaticIPAddress, Subnet, Tag, VirtualBlockDevice, VLAN, VolumeGroup, Zone, ) from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.bootresourceset import ( COMMISSIONABLE_SET, INSTALL_SET, XINSTALL_TYPES, ) from maasserver.models.interface import ( Interface, InterfaceRelationship, ) from maasserver.models.partition import MIN_PARTITION_SIZE from maasserver.node_status import NODE_TRANSITIONS from maasserver.testing import get_data from maasserver.testing.orm import reload_object from maasserver.utils.converters import round_size_to_nearest_block import maastesting.factory from maastesting.factory import NO_VALUE from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import ( CommissioningScript, NodeResult, ) from netaddr import ( IPAddress, IPNetwork, IPRange, ) from provisioningserver.utils.enum import map_enum # We have a limited number of public keys: # src/maasserver/tests/data/test_rsa{0, 1, 2, 3, 4}.pub MAX_PUBLIC_KEYS = 5 ALL_NODE_STATES = map_enum(NODE_STATUS).values() # Use `undefined` instead of `None` for default factory arguments when `None` # is a reasonable value for the argument. undefined = object() class Messages: """A class to record messages published by Django messaging framework. """ def __init__(self): self.messages = [] def add(self, level, message, extras): self.messages.append((level, message, extras)) def __iter__(self): for message in self.messages: yield message class Factory(maastesting.factory.Factory): def make_fake_request(self, path, method="GET", cookies={}): """Create a fake request. :param path: The path to which to make the request. :param method: The method to use for the request ('GET' or 'POST'). :param cookies: A `dict` with the cookies for the request. """ rf = RequestFactory() request = rf.get(path) request.method = method request._messages = Messages() request.COOKIES = cookies.copy() return request def make_file_upload(self, name=None, content=None): """Create a file-like object for upload in http POST or PUT. 
To upload a file using the Django test client, just include a parameter that maps not to a string, but to a file upload as produced by this method. :param name: Name of the file to be uploaded. If omitted, one will be made up. :type name: `unicode` :param content: Contents for the uploaded file. If omitted, some contents will be made up. :type content: `bytes` :return: A file-like object, with the requested `content` and `name`. """ if content is None: content = self.make_string().encode('ascii') if name is None: name = self.make_name('file') assert isinstance(content, bytes) upload = BytesIO(content) upload.name = name return upload def pick_enum(self, enum, but_not=None): """Pick a random item from an enumeration class. :param enum: An enumeration class such as `NODE_STATUS`. :return: The value of one of its items. :param but_not: A list of choices' IDs to exclude. :type but_not: Sequence. """ if but_not is None: but_not = () return random.choice([ value for value in list(map_enum(enum).values()) if value not in but_not]) def pick_choice(self, choices, but_not=None): """Pick a random item from `choices`. :param choices: A sequence of choices in Django form choices format: [ ('choice_id_1', "Choice name 1"), ('choice_id_2', "Choice name 2"), ] :param but_not: A list of choices' IDs to exclude. :type but_not: Sequence. :return: The "id" portion of a random choice out of `choices`. """ if but_not is None: but_not = () return random.choice( [choice for choice in choices if choice[0] not in but_not])[0] def pick_power_type(self, but_not=None): """Pick a random power type and return it. :param but_not: Exclude these values from result :type but_not: Sequence """ if but_not is None: but_not = [] else: but_not = list(but_not) but_not.append('') return random.choice( [choice for choice in list(get_power_types().keys()) if choice not in but_not]) def pick_commissioning_release(self, osystem): """Pick a random commissioning release from operating system.""" releases = osystem.get_supported_commissioning_releases() return random.choice(releases) def pick_ubuntu_release(self, but_not=None): """Pick a random supported Ubuntu release. :param but_not: Exclude these releases from the result :type but_not: Sequence """ ubuntu_releases = UbuntuDistroInfo() supported_releases = ubuntu_releases.all[ ubuntu_releases.all.index('precise'):] if but_not is None: but_not = [] return random.choice( [choice for choice in supported_releases if choice not in but_not], ).decode("utf-8") def _save_node_unchecked(self, node): """Save a :class:`Node`, but circumvent status transition checks.""" valid_initial_states = NODE_TRANSITIONS[None] NODE_TRANSITIONS[None] = ALL_NODE_STATES try: node.save() finally: NODE_TRANSITIONS[None] = valid_initial_states def make_Device(self, hostname=None, nodegroup=None, **kwargs): if hostname is None: hostname = self.make_string(20) if nodegroup is None: nodegroup = self.make_NodeGroup() device = Device(hostname=hostname, nodegroup=nodegroup, **kwargs) device.save() return device def make_Node( self, interface=False, hostname=None, status=None, architecture="i386/generic", min_hwe_kernel=None, hwe_kernel=None, installable=True, updated=None, created=None, nodegroup=None, routers=None, zone=None, networks=None, boot_type=None, sortable_name=False, power_type=None, power_parameters=None, power_state=None, power_state_updated=undefined, disable_ipv4=None, with_boot_disk=True, vlan=None, fabric=None, **kwargs): """Make a :class:`Node`. 
:param sortable_name: If `True`, use a name that will sort consistently between different collation orders. Use this when testing sorting by name, where the database and the Python code may have different ideas about collation orders, especially when it comes to case differences. """ # hostname=None is a valid value; only make one up when it's not given. if hostname is None: hostname = self.make_string(20) if sortable_name: hostname = hostname.lower() if status is None: status = NODE_STATUS.DEFAULT if nodegroup is None: nodegroup = self.make_NodeGroup() if routers is None: routers = [self.make_MAC()] if zone is None: zone = self.make_Zone() if power_type is None: power_type = 'ether_wake' if power_parameters is None: power_parameters = "" if power_state is None: power_state = self.pick_enum(POWER_STATE) if power_state_updated is undefined: power_state_updated = ( timezone.now() - timedelta(minutes=random.randint(0, 15))) if disable_ipv4 is None: disable_ipv4 = self.pick_bool() if boot_type is None: boot_type = self.pick_enum(NODE_BOOT) node = Node( hostname=hostname, status=status, architecture=architecture, min_hwe_kernel=min_hwe_kernel, hwe_kernel=hwe_kernel, installable=installable, nodegroup=nodegroup, routers=routers, zone=zone, boot_type=boot_type, power_type=power_type, power_parameters=power_parameters, power_state=power_state, power_state_updated=power_state_updated, disable_ipv4=disable_ipv4, **kwargs) self._save_node_unchecked(node) # We do not generate random networks by default because the limited # number of VLAN identifiers (4,094) makes it very likely to # encounter collisions. if networks is not None: node.networks.add(*networks) if interface: self.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=vlan, fabric=fabric) if installable and with_boot_disk: root_partition = self.make_Partition(node=node) acquired = node.status in ALLOCATED_NODE_STATUSES self.make_Filesystem( partition=root_partition, mount_point='/', acquired=acquired) # Update the 'updated'/'created' fields with a call to 'update', # preventing a call to save() from overriding the values. if updated is not None: Node.objects.filter(id=node.id).update(updated=updated) if created is not None: Node.objects.filter(id=node.id).update(created=created) return reload_object(node) def get_interface_fields(self, name=None, ip=None, router_ip=None, network=None, subnet=None, subnet_mask=None, ip_range_low=None, ip_range_high=None, interface=None, management=None, static_ip_range_low=None, static_ip_range_high=None, **kwargs): """Return a dict of parameters for a cluster interface. These are the values that go into a `NodeGroupInterface` model object or form, except the `NodeGroup`. All IP address fields are unicode strings. The `network` parameter is not included in the result, but if you pass an `IPNetwork` as its value, this will be the network that the cluster interface will be attached to. Its IP address, netmask, and address ranges will be taken from `network`. """ if name is None: name = factory.make_name('ngi') if subnet is not None: network = subnet.get_ipnetwork() if network is None: network = factory.make_ipv4_network() # Split the network into dynamic and static ranges.
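# (For example, a /24 such as 10.0.0.0/24 yields a dynamic range of # 10.0.0.0-10.0.0.128 and a static range of 10.0.0.129-10.0.0.255; a # network of two addresses or fewer is left as a single dynamic range # with no static range.)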
if network.size > 2: middle = network.size // 2 dynamic_range = IPRange(network.first, network[middle]) static_range = IPRange(network[middle + 1], network.last) else: dynamic_range = network static_range = None if subnet is None and subnet_mask is None: assert type(network) == IPNetwork subnet_mask = unicode(network.netmask) if static_ip_range_low is None or static_ip_range_high is None: if static_range is None: static_ip_range_low = None static_ip_range_high = None else: static_low = static_range.first static_high = static_range.last if static_ip_range_low is None: static_ip_range_low = unicode(IPAddress(static_low)) if static_ip_range_high is None: static_ip_range_high = unicode(IPAddress(static_high)) if ip_range_low is None: ip_range_low = unicode(IPAddress(dynamic_range.first)) if ip_range_high is None: ip_range_high = unicode(IPAddress(dynamic_range.last)) if router_ip is None: router_ip = factory.pick_ip_in_network(network) if ip is None: ip = factory.pick_ip_in_network(network) if management is None: management = factory.pick_enum(NODEGROUPINTERFACE_MANAGEMENT) if interface is None: # Make the name start with something sane, because we have code # that [falls back to] filtering based on interface name that # runs when we register a new cluster. (in other words, tests # will fail if this doesn't look like it should be a physical # Ethernet card.) interface = self.make_name('eth') return dict( name=name, subnet=subnet, subnet_mask=subnet_mask, ip_range_low=ip_range_low, ip_range_high=ip_range_high, static_ip_range_low=static_ip_range_low, static_ip_range_high=static_ip_range_high, router_ip=router_ip, ip=ip, management=management, interface=interface) def make_NodeGroup(self, name=None, uuid=None, cluster_name=None, dhcp_key=None, ip=None, router_ip=None, network=None, subnet_mask=None, ip_range_low=None, ip_range_high=None, interface=None, management=None, status=None, maas_url='', static_ip_range_low=None, static_ip_range_high=None, default_disable_ipv4=None, **kwargs): """Create a :class:`NodeGroup`. If `management` is set (to a `NODEGROUPINTERFACE_MANAGEMENT` value), a :class:`NodeGroupInterface` will be created as well. If network (an instance of IPNetwork) is provided, use it to populate subnet_mask, broadcast_ip, ip_range_low, ip_range_high, router_ip and worker_ip. This is a convenience for setting up a coherent network all in one go. """ if status is None: status = factory.pick_enum(NODEGROUP_STATUS) if name is None: name = self.make_name('nodegroup') if uuid is None: uuid = factory.make_UUID() if cluster_name is None: cluster_name = factory.make_name('cluster') if dhcp_key is None: # TODO: Randomise this properly. dhcp_key = '' if default_disable_ipv4 is None: default_disable_ipv4 = factory.pick_bool() cluster = NodeGroup.objects.new( name=name, uuid=uuid, cluster_name=cluster_name, status=status, dhcp_key=dhcp_key, maas_url=maas_url, default_disable_ipv4=default_disable_ipv4) if management is not None: interface_settings = dict( ip=ip, router_ip=router_ip, network=network, subnet_mask=subnet_mask, ip_range_low=ip_range_low, ip_range_high=ip_range_high, interface=interface, management=management, static_ip_range_low=static_ip_range_low, static_ip_range_high=static_ip_range_high) interface_settings.update(kwargs) self.make_NodeGroupInterface(cluster, **interface_settings) return cluster def make_unrenamable_NodeGroup_with_Node(self): """Create a `NodeGroup` that can't be renamed, and `Node`. 
Node groups can't be renamed while they are in an accepted state, have DHCP and DNS management enabled, and have a node that is in allocated state. The cluster will also have a managed interface. :return: tuple: (`NodeGroup`, `Node`). """ name = self.make_name('original-name') nodegroup = self.make_NodeGroup( name=name, status=NODEGROUP_STATUS.ENABLED) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) node = self.make_Node( nodegroup=nodegroup, status=NODE_STATUS.ALLOCATED) return nodegroup, node def make_NodeGroupInterface(self, nodegroup, name=None, ip=None, router_ip=None, network=None, subnet=None, subnet_mask=None, ip_range_low=None, ip_range_high=None, interface=None, management=None, static_ip_range_low=None, static_ip_range_high=None, fabric=None, **kwargs): interface_settings = self.get_interface_fields( name=name, ip=ip, router_ip=router_ip, network=network, subnet=subnet, subnet_mask=subnet_mask, ip_range_low=ip_range_low, ip_range_high=ip_range_high, interface=interface, management=management, static_ip_range_low=static_ip_range_low, static_ip_range_high=static_ip_range_high) interface_settings.update(**kwargs) # Only populate the subnet field if the subnet_mask exists. # (the caller could want an unconfigured NodeGroupInterface) if interface_settings['subnet_mask']: cidr = create_cidr( interface_settings['ip'], interface_settings['subnet_mask']) defaults = { 'name': cidr, 'cidr': cidr, 'space': Space.objects.get_default_space(), } if fabric is not None: defaults['vlan'] = fabric.get_default_vlan() subnet, _ = Subnet.objects.get_or_create( cidr=cidr, defaults=defaults) elif interface_settings['subnet']: subnet = interface_settings.pop('subnet') interface_settings['subnet_mask'] = subnet.get_ipnetwork().netmask if 'broadcast_ip' in interface_settings: del interface_settings['broadcast_ip'] interface = NodeGroupInterface( nodegroup=nodegroup, **interface_settings) interface.save() return interface def make_NodeResult_for_commissioning( self, node=None, name=None, script_result=None, data=None): """Create a `NodeResult` as one would see from commissioning a node.""" if node is None: node = self.make_Node() if name is None: name = "ncrname-" + self.make_string(92) if data is None: data = b"ncrdata-" + self.make_bytes() if script_result is None: script_result = random.randint(0, 10) ncr = NodeResult( node=node, name=name, script_result=script_result, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) ncr.save() return ncr def make_NodeResult_for_installation( self, node=None, name=None, script_result=None, data=None): """Create a `NodeResult` as one would see from installing a node.""" if node is None: node = self.make_Node() if name is None: name = "ncrname-" + self.make_string(92) if data is None: data = b"ncrdata-" + self.make_bytes() if script_result is None: script_result = random.randint(0, 10) ncr = NodeResult( node=node, name=name, script_result=script_result, result_type=RESULT_TYPE.INSTALLATION, data=Bin(data)) ncr.save() return ncr def make_MAC(self): """Generate a random MAC address, in the form of a MAC object.""" return MAC(self.make_mac_address()) def make_Node_with_Interface_on_Subnet( self, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, interface_count=1, nodegroup=None, vlan=None, subnet=None, cidr=None, fabric=None, ifname=None, unmanaged=False, **kwargs): """Create a Node that has a Interface which is on a Subnet that has a NodeGroupInterface. 
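This is the quickest way to get a node whose networking is fully modelled: the node, its boot interface, the fabric/VLAN, the subnet, and (unless `unmanaged` is `True`) a managed cluster interface are all created together; for example, ``factory.make_Node_with_Interface_on_Subnet(cidr='10.0.0.0/24')`` (illustrative CIDR).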
:param interface_count: count of interfaces to add :param **kwargs: Additional parameters to pass to make_Node. """ mac_address = None iftype = INTERFACE_TYPE.PHYSICAL if nodegroup is None: nodegroup = self.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) if 'address' in kwargs: mac_address = kwargs['address'] del kwargs['address'] if 'iftype' in kwargs: iftype = kwargs['iftype'] del kwargs['iftype'] node = self.make_Node( nodegroup=nodegroup, fabric=fabric, **kwargs) if vlan is None: if fabric is None: fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() if subnet is None: subnet = self.make_Subnet(vlan=vlan, cidr=cidr) # Check if the subnet already has a managed interface. ngis = subnet.nodegroupinterface_set.filter(nodegroup=nodegroup) ngis = ngis.exclude(management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) ngi = ngis.first() if ngi is None and not unmanaged: self.make_NodeGroupInterface( nodegroup, vlan=vlan, management=management, subnet=subnet) boot_interface = self.make_Interface( iftype, name=ifname, node=node, vlan=vlan, mac_address=mac_address) node.boot_interface = boot_interface node.save() self.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip="", subnet=subnet, interface=boot_interface) should_have_default_link_configuration = ( node.status not in [ NODE_STATUS.NEW, NODE_STATUS.COMMISSIONING, NODE_STATUS.FAILED_COMMISSIONING, ]) if should_have_default_link_configuration: self.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.AUTO, ip="", subnet=subnet, interface=boot_interface) for _ in range(1, interface_count): interface = self.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=vlan) self.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip="", subnet=subnet, interface=interface) if should_have_default_link_configuration: self.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=subnet, interface=interface) return reload_object(node) UNDEFINED = float('NaN') def _get_exclude_list(self, subnet): return ([IPAddress(subnet.gateway_ip)] + [IPAddress(ip) for ip in StaticIPAddress.objects.filter( subnet=subnet).values_list('ip', flat=True) if ip is not None]) def make_StaticIPAddress(self, ip=UNDEFINED, alloc_type=IPADDRESS_TYPE.AUTO, interface=None, user=None, subnet=None, **kwargs): """Create and return a StaticIPAddress model object. If a non-None `interface` is passed, connect this IP address to the given interface. 
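By default a free address is picked from the subnet, avoiding the gateway and any addresses already allocated there; pass `ip` explicitly to pin the address, or pass `ip=None` (or `''`) to record an entry with no address assigned.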
""" if subnet is None: subnet = Subnet.objects.first() if subnet is None and alloc_type != IPADDRESS_TYPE.USER_RESERVED: subnet = self.make_Subnet() if ip is self.UNDEFINED: if not subnet and alloc_type == IPADDRESS_TYPE.USER_RESERVED: ip = self.make_ip_address() else: ip = self.pick_ip_in_network( IPNetwork(subnet.cidr), but_not=self._get_exclude_list(subnet)) elif ip is None or ip == '': ip = '' ipaddress = StaticIPAddress( ip=ip, alloc_type=alloc_type, user=user, subnet=subnet, **kwargs) ipaddress.save() if interface is not None: interface.ip_addresses.add(ipaddress) interface.save() return reload_object(ipaddress) def make_email(self): return '%s@example.com' % self.make_string(10) def make_User(self, username=None, password='test', email=None): if username is None: username = self.make_username() if email is None: email = self.make_email() return User.objects.create_user( username=username, password=password, email=email) def make_SSHKey(self, user, key_string=None): if key_string is None: key_string = get_data('data/test_rsa0.pub') key = SSHKey(key=key_string, user=user) key.save() return key def make_SSLKey(self, user, key_string=None): if key_string is None: key_string = get_data('data/test_x509_0.pem') key = SSLKey(key=key_string, user=user) key.save() return key def make_Space(self, name=None): space = Space(name=name) space.save() return space def make_Subnet(self, name=None, vlan=None, space=None, cidr=None, gateway_ip=None, dns_servers=None, host_bits=None, fabric=None, vid=None): if name is None: name = factory.make_name('name') if vlan is None: vlan = factory.make_VLAN(fabric=fabric, vid=vid) if space is None: space = factory.make_Space() if cidr is None: network = factory.make_ip4_or_6_network(host_bits=host_bits) cidr = unicode(network.cidr) if gateway_ip is None: gateway_ip = factory.pick_ip_in_network(IPNetwork(cidr)) if dns_servers is None: dns_servers = [ self.make_ip_address() for _ in range(random.randint(1, 3))] subnet = Subnet( name=name, vlan=vlan, cidr=cidr, gateway_ip=gateway_ip, space=space, dns_servers=dns_servers) subnet.save() return subnet def make_FanNetwork(self, name=None, underlay=None, overlay=None, dhcp=None, host_reserve=1, bridge=None, off=None): if name is None: name = self.make_name('fan network') if underlay is None: underlay = factory.make_ipv4_network(slash=16) if overlay is None: overlay = factory.make_ipv4_network( slash=8, disjoint_from=[underlay]) fannetwork = FanNetwork( name=name, underlay=underlay, overlay=overlay, dhcp=dhcp, host_reserve=host_reserve, bridge=bridge, off=off) fannetwork.save() return fannetwork def make_Fabric(self, name=None, class_type=None): fabric = Fabric(name=name, class_type=class_type) fabric.save() return fabric def _get_available_vid(self, fabric): """Return a free vid in the given Fabric.""" taken_vids = set(fabric.vlan_set.all().values_list('vid', flat=True)) for attempt in range(1000): vid = random.randint(1, 4095) if vid not in taken_vids: return vid raise maastesting.factory.TooManyRandomRetries( "Could not generate vid in fabric %s" % fabric) def make_VLAN(self, name=None, vid=None, fabric=None): assert vid != 0, "VID=0 VLANs are auto-created" if fabric is None: fabric = Fabric.objects.get_default_fabric() if vid is None: # Don't create the vid=0 VLAN, it's auto-created. 
vid = self._get_available_vid(fabric) vlan = VLAN(name=name, vid=vid, fabric=fabric) vlan.save() return vlan def make_Interface( self, iftype=INTERFACE_TYPE.PHYSICAL, node=None, mac_address=None, vlan=None, parents=None, name=None, cluster_interface=None, ip=None, enabled=True, fabric=None): if name is None: if iftype in (INTERFACE_TYPE.PHYSICAL, INTERFACE_TYPE.UNKNOWN): name = self.make_name('eth') elif iftype == INTERFACE_TYPE.ALIAS: name = self.make_name('eth', sep=':') elif iftype == INTERFACE_TYPE.BOND: name = self.make_name('bond') elif iftype == INTERFACE_TYPE.UNKNOWN: name = self.make_name('eth') elif iftype == INTERFACE_TYPE.VLAN: # This will be determined by the VLAN's VID. name = None if iftype is None: iftype = INTERFACE_TYPE.PHYSICAL if vlan is None: if fabric is not None: if iftype == INTERFACE_TYPE.VLAN: vlan = self.make_VLAN(fabric=fabric) else: vlan = fabric.get_default_vlan() else: if iftype == INTERFACE_TYPE.VLAN and parents: vlan = self.make_VLAN(fabric=parents[0].vlan.fabric) elif iftype == INTERFACE_TYPE.BOND and parents: vlan = parents[0].vlan else: fabric = self.make_Fabric() vlan = fabric.get_default_vlan() if (mac_address is None and iftype in [ INTERFACE_TYPE.PHYSICAL, INTERFACE_TYPE.BOND, INTERFACE_TYPE.UNKNOWN]): mac_address = self.make_MAC() if node is None and iftype == INTERFACE_TYPE.PHYSICAL: node = self.make_Node() interface = Interface( node=node, mac_address=mac_address, type=iftype, name=name, vlan=vlan, enabled=enabled) interface.save() if cluster_interface is not None: sip = StaticIPAddress.objects.create( ip=ip, alloc_type=IPADDRESS_TYPE.DHCP, subnet=cluster_interface.subnet) interface.ip_addresses.add(sip) if parents: for parent in parents: InterfaceRelationship(child=interface, parent=parent).save() interface.save() return reload_object(interface) def make_Tag(self, name=None, definition=None, comment='', kernel_opts=None, created=None, updated=None): if name is None: name = self.make_name('tag') if definition is None: # Is there a 'node' in this xml? definition = '//node' tag = Tag( name=name, definition=definition, comment=comment, kernel_opts=kernel_opts) tag.save() # Update the 'updated'/'created' fields with a call to 'update' # preventing a call to save() from overriding the values. if updated is not None: Tag.objects.filter(id=tag.id).update(updated=updated) if created is not None: Tag.objects.filter(id=tag.id).update(created=created) return reload_object(tag) def make_user_with_keys(self, n_keys=2, user=None, **kwargs): """Create a user with n `SSHKey`. If user is not None, use this user instead of creating one. Additional keyword arguments are passed to `make_user()`. """ if n_keys > MAX_PUBLIC_KEYS: raise RuntimeError( "Cannot create more than %d public keys. If you need more: " "add more keys in src/maasserver/tests/data/." % MAX_PUBLIC_KEYS) if user is None: user = self.make_User(**kwargs) keys = [] for i in range(n_keys): key_string = get_data('data/test_rsa%d.pub' % i) key = SSHKey(user=user, key=key_string) key.save() keys.append(key) return user, keys def make_user_with_ssl_keys(self, n_keys=2, user=None, **kwargs): """Create a user with n `SSLKey`. :param n_keys: Number of keys to add to user. :param user: User to add keys to. If user is None, then user is made with make_user. Additional keyword arguments are passed to `make_user()`. """ if n_keys > MAX_PUBLIC_KEYS: raise RuntimeError( "Cannot create more than %d public keys. If you need more: " "add more keys in src/maasserver/tests/data/." 
% MAX_PUBLIC_KEYS) if user is None: user = self.make_User(**kwargs) keys = [] for i in range(n_keys): key_string = get_data('data/test_x509_%d.pem' % i) key = SSLKey(user=user, key=key_string) key.save() keys.append(key) return user, keys def make_admin(self, username=None, password='test', email=None): if username is None: username = self.make_username() if email is None: email = self.make_email() return User.objects.create_superuser( username, password=password, email=email) def make_FileStorage(self, filename=None, content=None, owner=None): fake_file = self.make_file_upload(filename, content) return FileStorage.objects.save_file(fake_file.name, fake_file, owner) def make_oauth_header(self, missing_param=None, **kwargs): """Fake an OAuth authorization header. This will use arbitrary values. Pass as keyword arguments any header items that you wish to override. :param missing_param: Optional parameter name. This parameter will be omitted from the OAuth header. This is used to create bogus OAuth headers to make sure the code deals properly with them. """ items = { 'realm': self.make_string(), 'oauth_nonce': random.randint(0, 99999), 'oauth_timestamp': time.time(), 'oauth_consumer_key': self.make_string(18), 'oauth_signature_method': 'PLAINTEXT', 'oauth_version': '1.0', 'oauth_token': self.make_string(18), 'oauth_signature': "%%26%s" % self.make_string(32), } items.update(kwargs) if missing_param is not None: del items[missing_param] return "OAuth " + ", ".join([ '%s="%s"' % (key, value) for key, value in items.items()]) def make_CommissioningScript(self, name=None, content=None): if name is None: name = self.make_name('script') if content is None: content = b'content:' + self.make_string().encode('ascii') return CommissioningScript.objects.create( name=name, content=Bin(content)) def make_DownloadProgress(self, nodegroup=None, filename=None, size=NO_VALUE, bytes_downloaded=NO_VALUE, error=None): """Create a `DownloadProgress` in some poorly-defined state. If you have specific wishes about the object's state, you'll want to use one of the specialized `make_DownloadProgress_*` methods instead. Pass a `size` of `None` to indicate that total file size is not yet known. The default picks either a random number, or None. 
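(`NO_VALUE` serves as the sentinel for `size` and `bytes_downloaded` precisely because `None` is itself a meaningful value for both.)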
""" if nodegroup is None: nodegroup = self.make_NodeGroup() if filename is None: filename = self.make_name('download') if size is NO_VALUE: if self.pick_bool(): size = random.randint(0, 1000000000) else: size = None if bytes_downloaded is NO_VALUE: if self.pick_bool(): if size is None: max_size = 1000000000 else: max_size = size bytes_downloaded = random.randint(0, max_size) else: bytes_downloaded = None if error is None: if self.pick_bool(): error = self.make_string() else: error = '' return DownloadProgress.objects.create( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded) def make_DownloadProgress_initial(self, nodegroup=None, filename=None, size=NO_VALUE): """Create a `DownloadProgress` as reported before a download.""" return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=None, error='') def make_DownloadProgress_success(self, nodegroup=None, filename=None, size=None): """Create a `DownloadProgress` indicating success.""" if size is None: size = random.randint(0, 1000000000) return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=size, error='') def make_DownloadProgress_incomplete(self, nodegroup=None, filename=None, size=NO_VALUE, bytes_downloaded=None): """Create a `DownloadProgress` that's not done yet.""" if size is NO_VALUE: if self.pick_bool(): # File can't be empty, or the download can't be incomplete. size = random.randint(1, 1000000000) else: size = None if bytes_downloaded is None: if size is None: max_size = 1000000000 else: max_size = size bytes_downloaded = random.randint(0, max_size - 1) return self.make_DownloadProgress( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded, error='') def make_DownloadProgress_failure(self, nodegroup=None, filename=None, size=NO_VALUE, bytes_downloaded=NO_VALUE, error=None): """Create a `DownloadProgress` indicating failure.""" if error is None: error = self.make_string() return self.make_DownloadProgress_incomplete( nodegroup=nodegroup, filename=filename, size=size, bytes_downloaded=bytes_downloaded, error=error) def make_Zone(self, name=None, description=None, nodes=None, sortable_name=False): """Create a physical `Zone`. :param sortable_name: If `True`, use a that will sort consistently between different collation orders. Use this when testing sorting by name, where the database and the python code may have different ideas about collation orders, especially when it comes to case differences. """ if name is None: name = self.make_name('zone') if sortable_name: name = name.lower() if description is None: description = self.make_string() zone = Zone(name=name, description=description) zone.save() if nodes is not None: zone.node_set.add(*nodes) return zone make_zone = make_Zone def make_BootSource(self, url=None, keyring_filename=None, keyring_data=None): """Create a new `BootSource`.""" if url is None: url = "http://%s.com" % self.make_name('source-url') # Only set _one_ of keyring_filename and keyring_data. 
if keyring_filename is None and keyring_data is None: keyring_filename = self.make_name("keyring") boot_source = BootSource( url=url, keyring_filename=( "" if keyring_filename is None else keyring_filename), keyring_data=( b"" if keyring_data is None else keyring_data), ) boot_source.save() return boot_source def make_BootSourceCache(self, boot_source=None, os=None, arch=None, subarch=None, release=None, label=None): """Create a new `BootSourceCache`.""" if boot_source is None: boot_source = self.make_BootSource() if os is None: os = factory.make_name('os') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if release is None: release = factory.make_name('release') if label is None: label = factory.make_name('label') return BootSourceCache.objects.create( boot_source=boot_source, os=os, arch=arch, subarch=subarch, release=release, label=label) def make_many_BootSourceCaches(self, number, **kwargs): caches = list() for _ in range(number): caches.append(self.make_BootSourceCache(**kwargs)) return caches def make_BootSourceSelection(self, boot_source=None, os=None, release=None, arches=None, subarches=None, labels=None): """Create a `BootSourceSelection`.""" if boot_source is None: boot_source = self.make_BootSource() if os is None: os = self.make_name('os') if release is None: release = self.make_name('release') if arches is None: arch_count = random.randint(1, 10) arches = [self.make_name("arch") for _ in range(arch_count)] if subarches is None: subarch_count = random.randint(1, 10) subarches = [ self.make_name("subarch") for _ in range(subarch_count) ] if labels is None: label_count = random.randint(1, 10) labels = [self.make_name("label") for _ in range(label_count)] boot_source_selection = BootSourceSelection( boot_source=boot_source, release=release, arches=arches, subarches=subarches, labels=labels) boot_source_selection.save() return boot_source_selection def make_LicenseKey(self, osystem=None, distro_series=None, license_key=None): if osystem is None: osystem = factory.make_name('osystem') if distro_series is None: distro_series = factory.make_name('distro_series') if license_key is None: license_key = factory.make_name('key') return LicenseKey.objects.create( osystem=osystem, distro_series=distro_series, license_key=license_key) def make_EventType(self, name=None, level=None, description=None): if name is None: name = self.make_name('name', size=20) if description is None: description = factory.make_name('description') if level is None: level = random.choice([ logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]) return EventType.objects.create( name=name, description=description, level=level) def make_Event(self, node=None, type=None, action=None, description=None): if node is None: node = self.make_Node() if type is None: type = self.make_EventType() if action is None: action = self.make_name('action') if description is None: description = self.make_name('desc') return Event.objects.create( node=node, type=type, action=action, description=description) def make_LargeFile(self, content=None, size=512): """Create `LargeFile`. :param content: Data to store in large file object. :param size: Size of `content`. If `content` is None then it will be a random string of this size. If content is provided and `size` is not the same length, then it will be an inprogress file. 
""" if content is None: content = factory.make_string(size=size) sha256 = hashlib.sha256() sha256.update(content) sha256 = sha256.hexdigest() largeobject = LargeObjectFile() with largeobject.open('wb') as stream: stream.write(content) return LargeFile.objects.create( sha256=sha256, total_size=size, content=largeobject) def make_BootResource(self, rtype=None, name=None, architecture=None, extra=None, kflavor=None): if rtype is None: rtype = self.pick_enum(BOOT_RESOURCE_TYPE) if name is None: if rtype == BOOT_RESOURCE_TYPE.UPLOADED: name = self.make_name('name') else: os = self.make_name('os') series = self.make_name('series') name = '%s/%s' % (os, series) if architecture is None: arch = self.make_name('arch') subarch = self.make_name('subarch') architecture = '%s/%s' % (arch, subarch) if extra is None: extra = { self.make_name('key'): self.make_name('value') for _ in range(3) } if kflavor is None: extra['kflavor'] = 'generic' else: extra['kflavor'] = kflavor return BootResource.objects.create( rtype=rtype, name=name, architecture=architecture, extra=extra) def make_BootResourceSet(self, resource, version=None, label=None): if version is None: version = self.make_name('version') if label is None: label = self.make_name('label') return BootResourceSet.objects.create( resource=resource, version=version, label=label) def make_BootResourceFile(self, resource_set, largefile, filename=None, filetype=None, extra=None): if filename is None: filename = self.make_name('name') if filetype is None: filetype = self.pick_enum(BOOT_RESOURCE_FILE_TYPE) if extra is None: extra = { self.make_name('key'): self.make_name('value') for _ in range(3) } return BootResourceFile.objects.create( resource_set=resource_set, largefile=largefile, filename=filename, filetype=filetype, extra=extra) def make_boot_resource_file_with_content( self, resource_set, filename=None, filetype=None, extra=None, content=None, size=512): largefile = self.make_LargeFile(content=content, size=size) return self.make_BootResourceFile( resource_set, largefile, filename=filename, filetype=filetype, extra=extra) def make_usable_boot_resource( self, rtype=None, name=None, architecture=None, extra=None, version=None, label=None, kflavor=None): resource = self.make_BootResource( rtype=rtype, name=name, architecture=architecture, extra=extra, kflavor=kflavor) resource_set = self.make_BootResourceSet( resource, version=version, label=label) filetypes = COMMISSIONABLE_SET.union(INSTALL_SET) filetypes.add(random.choice(XINSTALL_TYPES)) for filetype in filetypes: # We set the filename to the same value as filetype, as in most # cases this will always be true. The simplestreams content from # maas.ubuntu.com, is formatted this way. 
self.make_boot_resource_file_with_content( resource_set, filename=filetype, filetype=filetype) return resource def make_BlockDevice( self, node=None, name=None, id_path=None, size=None, block_size=None, tags=None): if node is None: node = self.make_Node() if name is None: name = self.make_name('name') if id_path is None: id_path = '/dev/disk/by-id/id_%s' % name if block_size is None: block_size = random.choice([512, 1024, 4096]) if size is None: size = round_size_to_nearest_block( random.randint( MIN_BLOCK_DEVICE_SIZE * 4, MIN_BLOCK_DEVICE_SIZE * 1024), block_size) if tags is None: tags = [self.make_name('tag') for _ in range(3)] return BlockDevice.objects.create( node=node, name=name, size=size, block_size=block_size, tags=tags) def make_PhysicalBlockDevice( self, node=None, name=None, size=None, block_size=None, tags=None, model=None, serial=None, id_path=None): if node is None: node = self.make_Node() if name is None: name = self.make_name('name') if block_size is None: block_size = random.choice([512, 1024, 4096]) if size is None: size = round_size_to_nearest_block( random.randint( MIN_BLOCK_DEVICE_SIZE * 4, MIN_BLOCK_DEVICE_SIZE * 1024), block_size) if tags is None: tags = [self.make_name('tag') for _ in range(3)] if id_path is None: if model is None: model = self.make_name('model') if serial is None: serial = self.make_name('serial') else: model = "" serial = "" return PhysicalBlockDevice.objects.create( node=node, name=name, size=size, block_size=block_size, tags=tags, model=model, serial=serial, id_path=id_path) def make_PartitionTable( self, table_type=None, block_device=None, node=None, block_device_size=None): if block_device is None: if node is None: if table_type == PARTITION_TABLE_TYPE.GPT: node = factory.make_Node(bios_boot_method="uefi") else: node = factory.make_Node() block_device = self.make_PhysicalBlockDevice( node=node, size=block_device_size) return PartitionTable.objects.create( table_type=table_type, block_device=block_device) def make_Partition( self, partition_table=None, uuid=None, size=None, bootable=None, node=None, block_device_size=None): if partition_table is None: partition_table = self.make_PartitionTable( node=node, block_device_size=block_device_size) if size is None: available_size = partition_table.get_available_size() / 2 if available_size < MIN_PARTITION_SIZE: raise ValueError( "Cannot make another partition on partition_table not " "enough free space.") size = random.randint(MIN_PARTITION_SIZE, available_size) if bootable is None: bootable = random.choice([True, False]) return Partition.objects.create( partition_table=partition_table, uuid=uuid, size=size, bootable=bootable) def make_Filesystem( self, uuid=None, fstype=None, partition=None, block_device=None, filesystem_group=None, label=None, create_params=None, mount_point=None, mount_params=None, block_device_size=None, acquired=False): if fstype is None: fstype = self.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) if partition is None and block_device is None: if self.pick_bool(): partition = self.make_Partition() else: block_device = self.make_PhysicalBlockDevice( size=block_device_size) return Filesystem.objects.create( uuid=uuid, fstype=fstype, partition=partition, block_device=block_device, filesystem_group=filesystem_group, label=label, create_params=create_params, mount_point=mount_point, mount_params=mount_params, acquired=acquired) def make_CacheSet(self, block_device=None, partition=None, node=None): if node is None: node = self.make_Node() if partition is None and block_device is None: 
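# Neither was supplied: back the cache set with either a new partition # or a new physical block device, chosen at random.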
if self.pick_bool(): partition = self.make_Partition(node=node) else: block_device = self.make_PhysicalBlockDevice(node=node) if block_device is not None: return CacheSet.objects.get_or_create_cache_set_for_block_device( block_device) else: return CacheSet.objects.get_or_create_cache_set_for_partition( partition) def make_FilesystemGroup( self, uuid=None, group_type=None, name=None, create_params=None, filesystems=None, node=None, block_device_size=None, cache_mode=None, num_lvm_devices=4, cache_set=None): if group_type is None: group_type = self.pick_enum(FILESYSTEM_GROUP_TYPE) if group_type == FILESYSTEM_GROUP_TYPE.BCACHE: if cache_mode is None: cache_mode = self.pick_enum(CACHE_MODE_TYPE) if cache_set is None: cache_set = self.make_CacheSet(node=node) group = FilesystemGroup( uuid=uuid, group_type=group_type, name=name, cache_mode=cache_mode, create_params=create_params, cache_set=cache_set) group.save() if filesystems is None: if node is None: node = self.make_Node() if node.physicalblockdevice_set.count() == 0: # Add the boot disk and leave it as is. self.make_PhysicalBlockDevice(node=node) if group_type == FILESYSTEM_GROUP_TYPE.LVM_VG: for _ in range(num_lvm_devices): block_device = self.make_PhysicalBlockDevice( node, size=block_device_size) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device) group.filesystems.add(filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.RAID_0: for _ in range(2): block_device = self.make_PhysicalBlockDevice(node) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID, block_device=block_device) group.filesystems.add(filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.RAID_1: for _ in range(2): block_device = self.make_PhysicalBlockDevice(node) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID, block_device=block_device) group.filesystems.add(filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.RAID_5: for _ in range(3): block_device = self.make_PhysicalBlockDevice(node) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID, block_device=block_device) group.filesystems.add(filesystem) spare_block_device = self.make_PhysicalBlockDevice(node) spare_filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID_SPARE, block_device=spare_block_device) group.filesystems.add(spare_filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.RAID_6: for _ in range(4): block_device = self.make_PhysicalBlockDevice(node) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID, block_device=block_device) group.filesystems.add(filesystem) spare_block_device = self.make_PhysicalBlockDevice(node) spare_filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID_SPARE, block_device=spare_block_device) group.filesystems.add(spare_filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.RAID_10: for _ in range(4): block_device = self.make_PhysicalBlockDevice(node) filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID, block_device=block_device) group.filesystems.add(filesystem) spare_block_device = self.make_PhysicalBlockDevice(node) spare_filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.RAID_SPARE, block_device=spare_block_device) group.filesystems.add(spare_filesystem) elif group_type == FILESYSTEM_GROUP_TYPE.BCACHE: backing_block_device = self.make_PhysicalBlockDevice(node) backing_filesystem = self.make_Filesystem( fstype=FILESYSTEM_TYPE.BCACHE_BACKING, block_device=backing_block_device) group.filesystems.add(backing_filesystem) else: for filesystem in filesystems: 
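# The caller supplied the member filesystems explicitly; attach them # as-is.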
group.filesystems.add(filesystem) # Save again to make sure that the added filesystems are correct. group.save() return group def make_VolumeGroup(self, *args, **kwargs): if len(args) > 1: args[1] = FILESYSTEM_GROUP_TYPE.LVM_VG else: kwargs['group_type'] = FILESYSTEM_GROUP_TYPE.LVM_VG filesystem_group = self.make_FilesystemGroup(*args, **kwargs) return VolumeGroup.objects.get(id=filesystem_group.id) def make_VirtualBlockDevice( self, name=None, size=None, block_size=None, tags=None, uuid=None, filesystem_group=None, node=None): if node is None: node = factory.make_Node() if block_size is None: block_size = random.choice([512, 1024, 4096]) if filesystem_group is None: filesystem_group = self.make_FilesystemGroup( node=node, group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, block_device_size=size, num_lvm_devices=2) if size is None: available_size = filesystem_group.get_lvm_free_space() if available_size < MIN_BLOCK_DEVICE_SIZE: raise ValueError( "Cannot make a virtual block device in filesystem_group; " "not enough space.") size = round_size_to_nearest_block( random.randint( MIN_BLOCK_DEVICE_SIZE, available_size), block_size) if tags is None: tags = [self.make_name("tag") for _ in range(3)] elif not filesystem_group.is_lvm(): raise RuntimeError( "make_VirtualBlockDevice should only be used with " "filesystem_group that has a group_type of LVM_VG. " "If you need a VirtualBlockDevice that is for another type " "use make_FilesystemGroup which will create a " "VirtualBlockDevice automatically.") if name is None: name = self.make_name("lv") if size is None: size = random.randint(1, filesystem_group.get_size()) if block_size is None: block_size = random.choice([512, 1024, 4096]) return VirtualBlockDevice.objects.create( name=name, size=size, block_size=block_size, tags=tags, uuid=uuid, filesystem_group=filesystem_group) # Create factory singleton. factory = Factory() maas-1.9.5+bzr4599.orig/src/maasserver/testing/oauthclient.py0000644000000000000000000000536013056115004022164 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """OAuth client for API testing.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'OAuthAuthenticatedClient', ] from time import time from maasserver.models.user import get_auth_tokens from maasserver.testing.testclient import MAASSensibleClient from oauth.oauth import ( generate_nonce, OAuthConsumer, OAuthRequest, OAuthSignatureMethod_PLAINTEXT, OAuthToken, ) class OAuthAuthenticatedClient(MAASSensibleClient): """OAuth-authenticated client for Piston API testing.""" def __init__(self, user, token=None): """Initialize an oauth-authenticated test client. :param user: The user to authenticate. :type user: django.contrib.auth.models.User :param token: Optional token to authenticate `user` with. If no `token` is given, the user's first token will be used. :type token: oauth.oauth.OAuthToken """ super(OAuthAuthenticatedClient, self).__init__() if token is None: # Get the user's first token. token = get_auth_tokens(user)[0] assert token.user == user, "Token does not match User." 
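# Build the OAuth consumer/token pair from the stored Piston token; the # oauth library works on byte strings, hence the ASCII encoding.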
consumer = token.consumer self.consumer = OAuthConsumer( consumer.key.encode("ascii"), consumer.secret.encode("ascii")) self.token = OAuthToken( token.key.encode("ascii"), token.secret.encode("ascii")) def _compose_auth_header(self, url): """Return additional header entries for request to `url`.""" params = { 'oauth_version': "1.0", 'oauth_nonce': generate_nonce(), 'oauth_timestamp': int(time()), 'oauth_token': self.token.key, 'oauth_consumer_key': self.consumer.key, } req = OAuthRequest(http_url=url, parameters=params) req.sign_request( OAuthSignatureMethod_PLAINTEXT(), self.consumer, self.token) header = req.to_header() # Django uses the 'HTTP_AUTHORIZATION' to look up Authorization # credentials. header['HTTP_AUTHORIZATION'] = header['Authorization'] return header def _compose_url(self, path): """Put together a full URL for the resource at `path`.""" environ = self._base_environ() return '%s://%s' % (environ['wsgi.url_scheme'], path) def request(self, **kwargs): url = self._compose_url(kwargs['PATH_INFO']) kwargs.update(self._compose_auth_header(url)) return super(OAuthAuthenticatedClient, self).request(**kwargs) maas-1.9.5+bzr4599.orig/src/maasserver/testing/orm.py0000644000000000000000000000676113056115004020450 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """ORM-related test helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'PostCommitHooksTestMixin', 'reload_object', 'reload_objects', ] from maasserver.utils.orm import ( gen_description_of_hooks, get_one, post_commit_hooks, ) import testtools from testtools.matchers import HasLength def reload_object(model_object): """Reload `obj` from the database. Use this when a test needs to inspect changes to model objects made by the API. If the object has been deleted, this will return None. :param model_object: Model object to reload. :type model_object: Concrete `Model` subtype. :return: Freshly-loaded instance of `model_object`, or None. :rtype: Same as `model_object`. """ model_class = model_object.__class__ return get_one(model_class.objects.filter(id=model_object.id)) def reload_objects(model_class, model_objects): """Reload `model_objects` of type `model_class` from the database. Use this when a test needs to inspect changes to model objects made by the API. If any of the objects have been deleted, they will not be included in the result. :param model_class: `Model` class to reload from. :type model_class: Class. :param model_objects: Objects to reload from the database. :type model_objects: Sequence of `model_class` objects. :return: Reloaded objects, in no particular order. :rtype: Sequence of `model_class` objects. """ assert all(isinstance(obj, model_class) for obj in model_objects) return model_class.objects.filter( id__in=[obj.id for obj in model_objects]) class PostCommitHooksTestMixin(testtools.TestCase): """Reset all post-commit hooks. This also adds an expectation to `test` that there aren't any leaking post-commit hooks. The test will still run, but will be marked as failed. The learnings: tests should not be allowing post-commit hooks to escape. 
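(Concretely: any hooks still registered when a test starts or finishes are reported as a failure and then reset, so they are neither reported twice nor executed.)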
""" def setUp(self): try: super(PostCommitHooksTestMixin, self).setUp() description_of_hooks = "\n".join( gen_description_of_hooks(post_commit_hooks.hooks)) self.expectThat( post_commit_hooks.hooks, HasLength(0), "One or more post-commit tasks were present before " "commencing this test:\n" + description_of_hooks) finally: # By this point we will have reported the leaked post-commit # tasks, so always reset them; we don't want to report them again, # and we don't want to execute them. post_commit_hooks.reset() def tearDown(self): try: description_of_hooks = "\n".join( gen_description_of_hooks(post_commit_hooks.hooks)) self.expectThat( post_commit_hooks.hooks, HasLength(0), "One or more post-commit tasks were present at the end of " "this test." + description_of_hooks) super(PostCommitHooksTestMixin, self).tearDown() finally: # By this point we will have reported the leaked post-commit # tasks, so always reset them; we don't want to report them again, # and we don't want to execute them. post_commit_hooks.reset() maas-1.9.5+bzr4599.orig/src/maasserver/testing/osystems.py0000644000000000000000000000556213056115004021537 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for operating systems in testing.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'make_usable_osystem', 'patch_usable_osystems', ] from random import randint from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.testing.factory import factory from maasserver.utils import osystems as osystems_module def make_osystem_with_releases(testcase, osystem_name=None, releases=None): """Generate an arbitrary operating system. :param osystem_name: The operating system name. Useful in cases where we need to test that not supplying an os works correctly. :param releases: The list of releases name. Useful in cases where we need to test that not supplying a release works correctly. """ if osystem_name is None: osystem_name = factory.make_name('os') if releases is None: releases = [factory.make_name('release') for _ in range(3)] rpc_releases = [ make_rpc_release(release) for release in releases ] return make_rpc_osystem(osystem_name, releases=rpc_releases) def patch_usable_osystems(testcase, osystems=None, allow_empty=True): """Set a fixed list of usable operating systems. A usable operating system is one for which boot images are available. :param testcase: A `TestCase` whose `patch` this function can use. :param osystems: Optional list of operating systems. If omitted, defaults to a list (which may be empty) of random operating systems. """ start = 0 if allow_empty is False: start = 1 if osystems is None: osystems = [ make_osystem_with_releases(testcase) for _ in range(randint(start, 2)) ] testcase.patch( osystems_module, 'gen_all_known_operating_systems').return_value = osystems def make_usable_osystem(testcase, osystem_name=None, releases=None): """Return arbitrary operating system, and make it "usable." A usable operating system is one that is returned from the RPC call ListOperatingSystems. :param testcase: A `TestCase` whose `patch` this function can pass to `patch_usable_osystems`. :param osystem_name: The operating system name. Useful in cases where we need to test that not supplying an os works correctly. :param releases: The list of releases name. 
Useful in cases where we need to test that not supplying a release works correctly. """ osystem = make_osystem_with_releases( testcase, osystem_name=osystem_name, releases=releases) patch_usable_osystems(testcase, [osystem]) return osystem maas-1.9.5+bzr4599.orig/src/maasserver/testing/testcase.py0000644000000000000000000002626113056115004021463 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Custom test-case classes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'MAASServerTestCase', 'MAASTransactionServerTestCase', 'SeleniumTestCase', 'SerializationFailureTestCase', 'TestWithoutCrochetMixin', ] from contextlib import closing import SocketServer import sys import threading from unittest import SkipTest import wsgiref import crochet import django from django.core.urlresolvers import reverse from django.db import ( close_old_connections, connection, transaction, ) from django.db.utils import OperationalError from fixtures import Fixture from maasserver.fields import register_mac_type from maasserver.testing.factory import factory from maasserver.testing.orm import PostCommitHooksTestMixin from maasserver.testing.testclient import MAASSensibleClient from maasserver.utils.orm import is_serialization_failure from maastesting.djangotestcase import ( DjangoTestCase, DjangoTransactionTestCase, ) from maastesting.fixtures import DisplayFixture from maastesting.utils import run_isolated from mock import Mock class MAASRegionTestCaseBase(PostCommitHooksTestMixin): """Base test case for testing the region. See sub-classes for the real deal though. """ client_class = MAASSensibleClient # For each piece of default data introduced via migrations we need # to also include a data fixture. This needs to be representative, # but can be a reduced set. fixtures = [ "candidatenames.yaml", "defaultzone.yaml", ] @classmethod def setUpClass(cls): super(MAASRegionTestCaseBase, cls).setUpClass() register_mac_type(connection.cursor()) def setUp(self): super(MAASRegionTestCaseBase, self).setUp() # Avoid circular imports. from maasserver.models import signals # XXX: allenap bug=1427628 2015-03-03: This should not be here. from maasserver.clusterrpc.testing import power_parameters self.useFixture(power_parameters.StaticPowerTypesFixture()) # XXX: allenap bug=1427628 2015-03-03: These should not be here. # Disconnect the monitor cancellation as it's triggered by a signal. self.patch(signals.monitors, 'MONITOR_CANCEL_CONNECT', False) # Disconnect the status transition event to speed up tests. self.patch(signals.events, 'STATE_TRANSITION_EVENT_CONNECT', False) def client_log_in(self, as_admin=False): """Log `self.client` into MAAS. Sets `self.logged_in_user` to match the logged-in identity. """ password = 'test' if as_admin: user = factory.make_admin(password=password) else: user = factory.make_User(password=password) self.client.login(username=user.username, password=password) self.logged_in_user = user class MAASServerTestCase( MAASRegionTestCaseBase, DjangoTestCase): """:class:`TestCase` variant for region testing.""" class MAASTransactionServerTestCase( MAASRegionTestCaseBase, DjangoTransactionTestCase): """:class:`TestCase` variant for *transaction* region testing.""" # Django supports Selenium tests only since version 1.4. 
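# Import the Selenium machinery only when it can work; otherwise fall # back to a plain `object` base so this module still imports, and # SeleniumTestCase degrades to skipping its tests.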
django_supports_selenium = (django.VERSION >= (1, 4)) if django_supports_selenium: from django.test import LiveServerTestCase from selenium.webdriver.firefox.webdriver import WebDriver else: LiveServerTestCase = object # noqa class LogSilencerFixture(Fixture): old_handle_error = wsgiref.handlers.BaseHandler.handle_error old_log_exception = wsgiref.handlers.BaseHandler.log_exception def setUp(self): super(LogSilencerFixture, self).setUp() self.silence_loggers() self.addCleanup(self.unsilence_loggers) def silence_loggers(self): # Silence logging of errors to avoid the # "IOError: [Errno 32] Broken pipe" error. SocketServer.BaseServer.handle_error = Mock() wsgiref.handlers.BaseHandler.log_exception = Mock() def unsilence_loggers(self): """Restore original handle_error/log_exception methods.""" SocketServer.BaseServer.handle_error = self.old_handle_error wsgiref.handlers.BaseHandler.log_exception = self.old_log_exception class SeleniumTestCase( DjangoTransactionTestCase, LiveServerTestCase, PostCommitHooksTestMixin): """Selenium-enabled test case. Two users are pre-created: "user" for a regular user account, or "admin" for an administrator account. Both have the password "test". You can log in as either using `log_in`. """ # Load the selenium test fixture. fixtures = ['src/maastesting/protractor/fixture.yaml'] @classmethod def setUpClass(cls): if not django_supports_selenium: return cls.display = DisplayFixture() cls.display.__enter__() cls.silencer = LogSilencerFixture() cls.silencer.__enter__() cls.selenium = WebDriver() super(SeleniumTestCase, cls).setUpClass() def setUp(self): if not django_supports_selenium: raise SkipTest( "Live tests only enabled if Django.version >=1.4.") super(SeleniumTestCase, self).setUp() @classmethod def tearDownClass(cls): if not django_supports_selenium: return cls.selenium.quit() cls.display.__exit__(None, None, None) cls.silencer.__exit__(None, None, None) super(SeleniumTestCase, cls).tearDownClass() def log_in(self, user='user', password='test'): """Log in as the given user. Defaults to non-admin user.""" self.get_page('login') username_input = self.selenium.find_element_by_id("id_username") username_input.send_keys(user) password_input = self.selenium.find_element_by_id("id_password") password_input.send_keys(password) self.selenium.find_element_by_xpath('//input[@value="Login"]').click() def get_page(self, *reverse_args, **reverse_kwargs): """GET a page. Arguments are passed on to `reverse`.""" path = reverse(*reverse_args, **reverse_kwargs) return self.selenium.get("%s%s" % (self.live_server_url, path)) class TestWithoutCrochetMixin: """Ensure that Crochet's event-loop is not running. Crochet's event-loop cannot easily be resurrected, so this runs each test in a new subprocess. There we can stop Crochet without worrying about how to get it going again. Use this where tests must, for example, patch out global state during testing, where those patches coincide with things that Crochet expects to use too, ``time.sleep`` for example. """ _dead_thread = threading.Thread() _dead_thread.start() _dead_thread.join() def __call__(self, result=None): if result is None: result = self.defaultTestResult() # nose.proxy.ResultProxy.assertMyTest() is weird, and makes # things break, so we neutralise it here. result.assertMyTest = lambda test: None # Finally, run the test in a subprocess. 
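# run_isolated() runs the test in a child process, so Crochet can be # stopped there without poisoning this (parent) process.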
up = super(TestWithoutCrochetMixin, self.__class__) run_isolated(up, self, result) run = __call__ def setUp(self): super(TestWithoutCrochetMixin, self).setUp() # Ensure that Crochet's event-loop has shutdown. The following # runs in the child process started by run_isolated() so we # don't need to repair the damage we do. if crochet._watchdog.is_alive(): crochet._watchdog._canary = self._dead_thread crochet._watchdog.join() # Wait for the watchdog to stop. self.assertFalse(crochet.reactor.running) class SerializationFailureTestCase( DjangoTransactionTestCase, PostCommitHooksTestMixin): def create_stest_table(self): with closing(connection.cursor()) as cursor: cursor.execute("CREATE TABLE IF NOT EXISTS stest (a INTEGER)") def drop_stest_table(self): with closing(connection.cursor()) as cursor: cursor.execute("DROP TABLE IF EXISTS stest") def setUp(self): super(SerializationFailureTestCase, self).setUp() self.create_stest_table() # Put something into the stest table upon which to trigger a # serialization failure. with transaction.atomic(): with closing(connection.cursor()) as cursor: cursor.execute("INSERT INTO stest VALUES (1)") def tearDown(self): super(SerializationFailureTestCase, self).tearDown() self.drop_stest_table() def cause_serialization_failure(self): """Trigger an honest, from the database, serialization failure.""" # Helper to switch the transaction to SERIALIZABLE. def set_serializable(): with closing(connection.cursor()) as cursor: cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE") # Perform a conflicting update. This must run in a separate thread. It # also must begin after the beginning of the transaction in which we # will trigger a serialization failure AND commit before that other # transaction commits. This doesn't need to run with serializable # isolation. def do_conflicting_update(): try: with transaction.atomic(): with closing(connection.cursor()) as cursor: cursor.execute("UPDATE stest SET a = 2") finally: close_old_connections() def trigger_serialization_failure(): # Fetch something first. This ensures that we're inside the # transaction, and that the database has a reference point for # calculating serialization failures. with closing(connection.cursor()) as cursor: cursor.execute("SELECT * FROM stest") cursor.fetchall() # Run do_conflicting_update() in a separate thread. thread = threading.Thread(target=do_conflicting_update) thread.start() thread.join() # Updating the same rows as do_conflicting_update() did will # trigger a serialization failure. We have to check the __cause__ # to confirm the failure type as reported by PostgreSQL. with closing(connection.cursor()) as cursor: cursor.execute("UPDATE stest SET a = 4") if connection.in_atomic_block: # We're already in a transaction. set_serializable() trigger_serialization_failure() else: # Start a transaction in this thread. with transaction.atomic(): set_serializable() trigger_serialization_failure() def capture_serialization_failure(self): """Trigger a serialization failure, return its ``exc_info`` tuple.""" try: self.cause_serialization_failure() except OperationalError as e: if is_serialization_failure(e): return sys.exc_info() else: raise maas-1.9.5+bzr4599.orig/src/maasserver/testing/testclient.py0000644000000000000000000000262013056115004022017 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""MAAS-specific test HTTP client.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'MAASSensibleClient', ] from maasserver.utils.orm import ( post_commit_hooks, transactional, ) from maastesting.djangoclient import SensibleClient class MAASSensibleClient(SensibleClient): """A derivative of Django's test client specially for MAAS. This ensures that requests are performed in a transaction, and that post-commit hooks are alway fired or reset. """ def request(self, **request): # Make sure that requests are done within a transaction. Some kinds of # tests will already have a transaction in progress, in which case # this will act like a sub-transaction, but that's fine. upcall = transactional(super(MAASSensibleClient, self).request) # If we're outside of a transaction right now then the transactional() # wrapper above will ensure that post-commit hooks are run or reset on # return from the request. However, we want to ensure that post-commit # hooks are fired in any case, hence the belt-n-braces context. with post_commit_hooks: return upcall(**request) maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/0000755000000000000000000000000013056115004020431 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/__init__.py0000644000000000000000000000000013056115004022530 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/models.py0000644000000000000000000000101413056115004022262 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test model for tests of testing module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'TestModel', ] from django.db.models import ( CharField, Model, ) class TestModel(Model): """A trivial model class for testing.""" text = CharField(max_length=100) maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/test_db_migrations.py0000644000000000000000000000550713056115004024672 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for helpers used to sanity-check South migrations.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from maasserver.testing import db_migrations from maastesting.factory import factory from maastesting.testcase import MAASTestCase def make_migration_name(number=None, name=None): """Create a migration name.""" if number is None: number = randint(0, 9999) if name is None: name = factory.make_string() return '{0:=04}_{1}'.format(number, name) class TestDBMigrations(MAASTestCase): def test_extract_number_returns_sequence_number(self): number = randint(0, 999999) self.assertEqual( number, db_migrations.extract_number(make_migration_name(number))) def test_get_duplicates_finds_duplicates(self): item = factory.make_name('item') self.assertEqual({item}, db_migrations.get_duplicates([item, item])) def test_get_duplicates_finds_all_duplicates(self): dup1 = factory.make_name('dup1') dup2 = factory.make_name('dup2') self.assertEqual( {dup1, dup2}, db_migrations.get_duplicates(2 * [dup1, dup2])) def test_get_duplicates_ignores_unique_items(self): self.assertEqual(set(), db_migrations.get_duplicates(range(5))) def test_get_duplicates_ignores_ordering(self): dup = factory.make_name('dup') unique = factory.make_name('unique') self.assertEqual( {dup}, db_migrations.get_duplicates([dup, unique, dup])) def test_list_migrations_lists_real_migrations(self): self.assertIn( '0001_initial', db_migrations.list_migrations('maasserver')) def test_detect_sequence_clashes_returns_list(self): self.assertIsInstance( db_migrations.detect_sequence_clashes('maasserver'), list) def test_detect_sequence_clashes_finds_clashes(self): number = randint(0, 999) names = tuple(make_migration_name(number) for counter in range(2)) self.patch(db_migrations, 'list_migrations').return_value = names self.assertItemsEqual( [(number, name) for name in names], db_migrations.detect_sequence_clashes(factory.make_name('app'))) def test_detect_sequence_clashes_ignores_unique_migrations(self): self.patch(db_migrations, 'list_migrations').return_value = tuple( make_migration_name(number) for number in range(5)) self.assertItemsEqual( [], db_migrations.detect_sequence_clashes(factory.make_name('app'))) maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/test_factory.py0000644000000000000000000000645013056115004023516 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test the factory where appropriate. 
Don't overdo this.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.models import NodeGroup from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestFactory(MAASServerTestCase): def test_pick_enum_returns_enum_value(self): random_value = random.randint(0, 99999) class Enum: VALUE = random_value OTHER_VALUE = random_value + 3 self.assertIn(factory.pick_enum(Enum), [Enum.VALUE, Enum.OTHER_VALUE]) def test_pick_enum_can_exclude_choices(self): random_value = random.randint(0, 99999) class Enum: FIRST_VALUE = random_value SECOND_VALUE = random_value + 1 THIRD_VALUE = random_value + 2 self.assertEqual( Enum.FIRST_VALUE, factory.pick_enum( Enum, but_not=(Enum.SECOND_VALUE, Enum.THIRD_VALUE))) def test_pick_choice_chooses_from_django_options(self): options = [(2, 'b'), (10, 'j')] self.assertIn( factory.pick_choice(options), [option[0] for option in options]) def test_pick_choice_can_exclude_choices(self): options = [(2, 'b'), (10, 'j')] but_not = [2] self.assertEqual( 10, factory.pick_choice(options, but_not=but_not)) def test_make_Node_creates_nodegroup_if_none_given(self): existing_nodegroup_ids = set( nodegroup.id for nodegroup in NodeGroup.objects.all()) new_node = factory.make_Node() self.assertIsNotNone(new_node.nodegroup) self.assertNotIn(new_node.nodegroup.id, existing_nodegroup_ids) def test_make_Node_uses_given_nodegroup(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, factory.make_Node(nodegroup=nodegroup).nodegroup) def test_make_Zone_returns_physical_zone(self): self.assertIsNotNone(factory.make_Zone()) def test_make_Zone_assigns_name(self): name = factory.make_Zone().name self.assertIsNotNone(name) self.assertNotEqual(0, len(name)) def test_make_Zone_returns_unique_zone(self): self.assertNotEqual(factory.make_Zone(), factory.make_Zone()) def test_make_Zone_adds_nodes(self): node = factory.make_Node() zone = factory.make_Zone(nodes=[node]) node = reload_object(node) self.assertEqual(zone, node.zone) def test_make_Zone_does_not_add_other_nodes(self): previous_zone = factory.make_Zone() node = factory.make_Node(zone=previous_zone) factory.make_Zone(nodes=[factory.make_Node()]) node = reload_object(node) self.assertEqual(previous_zone, node.zone) def test_make_Zone_adds_no_nodes_by_default(self): previous_zone = factory.make_Zone() node = factory.make_Node(zone=previous_zone) factory.make_Zone() node = reload_object(node) self.assertEqual(previous_zone, node.zone) maas-1.9.5+bzr4599.orig/src/maasserver/testing/tests/test_module.py0000644000000000000000000001117713056115004023336 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
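# In brief, the behaviour pinned down by the tests below (sketch):
#
#   extract_redirect(HttpResponseRedirect("http://example.com/foo"))
#   # => "/foo" (only the path of the redirect target is returned)
#
#   reload_object(obj)  # => fresh copy from the database, or None if deleted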
"""Tests for `maasserver.testing`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from django.db.models.signals import ( post_save, pre_save, ) from django.http import ( HttpResponse, HttpResponseRedirect, ) from maasserver.testing import ( extract_redirect, NoReceivers, ) from maasserver.testing.factory import factory from maasserver.testing.orm import ( reload_object, reload_objects, ) from maasserver.testing.testcase import MAASServerTestCase from maasserver.testing.tests.models import TestModel from maastesting.djangotestcase import TestModelMixin # Horrible kludge. Works around a bug where delete() does not work on # test models when using nose. Without this, running the tests in this # module fails at the delete() calls, saying a table node_c does not # exist. (Running just the test case passes, but running the entire # module's tests fails even if the failing test case is the only one). # # https://github.com/jbalogh/django-nose/issues/15 TestModel._meta.get_all_related_objects() class TestHelpers(TestModelMixin, MAASServerTestCase): """Test helper functions.""" app = 'maasserver.testing.tests' def test_extract_redirect_extracts_redirect_location(self): url = factory.make_string() self.assertEqual( url, extract_redirect(HttpResponseRedirect(url))) def test_extract_redirect_only_returns_target_path(self): url_path = factory.make_string() self.assertEqual( "/%s" % url_path, extract_redirect( HttpResponseRedirect("http://example.com/%s" % url_path))) def test_extract_redirect_errors_out_helpfully_if_not_a_redirect(self): content = factory.make_string(10) other_response = HttpResponse(status=httplib.OK, content=content) try: extract_redirect(other_response) except ValueError as e: pass self.assertIn(unicode(httplib.OK), unicode(e)) self.assertIn(content, unicode(e)) def test_reload_object_reloads_object(self): test_obj = TestModel(text="old text") test_obj.save() TestModel.objects.filter(id=test_obj.id).update(text="new text") self.assertEqual("new text", reload_object(test_obj).text) def test_reload_object_returns_None_for_deleted_object(self): test_obj = TestModel() test_obj.save() TestModel.objects.filter(id=test_obj.id).delete() self.assertIsNone(reload_object(test_obj)) def test_reload_objects_reloads_objects(self): texts = ['1 text', '2 text', '3 text'] objs = [TestModel(text=text) for text in texts] for obj in objs: obj.save() texts[0] = "different text" TestModel.objects.filter(id=objs[0].id).update(text=texts[0]) self.assertItemsEqual( texts, [obj.text for obj in reload_objects(TestModel, objs)]) def test_reload_objects_omits_deleted_objects(self): objs = [TestModel() for counter in range(3)] for obj in objs: obj.save() dead_obj = objs.pop(0) TestModel.objects.filter(id=dead_obj.id).delete() self.assertItemsEqual(objs, reload_objects(TestModel, objs)) class TestNoReceivers(MAASServerTestCase): def test_clears_and_restores_signal(self): # post_save already has some receivers on it, but make sure. 
self.assertNotEqual(0, len(post_save.receivers)) old_values = list(post_save.receivers) with NoReceivers(post_save): self.assertEqual([], post_save.receivers) self.assertItemsEqual(old_values, post_save.receivers) def test_clears_and_restores_many_signals(self): self.assertNotEqual(0, len(post_save.receivers)) self.assertNotEqual(0, len(pre_save.receivers)) old_pre_values = pre_save.receivers old_post_values = post_save.receivers with NoReceivers((post_save, pre_save)): self.assertEqual([], post_save.receivers) self.assertEqual([], pre_save.receivers) self.assertItemsEqual(old_pre_values, pre_save.receivers) self.assertItemsEqual(old_post_values, post_save.receivers) def test_leaves_some_other_signals_alone(self): self.assertNotEqual(0, len(post_save.receivers)) old_pre_values = pre_save.receivers with NoReceivers(post_save): self.assertItemsEqual(old_pre_values, pre_save.receivers) maas-1.9.5+bzr4599.orig/src/maasserver/tests/__init__.py0000644000000000000000000000047513056115004021073 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/0000755000000000000000000000000013056115004017665 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/tests/models.py0000644000000000000000000000450513056115004020615 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test related classes and functions for maas and its applications.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'BulkManagerParentTestModel', 'BulkManagerTestModel', 'CIDRTestModel', 'FieldChangeTestModel', 'GenericTestModel', 'IPv4CIDRTestModel', 'JSONFieldModel', 'LargeObjectFieldModel', 'MAASIPAddressFieldModel', 'MessagesTestModel', 'TimestampedModelTestModel', 'XMLFieldModel', ] from django.db.models import ( CharField, ForeignKey, Model, ) from maasserver.fields import ( CIDRField, IPv4CIDRField, JSONObjectField, LargeObjectField, MAASIPAddressField, XMLField, ) from maasserver.models.managers import BulkManager from maasserver.models.timestampedmodel import TimestampedModel class GenericTestModel(Model): """A multi-purpose test model with one field, named `field`.""" field = CharField(max_length=20, blank=True) class JSONFieldModel(Model): name = CharField(max_length=255, unique=False) value = JSONObjectField(null=True) class XMLFieldModel(Model): class Meta: db_table = "docs" name = CharField(max_length=255, unique=False) value = XMLField(null=True) class MessagesTestModel(Model): name = CharField(max_length=255, unique=False) class TimestampedModelTestModel(TimestampedModel): # This model inherits from TimestampedModel so it will have a 'created' # field and an 'updated' field. 
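    # Sketch of the inherited behaviour these tests rely on (assuming
    # TimestampedModel populates both fields on save):
    #
    #   obj = TimestampedModelTestModel()
    #   obj.save()
    #   obj.created, obj.updated  # both set automatically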
pass class FieldChangeTestModel(Model): name1 = CharField(max_length=255, unique=False) name2 = CharField(max_length=255, unique=False) class BulkManagerParentTestModel(Model): pass class BulkManagerTestModel(Model): parent = ForeignKey('BulkManagerParentTestModel', editable=False) objects = BulkManager() class MAASIPAddressFieldModel(Model): ip_address = MAASIPAddressField() class LargeObjectFieldModel(Model): name = CharField(max_length=255, unique=False) large_object = LargeObjectField(block_size=10) class CIDRTestModel(Model): cidr = CIDRField() class IPv4CIDRTestModel(Model): cidr = IPv4CIDRField() maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_auth.py0000644000000000000000000003015313056115004021330 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) """Test permissions.""" str = None __metaclass__ = type __all__ = [] from functools import partial import httplib from django.core.urlresolvers import reverse from maasserver.enum import ( INTERFACE_TYPE, NODE_PERMISSION, NODE_STATUS, ) from maasserver.models import ( MAASAuthorizationBackend, Node, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from metadataserver.nodeinituser import get_node_init_user class LoginLogoutTest(MAASServerTestCase): def make_user(self, name='test', password='test'): """Create a user with a password.""" return factory.make_User(username=name, password=password) def test_login(self): name = factory.make_string() password = factory.make_string() user = self.make_user(name, password) response = self.client.post( reverse('login'), {'username': name, 'password': password}) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual(user.id, self.client.session['_auth_user_id']) def test_login_failed(self): response = self.client.post( reverse('login'), { 'username': factory.make_string(), 'password': factory.make_string(), }) self.assertEqual(httplib.OK, response.status_code) self.assertNotIn('_auth_user_id', self.client.session) def test_logout(self): name = factory.make_string() password = factory.make_string() factory.make_User(name, password) self.client.login(username=name, password=password) self.client.post(reverse('logout')) self.assertNotIn('_auth_user_id', self.client.session) def make_unallocated_node(): """Return a node that is not allocated to anyone.""" return factory.make_Node() def make_allocated_node(owner=None): """Create a node, owned by `owner` (or create owner if not given).""" if owner is None: owner = factory.make_User() return factory.make_Node(owner=owner, status=NODE_STATUS.ALLOCATED) class TestMAASAuthorizationBackend(MAASServerTestCase): def test_invalid_check_object(self): backend = MAASAuthorizationBackend() exc = factory.make_exception() self.assertRaises( NotImplementedError, backend.has_perm, factory.make_admin(), NODE_PERMISSION.VIEW, exc) def test_invalid_check_permission(self): backend = MAASAuthorizationBackend() self.assertRaises( NotImplementedError, backend.has_perm, factory.make_admin(), 'not-access', make_unallocated_node()) def test_node_init_user_cannot_access(self): backend = MAASAuthorizationBackend() self.assertFalse(backend.has_perm( get_node_init_user(), NODE_PERMISSION.VIEW, make_unallocated_node())) def test_user_can_view_unowned_node(self): backend = MAASAuthorizationBackend() 
self.assertTrue(backend.has_perm( factory.make_User(), NODE_PERMISSION.VIEW, make_unallocated_node())) def test_user_can_view_nodes_owned_by_others(self): backend = MAASAuthorizationBackend() self.assertTrue(backend.has_perm( factory.make_User(), NODE_PERMISSION.VIEW, make_allocated_node())) def test_owned_status(self): # A non-admin user can access nodes he owns. backend = MAASAuthorizationBackend() node = make_allocated_node() self.assertTrue( backend.has_perm( node.owner, NODE_PERMISSION.VIEW, node)) def test_user_cannot_edit_nodes_owned_by_others(self): backend = MAASAuthorizationBackend() self.assertFalse(backend.has_perm( factory.make_User(), NODE_PERMISSION.EDIT, make_allocated_node())) def test_user_cannot_edit_unowned_node(self): backend = MAASAuthorizationBackend() self.assertFalse(backend.has_perm( factory.make_User(), NODE_PERMISSION.EDIT, make_unallocated_node())) def test_user_can_edit_his_own_nodes(self): backend = MAASAuthorizationBackend() user = factory.make_User() self.assertTrue(backend.has_perm( user, NODE_PERMISSION.EDIT, make_allocated_node(owner=user))) def test_user_has_no_admin_permission_on_node(self): # NODE_PERMISSION.ADMIN permission on nodes is granted to super users # only. backend = MAASAuthorizationBackend() user = factory.make_User() self.assertFalse( backend.has_perm( user, NODE_PERMISSION.ADMIN, factory.make_Node())) def test_user_cannot_view_BlockDevice_when_not_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=factory.make_User()) device = factory.make_BlockDevice(node=node) self.assertFalse(backend.has_perm(user, NODE_PERMISSION.VIEW, device)) def test_user_can_view_BlockDevice_when_no_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node() device = factory.make_BlockDevice(node=node) self.assertTrue(backend.has_perm(user, NODE_PERMISSION.VIEW, device)) def test_user_can_view_BlockDevice_when_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=user) device = factory.make_BlockDevice(node=node) self.assertTrue(backend.has_perm(user, NODE_PERMISSION.VIEW, device)) def test_user_cannot_edit_BlockDevice_when_not_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=factory.make_User()) device = factory.make_BlockDevice(node=node) self.assertFalse(backend.has_perm(user, NODE_PERMISSION.EDIT, device)) def test_user_can_edit_VirtualBlockDevice_when_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=user) device = factory.make_VirtualBlockDevice(node=node) self.assertTrue(backend.has_perm(user, NODE_PERMISSION.EDIT, device)) def test_user_has_no_admin_permission_on_BlockDevice(self): # NODE_PERMISSION.ADMIN permission on block devices is granted to super # user only. 
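        # Taken together, the device tests above pin down this matrix:
        # VIEW is granted on devices of unowned nodes and to the node's
        # owner; EDIT only to the owner; ADMIN only to superusers.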
backend = MAASAuthorizationBackend() user = factory.make_User() self.assertFalse( backend.has_perm( user, NODE_PERMISSION.ADMIN, factory.make_BlockDevice())) def test_user_cannot_view_FilesystemGroup_when_not_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=factory.make_User()) filesystem_group = factory.make_FilesystemGroup(node=node) self.assertFalse( backend.has_perm(user, NODE_PERMISSION.VIEW, filesystem_group)) def test_user_can_view_FilesystemGroup_when_no_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node() filesystem_group = factory.make_FilesystemGroup(node=node) self.assertTrue( backend.has_perm(user, NODE_PERMISSION.VIEW, filesystem_group)) def test_user_can_view_FilesystemGroup_when_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=user) filesystem_group = factory.make_FilesystemGroup(node=node) self.assertTrue( backend.has_perm(user, NODE_PERMISSION.VIEW, filesystem_group)) def test_user_cannot_edit_FilesystemGroup_when_not_node_owner(self): backend = MAASAuthorizationBackend() user = factory.make_User() node = factory.make_Node(owner=factory.make_User()) filesystem_group = factory.make_FilesystemGroup(node=node) self.assertFalse( backend.has_perm(user, NODE_PERMISSION.EDIT, filesystem_group)) def test_user_has_no_admin_permission_on_FilesystemGroup(self): # NODE_PERMISSION.ADMIN permission on filesystem groups is granted to # superusers only. backend = MAASAuthorizationBackend() user = factory.make_User() self.assertFalse( backend.has_perm( user, NODE_PERMISSION.ADMIN, factory.make_FilesystemGroup())) class TestMAASAuthorizationBackendForDeviceInterface(MAASServerTestCase): def test_owner_can_edit_device_interface(self): backend = MAASAuthorizationBackend() user = factory.make_User() parent = factory.make_Node() device = factory.make_Node( owner=user, installable=False, parent=parent) interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=device) self.assertTrue( backend.has_perm( user, NODE_PERMISSION.EDIT, interface)) def test_non_owner_cannot_edit_device_interface(self): backend = MAASAuthorizationBackend() user = factory.make_User() owner = factory.make_User() parent = factory.make_Node() device = factory.make_Node( owner=owner, installable=False, parent=parent) interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=device) self.assertFalse( backend.has_perm( user, NODE_PERMISSION.EDIT, interface)) class TestMAASAuthorizationBackendForNetworking(MAASServerTestCase): scenarios = ( ("fabric", {"factory": factory.make_Fabric}), ("interface", { "factory": partial( factory.make_Interface, INTERFACE_TYPE.PHYSICAL)}), ("subnet", {"factory": factory.make_Subnet}), ("space", {"factory": factory.make_Space}), ) def test_user_can_view(self): backend = MAASAuthorizationBackend() user = factory.make_User() self.assertTrue( backend.has_perm( user, NODE_PERMISSION.VIEW, self.factory())) def test_user_cannot_edit(self): backend = MAASAuthorizationBackend() user = factory.make_User() self.assertFalse( backend.has_perm( user, NODE_PERMISSION.EDIT, self.factory())) def test_user_not_admin(self): backend = MAASAuthorizationBackend() user = factory.make_User() self.assertFalse( backend.has_perm( user, NODE_PERMISSION.ADMIN, self.factory())) def test_admin_can_view(self): backend = MAASAuthorizationBackend() admin = factory.make_admin() self.assertTrue( backend.has_perm( admin,
NODE_PERMISSION.VIEW, self.factory())) def test_admin_can_edit(self): backend = MAASAuthorizationBackend() admin = factory.make_admin() self.assertTrue( backend.has_perm( admin, NODE_PERMISSION.EDIT, self.factory())) def test_admin_is_admin(self): backend = MAASAuthorizationBackend() admin = factory.make_admin() self.assertTrue( backend.has_perm( admin, NODE_PERMISSION.ADMIN, self.factory())) class TestNodeVisibility(MAASServerTestCase): def test_admin_sees_all_nodes(self): nodes = [ make_allocated_node(), make_unallocated_node(), ] self.assertItemsEqual( nodes, Node.objects.get_nodes( factory.make_admin(), NODE_PERMISSION.VIEW)) def test_user_sees_own_nodes_and_unowned_nodes(self): user = factory.make_User() make_allocated_node() own_node = make_allocated_node(owner=user) unowned_node = make_unallocated_node() self.assertItemsEqual( [own_node, unowned_node], Node.objects.get_nodes(own_node.owner, NODE_PERMISSION.VIEW)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_bootresources.py0000644000000000000000000020420513056115004023266 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver.bootresources.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib import json import logging import os from os import environ from random import randint from StringIO import StringIO from subprocess import CalledProcessError from django.core.urlresolvers import reverse from django.db import ( connections, transaction, ) from django.http import StreamingHttpResponse from django.test.client import Client from fixtures import ( FakeLogger, Fixture, ) from maasserver import bootresources from maasserver.bootresources import ( BootResourceStore, download_all_boot_resources, download_boot_resources, get_simplestream_endpoint, SimpleStreamsHandler, ) from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.components import ( get_persistent_error, register_persistent_error, ) from maasserver.enum import ( BOOT_RESOURCE_FILE_TYPE, BOOT_RESOURCE_TYPE, COMPONENT, ) from maasserver.models import ( BootResource, BootResourceFile, BootResourceSet, Config, LargeFile, ) from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.dblocks import lock_held_in_other_thread from maasserver.testing.eventloop import ( RegionEventLoopFixture, RunningEventLoopFixture, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils import absolute_reverse from maasserver.utils.orm import ( get_one, post_commit_hooks, ) from maastesting.djangotestcase import DjangoTransactionTestCase from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from maastesting.twisted import TwistedLoggerFixture from mock import ( ANY, Mock, sentinel, ) from provisioningserver.auth import get_maas_user_gpghome from provisioningserver.import_images.product_mapping import ProductMapping from provisioningserver.rpc.cluster import ( ListBootImages, ListBootImagesV2, ) from provisioningserver.utils.text import normalise_whitespace from provisioningserver.utils.twisted import asynchronous from 
testtools.deferredruntest import extract_result from testtools.matchers import ( Contains, ContainsAll, Equals, HasLength, ) from twisted.application.internet import TimerService from twisted.internet.defer import ( fail, succeed, ) from twisted.protocols.amp import UnhandledCommand def make_boot_resource_file_with_stream(): resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED) rfile = resource.sets.first().files.first() with rfile.largefile.content.open('rb') as stream: content = stream.read() with rfile.largefile.content.open('wb') as stream: stream.truncate() return rfile, StringIO(content), content class TestHelpers(MAASServerTestCase): """Tests for `maasserver.bootresources` helpers.""" def test_get_simplestreams_endpoint(self): endpoint = get_simplestream_endpoint() self.assertEqual( absolute_reverse( 'simplestreams_stream_handler', kwargs={'filename': 'index.json'}), endpoint['url']) self.assertEqual([], endpoint['selections']) class SimplestreamsEnvFixture(Fixture): """Clears the env variables set by the methods that interact with simplestreams.""" def setUp(self): super(SimplestreamsEnvFixture, self).setUp() prior_env = {} for key in ['GNUPGHOME', 'http_proxy', 'https_proxy']: prior_env[key] = os.environ.get(key, '') self.addCleanup(os.environ.update, prior_env) class TestSimpleStreamsHandler(MAASServerTestCase): """Tests for `maasserver.bootresources.SimpleStreamsHandler`.""" def reverse_stream_handler(self, filename): return reverse( 'simplestreams_stream_handler', kwargs={'filename': filename}) def reverse_file_handler( self, os, arch, subarch, series, version, filename): return reverse( 'simplestreams_file_handler', kwargs={ 'os': os, 'arch': arch, 'subarch': subarch, 'series': series, 'version': version, 'filename': filename, }) def get_stream_client(self, filename): return self.client.get(self.reverse_stream_handler(filename)) def get_file_client(self, os, arch, subarch, series, version, filename): return self.client.get( self.reverse_file_handler( os, arch, subarch, series, version, filename)) def get_product_name_for_resource(self, resource): arch, subarch = resource.architecture.split('/') if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED: os = 'custom' series = resource.name else: os, series = resource.name.split('/') return 'maas:boot:%s:%s:%s:%s' % (os, arch, subarch, series) def make_usable_product_boot_resource(self): resource = factory.make_usable_boot_resource() return self.get_product_name_for_resource(resource), resource def test_streams_other_than_allowed_returns_404(self): allowed_paths = [ 'index.json', 'maas:v2:download.json', ] invalid_paths = [ '%s.json' % factory.make_name('path') for _ in range(3) ] for path in allowed_paths: response = self.get_stream_client(path) self.assertEqual(httplib.OK, response.status_code) for path in invalid_paths: response = self.get_stream_client(path) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_streams_product_index_contains_keys(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertThat(output, ContainsAll(['index', 'updated', 'format'])) def test_streams_product_index_format_is_index_1(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertEqual('index:1.0', output['format']) def test_streams_product_index_index_has_maas_v2_download(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertThat(output['index'], 
ContainsAll(['maas:v2:download'])) def test_streams_product_index_maas_v2_download_contains_keys(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertThat( output['index']['maas:v2:download'], ContainsAll([ 'datatype', 'path', 'updated', 'products', 'format'])) def test_streams_product_index_maas_v2_download_has_valid_values(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertEqual( 'image-downloads', output['index']['maas:v2:download']['datatype']) self.assertEqual( 'streams/v1/maas:v2:download.json', output['index']['maas:v2:download']['path']) self.assertEqual( 'products:1.0', output['index']['maas:v2:download']['format']) def test_streams_product_index_empty_products(self): response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertEqual( [], output['index']['maas:v2:download']['products']) def test_streams_product_index_empty_with_incomplete_resource(self): resource = factory.make_BootResource() factory.make_BootResourceSet(resource) response = self.get_stream_client('index.json') output = json.loads(response.content) self.assertEqual( [], output['index']['maas:v2:download']['products']) def test_streams_product_index_with_resources(self): products = [] for _ in range(3): product, _ = self.make_usable_product_boot_resource() products.append(product) response = self.get_stream_client('index.json') output = json.loads(response.content) # Product listing should be the same as all of the completed # boot resources in the database. self.assertItemsEqual( products, output['index']['maas:v2:download']['products']) def test_streams_product_download_contains_keys(self): response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertThat(output, ContainsAll([ 'datatype', 'updated', 'content_id', 'products', 'format'])) def test_streams_product_download_has_valid_values(self): response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertEqual('image-downloads', output['datatype']) self.assertEqual('maas:v2:download', output['content_id']) self.assertEqual('products:1.0', output['format']) def test_streams_product_download_empty_products(self): response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertEqual( {}, output['products']) def test_streams_product_download_empty_with_incomplete_resource(self): resource = factory.make_BootResource() factory.make_BootResourceSet(resource) response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertEqual( {}, output['products']) def test_streams_product_download_has_valid_product_keys(self): products = [] for _ in range(3): product, _ = self.make_usable_product_boot_resource() products.append(product) response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) # Product listing should be the same as all of the completed # boot resources in the database. 
self.assertThat( output['products'], ContainsAll(products)) def test_streams_product_download_product_contains_keys(self): product, _ = self.make_usable_product_boot_resource() response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertThat( output['products'][product], ContainsAll([ 'versions', 'subarch', 'label', 'version', 'arch', 'release', 'krel', 'os'])) def test_streams_product_download_product_has_valid_values(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') label = resource.get_latest_complete_set().label response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) output_product = output['products'][product] self.assertEqual(subarch, output_product['subarch']) self.assertEqual(label, output_product['label']) self.assertEqual(series, output_product['version']) self.assertEqual(arch, output_product['arch']) self.assertEqual(series, output_product['release']) self.assertEqual(series, output_product['krel']) self.assertEqual(os, output_product['os']) for key, value in resource.extra.items(): self.assertEqual(value, output_product[key]) def test_streams_product_download_product_uses_latest_complete_label(self): product, resource = self.make_usable_product_boot_resource() # Incomplete resource_set factory.make_BootResourceSet(resource) newest_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(newest_set) response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) output_product = output['products'][product] self.assertEqual(newest_set.label, output_product['label']) def test_streams_product_download_product_contains_multiple_versions(self): resource = factory.make_BootResource() resource_sets = [ factory.make_BootResourceSet(resource) for _ in range(3) ] versions = [] for resource_set in resource_sets: factory.make_boot_resource_file_with_content(resource_set) versions.append(resource_set.version) product = self.get_product_name_for_resource(resource) response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) self.assertThat( output['products'][product]['versions'], ContainsAll(versions)) def test_streams_product_download_product_version_contains_items(self): product, resource = self.make_usable_product_boot_resource() resource_set = resource.get_latest_complete_set() items = [ rfile.filename for rfile in resource_set.files.all() ] response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) version = output['products'][product]['versions'][resource_set.version] self.assertThat( version['items'], ContainsAll(items)) def test_streams_product_download_product_item_contains_keys(self): product, resource = self.make_usable_product_boot_resource() resource_set = resource.get_latest_complete_set() resource_file = resource_set.files.order_by('?')[0] response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) version = output['products'][product]['versions'][resource_set.version] self.assertThat( version['items'][resource_file.filename], ContainsAll(['path', 'ftype', 'sha256', 'size'])) def test_streams_product_download_product_item_has_valid_values(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') resource_set = resource.get_latest_complete_set() resource_file = 
resource_set.files.order_by('?')[0] path = '%s/%s/%s/%s/%s/%s' % ( os, arch, subarch, series, resource_set.version, resource_file.filename) response = self.get_stream_client('maas:v2:download.json') output = json.loads(response.content) version = output['products'][product]['versions'][resource_set.version] item = version['items'][resource_file.filename] self.assertEqual(path, item['path']) self.assertEqual(resource_file.filetype, item['ftype']) self.assertEqual(resource_file.largefile.sha256, item['sha256']) self.assertEqual(resource_file.largefile.total_size, item['size']) for key, value in resource_file.extra.items(): self.assertEqual(value, item[key]) def test_download_invalid_boot_resource_returns_404(self): os = factory.make_name('os') series = factory.make_name('series') arch = factory.make_name('arch') subarch = factory.make_name('subarch') version = factory.make_name('version') filename = factory.make_name('filename') response = self.get_file_client( os, arch, subarch, series, version, filename) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_download_invalid_version_returns_404(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') version = factory.make_name('version') filename = factory.make_name('filename') response = self.get_file_client( os, arch, subarch, series, version, filename) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_download_invalid_filename_returns_404(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') resource_set = resource.get_latest_complete_set() version = resource_set.version filename = factory.make_name('filename') response = self.get_file_client( os, arch, subarch, series, version, filename) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_download_valid_path_returns_200(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') resource_set = resource.get_latest_complete_set() version = resource_set.version resource_file = resource_set.files.order_by('?')[0] filename = resource_file.filename response = self.get_file_client( os, arch, subarch, series, version, filename) self.assertEqual(httplib.OK, response.status_code) def test_download_returns_streaming_response(self): product, resource = self.make_usable_product_boot_resource() _, _, os, arch, subarch, series = product.split(':') resource_set = resource.get_latest_complete_set() version = resource_set.version resource_file = resource_set.files.order_by('?')[0] filename = resource_file.filename response = self.get_file_client( os, arch, subarch, series, version, filename) self.assertIsInstance(response, StreamingHttpResponse) class TestConnectionWrapper(DjangoTransactionTestCase): """Tests the use of StreamingHttpResponse(ConnectionWrapper(stream)). We do not run this inside of `MAASServerTestCase` as that wraps a transaction around each test. Since a new connection is created to return the actual content, the transaction to create the data needs to be committed. """ def make_file_for_client(self): # Set up the database information inside of a transaction. This is # done so the information is committed, as the new connection needs # to be able to access the data.
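        # (The visibility rule relied on here: rows written inside
        # `with transaction.atomic():` are committed when the block exits,
        # and only committed rows can be seen by the separate connection
        # that ConnectionWrapper opens to stream the file content back.)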
with transaction.atomic(): os = factory.make_name('os') series = factory.make_name('series') arch = factory.make_name('arch') subarch = factory.make_name('subarch') name = '%s/%s' % (os, series) architecture = '%s/%s' % (arch, subarch) version = factory.make_name('version') filetype = factory.pick_enum(BOOT_RESOURCE_FILE_TYPE) # We set the filename to the same value as filetype, as in most # cases this will always be true. The simplestreams content from # maas.ubuntu.com is formatted this way. filename = filetype size = randint(1024, 2048) content = factory.make_string(size=size) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=version) largefile = factory.make_LargeFile(content=content, size=size) factory.make_BootResourceFile( resource_set, largefile, filename=filename, filetype=filetype) return content, reverse( 'simplestreams_file_handler', kwargs={ 'os': os, 'arch': arch, 'subarch': subarch, 'series': series, 'version': version, 'filename': filename, }) def read_response(self, response): """Read the streaming_content from the response. :rtype: bytes """ return b''.join(response.streaming_content) def test_download_calls__get_new_connection(self): content, url = self.make_file_for_client() mock_get_new_connection = self.patch( bootresources.ConnectionWrapper, '_get_new_connection') client = Client() response = client.get(url) self.read_response(response) self.assertThat(mock_get_new_connection, MockCalledOnceWith()) def test_download_connection_is_not_same_as_django_connections(self): content, url = self.make_file_for_client() class AssertConnectionWrapper(bootresources.ConnectionWrapper): def _set_up(self): super(AssertConnectionWrapper, self)._set_up() # Capture the created connection AssertConnectionWrapper.connection = self._connection def close(self): # Close the stream, but we don't want to close the # connection as the test is testing that the connection is # not the same as the connection Django is using for other # web requests. if self._stream is not None: self._stream.close() self._stream = None self._connection = None self.patch( bootresources, 'ConnectionWrapper', AssertConnectionWrapper) client = Client() response = client.get(url) self.read_response(response) # Add cleanup to close the connection, since this was removed from # the AssertConnectionWrapper.close method. def close(): conn = AssertConnectionWrapper.connection conn.commit() conn.leave_transaction_management() conn.close() self.addCleanup(close) # The connection that is used by the wrapper cannot be the same as the # connection Django is using for all other web requests. Without this # separation the transactional middleware will fail to initialize, # because the connection will already be in a transaction. # # Note: cannot test if DatabaseWrapper != DatabaseWrapper, as it will # report true, because the __eq__ operator only checks if the aliases # are the same. This checks that the underlying connection is # different, which is the important part.
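        # Roughly: `connection.connection` is the underlying DB-API
        # connection object, so the check below compares real database
        # connections rather than Django's wrapper objects.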
self.assertNotEqual( connections["default"].connection, AssertConnectionWrapper.connection.connection) def make_product(): """Make a product dictionary just like the one provided by simplestreams.""" subarch = factory.make_name('subarch') subarches = [factory.make_name('subarch') for _ in range(3)] subarches.insert(0, subarch) subarches = ','.join(subarches) product = { 'os': factory.make_name('os'), 'arch': factory.make_name('arch'), 'subarch': subarch, 'release': factory.make_name('release'), 'kflavor': factory.make_name('kflavor'), 'subarches': subarches, 'version_name': factory.make_name('version'), 'label': factory.make_name('label'), 'ftype': factory.pick_enum(BOOT_RESOURCE_FILE_TYPE), 'kpackage': factory.make_name('kpackage'), 'di_version': factory.make_name('di_version'), } name = '%s/%s' % (product['os'], product['release']) architecture = '%s/%s' % (product['arch'], product['subarch']) return name, architecture, product def make_boot_resource_group( rtype=None, name=None, architecture=None, version=None, filename=None, filetype=None): """Make boot resource that contains one set and one file.""" resource = factory.make_BootResource( rtype=rtype, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet(resource, version=version) rfile = factory.make_boot_resource_file_with_content( resource_set, filename=filename, filetype=filetype) return resource, resource_set, rfile def make_boot_resource_group_from_product(product): """Make boot resource that contains one set and one file, using the information from the given product. The product dictionary is also updated to include the sha256 and size for the created largefile. The calling function should use the returned product in place of the passed product. """ name = '%s/%s' % (product['os'], product['release']) architecture = '%s/%s' % (product['arch'], product['subarch']) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=product['version_name']) rfile = factory.make_boot_resource_file_with_content( resource_set, filename=product['ftype'], filetype=product['ftype']) product['sha256'] = rfile.largefile.sha256 product['size'] = rfile.largefile.total_size return product, resource class TestBootResourceStore(MAASServerTestCase): def make_boot_resources(self): resources = [ factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) for _ in range(3) ] resource_names = [] for resource in resources: os, series = resource.name.split('/') arch, subarch = resource.split_arch() name = '%s/%s/%s/%s' % (os, arch, subarch, series) resource_names.append(name) return resources, resource_names def test_init_initializes_variables(self): _, resource_names = self.make_boot_resources() store = BootResourceStore() self.assertItemsEqual(resource_names, store._resources_to_delete) self.assertEqual({}, store._content_to_finalize) def test_prevent_resource_deletion_removes_resource(self): resources, resource_names = self.make_boot_resources() store = BootResourceStore() resource = resources.pop() resource_names.pop() store.prevent_resource_deletion(resource) self.assertItemsEqual(resource_names, store._resources_to_delete) def test_prevent_resource_deletion_doesnt_remove_unknown_resource(self): resources, resource_names = self.make_boot_resources() store = BootResourceStore() resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) store.prevent_resource_deletion(resource)
self.assertItemsEqual(resource_names, store._resources_to_delete) def test_save_content_later_adds_to__content_to_finalize_var(self): _, _, rfile = make_boot_resource_group() store = BootResourceStore() store.save_content_later(rfile, sentinel.reader) self.assertEqual( {rfile.id: sentinel.reader}, store._content_to_finalize) def test_get_or_create_boot_resource_creates_resource(self): name, architecture, product = make_product() store = BootResourceStore() resource = store.get_or_create_boot_resource(product) self.assertEqual(BOOT_RESOURCE_TYPE.SYNCED, resource.rtype) self.assertEqual(name, resource.name) self.assertEqual(architecture, resource.architecture) self.assertEqual(product['kflavor'], resource.extra['kflavor']) self.assertEqual(product['subarches'], resource.extra['subarches']) def test_get_or_create_boot_resource_gets_resource(self): name, architecture, product = make_product() expected = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) store = BootResourceStore() resource = store.get_or_create_boot_resource(product) self.assertEqual(expected, resource) self.assertEqual(product['kflavor'], resource.extra['kflavor']) self.assertEqual(product['subarches'], resource.extra['subarches']) def test_get_or_create_boot_resource_calls_prevent_resource_deletion(self): name, architecture, product = make_product() resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) store = BootResourceStore() mock_prevent = self.patch(store, 'prevent_resource_deletion') store.get_or_create_boot_resource(product) self.assertThat( mock_prevent, MockCalledOnceWith(resource)) def test_get_or_create_boot_resource_converts_generated_into_synced(self): name, architecture, product = make_product() resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.GENERATED, name=name, architecture=architecture) store = BootResourceStore() mock_prevent = self.patch(store, 'prevent_resource_deletion') store.get_or_create_boot_resource(product) self.assertEqual( BOOT_RESOURCE_TYPE.SYNCED, reload_object(resource).rtype) self.assertThat( mock_prevent, MockNotCalled()) def test_get_or_create_boot_resource_set_creates_resource_set(self): name, architecture, product = make_product() product, resource = make_boot_resource_group_from_product(product) with post_commit_hooks: resource.sets.all().delete() store = BootResourceStore() resource_set = store.get_or_create_boot_resource_set(resource, product) self.assertEqual(product['version_name'], resource_set.version) self.assertEqual(product['label'], resource_set.label) def test_get_or_create_boot_resource_set_gets_resource_set(self): name, architecture, product = make_product() product, resource = make_boot_resource_group_from_product(product) expected = resource.sets.first() store = BootResourceStore() resource_set = store.get_or_create_boot_resource_set(resource, product) self.assertEqual(expected, resource_set) self.assertEqual(product['label'], resource_set.label) def test_get_or_create_boot_resource_file_creates_resource_file(self): name, architecture, product = make_product() product, resource = make_boot_resource_group_from_product(product) resource_set = resource.sets.first() with post_commit_hooks: resource_set.files.all().delete() store = BootResourceStore() rfile = store.get_or_create_boot_resource_file(resource_set, product) self.assertEqual(product['ftype'], rfile.filename) self.assertEqual(product['ftype'], rfile.filetype) self.assertEqual(product['kpackage'], 
rfile.extra['kpackage']) self.assertEqual(product['di_version'], rfile.extra['di_version']) def test_get_or_create_boot_resource_file_gets_resource_file(self): name, architecture, product = make_product() product, resource = make_boot_resource_group_from_product(product) resource_set = resource.sets.first() expected = resource_set.files.first() store = BootResourceStore() rfile = store.get_or_create_boot_resource_file(resource_set, product) self.assertEqual(expected, rfile) self.assertEqual(product['ftype'], rfile.filetype) self.assertEqual(product['kpackage'], rfile.extra['kpackage']) self.assertEqual(product['di_version'], rfile.extra['di_version']) def test_get_resource_file_log_identifier_returns_valid_ident(self): os = factory.make_name('os') series = factory.make_name('series') arch = factory.make_name('arch') subarch = factory.make_name('subarch') version = factory.make_name('version') filename = factory.make_name('filename') name = '%s/%s' % (os, series) architecture = '%s/%s' % (arch, subarch) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=version) rfile = factory.make_boot_resource_file_with_content( resource_set, filename=filename) store = BootResourceStore() self.assertEqual( '%s/%s/%s/%s/%s/%s' % ( os, arch, subarch, series, version, filename), store.get_resource_file_log_identifier(rfile)) self.assertEqual( '%s/%s/%s/%s/%s/%s' % ( os, arch, subarch, series, version, filename), store.get_resource_file_log_identifier( rfile, resource_set, resource)) def test_write_content_saves_data(self): rfile, reader, content = make_boot_resource_file_with_stream() store = BootResourceStore() store.write_content(rfile, reader) self.assertTrue(BootResourceFile.objects.filter(id=rfile.id).exists()) with rfile.largefile.content.open('rb') as stream: written_data = stream.read() self.assertEqual(content, written_data) def test_write_content_deletes_file_on_bad_checksum(self): rfile, _, _ = make_boot_resource_file_with_stream() reader = StringIO(factory.make_string()) store = BootResourceStore() with post_commit_hooks: store.write_content(rfile, reader) self.assertFalse(BootResourceFile.objects.filter(id=rfile.id).exists()) def test_finalize_does_nothing_if_resources_to_delete_hasnt_changed(self): factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) store = BootResourceStore() mock_resource_cleaner = self.patch(store, 'resource_cleaner') mock_perform_write = self.patch(store, 'perform_write') mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') store.finalize() self.expectThat(mock_resource_cleaner, MockNotCalled()) self.expectThat(mock_perform_write, MockNotCalled()) self.expectThat(mock_resource_set_cleaner, MockNotCalled()) def test_finalize_calls_methods_if_new_resources_need_to_be_saved(self): factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) store = BootResourceStore() store._content_to_finalize = [sentinel.content] mock_resource_cleaner = self.patch(store, 'resource_cleaner') mock_perform_write = self.patch(store, 'perform_write') mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') store.finalize() self.expectThat(mock_resource_cleaner, MockCalledOnceWith()) self.expectThat(mock_perform_write, MockCalledOnceWith()) self.expectThat(mock_resource_set_cleaner, MockCalledOnceWith()) def test_finalize_calls_methods_if_resources_to_delete_has_changed(self): factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) store = 
BootResourceStore() store._resources_to_delete = set() mock_resource_cleaner = self.patch(store, 'resource_cleaner') mock_perform_write = self.patch(store, 'perform_write') mock_resource_set_cleaner = self.patch(store, 'resource_set_cleaner') store.finalize() self.expectThat(mock_resource_cleaner, MockCalledOnceWith()) self.expectThat(mock_perform_write, MockCalledOnceWith()) self.expectThat(mock_resource_set_cleaner, MockCalledOnceWith()) class TestBootResourceTransactional(DjangoTransactionTestCase): """Test methods on `BootResourceStore` that manage their own transactions. This is done using `DjangoTransactionTestCase` so the database is flushed after each test run. """ def test_insert_does_nothing_if_file_already_exists(self): name, architecture, product = make_product() with transaction.atomic(): product, resource = make_boot_resource_group_from_product(product) rfile = resource.sets.first().files.first() largefile = rfile.largefile store = BootResourceStore() mock_save_later = self.patch(store, 'save_content_later') store.insert(product, sentinel.reader) self.assertEqual(largefile, reload_object(rfile).largefile) self.assertThat(mock_save_later, MockNotCalled()) def test_insert_uses_already_existing_largefile(self): name, architecture, product = make_product() with transaction.atomic(): product, resource = make_boot_resource_group_from_product(product) resource_set = resource.sets.first() with post_commit_hooks: resource_set.files.all().delete() largefile = factory.make_LargeFile() product['sha256'] = largefile.sha256 product['size'] = largefile.total_size store = BootResourceStore() mock_save_later = self.patch(store, 'save_content_later') store.insert(product, sentinel.reader) self.assertEqual( largefile, get_one(reload_object(resource_set).files.all()).largefile) self.assertThat(mock_save_later, MockNotCalled()) def test_insert_deletes_mismatch_largefile(self): name, architecture, product = make_product() with transaction.atomic(): product, resource = make_boot_resource_group_from_product(product) rfile = resource.sets.first().files.first() delete_largefile = rfile.largefile largefile = factory.make_LargeFile() product['sha256'] = largefile.sha256 product['size'] = largefile.total_size store = BootResourceStore() mock_save_later = self.patch(store, 'save_content_later') store.insert(product, sentinel.reader) self.assertFalse( LargeFile.objects.filter(id=delete_largefile.id).exists()) self.assertEqual(largefile, reload_object(rfile).largefile) self.assertThat(mock_save_later, MockNotCalled()) def test_insert_prints_warning_if_mismatch_largefile(self): name, architecture, product = make_product() with transaction.atomic(): product, resource = make_boot_resource_group_from_product(product) largefile = factory.make_LargeFile() product['sha256'] = largefile.sha256 product['size'] = largefile.total_size store = BootResourceStore() with FakeLogger("maas", logging.WARNING) as logger: store.insert(product, sentinel.reader) self.assertDocTestMatches( "Hash mismatch for prev_file=...", logger.output) def test_insert_deletes_mismatch_largefile_keeps_other_resource_file(self): name, architecture, product = make_product() with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=product['version_name']) other_type = factory.pick_enum( BOOT_RESOURCE_FILE_TYPE, but_not=product['ftype']) other_file = factory.make_boot_resource_file_with_content( resource_set, 
filename=other_type, filetype=other_type) rfile = factory.make_BootResourceFile( resource_set, other_file.largefile, filename=product['ftype'], filetype=product['ftype']) largefile = factory.make_LargeFile() product['sha256'] = largefile.sha256 product['size'] = largefile.total_size store = BootResourceStore() mock_save_later = self.patch(store, 'save_content_later') store.insert(product, sentinel.reader) self.assertEqual(largefile, reload_object(rfile).largefile) self.assertTrue( LargeFile.objects.filter(id=other_file.largefile.id).exists()) self.assertTrue( BootResourceFile.objects.filter(id=other_file.id).exists()) self.assertEqual( other_file.largefile, reload_object(other_file).largefile) self.assertThat(mock_save_later, MockNotCalled()) def test_insert_doesnt_create_largefile_for_unknown_ftype(self): name, architecture, product = make_product() product['ftype'] = factory.make_name('ftype') with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=product['version_name']) product['sha256'] = factory.make_string(size=64) product['size'] = randint(1024, 2048) store = BootResourceStore() store.insert(product, sentinel.reader) self.assertThat(reload_object(resource_set).files.all(), HasLength(0)) def test_insert_creates_new_largefile(self): name, architecture, product = make_product() with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet( resource, version=product['version_name']) product['sha256'] = factory.make_string(size=64) product['size'] = randint(1024, 2048) store = BootResourceStore() mock_save_later = self.patch(store, 'save_content_later') store.insert(product, sentinel.reader) rfile = get_one(reload_object(resource_set).files.all()) self.assertEqual(product['sha256'], rfile.largefile.sha256) self.assertEqual(product['size'], rfile.largefile.total_size) self.assertThat( mock_save_later, MockCalledOnceWith(rfile, sentinel.reader)) def test_insert_prints_error_when_breaking_resources(self): # Test case for bug 1419041: if the call to insert() makes # an existing complete resource incomplete, an error must be # printed to the log. name, architecture, product = make_product() with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) release_name = resource.name.split('/')[1] resource_set = factory.make_BootResourceSet( resource, version=product['version_name']) factory.make_boot_resource_file_with_content( resource_set, filename=product['ftype'], filetype=product['ftype']) # The resource has a complete set. self.assertIsNotNone(resource.get_latest_complete_set()) # The resource is referenced in the simplestreams endpoint. simplestreams_content = ( SimpleStreamsHandler().get_product_index().content) self.assertThat(simplestreams_content, Contains(release_name)) product['sha256'] = factory.make_string(size=64) product['size'] = randint(1024, 2048) store = BootResourceStore() with FakeLogger("maas", logging.ERROR) as logger: store.insert(product, sentinel.reader) self.assertDocTestMatches( "Resource %s has no complete resource set!"
% resource, logger.output) def test_insert_doesnt_print_error_when_first_import(self): name, architecture, product = make_product() with transaction.atomic(): factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) product['sha256'] = factory.make_string(size=64) product['size'] = randint(1024, 2048) store = BootResourceStore() with FakeLogger("maas", logging.ERROR) as logger: store.insert(product, sentinel.reader) self.assertEquals('', logger.output) def test_resource_cleaner_removes_old_boot_resources(self): with transaction.atomic(): resources = [ factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) for _ in range(3) ] store = BootResourceStore() store.resource_cleaner() for resource in resources: os, series = resource.name.split('/') arch, subarch = resource.split_arch() self.assertFalse( BootResource.objects.has_synced_resource( os, arch, subarch, series)) def test_resource_set_cleaner_removes_incomplete_set(self): with transaction.atomic(): resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED) incomplete_set = factory.make_BootResourceSet(resource) store = BootResourceStore() store.resource_set_cleaner() self.assertFalse( BootResourceSet.objects.filter(id=incomplete_set.id).exists()) def test_resource_set_cleaner_keeps_only_newest_completed_set(self): with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED) old_complete_sets = [] for _ in range(3): resource_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(resource_set) old_complete_sets.append(resource_set) newest_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(newest_set) store = BootResourceStore() store.resource_set_cleaner() self.assertItemsEqual([newest_set], resource.sets.all()) for resource_set in old_complete_sets: self.assertFalse( BootResourceSet.objects.filter(id=resource_set.id).exists()) def test_resource_set_cleaner_removes_resources_with_empty_sets(self): with transaction.atomic(): resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED) store = BootResourceStore() store.resource_set_cleaner() self.assertFalse( BootResource.objects.filter(id=resource.id).exists()) def test_perform_write_writes_all_content(self): with transaction.atomic(): files = [make_boot_resource_file_with_stream() for _ in range(3)] store = BootResourceStore() for rfile, reader, content in files: store.save_content_later(rfile, reader) store.perform_write() with transaction.atomic(): for rfile, reader, content in files: self.assertTrue( BootResourceFile.objects.filter(id=rfile.id).exists()) with rfile.largefile.content.open('rb') as stream: written_data = stream.read() self.assertEqual(content, written_data) class TestImportImages(MAASTransactionServerTestCase): def setUp(self): super(TestImportImages, self).setUp() self.useFixture(SimplestreamsEnvFixture()) # We're not testing cache_boot_sources() here, so patch it out to # avoid inadvertently calling it and wondering why the test blocks.
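# (As a rough note on the pattern used here: patch_autospec swaps the # real function for a mock that enforces the original call signature, # so any call the code under test makes is recorded rather than # executed, and can be asserted on afterwards.)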
self.patch_autospec(bootresources, 'cache_boot_sources') def patch_and_capture_env_for_download_all_boot_resources(self): class CaptureEnv: """Fake function; records a copy of the environment.""" def __call__(self, *args, **kwargs): self.args = args self.env = environ.copy() capture = self.patch( bootresources, 'download_all_boot_resources', CaptureEnv()) return capture def test_download_boot_resources_syncs_repo(self): fake_sync = self.patch(bootresources.BootResourceRepoWriter, 'sync') store = BootResourceStore() source_url = factory.make_url() download_boot_resources( source_url, store, None, None) self.assertEqual(1, len(fake_sync.mock_calls)) def test_download_all_boot_resources_calls_download_boot_resources(self): source = { 'url': factory.make_url(), 'keyring': self.make_file("keyring"), } product_mapping = ProductMapping() store = BootResourceStore() fake_download = self.patch(bootresources, 'download_boot_resources') download_all_boot_resources( sources=[source], product_mapping=product_mapping, store=store) self.assertThat( fake_download, MockCalledOnceWith( source['url'], store, product_mapping, keyring_file=source['keyring'])) def test_download_all_boot_resources_calls_finalize_on_store(self): product_mapping = ProductMapping() store = BootResourceStore() fake_finalize = self.patch(store, 'finalize') download_all_boot_resources( sources=[], product_mapping=product_mapping, store=store) self.assertThat( fake_finalize, MockCalledOnceWith()) def test_has_synced_resources_returns_true(self): factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.SYNCED) self.assertTrue(bootresources.has_synced_resources()) def test_has_synced_resources_returns_false(self): factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) self.assertFalse(bootresources.has_synced_resources()) def test__import_resources_exits_early_if_lock_held(self): has_synced_resources = self.patch_autospec( bootresources, "has_synced_resources") with lock_held_in_other_thread(bootresources.locks.import_images): bootresources._import_resources(force=True) # The check for already-synced resources is not made if the # lock is already held. self.assertThat(has_synced_resources, MockNotCalled()) def test__import_resources_exits_early_without_force(self): has_synced_resources = self.patch( bootresources, "has_synced_resources") bootresources._import_resources(force=False) # The check for already-synced resources is performed when we're # not forcing a sync. self.assertThat(has_synced_resources, MockCalledOnceWith()) def test__import_resources_continues_with_force(self): has_synced_resources = self.patch( bootresources, "has_synced_resources") bootresources._import_resources(force=True) # The check for already-synced resources is skipped when we're # forcing a sync.
self.assertThat(has_synced_resources, MockNotCalled()) def test__import_resources_holds_lock(self): fake_write_all_keyrings = self.patch( bootresources, 'write_all_keyrings') def test_for_held_lock(directory, sources): self.assertTrue(bootresources.locks.import_images.is_locked()) return [] fake_write_all_keyrings.side_effect = test_for_held_lock bootresources._import_resources(force=True) self.assertFalse(bootresources.locks.import_images.is_locked()) def test__import_resources_calls_functions_with_correct_parameters(self): write_all_keyrings = self.patch( bootresources, 'write_all_keyrings') write_all_keyrings.return_value = [sentinel.source] image_descriptions = self.patch( bootresources, 'download_all_image_descriptions') descriptions = Mock() descriptions.is_empty.return_value = False image_descriptions.return_value = descriptions map_products = self.patch( bootresources, 'map_products') map_products.return_value = sentinel.mapping download_all_boot_resources = self.patch( bootresources, 'download_all_boot_resources') bootresources._import_resources(force=True) self.expectThat( bootresources.cache_boot_sources, MockCalledOnceWith()) self.expectThat( write_all_keyrings, MockCalledOnceWith(ANY, [])) self.expectThat( image_descriptions, MockCalledOnceWith([sentinel.source])) self.expectThat( map_products, MockCalledOnceWith(descriptions)) self.expectThat( download_all_boot_resources, MockCalledOnceWith([sentinel.source], sentinel.mapping)) def test__import_resources_has_env_GNUPGHOME_set(self): fake_image_descriptions = self.patch( bootresources, 'download_all_image_descriptions') descriptions = Mock() descriptions.is_empty.return_value = False fake_image_descriptions.return_value = descriptions self.patch(bootresources, 'map_products') capture = self.patch_and_capture_env_for_download_all_boot_resources() bootresources._import_resources(force=True) self.assertEqual( get_maas_user_gpghome(), capture.env['GNUPGHOME']) def test__import_resources_has_env_http_and_https_proxy_set(self): proxy_address = factory.make_name('proxy') Config.objects.set_config('http_proxy', proxy_address) fake_image_descriptions = self.patch( bootresources, 'download_all_image_descriptions') descriptions = Mock() descriptions.is_empty.return_value = False fake_image_descriptions.return_value = descriptions self.patch(bootresources, 'map_products') capture = self.patch_and_capture_env_for_download_all_boot_resources() bootresources._import_resources(force=True) self.assertEqual( (proxy_address, proxy_address), (capture.env['http_proxy'], capture.env['https_proxy'])) def test__import_resources_schedules_import_to_clusters(self): from maasserver.clusterrpc import boot_images self.patch(boot_images.ClustersImporter, "run") bootresources._import_resources(force=True) self.assertThat( boot_images.ClustersImporter.run, MockCalledOnceWith()) class TestImportResourcesInThread(MAASTestCase): """Tests for `_import_resources_in_thread`.""" def test__defers__import_resources_to_thread(self): deferToDatabase = self.patch(bootresources, "deferToDatabase") bootresources._import_resources_in_thread(force=sentinel.force) self.assertThat( deferToDatabase, MockCalledOnceWith( bootresources._import_resources, force=sentinel.force)) def test__defaults_force_to_False(self): deferToDatabase = self.patch(bootresources, "deferToDatabase") bootresources._import_resources_in_thread() self.assertThat( deferToDatabase, MockCalledOnceWith( bootresources._import_resources, force=False)) def test__logs_errors_and_does_not_errback(self): logger =
self.useFixture(TwistedLoggerFixture()) exception_type = factory.make_exception_type() deferToDatabase = self.patch(bootresources, "deferToDatabase") deferToDatabase.return_value = fail(exception_type()) d = bootresources._import_resources_in_thread(force=sentinel.force) self.assertIsNone(extract_result(d)) self.assertDocTestMatches( """\ Importing boot resources failed. Traceback (most recent call last): ... """, logger.output) def test__logs_subprocess_output_on_error(self): logger = self.useFixture(TwistedLoggerFixture()) exception = CalledProcessError( 2, [factory.make_name("command")], factory.make_name("output")) deferToDatabase = self.patch(bootresources, "deferToDatabase") deferToDatabase.return_value = fail(exception) d = bootresources._import_resources_in_thread(force=sentinel.force) self.assertIsNone(extract_result(d)) self.assertDocTestMatches( """\ Importing boot resources failed. Traceback (most recent call last): Failure: subprocess.CalledProcessError: Command `command-...` returned non-zero exit status 2: output-... """, logger.output) class TestImportResourcesService(MAASTestCase): """Tests for `ImportResourcesService`.""" def test__is_a_TimerService(self): service = bootresources.ImportResourcesService() self.assertIsInstance(service, TimerService) def test__runs_once_an_hour(self): service = bootresources.ImportResourcesService() self.assertEqual(3600, service.step) def test__calls__maybe_import_resources(self): service = bootresources.ImportResourcesService() self.assertEqual( (service.maybe_import_resources, (), {}), service.call) def test_maybe_import_resources_does_not_error(self): service = bootresources.ImportResourcesService() deferToDatabase = self.patch(bootresources, "deferToDatabase") exception_type = factory.make_exception_type() deferToDatabase.return_value = fail(exception_type()) d = service.maybe_import_resources() self.assertIsNone(extract_result(d)) class TestImportResourcesServiceAsync(MAASTransactionServerTestCase): """Tests for the async parts of `ImportResourcesService`.""" def test__imports_resources_in_thread_if_auto(self): self.patch(bootresources, "_import_resources_in_thread") with transaction.atomic(): Config.objects.set_config('boot_images_auto_import', True) service = bootresources.ImportResourcesService() maybe_import_resources = asynchronous(service.maybe_import_resources) maybe_import_resources().wait(5) self.assertThat( bootresources._import_resources_in_thread, MockCalledOnceWith()) def test__does_not_import_resources_in_thread_if_not_auto(self): self.patch(bootresources, "_import_resources_in_thread") with transaction.atomic(): Config.objects.set_config('boot_images_auto_import', False) service = bootresources.ImportResourcesService() maybe_import_resources = asynchronous(service.maybe_import_resources) maybe_import_resources().wait(5) self.assertThat( bootresources._import_resources_in_thread, MockNotCalled()) class TestImportResourcesProgressService(MAASServerTestCase): """Tests for `ImportResourcesProgressService`.""" def test__is_a_TimerService(self): service = bootresources.ImportResourcesProgressService() self.assertIsInstance(service, TimerService) def test__runs_every_three_minutes(self): service = bootresources.ImportResourcesProgressService() self.assertEqual(180, service.step) def test__calls_try_check_boot_images(self): service = bootresources.ImportResourcesProgressService() func, args, kwargs = service.call self.expectThat(func, Equals(service.try_check_boot_images)) self.expectThat(args, HasLength(0)) 
self.expectThat(kwargs, HasLength(0)) class TestImportResourcesProgressServiceAsync(MAASTransactionServerTestCase): """Tests for the async parts of `ImportResourcesProgressService`.""" def set_maas_url(self): maas_url_path = "/path/%s" % factory.make_string() maas_url = factory.make_simple_http_url(path=maas_url_path) self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) return maas_url, maas_url_path def patch_are_functions(self, service, region_answer, cluster_answer): # Patch the are_boot_images_available_* functions. are_region_func = self.patch_autospec( service, "are_boot_images_available_in_the_region") are_region_func.return_value = region_answer are_cluster_func = self.patch_autospec( service, "are_boot_images_available_in_any_cluster") are_cluster_func.return_value = cluster_answer def test__adds_warning_if_boot_images_exists_on_cluster_not_region(self): _, maas_url_path = self.set_maas_url() service = bootresources.ImportResourcesProgressService() self.patch_are_functions(service, False, True) check_boot_images = asynchronous(service.check_boot_images) check_boot_images().wait(5) error_observed = get_persistent_error(COMPONENT.IMPORT_PXE_FILES) error_expected = """\ One or more of your clusters currently has boot images, but your region does not. Nodes will not be able to provision until you import boot images into the region. Visit the boot images page to start the import. """ images_link = maas_url_path + '/images/' self.assertEqual( normalise_whitespace(error_expected % images_link), normalise_whitespace(error_observed)) def test__adds_warning_if_boot_image_import_not_started(self): _, maas_url_path = self.set_maas_url() service = bootresources.ImportResourcesProgressService() self.patch_are_functions(service, False, False) check_boot_images = asynchronous(service.check_boot_images) check_boot_images().wait(5) error_observed = get_persistent_error(COMPONENT.IMPORT_PXE_FILES) error_expected = """\ Boot image import process not started. Nodes will not be able to provision without boot images. Visit the boot images page to start the import. """ images_link = maas_url_path + '/images/' self.assertEqual( normalise_whitespace(error_expected % images_link), normalise_whitespace(error_observed)) def test__removes_warning_if_boot_image_process_started(self): register_persistent_error( COMPONENT.IMPORT_PXE_FILES, "You rotten swine, you! You have deaded me!") service = bootresources.ImportResourcesProgressService() self.patch_are_functions(service, True, False) check_boot_images = asynchronous(service.check_boot_images) check_boot_images().wait(5) error = get_persistent_error(COMPONENT.IMPORT_PXE_FILES) self.assertIsNone(error) def test__logs_all_errors(self): logger = self.useFixture(TwistedLoggerFixture()) exception = factory.make_exception() service = bootresources.ImportResourcesProgressService() check_boot_images = self.patch_autospec(service, "check_boot_images") check_boot_images.return_value = fail(exception) try_check_boot_images = asynchronous(service.try_check_boot_images) try_check_boot_images().wait() self.assertDocTestMatches( """\ Failure checking for boot images. Traceback (most recent call last): ... 
maastesting.factory.TestException#...: """, logger.output) def test__are_boot_images_available_in_the_region(self): service = bootresources.ImportResourcesProgressService() self.assertFalse(service.are_boot_images_available_in_the_region()) factory.make_BootResource() self.assertTrue(service.are_boot_images_available_in_the_region()) def test__are_boot_images_available_in_any_cluster_v2(self): # Import the websocket handlers now: merely defining DeviceHandler, # e.g., causes a database access, which will crash if it happens # inside the reactor thread where database access is forbidden and # prevented. My own opinion is that a class definition should not # cause a database access and we ought to fix that. import maasserver.websockets.handlers # noqa cluster = factory.make_NodeGroup() service = bootresources.ImportResourcesProgressService() self.useFixture(RegionEventLoopFixture("rpc")) self.useFixture(RunningEventLoopFixture()) region_rpc = MockLiveRegionToClusterRPCFixture() self.useFixture(region_rpc) # are_boot_images_available_in_any_cluster() returns False when # there are no clusters connected. self.assertFalse(service.are_boot_images_available_in_any_cluster()) # Connect a cluster to the region via RPC. cluster_rpc = region_rpc.makeCluster(cluster, ListBootImagesV2) # are_boot_images_available_in_any_cluster() returns False when none # of the clusters have any images. cluster_rpc.ListBootImagesV2.return_value = succeed({"images": []}) self.assertFalse(service.are_boot_images_available_in_any_cluster()) # are_boot_images_available_in_any_cluster() returns True when a # cluster has an imported boot image. response = {"images": [make_rpc_boot_image()]} cluster_rpc.ListBootImagesV2.return_value = succeed(response) self.assertTrue(service.are_boot_images_available_in_any_cluster()) def test__are_boot_images_available_in_any_cluster_v1(self): # Import the websocket handlers now: merely defining DeviceHandler, # e.g., causes a database access, which will crash if it happens # inside the reactor thread where database access is forbidden and # prevented. My own opinion is that a class definition should not # cause a database access and we ought to fix that. import maasserver.websockets.handlers # noqa cluster = factory.make_NodeGroup() service = bootresources.ImportResourcesProgressService() self.useFixture(RegionEventLoopFixture("rpc")) self.useFixture(RunningEventLoopFixture()) region_rpc = MockLiveRegionToClusterRPCFixture() self.useFixture(region_rpc) # are_boot_images_available_in_any_cluster() returns False when # there are no clusters connected. self.assertFalse(service.are_boot_images_available_in_any_cluster()) # Connect a cluster to the region via RPC. cluster_rpc = region_rpc.makeCluster( cluster, ListBootImagesV2, ListBootImages) # All calls to ListBootImagesV2 raise UnhandledCommand. cluster_rpc.ListBootImagesV2.side_effect = UnhandledCommand # are_boot_images_available_in_any_cluster() returns False when none # of the clusters have any images. cluster_rpc.ListBootImages.return_value = succeed({"images": []}) self.assertFalse(service.are_boot_images_available_in_any_cluster()) # are_boot_images_available_in_any_cluster() returns True when a # cluster has an imported boot image.
response = {"images": [make_rpc_boot_image()]} cluster_rpc.ListBootImages.return_value = succeed(response) self.assertTrue(service.are_boot_images_available_in_any_cluster()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_bootsources.py0000644000000000000000000002410613056115004022737 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver.bootsources.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from os import environ from maasserver import bootsources from maasserver.bootsources import ( cache_boot_sources, ensure_boot_source_definition, get_boot_sources, get_os_info_from_boot_sources, ) from maasserver.components import ( get_persistent_error, register_persistent_error, ) from maasserver.enum import COMPONENT from maasserver.models import ( BootSource, BootSourceCache, BootSourceSelection, Config, ) from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.testing.factory import factory from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.tests.test_bootresources import SimplestreamsEnvFixture from mock import MagicMock from provisioningserver.import_images import ( download_descriptions as download_descriptions_module, ) from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.helpers import ImageSpec from requests.exceptions import ConnectionError from testtools.matchers import HasLength def patch_and_capture_env_for_download_all_image_descriptions(testcase): class CaptureEnv: """Fake function; records a copy of the environment.""" def __call__(self, *args, **kwargs): self.args = args self.env = environ.copy() return MagicMock() capture = testcase.patch( bootsources, 'download_all_image_descriptions', CaptureEnv()) return capture def make_image_spec( os=None, arch=None, subarch=None, release=None, label=None): if os is None: os = factory.make_name('os') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if release is None: release = factory.make_name('release') if label is None: label = factory.make_name('label') return ImageSpec( os, arch, subarch, release, label, ) def make_boot_image_mapping(image_specs=[]): mapping = BootImageMapping() for image_spec in image_specs: mapping.setdefault(image_spec, {}) return mapping class TestHelpers(MAASServerTestCase): def setUp(self): super(TestHelpers, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def test_ensure_boot_source_definition_creates_default_source(self): BootSource.objects.all().delete() ensure_boot_source_definition() sources = BootSource.objects.all() self.assertThat(sources, HasLength(1)) [source] = sources self.assertAttributes( source, { 'url': 'http://maas.ubuntu.com/images/ephemeral-v2/releases/', 'keyring_filename': ( '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'), }) selections = BootSourceSelection.objects.filter(boot_source=source) by_release = { selection.release: selection for selection in selections } self.assertItemsEqual(['trusty'], by_release.keys()) self.assertAttributes( by_release['trusty'], { 'release': 'trusty', 'arches': ['amd64'], 'subarches': ['*'], 'labels': ['release'], }) def test_ensure_boot_source_definition_skips_if_already_present(self): sources = [ 
factory.make_BootSource() for _ in range(3) ] ensure_boot_source_definition() self.assertItemsEqual(sources, BootSource.objects.all()) def test_get_boot_sources(self): sources = [ factory.make_BootSource( keyring_data="data").to_dict() for _ in range(3) ] self.assertItemsEqual(sources, get_boot_sources()) class TestGetOSInfoFromBootSources(MAASServerTestCase): def setUp(self): super(TestGetOSInfoFromBootSources, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def test__returns_empty_sources_and_sets_when_cache_empty(self): self.assertEqual( ([], set(), set()), get_os_info_from_boot_sources(factory.make_name('os'))) def test__returns_empty_sources_and_sets_when_no_os(self): factory.make_BootSourceCache() self.assertEqual( ([], set(), set()), get_os_info_from_boot_sources(factory.make_name('os'))) def test__returns_sources_and_sets_of_releases_and_architectures(self): os = factory.make_name('os') sources = [ factory.make_BootSource(keyring_data='1234') for _ in range(2)] releases = set() arches = set() for source in sources: for _ in range(3): release = factory.make_name('release') arch = factory.make_name('arch') factory.make_BootSourceCache( source, os=os, release=release, arch=arch) releases.add(release) arches.add(arch) self.assertEqual( (sources, releases, arches), get_os_info_from_boot_sources(os)) class TestPrivateCacheBootSources(MAASTransactionServerTestCase): def setUp(self): super(TestPrivateCacheBootSources, self).setUp() self.useFixture(SimplestreamsEnvFixture()) self.useFixture(UpdateBootSourceCacheDisconnected()) def test__has_env_GNUPGHOME_set(self): capture = ( patch_and_capture_env_for_download_all_image_descriptions(self)) factory.make_BootSource(keyring_data='1234') cache_boot_sources() self.assertEqual( bootsources.get_maas_user_gpghome(), capture.env['GNUPGHOME']) def test__has_env_http_and_https_proxy_set(self): proxy_address = factory.make_name('proxy') Config.objects.set_config('http_proxy', proxy_address) capture = ( patch_and_capture_env_for_download_all_image_descriptions(self)) factory.make_BootSource(keyring_data='1234') cache_boot_sources() self.assertEqual( (proxy_address, proxy_address), (capture.env['http_proxy'], capture.env['https_proxy'])) def test__doesnt_have_env_http_and_https_proxy_set_if_disabled(self): proxy_address = factory.make_name('proxy') Config.objects.set_config('http_proxy', proxy_address) Config.objects.set_config('enable_http_proxy', False) capture = ( patch_and_capture_env_for_download_all_image_descriptions(self)) factory.make_BootSource(keyring_data='1234') cache_boot_sources() self.assertEqual( ("", ""), (capture.env['http_proxy'], capture.env['https_proxy'])) def test__clears_entire_cache(self): source = factory.make_BootSource(keyring_data='1234') factory.make_BootSourceCache(source) mock_download = self.patch( bootsources, 'download_all_image_descriptions') mock_download.return_value = make_boot_image_mapping() cache_boot_sources() self.assertEqual(0, BootSourceCache.objects.all().count()) def test__adds_entries_to_cache_for_source(self): source = factory.make_BootSource(keyring_data='1234') os = factory.make_name('os') releases = [factory.make_name('release') for _ in range(3)] image_specs = [ make_image_spec(os=os, release=release) for release in releases] mock_download = self.patch( bootsources, 'download_all_image_descriptions') mock_download.return_value = make_boot_image_mapping(image_specs) cache_boot_sources() cached_releases = [ cache.release for cache in
BootSourceCache.objects.filter(boot_source=source) if cache.os == os ] self.assertItemsEqual(releases, cached_releases) class TestBadConnectionHandling(MAASTransactionServerTestCase): def setUp(self): super(TestBadConnectionHandling, self).setUp() self.useFixture(SimplestreamsEnvFixture()) self.useFixture(UpdateBootSourceCacheDisconnected()) def test__catches_connection_errors_and_sets_component_error(self): sources = [ factory.make_BootSource(keyring_data='1234') for _ in range(3)] download_image_descriptions = self.patch( download_descriptions_module, 'download_image_descriptions') error_text = factory.make_name("error_text") # Make two of the downloads fail. download_image_descriptions.side_effect = [ ConnectionError(error_text), BootImageMapping(), IOError(error_text), ] cache_boot_sources() base_error = "Failed to import images from boot source {url}: {err}" error_part_one = base_error.format(url=sources[0].url, err=error_text) error_part_two = base_error.format(url=sources[2].url, err=error_text) expected_error = error_part_one + '\n' + error_part_two actual_error = get_persistent_error(COMPONENT.REGION_IMAGE_IMPORT) self.assertEqual(expected_error, actual_error) def test__clears_component_error_when_successful(self): register_persistent_error( COMPONENT.REGION_IMAGE_IMPORT, factory.make_string()) [factory.make_BootSource(keyring_data='1234') for _ in range(3)] download_image_descriptions = self.patch( download_descriptions_module, 'download_image_descriptions') # Make all of the downloads successful. download_image_descriptions.return_value = BootImageMapping() cache_boot_sources() self.assertIsNone(get_persistent_error(COMPONENT.REGION_IMAGE_IMPORT)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_commands.py0000644000000000000000000003047413056115004022176 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test custom commands, as found in src/maasserver/management/commands.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from codecs import getwriter from io import BytesIO import StringIO from apiclient.creds import convert_tuple_to_string import django from django.contrib.auth.models import User from django.core.management import call_command from django.core.management.base import CommandError from maasserver.management.commands import ( changepasswords, createadmin, ) from maasserver.models.user import get_creds_tuple from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.utils.orm import get_one from maastesting.djangotestcase import DjangoTestCase from testtools.matchers import StartsWith def assertCommandErrors(runner, command, *args, **kwargs): """Assert that the given Django command fails. This method returns the error text. """ # This helper deals with the difference in how # call_command() reports failure between Django 1.4 and Django # 1.5. See the 4th bullet point ("Management commands do not raise...") # in # https://docs.djangoproject.com/en/dev/releases/1.5/#minor-features if django.VERSION >= (1, 5): # Django >= 1.5 puts error text in exception. exception = runner.assertRaises( CommandError, call_command, command, *args, **kwargs) return unicode(exception) else: # Django < 1.5 prints error text on stderr.
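# For example, a failure that reports "User 'nobody' does not exist." # surfaces as a CommandError carrying that text on Django >= 1.5, # whereas on Django < 1.5 call_command() raises SystemExit after # writing the same text to stderr, which is why it is captured from a # BytesIO buffer below.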
stderr = BytesIO() kwargs['stderr'] = stderr runner.assertRaises( SystemExit, call_command, command, *args, **kwargs) return stderr.getvalue().strip() class TestCommands(DjangoTestCase): """Happy-path integration testing for custom commands. Detailed testing does not belong here. If there's any complexity at all in a command's code, it should be extracted and unit-tested separately. """ def test_generate_api_doc(self): out = BytesIO() stdout = getwriter("UTF-8")(out) call_command('generate_api_doc', stdout=stdout) result = stdout.getvalue() # Just check that the documentation looks all right. self.assertIn("POST /api/1.0/account/", result) self.assertIn("MAAS API", result) # The documentation starts with a link target: "region-controller-api". self.assertThat(result[:100], StartsWith('.. _region-controller-api:')) # It also contains a ReST title (not indented). self.assertIn('===', result[:100]) def test_createadmin_prompts_for_password_if_not_given(self): stderr = BytesIO() stdout = BytesIO() username = factory.make_name('user') password = factory.make_string() email = factory.make_email_address() self.patch(createadmin, 'prompt_for_password').return_value = password call_command( 'createadmin', username=username, email=email, stdout=stdout, stderr=stderr) user = User.objects.get(username=username) self.assertEquals('', stderr.getvalue().strip()) self.assertEquals('', stdout.getvalue().strip()) self.assertTrue(user.check_password(password)) def test_createadmin_prompts_for_username_if_not_given(self): stderr = BytesIO() stdout = BytesIO() username = factory.make_name('user') password = factory.make_string() email = factory.make_email_address() self.patch(createadmin, 'prompt_for_username').return_value = username call_command( 'createadmin', password=password, email=email, stdout=stdout, stderr=stderr) user = User.objects.get(username=username) self.assertEquals('', stderr.getvalue().strip()) self.assertEquals('', stdout.getvalue().strip()) self.assertTrue(user.check_password(password)) def test_createadmin_prompts_for_email_if_not_given(self): stderr = BytesIO() stdout = BytesIO() username = factory.make_name('user') password = factory.make_string() email = factory.make_email_address() self.patch(createadmin, 'prompt_for_email').return_value = email call_command( 'createadmin', username=username, password=password, stdout=stdout, stderr=stderr) user = User.objects.get(username=username) self.assertEquals('', stderr.getvalue().strip()) self.assertEquals('', stdout.getvalue().strip()) self.assertTrue(user.check_password(password)) def test_createadmin_creates_admin(self): stderr = BytesIO() stdout = BytesIO() username = factory.make_string() password = factory.make_string() email = '%s@example.com' % factory.make_string() call_command( 'createadmin', username=username, password=password, email=email, stderr=stderr, stdout=stdout) user = get_one(User.objects.filter(username=username)) self.assertEquals('', stderr.getvalue().strip()) self.assertEquals('', stdout.getvalue().strip()) self.assertTrue(user.check_password(password)) self.assertTrue(user.is_superuser) self.assertEqual(email, user.email) def test_prompt_for_password_returns_selected_password(self): password = factory.make_string() self.patch(createadmin, 'getpass').return_value = password self.assertEqual(password, createadmin.prompt_for_password()) def test_prompt_for_password_checks_for_consistent_password(self): self.patch(createadmin, 'getpass', lambda x: factory.make_string()) self.assertRaises(
createadmin.InconsistentPassword, createadmin.prompt_for_password) def test_prompt_for_username_returns_selected_username(self): username = factory.make_name('user') self.patch(createadmin, 'raw_input').return_value = username self.assertEqual(username, createadmin.prompt_for_username()) def test_prompt_for_username_checks_for_empty_username(self): self.patch(createadmin, 'raw_input', lambda x: '') self.assertRaises( createadmin.EmptyUsername, createadmin.prompt_for_username) def test_prompt_for_email_returns_selected_email(self): email = factory.make_email_address() self.patch(createadmin, 'raw_input').return_value = email self.assertEqual(email, createadmin.prompt_for_email()) def test_prompt_for_email_checks_for_empty_email(self): self.patch(createadmin, 'raw_input', lambda x: '') self.assertRaises( createadmin.EmptyEmail, createadmin.prompt_for_email) class TestChangePasswords(DjangoTestCase): def test_bad_input(self): stdin = StringIO.StringIO("nobody") self.patch(changepasswords, 'input').return_value = stdin error_text = assertCommandErrors(self, 'changepasswords') self.assertIn( "Invalid input provided. " "Format is 'username:password', one per line.", error_text) def test_nonexistent_user(self): stdin = StringIO.StringIO("nobody:nopass") self.patch(changepasswords, 'input').return_value = stdin error_text = assertCommandErrors(self, 'changepasswords') self.assertIn("User 'nobody' does not exist.", error_text) def test_changes_one_password(self): username = factory.make_username() password = factory.make_string(size=16, spaces=True, prefix="password") user = factory.make_User(username=username, password=password) self.assertTrue(user.check_password(password)) newpass = factory.make_string(size=16, spaces=True, prefix="newpass") stdin = StringIO.StringIO("%s:%s" % (username, newpass)) self.patch(changepasswords, 'input').return_value = stdin call_command('changepasswords') self.assertTrue(reload_object(user).check_password(newpass)) def test_changes_ten_passwords(self): users_passwords = [] stringio = StringIO.StringIO() for _ in range(10): username = factory.make_username() user = factory.make_User(username=username) newpass = factory.make_string(spaces=True, prefix="newpass") users_passwords.append((user, newpass)) stringio.write("%s:%s\n" % (username, newpass)) stringio.seek(0) self.patch(changepasswords, 'input').return_value = stringio call_command('changepasswords') for user, newpass in users_passwords: self.assertTrue(reload_object(user).check_password(newpass)) class TestApikeyCommand(DjangoTestCase): def test_apikey_requires_username(self): error_text = assertCommandErrors(self, 'apikey') self.assertIn( "You must provide a username with --username.", error_text) def test_apikey_gets_keys(self): stderr = BytesIO() out = BytesIO() stdout = getwriter("UTF-8")(out) user = factory.make_User() call_command( 'apikey', username=user.username, stderr=stderr, stdout=stdout) self.assertEqual('', stderr.getvalue().strip()) expected_token = get_one( user.userprofile.get_authorisation_tokens()) expected_string = convert_tuple_to_string( get_creds_tuple(expected_token)) + '\n' self.assertEqual(expected_string, stdout.getvalue()) def test_apikey_generates_key(self): stderr = BytesIO() out = BytesIO() stdout = getwriter("UTF-8")(out) user = factory.make_User() num_keys = len(user.userprofile.get_authorisation_tokens()) call_command( 'apikey', username=user.username, generate=True, stderr=stderr, stdout=stdout) self.assertEqual('', stderr.getvalue().strip()) keys_after = 
user.userprofile.get_authorisation_tokens() expected_num_keys = num_keys + 1 self.assertEqual(expected_num_keys, len(keys_after)) expected_token = user.userprofile.get_authorisation_tokens()[1] expected_string = convert_tuple_to_string( get_creds_tuple(expected_token)) + '\n' self.assertEqual(expected_string, stdout.getvalue()) def test_apikey_deletes_key(self): stderr = BytesIO() stdout = BytesIO() user = factory.make_User() existing_token = get_one( user.userprofile.get_authorisation_tokens()) token_string = convert_tuple_to_string( get_creds_tuple(existing_token)) call_command( 'apikey', username=user.username, delete=token_string, stderr=stderr, stdout=stdout) self.assertEqual('', stderr.getvalue().strip()) keys_after = user.userprofile.get_authorisation_tokens() self.assertEqual(0, len(keys_after)) def test_apikey_rejects_mutually_exclusive_options(self): user = factory.make_User() error_text = assertCommandErrors( self, 'apikey', username=user.username, generate=True, delete="foo") self.assertIn( "Specify one of --generate or --delete", error_text) def test_apikey_rejects_deletion_of_bad_key(self): user = factory.make_User() error_text = assertCommandErrors( self, 'apikey', username=user.username, delete="foo") self.assertIn( "Malformed credentials string", error_text) def test_api_key_rejects_deletion_of_nonexistent_key(self): stderr = BytesIO() user = factory.make_User() existing_token = get_one( user.userprofile.get_authorisation_tokens()) token_string = convert_tuple_to_string( get_creds_tuple(existing_token)) call_command( 'apikey', username=user.username, delete=token_string, stderr=stderr) self.assertEqual('', stderr.getvalue().strip()) # Delete it again. Check that there's a sensible rejection. error_text = assertCommandErrors( self, 'apikey', username=user.username, delete=token_string) self.assertIn( "No matching api key found", error_text) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_commands_dbshell.py0000644000000000000000000000635513056115004023674 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
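# In rough terms, the dbshell command exercised below opens psql # against the development database cluster when the dev fixture is # available, and otherwise shells out to the installed database with # the equivalent of: sudo -u postgres psql maasdb. The --installed # and --database options override that default behaviour.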
"""Tests for the `dbshell` management command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import subprocess from django.core.management import call_command from django.core.management.base import CommandError from maasserver.management.commands.dbshell import Command as dbshell_command from maasserver.testing import database as database_module from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase import mock class TestDBShell(MAASServerTestCase): def test_runs_installed_cluster_by_default_if_no_dev_fixture(self): check_call = self.patch(subprocess, 'check_call') mock_get_dev_db = self.patch( dbshell_command, 'get_development_database') mock_get_dev_db.return_value = None call_command('dbshell') self.assertEqual( [mock.call(['sudo', '-u', 'postgres', 'psql', 'maasdb'])], check_call.mock_calls) def test_runs_dev_cluster_by_default_if_dev_fixture_exists(self): dbname = factory.make_name('db') cluster = self.patch(database_module, 'MAASClusterFixture') cluster.return_value.dbname = dbname call_command('dbshell') self.assertEqual( [ mock.call(None), mock.call().__enter__(), mock.call().shell(dbname), mock.call().__exit__(None, None, None), ], cluster.mock_calls) def test_local_run_obeys_database_option_if_given(self): dbname = factory.make_name('db') cluster = self.patch(database_module, 'MAASClusterFixture') cluster.return_value.dbname = dbname call_command('dbshell', database=dbname) self.assertEqual( [ mock.call(dbname), mock.call().__enter__(), mock.call().shell(dbname), mock.call().__exit__(None, None, None), ], cluster.mock_calls) def test_installed_option_connects_to_installed_cluster(self): check_call = self.patch(subprocess, 'check_call') call_command('dbshell', installed=True, database=None) self.assertEqual( [mock.call(['sudo', '-u', 'postgres', 'psql', 'maasdb'])], check_call.mock_calls) def test_installed_run_obeys_database_option_if_given(self): dbname = factory.make_name('db') check_call = self.patch(subprocess, 'check_call') call_command('dbshell', installed=True, database=dbname) self.assertEqual( [mock.call(['sudo', '-u', 'postgres', 'psql', dbname])], check_call.mock_calls) def test_installed_run_raises_errors_as_CommandError(self): self.patch( subprocess, 'check_call', mock.MagicMock(side_effect=subprocess.CalledProcessError( 99, ['command', 'line']))) error = self.assertRaises( CommandError, call_command, 'dbshell', installed=True) self.assertEquals("psql failed.", unicode(error)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_commands_edit_named_options.py0000644000000000000000000002567413056115004026130 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the edit_named_options command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from codecs import getwriter from collections import OrderedDict from io import BytesIO import os import shutil import textwrap from django.core.management import call_command from django.core.management.base import CommandError from maasserver.management.commands.edit_named_options import ( Command as command_module, ) from maasserver.models import Config from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import get_one from provisioningserver.dns.config import MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME from provisioningserver.utils.isc import ( make_isc_string, parse_isc_string, read_isc_file, ) from testtools.matchers import ( Contains, Equals, FileContains, Not, ) OPTIONS_FILE = textwrap.dedent("""\ options { directory "/var/cache/bind"; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; """) OPTIONS_FILE_WITH_DNSSEC = textwrap.dedent("""\ options { directory "/var/cache/bind"; dnssec-validation auto; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; """) OPTIONS_FILE_WITH_FORWARDERS = textwrap.dedent("""\ options { directory "/var/cache/bind"; forwarders { 192.168.1.1; 192.168.1.2; }; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; """) OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC = textwrap.dedent("""\ options { directory "/var/cache/bind"; forwarders { 192.168.1.1; 192.168.1.2; }; dnssec-validation no; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; """) OPTIONS_FILE_WITH_EXTRA_AND_DUP_FORWARDER = textwrap.dedent("""\ options { directory "/var/cache/bind"; forwarders { 192.168.1.2; 192.168.1.3; }; dnssec-validation no; auth-nxdomain no; # conform to RFC1035 listen-on-v6 { any; }; }; """) class TestEditNamedOptionsCommand(MAASServerTestCase): def setUp(self): super(TestEditNamedOptionsCommand, self).setUp() out = BytesIO() self.stdout = getwriter("UTF-8")(out) def assertFailsWithMessage(self, config_path, message): e = self.assertRaises( CommandError, call_command, "edit_named_options", config_path=config_path, stdout=self.stdout) self.assertIn(message, e.message) def assertContentFailsWithMessage(self, content, message): options_file = self.make_file(contents=content) self.assertFailsWithMessage(options_file, message) # The original file must be untouched. 
self.assertThat(options_file, FileContains(content)) def test_exits_when_no_file_to_edit(self): dir = self.make_dir() absent_file = os.path.join(dir, "foo") self.assertFailsWithMessage(absent_file, "does not exist") def test_exits_when_file_has_no_options_block(self): content = factory.make_string() self.assertContentFailsWithMessage( content, "Can't find options {} block") def test_exits_when_cant_parse_config(self): content = "options { forwarders {1.1.1.1} " # (missing a closing brace) self.assertContentFailsWithMessage(content, "Failed to parse") def test_exits_when_fails_to_make_backup(self): self.patch(shutil, "copyfile").side_effect = IOError("whatever") self.assertContentFailsWithMessage( OPTIONS_FILE, "Failed to make a backup") def test_does_not_remove_existing_forwarders_config(self): options_file = self.make_file(contents=OPTIONS_FILE_WITH_FORWARDERS) call_command( "edit_named_options", config_path=options_file, stdout=self.stdout) options = read_isc_file(options_file) self.assertThat(make_isc_string(options), Contains('forwarders')) def test_removes_existing_forwarders_config_if_migrate_set(self): options_file = self.make_file(contents=OPTIONS_FILE_WITH_FORWARDERS) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) # Check that the file was re-written without forwarders (since # that's now in the included file). options = read_isc_file(options_file) self.assertThat( make_isc_string(options), Not(Contains('forwarders'))) def test_does_not_remove_existing_dnssec_validation_config(self): options_file = self.make_file(contents=OPTIONS_FILE_WITH_DNSSEC) call_command( "edit_named_options", config_path=options_file, stdout=self.stdout) # Check that the dnssec-validation setting was kept in the # re-written file (without the migrate option it must not be # removed). options = read_isc_file(options_file) self.assertThat( make_isc_string(options), Contains('dnssec-validation')) def test_removes_existing_dnssec_validation_config_if_migration_set(self): options_file = self.make_file(contents=OPTIONS_FILE_WITH_DNSSEC) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) # Check that the file was re-written without dnssec-validation (since # that's now in the included file). options = read_isc_file(options_file) self.assertThat( make_isc_string(options), Not(Contains('dnssec-validation'))) def test_normal_operation(self): options_file = self.make_file(contents=OPTIONS_FILE) call_command( "edit_named_options", config_path=options_file, stdout=self.stdout) expected_path = os.path.join( os.path.dirname(options_file), "maas", MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) # Check that the file was re-written with the include statement. options = read_isc_file(options_file) self.assertThat( make_isc_string(options), Contains('include "%s";' % expected_path)) # Check that the backup was made.
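# (The backup is located indirectly below: list the directory, drop # the original file's name, and the single file that remains is the # backup, whose contents must match the pre-edit options file.)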
options_file_base = os.path.dirname(options_file) files = os.listdir(options_file_base) self.assertEqual(2, len(files)) files.remove(os.path.basename(options_file)) [backup_file] = files backup_file = os.path.join(options_file_base, backup_file) self.assertThat(backup_file, FileContains(OPTIONS_FILE)) def test_migrates_bind_config_to_database(self): options_file = self.make_file( contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) self.assertThat({'192.168.1.1', '192.168.1.2'}, Equals(set(upstream_dns.value.split()))) dnssec_validation = get_one(Config.objects.filter( name="dnssec_validation")) self.assertThat('no', Equals(dnssec_validation.value)) def test_migrate_combines_with_existing_forwarders(self): options_file = self.make_file( contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) self.assertThat(OrderedDict.fromkeys(['192.168.1.1', '192.168.1.2']), Equals(OrderedDict.fromkeys( upstream_dns.value.split()))) dnssec_validation = get_one(Config.objects.filter( name="dnssec_validation")) self.assertThat('no', Equals(dnssec_validation.value)) options_file = self.make_file( contents=OPTIONS_FILE_WITH_EXTRA_AND_DUP_FORWARDER) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) self.assertThat( OrderedDict.fromkeys( ['192.168.1.1', '192.168.1.2', '192.168.1.3']), Equals(OrderedDict.fromkeys(upstream_dns.value.split()))) def test_dry_run_migrates_nothing_and_prints_config(self): options_file = self.make_file( contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, dry_run=True, stdout=self.stdout) upstream_dns = get_one(Config.objects.filter(name="upstream_dns")) self.assertIsNone(upstream_dns) dnssec_validation = get_one(Config.objects.filter( name="dnssec_validation")) self.assertIsNone(dnssec_validation) # Check that a proper configuration was written to stdout. 
config = parse_isc_string(self.stdout.getvalue()) self.assertIsNotNone(config) def test_repeat_migrations_migrate_nothing(self): options_file = self.make_file( contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) backup_mock = self.patch(command_module, "back_up_existing_file") call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) self.assertTrue(backup_mock.called) backup_mock.reset_mock() write_mock = self.patch(command_module, "write_new_named_conf_options") call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) self.assertFalse(backup_mock.called) self.assertFalse(write_mock.called) def test_repeat_forced_migrations_write_file_anyway(self): options_file = self.make_file( contents=OPTIONS_FILE_WITH_FORWARDERS_AND_DNSSEC) backup_mock = self.patch(command_module, "back_up_existing_file") call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, stdout=self.stdout) self.assertTrue(backup_mock.called) backup_mock.reset_mock() write_mock = self.patch(command_module, "write_new_named_conf_options") call_command( "edit_named_options", config_path=options_file, migrate_conflicting_options=True, force=True, stdout=self.stdout) self.assertTrue(backup_mock.called) self.assertTrue(write_mock.called) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_commands_get_named_conf.py0000644000000000000000000000231513056115004025177 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the get_named_conf command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from codecs import getwriter from io import BytesIO from django.core.management import call_command from maasserver.testing.testcase import MAASServerTestCase from testtools.matchers import ( Contains, FileContains, ) class TestGetNamedConfCommand(MAASServerTestCase): def test_get_named_conf_returns_snippet(self): out = BytesIO() stdout = getwriter("UTF-8")(out) call_command('get_named_conf', stdout=stdout) result = stdout.getvalue() # Just check that the returned snippet looks all right. self.assertIn('include "', result) def test_get_named_conf_appends_to_config_file(self): file_path = self.make_file() call_command( 'get_named_conf', edit=True, config_path=file_path) self.assertThat( file_path, FileContains( matcher=Contains('include "'))) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_commands_set_up_dns.py0000644000000000000000000000323013056115004024407 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
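# In outline, set_up_dns writes MAAS's base DNS configuration files # (MAAS_NAMED_CONF_NAME and MAAS_RNDC_CONF_NAME) into the DNS # configuration directory; with no_clobber an existing named # configuration file is left untouched, as the tests below verify.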
"""Tests for the get_named_conf command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from django.core.management import call_command from maasserver.testing.testcase import MAASServerTestCase from maastesting.factory import factory from provisioningserver.dns.config import ( MAAS_NAMED_CONF_NAME, MAAS_RNDC_CONF_NAME, ) from provisioningserver.dns.testing import patch_dns_config_path from testtools.matchers import ( AllMatch, FileContains, FileExists, ) class TestSetUpDNSCommand(MAASServerTestCase): def test_set_up_dns_writes_configuration(self): dns_conf_dir = self.make_dir() patch_dns_config_path(self, dns_conf_dir) call_command('set_up_dns') named_config = os.path.join(dns_conf_dir, MAAS_NAMED_CONF_NAME) rndc_conf_path = os.path.join(dns_conf_dir, MAAS_RNDC_CONF_NAME) self.assertThat([rndc_conf_path, named_config], AllMatch(FileExists())) def test_set_up_dns_does_not_overwrite_config(self): dns_conf_dir = self.make_dir() patch_dns_config_path(self, dns_conf_dir) random_content = factory.make_string() factory.make_file( location=dns_conf_dir, name=MAAS_NAMED_CONF_NAME, contents=random_content) call_command('set_up_dns', no_clobber=True) self.assertThat( os.path.join(dns_conf_dir, MAAS_NAMED_CONF_NAME), FileContains(random_content)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_compose_preseed.py0000644000000000000000000003000413056115004023536 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.compose_preseed`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.compose_preseed import ( compose_preseed, get_apt_proxy_for_node, ) from maasserver.enum import ( NODE_BOOT, NODE_STATUS, PRESEED_TYPE, ) from maasserver.models.config import Config from maasserver.rpc.testing.fixtures import RunningClusterRPCFixture from maasserver.testing.factory import factory from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import absolute_reverse from maastesting.matchers import MockCalledOnceWith from metadataserver.models import NodeKey from provisioningserver.drivers.osystem import BOOT_IMAGE_PURPOSE from provisioningserver.rpc.exceptions import ( NoConnectionsAvailable, NoSuchOperatingSystem, ) from provisioningserver.testing.os import make_osystem from testtools.matchers import ( ContainsDict, Equals, KeysEqual, MatchesDict, MatchesListwise, StartsWith, ) import yaml class TestComposePreseed(MAASServerTestCase): def assertSystemInfo(self, config): self.assertThat(config, ContainsDict({ 'system_info': MatchesDict({ 'package_mirrors': MatchesListwise([ MatchesDict({ "arches": Equals(["i386", "amd64"]), "search": MatchesDict({ "primary": Equals( [Config.objects.get_config("main_archive")]), "security": Equals( [Config.objects.get_config("main_archive")]), }), "failsafe": MatchesDict({ "primary": Equals( "http://archive.ubuntu.com/ubuntu"), "security": Equals( "http://security.ubuntu.com/ubuntu"), }) }), MatchesDict({ "arches": Equals(["default"]), "search": MatchesDict({ "primary": Equals( [Config.objects.get_config("ports_archive")]), "security": Equals( [Config.objects.get_config("ports_archive")]), }), "failsafe": MatchesDict({ "primary": Equals( "http://ports.ubuntu.com/ubuntu-ports"), 
"security": Equals( "http://ports.ubuntu.com/ubuntu-ports"), }) }), ]), }), })) def test_compose_preseed_for_commissioning_node_skips_apt_proxy(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) Config.objects.set_config("enable_http_proxy", False) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) self.assertNotIn('apt_proxy', preseed) def test_compose_preseed_for_commissioning_node_produces_yaml(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) apt_proxy = get_apt_proxy_for_node(node) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) self.assertIn('datasource', preseed) self.assertIn('MAAS', preseed['datasource']) self.assertThat( preseed['datasource']['MAAS'], KeysEqual( 'metadata_url', 'consumer_key', 'token_key', 'token_secret')) self.assertEquals(apt_proxy, preseed['apt_proxy']) self.assertThat( preseed['reporting']['maas'], KeysEqual( 'consumer_key', 'endpoint', 'token_key', 'token_secret', 'type')) self.assertSystemInfo(preseed) def test_compose_preseed_for_commissioning_node_has_header(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) preseed = compose_preseed(PRESEED_TYPE.COMMISSIONING, node) self.assertThat(preseed, StartsWith("#cloud-config\n")) def test_compose_preseed_includes_metadata_url(self): node = factory.make_Node(status=NODE_STATUS.READY) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) reverse = absolute_reverse('metadata') self.assertIn(reverse, preseed) status = absolute_reverse('metadata-status', args=[node.system_id]) self.assertIn(status, preseed) def test_compose_preseed_for_commissioning_includes_metadata_status_url( self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) self.assertEqual( absolute_reverse('metadata'), preseed['datasource']['MAAS']['metadata_url']) self.assertEqual( absolute_reverse('metadata-status', args=[node.system_id]), preseed['reporting']['maas']['endpoint']) def test_compose_preseed_includes_node_oauth_token(self): node = factory.make_Node(status=NODE_STATUS.READY) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) token = NodeKey.objects.get_token_for_node(node) self.assertIn('oauth_consumer_key=%s' % token.consumer.key, preseed) self.assertIn('oauth_token_key=%s' % token.key, preseed) self.assertIn('oauth_token_secret=%s' % token.secret, preseed) def test_compose_preseed_for_commissioning_includes_auth_tokens(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.COMMISSIONING, node)) maas_dict = preseed['datasource']['MAAS'] reporting_dict = preseed['reporting']['maas'] token = NodeKey.objects.get_token_for_node(node) self.assertEqual(token.consumer.key, maas_dict['consumer_key']) self.assertEqual(token.key, maas_dict['token_key']) self.assertEqual(token.secret, maas_dict['token_secret']) self.assertEqual(token.consumer.key, reporting_dict['consumer_key']) self.assertEqual(token.key, reporting_dict['token_key']) self.assertEqual(token.secret, reporting_dict['token_secret']) def test_compose_preseed_valid_local_cloud_config(self): node = factory.make_Node(status=NODE_STATUS.READY) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) apt_proxy = get_apt_proxy_for_node(node) preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) 
keyname = "cloud-init/local-cloud-config" self.assertIn(keyname, preseed) # Expected input is 'cloud-init/local-cloud-config string VALUE' # where one or more spaces in between tokens, and VALUE ending # at newline. config = preseed[preseed.find(keyname) + len(keyname):] value = config.lstrip().split("string")[1].lstrip() # Now debconf-unescape it. value = value.replace("\\n", "\n").replace("\\\\", "\\") # At this point it should be valid yaml. data = yaml.safe_load(value) self.assertIn("manage_etc_hosts", data) self.assertFalse(data["manage_etc_hosts"]) self.assertIn("apt_preserve_sources_list", data) self.assertTrue(data["apt_preserve_sources_list"]) self.assertEqual(apt_proxy, data["apt_proxy"]) self.assertTrue(data["manual_cache_clean"]) self.assertSystemInfo(data) def test_compose_preseed_skips_apt_proxy(self): node = factory.make_Node(status=NODE_STATUS.READY) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) Config.objects.set_config("enable_http_proxy", False) preseed = compose_preseed(PRESEED_TYPE.DEFAULT, node) self.assertNotIn('apt_proxy', preseed) def test_compose_preseed_with_curtin_installer(self): node = factory.make_Node( status=NODE_STATUS.READY, boot_type=NODE_BOOT.FASTPATH) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) apt_proxy = get_apt_proxy_for_node(node) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.CURTIN, node)) self.assertIn('datasource', preseed) self.assertIn('MAAS', preseed['datasource']) self.assertThat( preseed['datasource']['MAAS'], KeysEqual( 'metadata_url', 'consumer_key', 'token_key', 'token_secret')) self.assertEqual( absolute_reverse('curtin-metadata'), preseed['datasource']['MAAS']['metadata_url']) self.assertEqual(apt_proxy, preseed['apt_proxy']) self.assertSystemInfo(preseed) def test_compose_preseed_with_curtin_installer_skips_apt_proxy(self): node = factory.make_Node( status=NODE_STATUS.READY, boot_type=NODE_BOOT.FASTPATH) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) Config.objects.set_config("enable_http_proxy", False) preseed = yaml.safe_load( compose_preseed(PRESEED_TYPE.CURTIN, node)) self.assertNotIn('apt_proxy', preseed) def test_compose_preseed_with_osystem_compose_preseed(self): os_name = factory.make_name('os') osystem = make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL]) make_usable_osystem(self, os_name) compose_preseed_orig = osystem.compose_preseed compose_preseed_mock = self.patch(osystem, 'compose_preseed') compose_preseed_mock.side_effect = compose_preseed_orig node = factory.make_Node( osystem=os_name, status=NODE_STATUS.READY) node.nodegroup.accept() self.useFixture(RunningClusterRPCFixture()) token = NodeKey.objects.get_token_for_node(node) url = absolute_reverse('curtin-metadata') compose_preseed(PRESEED_TYPE.CURTIN, node) self.assertThat( compose_preseed_mock, MockCalledOnceWith( PRESEED_TYPE.CURTIN, (node.system_id, node.hostname), (token.consumer.key, token.key, token.secret), url)) def test_compose_preseed_propagates_NoSuchOperatingSystem(self): # If the cluster controller replies that the node's OS is not known to # it, compose_preseed() simply passes the exception up. 
        os_name = factory.make_name('os')
        osystem = make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL])
        make_usable_osystem(self, os_name)
        compose_preseed_mock = self.patch(osystem, 'compose_preseed')
        compose_preseed_mock.side_effect = NoSuchOperatingSystem
        node = factory.make_Node(
            osystem=os_name, status=NODE_STATUS.READY)
        node.nodegroup.accept()
        self.useFixture(RunningClusterRPCFixture())
        self.assertRaises(
            NoSuchOperatingSystem,
            compose_preseed, PRESEED_TYPE.CURTIN, node)

    def test_compose_preseed_propagates_NoConnectionsAvailable(self):
        # If the region does not have any connections to the node's cluster
        # controller, compose_preseed() simply passes the exception up.
        os_name = factory.make_name('os')
        make_osystem(self, os_name, [BOOT_IMAGE_PURPOSE.XINSTALL])
        make_usable_osystem(self, os_name)
        node = factory.make_Node(
            osystem=os_name, status=NODE_STATUS.READY)
        self.assertRaises(
            NoConnectionsAvailable,
            compose_preseed, PRESEED_TYPE.CURTIN, node)


maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_config.py0000644000000000000000000000652713056115004021634 0ustar 00000000000000
# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for `maasserver.config`."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = []

from maasserver.config import RegionConfiguration
from maastesting.factory import factory
from maastesting.testcase import MAASTestCase


class TestRegionConfiguration(MAASTestCase):
    """Tests for `RegionConfiguration`."""

    def test_default_maas_url(self):
        config = RegionConfiguration({})
        self.assertEqual("http://localhost:5240/MAAS", config.maas_url)

    def test_set_and_get_maas_url(self):
        config = RegionConfiguration({})
        example_url = factory.make_simple_http_url()
        config.maas_url = example_url
        self.assertEqual(example_url, config.maas_url)
        # It's also stored in the configuration database.
self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_hostnames(self): config = RegionConfiguration({}) example_url = factory.make_simple_http_url() config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_very_short_hostnames(self): config = RegionConfiguration({}) example_url = factory.make_simple_http_url( netloc=factory.make_string(size=1)) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_ipv6_addresses(self): config = RegionConfiguration({}) example_url = factory.make_simple_http_url( netloc=factory.make_ipv6_address()) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_ipv6_addresses_with_brackets(self): config = RegionConfiguration({}) example_url = factory.make_simple_http_url( netloc="[%s]" % factory.make_ipv6_address()) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) class TestRegionConfigurationDatabaseOptions(MAASTestCase): """Tests for the database options in `RegionConfiguration`.""" options_and_defaults = { "database_host": "localhost", "database_name": "maasdb", "database_user": "maas", "database_pass": "", } scenarios = tuple( (name, {"option": name, "default": default}) for name, default in options_and_defaults.viewitems() ) def test__default(self): config = RegionConfiguration({}) self.assertEqual(self.default, getattr(config, self.option)) def test__set_and_get(self): config = RegionConfiguration({}) example_value = factory.make_name(self.option) setattr(config, self.option, example_value) self.assertEqual(example_value, getattr(config, self.option)) # It's also stored in the configuration database. self.assertEqual({self.option: example_value}, config.store) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_config_forms.py0000644000000000000000000002522713056115004023050 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test config forms utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django import forms from django.forms import widgets from django.http import QueryDict from lxml.etree import XPath from lxml.html import fromstring from maasserver.config_forms import ( DictCharField, DictCharWidget, get_all_prefixed_values, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith from mock import ( ANY, Mock, ) class TestDictCharField(MAASServerTestCase): def test_DictCharField_init(self): testField = DictCharField( [ ('field_a', forms.CharField(label='Field a')), ('field_b', forms.CharField(label='Field b')), ('field_c', forms.CharField(label='Field c')), ]) self.assertEqual(['field_a', 'field_b', 'field_c'], testField.names) self.assertEqual( ['field_a', 'field_b', 'field_c'], testField.widget.names) self.assertEqual( [field.widget for field in testField.field_dict.values()], testField.widget.widgets) def test_DictCharField_does_not_allow_subfield_named_skip_check(self): # Creating a DictCharField with a subfield named 'skip_check' is not # allowed. self.assertRaises( RuntimeError, DictCharField, [('skip_check', forms.CharField(label='Skip Check'))]) class TestFormWithDictCharField(MAASServerTestCase): def test_DictCharField_processes_QueryDict_into_a_dict(self): class FakeForm(forms.Form): multi_field = DictCharField( [ ('field_a', forms.CharField(label='Field a')), ('field_b', forms.CharField( label='Field b', required=False, max_length=3)), ('field_c', forms.CharField( label='Field c', required=False)), ]) fielda_value = factory.make_string() fieldc_value = factory.make_string() data = QueryDict( 'multi_field_field_a=%s&multi_field_field_c=%s' % ( fielda_value, fieldc_value)) form = FakeForm(data) self.assertTrue(form.is_valid()) self.assertEqual( { 'field_a': fielda_value, 'field_b': '', 'field_c': fieldc_value, }, form.cleaned_data['multi_field']) def test_DictCharField_honors_field_constraint(self): class FakeForm(forms.Form): multi_field = DictCharField( [ ('field_a', forms.CharField(label='Field a')), ('field_b', forms.CharField( label='Field b', required=False, max_length=3)), ]) # Create a value that will fail validation because it's too long. fielda_value = factory.make_string(10) data = QueryDict('multi_field_field_b=%s' % fielda_value) form = FakeForm(data) self.assertFalse(form.is_valid()) self.assertEqual( {'multi_field': [ 'Field a: This field is required.', 'Field b: Ensure this value has at ' 'most 3 characters (it has 10).']}, form.errors) def test_DictCharField_skip_check_true_skips_validation(self): # Create a value that will fail validation because it's too long. field_name = factory.make_string(10) field_value = factory.make_string(10) # multi_field_skip_check=true will make the form accept the value # even if it's not valid. data = QueryDict( 'multi_field_%s=%s&multi_field_skip_check=true' % ( field_name, field_value)) class FakeFormSkip(forms.Form): multi_field = DictCharField( [(field_name, forms.CharField(label='Unused', max_length=3))], skip_check=True) form = FakeFormSkip(data) self.assertTrue(form.is_valid()) self.assertEqual( {field_name: field_value}, form.cleaned_data['multi_field']) def test_DictCharField_skip_check_false(self): # Create a value that will fail validation because it's too long. 
        field_value = factory.make_string(10)
        field_name = factory.make_string()
        field_label = factory.make_string()
        # Force the check with multi_field_skip_check=false.
        data = QueryDict(
            'multi_field_%s=%s&multi_field_skip_check=false' % (
                field_name, field_value))

        class FakeFormSkip(forms.Form):
            multi_field = DictCharField(
                [(field_name, forms.CharField(
                    label=field_label, max_length=3))],
                skip_check=True)

        form = FakeFormSkip(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            {
                'multi_field': [
                    "%s: Ensure this value has at most 3 characters "
                    "(it has 10)." % field_label]
            },
            form.errors)

    def test_DictCharField_accepts_required_false(self):
        # A form where the DictCharField instance is constructed with
        # required=False.
        class FakeFormRequiredFalse(forms.Form):
            multi_field = DictCharField(
                [('field_a', forms.CharField(label='Field a'))],
                required=False)
            char_field = forms.CharField(label='Field a')

        char_value = factory.make_string(10)
        data = QueryDict('char_field=%s' % (char_value))
        form = FakeFormRequiredFalse(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(
            {'char_field': char_value, 'multi_field': None},
            form.cleaned_data)


class TestUtilities(MAASServerTestCase):

    def test_get_all_prefixed_values_returns_sub_dict(self):
        inputs = [
            {'prefix_test': 'a', 'key': 'b', 'prefix_2': 'c'},
            {},
            {'b': factory.make_string()},
        ]
        prefix = 'prefix_'
        expected = [
            {'test': 'a', '2': 'c'},
            {},
            {},
        ]
        self.assertEqual(
            expected,
            map(lambda data: get_all_prefixed_values(data, prefix), inputs))


class TestDictCharWidget(MAASServerTestCase):

    def test_DictCharWidget_id_for_label_uses_first_fields_name(self):
        names = [factory.make_string()]
        initials = []
        labels = [factory.make_string()]
        widget = DictCharWidget(
            [widgets.TextInput, widgets.TextInput], names, initials, labels)
        self.assertEqual(
            ' _%s' % names[0],
            widget.id_for_label(' '))

    def test_DictCharWidget_renders_fieldset_with_label_and_field_names(self):
        names = [factory.make_string(), factory.make_string()]
        initials = []
        labels = [factory.make_string(), factory.make_string()]
        values = [factory.make_string(), factory.make_string()]
        widget = DictCharWidget(
            [widgets.TextInput, widgets.TextInput, widgets.CheckboxInput],
            names, initials, labels, skip_check=True)
        name = factory.make_string()
        # Note: the '<root>' wrapper element appears to have been stripped
        # from this dump during extraction; it is restored here so the XPath
        # lookups below have a document root to work against.
        html_widget = fromstring(
            '<root>' + widget.render(name, values) + '</root>')
        widget_names = XPath('fieldset/input/@name')(html_widget)
        widget_labels = XPath('fieldset/label/text()')(html_widget)
        widget_values = XPath('fieldset/input/@value')(html_widget)
        expected_names = [
            "%s_%s" % (name, widget_name) for widget_name in names]
        self.assertEqual(
            [expected_names, labels, values],
            [widget_names, widget_labels, widget_values])

    def test_empty_DictCharWidget_renders_as_empty_string(self):
        widget = DictCharWidget(
            [widgets.CheckboxInput], [], [], [], skip_check=True)
        self.assertEqual('', widget.render(factory.make_string(), ''))

    def test_DictCharWidget_value_from_datadict_values_from_data(self):
        # 'value_from_datadict' extracts the values corresponding to the
        # field as a dictionary.
        names = [factory.make_string(), factory.make_string()]
        initials = []
        labels = [factory.make_string(), factory.make_string()]
        name = factory.make_string()
        field_1_value = factory.make_string()
        field_2_value = factory.make_string()
        # Create a query string with the field2 before the field1 and another
        # (unknown) value.
        data = QueryDict(
            '%s_%s=%s&%s_%s=%s&%s=%s' % (
                name, names[1], field_2_value,
                name, names[0], field_1_value,
                factory.make_string(), factory.make_string())
        )
        widget = DictCharWidget(
            [widgets.TextInput, widgets.TextInput], names, initials, labels)
        self.assertEqual(
            {names[0]: field_1_value, names[1]: field_2_value},
            widget.value_from_datadict(data, None, name))

    def test_DictCharWidget_renders_with_empty_string_as_input_data(self):
        names = [factory.make_string(), factory.make_string()]
        initials = []
        labels = [factory.make_string(), factory.make_string()]
        widget = DictCharWidget(
            [widgets.TextInput, widgets.TextInput, widgets.CheckboxInput],
            names, initials, labels, skip_check=True)
        name = factory.make_string()
        # As above, the stripped '<root>' wrapper element is restored here.
        html_widget = fromstring(
            '<root>' + widget.render(name, '') + '</root>')
        widget_names = XPath('fieldset/input/@name')(html_widget)
        widget_labels = XPath('fieldset/label/text()')(html_widget)
        expected_names = [
            "%s_%s" % (name, widget_name) for widget_name in names]
        self.assertEqual(
            [expected_names, labels],
            [widget_names, widget_labels])

    def test_DictCharWidget_renders_with_initial_when_no_value(self):
        """Widgets should use initial value if rendered without value."""
        names = [factory.make_name()]
        initials = [factory.make_name()]
        labels = [factory.make_name()]
        mock_widget = Mock()
        widget = DictCharWidget(
            [mock_widget, widgets.TextInput], names, initials, labels,
            skip_check=True)
        self.patch(widget, 'format_output')
        widget.render('foo', [])
        self.assertThat(
            mock_widget.render, MockCalledOnceWith(ANY, initials[0], ANY))


maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_dhcp.py0000644000000000000000000007021213056115004021305 0ustar 00000000000000
# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
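# A hypothetical usage sketch (not original code) of the helper exercised
# by TestMakeSubnetConfig below, inferred from the assertions there; the
# argument values are made up for illustration:
#
#     config = make_subnet_config(
#         interface, dns_servers='10.0.0.1', ntp_server='ntp.example.com')
#     # `config` is a dict of unicode strings with (at least) these keys:
#     # 'subnet', 'subnet_mask', 'subnet_cidr', 'broadcast_ip', 'interface',
#     # 'router_ip', 'dns_servers', 'ntp_server', 'domain_name',
#     # 'ip_range_low', 'ip_range_high'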
"""Tests for DHCP management.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from django.conf import settings from django.db import transaction from fixtures import LoggerFixture from maasserver import dhcp from maasserver.dhcp import ( configure_dhcp, consolidator, do_configure_dhcp, make_subnet_config, split_ipv4_ipv6_interfaces, ) from maasserver.enum import ( NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.models import ( Config, NodeGroup, ) from maasserver.rpc import getClientFor from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture from maasserver.testing.eventloop import ( RegionEventLoopFixture, RunningEventLoopFixture, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils.orm import post_commit_hooks from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.twisted import always_succeed_with from mock import ( ANY, call, sentinel, ) from netaddr import ( IPAddress, IPNetwork, ) from provisioningserver.rpc.cluster import ( ConfigureDHCPv4, ConfigureDHCPv6, ) from provisioningserver.utils.url import compose_URL from testtools.matchers import ( AllMatch, ContainsAll, ContainsDict, Equals, IsInstance, MatchesStructure, Not, ) class TestSplitIPv4IPv6Interfaces(MAASServerTestCase): """Tests for `split_ipv4_ipv6_interfaces`.""" def make_ipv4_interface(self, nodegroup): subnet = factory.make_Subnet( cidr=unicode(factory.make_ipv4_network().cidr)) return factory.make_NodeGroupInterface( nodegroup, subnet=subnet) def make_ipv6_interface(self, nodegroup): subnet = factory.make_Subnet( cidr=unicode(factory.make_ipv6_network().cidr)) return factory.make_NodeGroupInterface( nodegroup, subnet=subnet) def test__separates_IPv4_from_IPv6_interfaces(self): nodegroup = factory.make_NodeGroup() # Create 0-2 IPv4 cluster interfaces and 0-2 IPv6 cluster interfaces. 
        ipv4_interfaces = [
            self.make_ipv4_interface(nodegroup)
            for _ in range(random.randint(0, 2))
        ]
        ipv6_interfaces = [
            self.make_ipv6_interface(nodegroup)
            for _ in range(random.randint(0, 2))
        ]
        interfaces = sorted(
            ipv4_interfaces + ipv6_interfaces,
            key=lambda *args: random.randint(0, 10))
        ipv4_result, ipv6_result = split_ipv4_ipv6_interfaces(interfaces)
        self.assertItemsEqual(ipv4_interfaces, ipv4_result)
        self.assertItemsEqual(ipv6_interfaces, ipv6_result)


class TestMakeSubnetConfig(MAASServerTestCase):
    """Tests for `make_subnet_config`."""

    def test__includes_all_parameters(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup())
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.assertIsInstance(config, dict)
        self.assertThat(
            config.keys(),
            ContainsAll([
                'subnet',
                'subnet_mask',
                'subnet_cidr',
                'broadcast_ip',
                'interface',
                'router_ip',
                'dns_servers',
                'ntp_server',
                'domain_name',
                'ip_range_low',
                'ip_range_high',
            ]))

    def test__sets_dns_and_ntp_from_arguments(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup())
        dns = '%s %s' % (
            factory.make_ipv4_address(),
            factory.make_ipv6_address(),
        )
        ntp = factory.make_name('ntp')
        config = make_subnet_config(
            interface, dns_servers=dns, ntp_server=ntp)
        self.expectThat(config['dns_servers'], Equals(dns))
        self.expectThat(config['ntp_server'], Equals(ntp))

    def test__sets_domain_name_from_cluster(self):
        nodegroup = factory.make_NodeGroup()
        interface = factory.make_NodeGroupInterface(nodegroup)
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.expectThat(config['domain_name'], Equals(nodegroup.name))

    def test__sets_other_items_from_interface(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup())
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.expectThat(config['broadcast_ip'], Equals(interface.broadcast_ip))
        self.expectThat(config['interface'], Equals(interface.interface))
        self.expectThat(config['router_ip'], Equals(interface.router_ip))

    def test__passes_IP_addresses_as_strings(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup())
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.expectThat(config['subnet'], IsInstance(unicode))
        self.expectThat(config['subnet_mask'], IsInstance(unicode))
        self.expectThat(config['subnet_cidr'], IsInstance(unicode))
        self.expectThat(config['broadcast_ip'], IsInstance(unicode))
        self.expectThat(config['router_ip'], IsInstance(unicode))
        self.expectThat(config['ip_range_low'], IsInstance(unicode))
        self.expectThat(config['ip_range_high'], IsInstance(unicode))

    def test__defines_IPv4_subnet(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup(), network=IPNetwork('10.9.8.7/24'))
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.expectThat(config['subnet'], Equals('10.9.8.0'))
        self.expectThat(config['subnet_mask'], Equals('255.255.255.0'))
        self.expectThat(config['subnet_cidr'], Equals('10.9.8.0/24'))

    def test__defines_IPv6_subnet(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup(),
            network=IPNetwork('fd38:c341:27da:c831::/64'))
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        # Don't expect a specific literal value, like we do for IPv4; there
        # are different spellings.
        self.expectThat(
            IPAddress(config['subnet']),
            Equals(IPAddress('fd38:c341:27da:c831::')))
        # (Netmask is not used for the IPv6 config, so ignore it.)
        self.expectThat(
            IPNetwork(config['subnet_cidr']),
            Equals(IPNetwork('fd38:c341:27da:c831::/64')))

    def test__passes_dynamic_range(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup())
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.expectThat(
            (config['ip_range_low'], config['ip_range_high']),
            Equals((interface.ip_range_low, interface.ip_range_high)))
        self.expectThat(
            config['ip_range_low'],
            Not(Equals(interface.static_ip_range_low)))

    def test__doesnt_convert_None_router_ip(self):
        interface = factory.make_NodeGroupInterface(factory.make_NodeGroup())
        interface.subnet.router_ip = None
        interface.save()
        post_commit_hooks.fire()
        config = make_subnet_config(
            interface, factory.make_name('dns'), factory.make_name('ntp'))
        self.assertEqual('', config['router_ip'])


class TestDoConfigureDHCP(MAASServerTestCase):
    """Tests for `do_configure_dhcp`."""

    scenarios = (
        ("DHCPv4", {
            "command": ConfigureDHCPv4,
            "make_network": factory.make_ipv4_network,
            "make_address": factory.make_ipv4_address,
            "ip_version": 4,
        }),
        ("DHCPv6", {
            "command": ConfigureDHCPv6,
            "make_network": factory.make_ipv6_network,
            "make_address": factory.make_ipv6_address,
            "ip_version": 6,
        }),
    )

    def prepare_rpc(self, nodegroup):
        """Set up test case for speaking RPC to `nodegroup`.

        :param nodegroup: A cluster. It will "run" a mock RPC service.
        :return: Protocol, Command stub
        """
        self.useFixture(RegionEventLoopFixture('rpc'))
        self.useFixture(RunningEventLoopFixture())
        fixture = self.useFixture(MockLiveRegionToClusterRPCFixture())
        cluster = fixture.makeCluster(nodegroup, self.command)
        return cluster, getattr(cluster, self.command.commandName)

    def test__configures_dhcp(self):
        dns_server = self.make_address()
        maas_url = compose_URL("http://", dns_server)
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP,
            dhcp_key=factory.make_name('key'),
            network=self.make_network(),
            maas_url=maas_url)
        ntp_server = factory.make_name('ntp')
        protocol, command_stub = self.prepare_rpc(nodegroup)
        command_stub.side_effect = always_succeed_with({})
        # Although the above nodegroup has managed interfaces, we pass the
        # empty list here; do_configure_dhcp() dutifully believes us.
        do_configure_dhcp(
            self.ip_version, nodegroup, [], ntp_server,
            getClientFor(nodegroup.uuid))
        self.assertThat(
            command_stub,
            MockCalledOnceWith(
                ANY, omapi_key=nodegroup.dhcp_key, subnet_configs=[]))

    def test__configures_dhcp_with_subnets(self):
        dns_server = self.make_address()
        maas_url = compose_URL("http://", dns_server)
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP,
            dhcp_key=factory.make_string(),
            interface=factory.make_name('eth'),
            network=self.make_network(),
            maas_url=maas_url)
        # Create a second DHCP-managed interface.
        factory.make_NodeGroupInterface(
            nodegroup=nodegroup,
            interface=factory.make_name('eth'),
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP,
            network=self.make_network())
        ntp_server = factory.make_name('ntp')
        interfaces = nodegroup.get_managed_interfaces()
        protocol, command_stub = self.prepare_rpc(nodegroup)
        command_stub.side_effect = always_succeed_with({})
        do_configure_dhcp(
            self.ip_version, nodegroup, interfaces, ntp_server,
            getClientFor(nodegroup.uuid))
        expected_subnet_configs = [
            make_subnet_config(interface, dns_server, ntp_server)
            for interface in nodegroup.get_managed_interfaces()
        ]
        self.assertThat(
            command_stub,
            MockCalledOnceWith(
                ANY,
                subnet_configs=expected_subnet_configs,
                omapi_key=nodegroup.dhcp_key,
            ))


class TestDoConfigureDHCPWrappers(MAASServerTestCase):
    """Tests for `do_configure_dhcp` wrapper functions."""

    def test_configure_dhcpv4_calls_do_configure_dhcp(self):
        do_configure_dhcp = self.patch_autospec(dhcp, "do_configure_dhcp")
        dhcp.configure_dhcpv4(
            sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server,
            sentinel.client)
        self.assertThat(do_configure_dhcp, MockCalledOnceWith(
            4, sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server,
            sentinel.client))

    def test_configure_dhcpv6_calls_do_configure_dhcp(self):
        do_configure_dhcp = self.patch_autospec(dhcp, "do_configure_dhcp")
        dhcp.configure_dhcpv6(
            sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server,
            sentinel.client)
        self.assertThat(do_configure_dhcp, MockCalledOnceWith(
            6, sentinel.nodegroup, sentinel.interfaces, sentinel.ntp_server,
            sentinel.client))


def patch_configure_funcs(test):
    """Patch `configure_dhcpv4` and `configure_dhcpv6`."""
    return (
        test.patch(dhcp, 'configure_dhcpv4'),
        test.patch(dhcp, 'configure_dhcpv6'),
    )


def make_cluster(test, status=None, omapi_key=None, **kwargs):
    """Create a `NodeGroup` without interfaces.

    Status defaults to `ENABLED`.
    """
    if status is None:
        status = NODEGROUP_STATUS.ENABLED
    if omapi_key is None:
        # Set an arbitrary OMAPI key, so that the cluster won't need to
        # shell out to create one.
        omapi_key = factory.make_name('key')
    return factory.make_NodeGroup(
        status=status, dhcp_key=omapi_key, **kwargs)


def make_cluster_interface(
        test, network, cluster=None, management=None, **kwargs):
    if cluster is None:
        cluster = test.make_cluster()
    if management is None:
        management = NODEGROUPINTERFACE_MANAGEMENT.DHCP
    return factory.make_NodeGroupInterface(
        cluster, network=network, management=management, **kwargs)


def make_ipv4_interface(test, cluster=None, **kwargs):
    """Create an IPv4 `NodeGroupInterface` for `cluster`.

    The interface defaults to being managed.
    """
    return make_cluster_interface(
        test, factory.make_ipv4_network(), cluster, **kwargs)


def make_ipv6_interface(test, cluster=None, **kwargs):
    """Create an IPv6 `NodeGroupInterface` for `cluster`.

    The interface defaults to being managed.
    """
    return make_cluster_interface(
        test, factory.make_ipv6_network(), cluster, **kwargs)


class TestConfigureDHCP(MAASServerTestCase):
    """Tests for `configure_dhcp`."""

    def setUp(self):
        super(TestConfigureDHCP, self).setUp()
        # Suppress checks for cluster availability.
self.patch_autospec(dhcp, "getClientFor") def test__obeys_DHCP_CONNECT(self): configure_dhcpv4, configure_dhcpv6 = patch_configure_funcs(self) cluster = make_cluster(self) make_ipv4_interface(self, cluster) make_ipv6_interface(self, cluster) self.patch(settings, "DHCP_CONNECT", False) with post_commit_hooks: configure_dhcp(cluster) self.expectThat(configure_dhcpv4, MockNotCalled()) self.expectThat(configure_dhcpv6, MockNotCalled()) def test__does_not_configure_interfaces_if_nodegroup_not_accepted(self): configure_dhcpv4, configure_dhcpv6 = patch_configure_funcs(self) cluster = make_cluster(self, status=NODEGROUP_STATUS.DISABLED) make_ipv4_interface(self, cluster) make_ipv6_interface(self, cluster) self.patch(settings, "DHCP_CONNECT", True) with post_commit_hooks: configure_dhcp(cluster) self.expectThat(configure_dhcpv4, MockCalledOnceWith( cluster, [], ANY, dhcp.getClientFor.return_value)) self.expectThat(configure_dhcpv6, MockCalledOnceWith( cluster, [], ANY, dhcp.getClientFor.return_value)) def test__configures_dhcpv4(self): ip = factory.make_ipv4_address() cluster = make_cluster(self, maas_url='http://%s/' % ip) make_ipv4_interface(self, cluster) self.patch(settings, "DHCP_CONNECT", True) with post_commit_hooks: configure_dhcp(cluster) self.assertThat(dhcp.getClientFor, MockCalledOnceWith(cluster.uuid)) client = dhcp.getClientFor.return_value self.assertThat(client, MockCallsMatch( call(ANY, omapi_key=ANY, subnet_configs=ANY), call(ANY, omapi_key=ANY, subnet_configs=ANY), )) subnet_configs = [ subnet_config for call_args in client.call_args_list for subnet_config in call_args[1]['subnet_configs'] ] self.assertThat( subnet_configs, AllMatch( ContainsDict({"dns_servers": Equals(ip)}))) def test__uses_ntp_server_from_config(self): configure_dhcpv4, configure_dhcpv6 = patch_configure_funcs(self) cluster = make_cluster(self) make_ipv4_interface(self, cluster) self.patch(settings, "DHCP_CONNECT", True) with post_commit_hooks: configure_dhcp(cluster) ntp_server = Config.objects.get_config('ntp_server') self.assertThat( configure_dhcpv4, MockCalledOnceWith( ANY, ANY, ntp_server, dhcp.getClientFor.return_value)) self.assertThat( configure_dhcpv6, MockCalledOnceWith( ANY, ANY, ntp_server, dhcp.getClientFor.return_value)) class TestConfigureDHCPWithDisconnectedCluster(MAASServerTestCase): """Behaviour when the target cluster is not connected.""" def test__logs_about_disconnected_cluster(self): cluster = make_cluster(self) self.patch(settings, "DHCP_CONNECT", True) with LoggerFixture(dhcp.__name__) as logger: with post_commit_hooks: configure_dhcp(cluster) self.assertDocTestMatches( """\ Cluster ... (...) is not connected at present so cannot be configured; it will catch up when it next connects. """, logger.output) class TestConfigureDHCPTransactional(MAASTransactionServerTestCase): """Tests for `configure_dhcp` that require transactions. Specifically, post-commit hooks are run in a separate thread, so changes must be committed to the database in order that they're visible elsewhere. """ def setUp(self): super(TestConfigureDHCPTransactional, self).setUp() # Suppress checks for cluster availability. getClientFor = self.patch_autospec(dhcp, "getClientFor") getClientFor.return_value = sentinel.client # Connect DHCP changes. 
self.patch(settings, "DHCP_CONNECT", True) def test__passes_only_IPv4_interfaces_to_DHCPv4(self): configure_dhcpv4, _ = patch_configure_funcs(self) with transaction.atomic(): cluster = make_cluster(self) ipv4_interface = make_ipv4_interface(self, cluster) make_ipv6_interface(self, cluster) with post_commit_hooks: configure_dhcp(cluster) self.assertThat(configure_dhcpv4, MockCalledOnceWith( cluster, [ipv4_interface], ANY, sentinel.client)) def test__passes_only_IPv6_interfaces_to_DHCPv6(self): _, configure_dhcpv6 = patch_configure_funcs(self) with transaction.atomic(): cluster = make_cluster(self) ipv6_interface = make_ipv6_interface(self, cluster) make_ipv4_interface(self, cluster) with post_commit_hooks: configure_dhcp(cluster) self.assertThat(configure_dhcpv6, MockCalledOnceWith( cluster, [ipv6_interface], ANY, sentinel.client)) class TestDHCPConnect(MAASServerTestCase): """Tests for DHCP signals triggered when saving a cluster interface.""" def setUp(self): super(TestDHCPConnect, self).setUp() self.patch_autospec(dhcp, "configure_dhcp") def test_dhcp_config_gets_written_when_nodegroup_becomes_active(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.DISABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) self.patch(settings, "DHCP_CONNECT", True) nodegroup.accept() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_nodegroup_name_changes(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) self.patch(settings, "DHCP_CONNECT", True) nodegroup.name = factory.make_name('domain') nodegroup.save() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_IP_changes(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.nodegroupinterface_set.all() self.patch(settings, "DHCP_CONNECT", True) interface.ip = factory.pick_ip_in_network( interface.network, but_not=[interface.ip]) interface.save() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_management_changes(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) [interface] = nodegroup.nodegroupinterface_set.all() self.patch(settings, "DHCP_CONNECT", True) interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP interface.save() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_interface_name_changes(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) interface.interface = factory.make_name('itf') interface.save() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def test_dhcp_config_gets_written_when_netmask_changes(self): network = factory.make_ipv4_network(slash='255.255.255.0') subnet = factory.make_Subnet(cidr=unicode(network.cidr)) nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, subnet=subnet, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() self.patch(settings, "DHCP_CONNECT", True) interface.subnet_mask = '255.255.0.0' interface.save() self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup)) def 
    def test_dhcp_config_gets_written_when_interface_router_ip_changes(self):
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        [interface] = nodegroup.get_managed_interfaces()
        self.patch(settings, "DHCP_CONNECT", True)
        interface.subnet.gateway_ip = factory.pick_ip_in_network(
            interface.network, but_not=[interface.subnet.gateway_ip])
        interface.save()
        self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup))

    def test_dhcp_config_gets_written_when_ip_range_changes(self):
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        [interface] = nodegroup.get_managed_interfaces()
        self.patch(settings, "DHCP_CONNECT", True)
        interface.ip_range_low = unicode(
            IPAddress(interface.ip_range_low) + 1)
        interface.ip_range_high = unicode(
            IPAddress(interface.ip_range_high) - 1)
        interface.save()
        self.assertThat(dhcp.configure_dhcp, MockCalledOnceWith(nodegroup))

    def test_dhcp_config_is_not_written_when_foreign_dhcp_changes(self):
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        [interface] = nodegroup.get_managed_interfaces()
        self.patch(settings, "DHCP_CONNECT", True)
        interface.foreign_dhcp = factory.pick_ip_in_network(interface.network)
        interface.save()
        self.assertThat(dhcp.configure_dhcp, MockNotCalled())

    def test_dhcp_config_gets_written_when_ntp_server_changes(self):
        # When the "ntp_server" Config item is changed, check that all
        # nodegroups get their DHCP config re-written.
        # XXX 2015-09-17 blake_r: Isolation issue where an extra NodeGroup
        # already exists. So we remove all the nodegroups before performing
        # this test.
        NodeGroup.objects.all().delete()
        num_active_nodegroups = random.randint(1, 10)
        num_inactive_nodegroups = random.randint(1, 10)
        for _ in range(num_active_nodegroups):
            factory.make_NodeGroup(
                status=NODEGROUP_STATUS.ENABLED,
                management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        for _ in range(num_inactive_nodegroups):
            factory.make_NodeGroup(
                status=NODEGROUP_STATUS.DISABLED,
                management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        self.patch(settings, "DHCP_CONNECT", True)
        Config.objects.set_config("ntp_server", factory.make_ipv4_address())
        # Every nodegroup is updated, including those that are PENDING.
        expected_call_one_nodegroup = [call(ANY)]
        expected_calls = expected_call_one_nodegroup * (
            num_active_nodegroups + num_inactive_nodegroups)
        self.assertThat(dhcp.configure_dhcp, MockCallsMatch(*expected_calls))

    def test_dhcp_config_gets_written_when_managed_interface_is_deleted(self):
        interface = factory.make_NodeGroupInterface(
            factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED),
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        self.patch(settings, "DHCP_CONNECT", True)
        interface.delete()
        self.assertThat(
            dhcp.configure_dhcp, MockCalledOnceWith(interface.nodegroup))


# Matchers to check that a `Changes` object is empty, or not.
changes_are_empty = MatchesStructure.byEquality(hook=None, clusters=[])
changes_are_not_empty = Not(changes_are_empty)


class TestConsolidatingChangesWhenDisconnected(MAASServerTestCase):
    """Tests for `Changes` and `ChangeConsolidator` when disconnected.

    Where "disconnected" means where `settings.DHCP_CONNECT` is `False`.
""" def test__does_nothing(self): self.patch(settings, "DHCP_CONNECT", False) consolidator.configure(sentinel.cluster) self.assertThat(consolidator.changes, changes_are_empty) class TestConsolidatingChanges(MAASServerTestCase): """Tests for `Changes` and `ChangeConsolidator`.""" def setUp(self): super(TestConsolidatingChanges, self).setUp() self.patch(settings, "DHCP_CONNECT", True) def test__added_clusters_applied_post_commit(self): configure_dhcp_now = self.patch_autospec(dhcp, "configure_dhcp_now") cluster = make_cluster(self) consolidator.configure(cluster) self.assertThat(configure_dhcp_now, MockNotCalled()) post_commit_hooks.fire() self.assertThat(configure_dhcp_now, MockCalledOnceWith(cluster)) def test__added_clusters_are_consolidated(self): configure_dhcp_now = self.patch_autospec(dhcp, "configure_dhcp_now") cluster = make_cluster(self) consolidator.configure(cluster) consolidator.configure(cluster) post_commit_hooks.fire() self.assertThat(configure_dhcp_now, MockCalledOnceWith(cluster)) def test__changes_are_reset_post_commit(self): self.patch_autospec(dhcp, "configure_dhcp_now") # The changes start empty. self.assertThat(consolidator.changes, changes_are_empty) cluster = make_cluster(self) consolidator.configure(cluster) # The changes are not empty now. self.assertThat(consolidator.changes, changes_are_not_empty) # They are once again empty after the post-commit hook fires. post_commit_hooks.fire() self.assertThat(consolidator.changes, changes_are_empty) def test__changes_are_reset_post_commit_on_failure(self): exception_type = factory.make_exception_type() configure_dhcp_now = self.patch_autospec(dhcp, "configure_dhcp_now") configure_dhcp_now.side_effect = exception_type # This is going to crash later. consolidator.configure(make_cluster(self)) # The changes are empty after the post-commit hook fires. self.assertRaises(exception_type, post_commit_hooks.fire) self.assertThat(consolidator.changes, changes_are_empty) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_eventloop.py0000644000000000000000000002101113056115004022373 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.eventloop`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import socket from crochet import wait_for_reactor from django.db import connections from maasserver import ( bootresources, eventloop, nonces_cleanup, webapp, ) from maasserver.eventloop import DEFAULT_PORT from maasserver.rpc import regionservice from maasserver.testing.eventloop import RegionEventLoopFixture from maasserver.utils import dbtasks from maasserver.utils.orm import ( DisabledDatabaseConnection, transactional, ) from maastesting.factory import factory from maastesting.testcase import MAASTestCase from testtools.matchers import ( Equals, IsInstance, MatchesStructure, ) from twisted.internet import reactor from twisted.python.threadable import isInIOThread class TestRegionEventLoop(MAASTestCase): def test_name(self): self.patch(eventloop, "gethostname").return_value = "foo" self.patch(eventloop, "getpid").return_value = 12345 self.assertEqual("foo:pid=12345", eventloop.loop.name) def test_populate(self): an_eventloop = eventloop.RegionEventLoop() # At first there are no services. self.assertEqual( set(), {service.name for service in an_eventloop.services}) # populate() creates a service with each factory. 
        an_eventloop.populate().wait()
        self.assertEqual(
            {name for name, _ in an_eventloop.factories},
            {svc.name for svc in an_eventloop.services})
        # The services are not started.
        self.assertEqual(
            {name: False for name, _ in an_eventloop.factories},
            {svc.name: svc.running for svc in an_eventloop.services})

    def test_start_and_stop(self):
        # Replace the factories in RegionEventLoop with non-functional
        # dummies to avoid bringing up real services here, and ensure
        # that the services list is empty.
        self.useFixture(RegionEventLoopFixture())
        # At the outset, the eventloop's services are dormant.
        self.assertFalse(eventloop.loop.services.running)
        # RegionEventLoop.running is an alias for .services.running.
        self.assertFalse(eventloop.loop.running)
        self.assertEqual(
            set(eventloop.loop.services), set())
        # After starting the loop, the services list is populated, and
        # the services are started too.
        eventloop.loop.start().wait(5)
        self.addCleanup(lambda: eventloop.loop.reset().wait(5))
        self.assertTrue(eventloop.loop.services.running)
        self.assertTrue(eventloop.loop.running)
        self.assertEqual(
            {service.name for service in eventloop.loop.services},
            {name for name, _ in eventloop.loop.factories})
        # A shutdown hook is registered with the reactor.
        stopService = eventloop.loop.services.stopService
        self.assertEqual(
            ("shutdown", ("before", stopService, (), {})),
            eventloop.loop.handle)
        # After stopping the loop, the services list remains populated,
        # but the services are all stopped.
        eventloop.loop.stop().wait(5)
        self.assertFalse(eventloop.loop.services.running)
        self.assertFalse(eventloop.loop.running)
        self.assertEqual(
            {service.name for service in eventloop.loop.services},
            {name for name, _ in eventloop.loop.factories})
        # The hook has been cleared.
        self.assertIsNone(eventloop.loop.handle)

    def test_reset(self):
        # Replace the factories in RegionEventLoop with non-functional
        # dummies to avoid bringing up real services here, and ensure
        # that the services list is empty.
        self.useFixture(RegionEventLoopFixture())
        eventloop.loop.start().wait(5)
        eventloop.loop.reset().wait(5)
        # After stopping the loop, the services list is also emptied.
        self.assertFalse(eventloop.loop.services.running)
        self.assertFalse(eventloop.loop.running)
        self.assertEqual(
            set(eventloop.loop.services), set())
        # The hook has also been cleared.
        self.assertIsNone(eventloop.loop.handle)

    def test_reset_clears_factories(self):
        eventloop.loop.factories = (
            (factory.make_name("service"), None),
        )
        eventloop.loop.reset().wait(5)
        # The loop's factories are also reset.
        self.assertEqual(
            eventloop.loop.__class__.factories,
            eventloop.loop.factories)

    def test_module_globals(self):
        # Several module globals are references to a shared RegionEventLoop.
        self.assertIs(eventloop.services, eventloop.loop.services)
        # Must compare by equality here; these methods are decorated.
        self.assertEqual(eventloop.reset, eventloop.loop.reset)
        self.assertEqual(eventloop.start, eventloop.loop.start)
        self.assertEqual(eventloop.stop, eventloop.loop.stop)


class TestFactories(MAASTestCase):

    def test_make_DatabaseTaskService(self):
        service = eventloop.make_DatabaseTaskService()
        self.assertThat(service, IsInstance(dbtasks.DatabaseTasksService))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_DatabaseTaskService,
            {factory for _, factory in eventloop.loop.factories})

    def test_make_RegionService(self):
        service = eventloop.make_RegionService()
        self.assertThat(service, IsInstance(regionservice.RegionService))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_RegionService,
            {factory for _, factory in eventloop.loop.factories})

    def test_make_RegionAdvertisingService(self):
        service = eventloop.make_RegionAdvertisingService()
        self.assertThat(service, IsInstance(
            regionservice.RegionAdvertisingService))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_RegionAdvertisingService,
            {factory for _, factory in eventloop.loop.factories})

    def test_make_NonceCleanupService(self):
        service = eventloop.make_NonceCleanupService()
        self.assertThat(service, IsInstance(
            nonces_cleanup.NonceCleanupService))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_NonceCleanupService,
            {factory for _, factory in eventloop.loop.factories})

    def test_make_ImportResourcesService(self):
        service = eventloop.make_ImportResourcesService()
        self.assertThat(service, IsInstance(
            bootresources.ImportResourcesService))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_ImportResourcesService,
            {factory for _, factory in eventloop.loop.factories})

    def test_make_WebApplicationService(self):
        service = eventloop.make_WebApplicationService()
        self.assertThat(service, IsInstance(webapp.WebApplicationService))
        # The endpoint is set to DEFAULT_PORT, listening on all addresses.
        self.assertThat(service.endpoint, MatchesStructure.byEquality(
            reactor=reactor, addressFamily=socket.AF_INET))
        self.assertThat(
            service.endpoint.port, Equals(DEFAULT_PORT))
        self.assertThat(
            service.endpoint.socket.getsockname(),
            Equals(("0.0.0.0", DEFAULT_PORT)))
        # It is registered as a factory in RegionEventLoop.
        self.assertIn(
            eventloop.make_WebApplicationService,
            {factory for _, factory in eventloop.loop.factories})


class TestDisablingDatabaseConnections(MAASTestCase):

    @wait_for_reactor
    def test_connections_are_all_stubs_in_the_event_loop(self):
        self.assertTrue(isInIOThread())
        for alias in connections:
            connection = connections[alias]
            # isinstance() fails because it references __bases__, so
            # compare types here.
            self.assertEqual(
                DisabledDatabaseConnection,
                type(connection))

    @transactional
    def test_connections_are_all_usable_outside_the_event_loop(self):
        self.assertFalse(isInIOThread())
        for alias in connections:
            connection = connections[alias]
            self.assertTrue(connection.is_usable())


maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_exceptions.py0000644000000000000000000000641413056115004022553 0ustar 00000000000000
# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
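# A hypothetical sketch (not original code) of the contract these tests pin
# down: MAAS API exceptions know how to render themselves as Django HTTP
# responses, e.g.
#
#     response = MAASAPIBadRequest("boom").make_http_response()
#     response.status_code  # => 400 (httplib.BAD_REQUEST)
#     response.content      # => "boom"
#
# and MAASAPIValidationError additionally picks text/plain or JSON content
# depending on whether its message is a string, list, or dict.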
"""Tests for the exceptions module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from maasserver.exceptions import ( MAASAPIBadRequest, MAASAPIValidationError, Redirect, ) from maasserver.testing import extract_redirect from maastesting.factory import factory from maastesting.testcase import MAASTestCase import simplejson as json from testtools.matchers import Equals class TestExceptions(MAASTestCase): def test_MAASAPIException_produces_http_response(self): error = factory.make_string() exception = MAASAPIBadRequest(error) response = exception.make_http_response() self.assertEqual( (httplib.BAD_REQUEST, error), (response.status_code, response.content)) def test_Redirect_produces_redirect_to_given_URL(self): target = factory.make_string() exception = Redirect(target) response = exception.make_http_response() self.assertEqual(target, extract_redirect(response)) class TestMAASAPIValidationError(MAASTestCase): """Tests for the `MAASAPIValidationError` exception class.""" def test_returns_http_response(self): error = factory.make_string() exception = MAASAPIValidationError(error) response = exception.make_http_response() self.assertEqual( (httplib.BAD_REQUEST, error), (response.status_code, response.content)) def test_returns_textual_response_if_message_is_a_string(self): error = factory.make_string() exception = MAASAPIValidationError(error) response = exception.make_http_response() self.assertEqual( "text/plain; charset=utf-8", response.get("Content-Type")) def test_returns_json_response_if_message_is_a_list(self): errors = [ factory.make_string(), factory.make_string(), ] exception = MAASAPIValidationError(errors) response = exception.make_http_response() self.expectThat( response.get("Content-Type"), Equals("application/json; charset=utf-8")) self.expectThat(response.content, Equals(json.dumps(errors))) def test_if_message_is_single_item_list_returns_only_first_message(self): errors = [ factory.make_string(), ] exception = MAASAPIValidationError(errors) response = exception.make_http_response() self.expectThat( response.get("Content-Type"), Equals("text/plain; charset=utf-8")) self.expectThat(response.content, Equals(errors[0])) def test_returns_json_response_if_message_is_a_dict(self): errors = { 'error_1': [factory.make_string()], 'error_2': [factory.make_string()], } exception = MAASAPIValidationError(errors) response = exception.make_http_response() self.expectThat( response.get("Content-Type"), Equals("application/json; charset=utf-8")) self.expectThat(response.content, Equals(json.dumps(errors))) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_fields.py0000644000000000000000000005430113056115004021636 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test custom model fields.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import json from random import randint import re from django.core import serializers from django.core.exceptions import ValidationError from django.db import ( connection, DatabaseError, ) from django.db.models import BinaryField from maasserver.enum import ( INTERFACE_TYPE, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.fields import ( EditableBinaryField, IPListFormField, LargeObjectField, LargeObjectFile, MAC, NodeGroupFormField, register_mac_type, validate_mac, VerboseRegexField, VerboseRegexValidator, ) from maasserver.models import ( Interface, NodeGroup, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.tests.models import ( CIDRTestModel, IPv4CIDRTestModel, JSONFieldModel, LargeObjectFieldModel, MAASIPAddressFieldModel, XMLFieldModel, ) from maastesting.djangotestcase import TestModelMixin from maastesting.matchers import MockCalledOnceWith from psycopg2 import OperationalError from psycopg2.extensions import ISQLQuote class TestNodeGroupFormField(MAASServerTestCase): def test_label_from_instance_tolerates_missing_interface(self): nodegroup = factory.make_NodeGroup() nodegroup.nodegroupinterface_set.all().delete() self.assertEqual( nodegroup.name, NodeGroupFormField().label_from_instance(nodegroup)) def test_label_from_instance_shows_name_and_address(self): nodegroup = factory.make_NodeGroup() interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) self.assertEqual( '%s: %s' % (nodegroup.name, interface.ip), NodeGroupFormField().label_from_instance(nodegroup)) def test_clean_defaults_to_master(self): spellings_for_none = [None, '', b''] field = NodeGroupFormField() self.assertEqual( [NodeGroup.objects.ensure_master()] * len(spellings_for_none), [field.clean(spelling) for spelling in spellings_for_none]) def test_clean_accepts_nodegroup(self): nodegroup = factory.make_NodeGroup() self.assertEqual(nodegroup, NodeGroupFormField().clean(nodegroup)) def test_clean_accepts_id_as_unicode(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean("%s" % nodegroup.id)) def test_clean_accepts_id_as_bytes(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(("%s" % nodegroup.id).encode('ascii'))) def test_clean_accepts_uuid(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.uuid)) def test_clean_accepts_uuid_as_bytes(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.uuid.encode('ascii'))) def test_clean_accepts_cluster_name(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.cluster_name)) def test_clean_accepts_cluster_name_as_bytes(self): nodegroup = factory.make_NodeGroup() self.assertEqual( nodegroup, NodeGroupFormField().clean(nodegroup.cluster_name.encode('ascii'))) def test_clean_accepts_numeric_cluster_name(self): # This cluster has a name that looks just like a number. Pick a number # that's highly unlikely to clash with the node's ID. 
        cluster_name = '%s' % randint(1000000, 10000000)
        nodegroup = factory.make_NodeGroup(cluster_name=cluster_name)
        self.assertEqual(nodegroup, NodeGroupFormField().clean(cluster_name))

    def test_clean_rejects_unknown_nodegroup(self):
        self.assertRaises(
            ValidationError,
            NodeGroupFormField().clean,
            factory.make_name('nonesuch'))


class TestMAC(MAASServerTestCase):

    def test_conform_accepts_ISQLQuote(self):
        mac = MAC(factory.make_mac_address())
        self.assertEqual(mac, mac.__conform__(ISQLQuote))

    def test_new_MAC_with_None_is_None(self):
        self.assertIsNone(MAC(None))

    def test_new_MAC_with_empty_unicode_string_is_None(self):
        self.assertIsNone(MAC(u""))

    def test_new_MAC_with_empty_byte_string_is_None(self):
        self.assertIsNone(MAC(b""))

    def test_new_MAC_with_other_value_types_are_rejected(self):
        self.assertRaises(TypeError, MAC, 1234)
        self.assertRaises(TypeError, MAC, object())
        self.assertRaises(TypeError, MAC, self)

    def test_as_representation(self):
        addr = factory.make_mac_address()
        mac = MAC(addr)
        self.assertEqual("<MAC " + addr + ">", repr(mac))

    def test_as_unicode_string(self):
        addr = factory.make_mac_address()
        mac = MAC(addr)
        self.assertEqual(addr, unicode(mac))

    def test_as_byte_string(self):
        addr = factory.make_mac_address()
        mac = MAC(addr)
        self.assertEqual(addr.encode("ascii"), bytes(mac))

    def test_get_raw_returns_wrapped_address(self):
        addr = factory.make_mac_address()
        self.assertEqual(addr, MAC(addr).get_raw())

    def test_get_raw_punches_through_double_wrapping(self):
        addr = factory.make_mac_address()
        self.assertEqual(addr, MAC(MAC(addr)).get_raw())

    def test_raw_property_is_the_address(self):
        addr = factory.make_mac_address()
        self.assertEqual(addr, MAC(addr).raw)

    def test_getquoted_returns_SQL_for_MAC(self):
        addr = factory.make_mac_address()
        self.assertEqual("'%s'::macaddr" % addr, MAC(addr).getquoted())

    def test_getquoted_punches_through_double_wrapping(self):
        addr = factory.make_mac_address()
        self.assertEqual("'%s'::macaddr" % addr, MAC(MAC(addr)).getquoted())

    def test_mac_equals_self(self):
        mac = factory.make_MAC()
        self.assertTrue(mac == mac)

    def test_mac_equals_identical_mac(self):
        addr = factory.make_mac_address()
        self.assertTrue(MAC(addr) == MAC(addr))

    def test_eq_punches_through_double_wrapping_on_self(self):
        mac = factory.make_MAC()
        self.assertTrue(MAC(mac) == mac)

    def test_eq_punches_through_double_wrapping_on_other(self):
        mac = factory.make_MAC()
        self.assertTrue(mac == MAC(mac))

    def test_eq_punches_through_double_double_wrappings(self):
        mac = factory.make_MAC()
        self.assertTrue(MAC(mac) == MAC(mac))

    def test_mac_does_not_equal_other(self):
        self.assertFalse(factory.make_MAC() == factory.make_MAC())

    def test_mac_differs_from_other(self):
        self.assertTrue(factory.make_MAC() != factory.make_MAC())

    def test_mac_does_not_differ_from_self(self):
        mac = factory.make_MAC()
        self.assertFalse(mac != mac)

    def test_mac_address_does_not_equal_none(self):
        self.assertIsNotNone(factory.make_MAC())

    def test_ne_punches_through_double_wrapping_on_self(self):
        mac = factory.make_MAC()
        self.assertFalse(MAC(mac) != mac)

    def test_ne_punches_through_double_wrapping_on_other(self):
        mac = factory.make_MAC()
        self.assertFalse(mac != MAC(mac))

    def test_ne_punches_through_double_double_wrapping(self):
        mac = factory.make_MAC()
        self.assertFalse(MAC(mac) != MAC(mac))

    def test_different_macs_hash_differently(self):
        mac1 = factory.make_MAC()
        mac2 = factory.make_MAC()
        self.assertItemsEqual(set([mac1, mac2]), [mac1, mac2])

    def test_identical_macs_hash_identically(self):
        addr = factory.make_mac_address()
        self.assertItemsEqual(
            set([MAC(addr),
MAC(addr), MAC(MAC(addr)), addr]), [addr]) def test_django_serializes_MAC_to_JSON(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) query = Interface.objects.filter(id=interface.id) output = serializers.serialize('json', query) self.assertIn(json.dumps(interface.mac_address.get_raw()), output) self.assertIn('"%s"' % interface.mac_address.get_raw(), output) def test_register_mac_type_is_idempotent(self): register_mac_type(connection.cursor()) register_mac_type(connection.cursor()) # The test is that we get here without crashing. pass class TestVerboseRegexValidator(MAASServerTestCase): def test_VerboseRegexValidator_validates_value(self): validator = VerboseRegexValidator( regex="test", message="Unknown value") self.assertIsNone(validator('test')) def test_VerboseRegexValidator_validation_error_includes_value(self): message = "Unknown value: %(value)s" validator = VerboseRegexValidator(regex="test", message=message) value = factory.make_name('value') error = self.assertRaises(ValidationError, validator, value) self.assertEqual(message % {'value': value}, error.message) class TestVerboseRegexField(MAASServerTestCase): def test_VerboseRegexField_accepts_valid_value(self): field = VerboseRegexField(regex="test", message="Unknown value") self.assertEqual('test', field.clean('test')) def test_VerboseRegexField_validation_error_includes_value(self): message = "Unknown value: %(value)s" field = VerboseRegexField(regex="test", message=message) value = factory.make_name('value') error = self.assertRaises(ValidationError, field.clean, value) self.assertEqual([message % {'value': value}], error.messages) class TestMACAddressField(MAASServerTestCase): def test_mac_address_is_stored_normalized_and_loaded(self): interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, mac_address=' AA-bb-CC-dd-EE-Ff ') loaded_mac = Interface.objects.get(id=interface.id) self.assertEqual('aa:bb:cc:dd:ee:ff', loaded_mac.mac_address) def test_accepts_colon_separated_octets(self): validate_mac('00:aa:22:cc:44:dd') # No error. pass def test_accepts_dash_separated_octets(self): validate_mac('00-aa-22-cc-44-dd') # No error. pass def test_accepts_upper_and_lower_case(self): validate_mac('AA:BB:CC:dd:ee:ff') # No error. pass def test_accepts_leading_and_trailing_whitespace(self): validate_mac(' AA:BB:CC:DD:EE:FF ') # No error. pass def test_rejects_short_mac(self): self.assertRaises(ValidationError, validate_mac, '00:11:22:33:44') def test_rejects_long_mac(self): self.assertRaises( ValidationError, validate_mac, '00:11:22:33:44:55:66') def test_rejects_short_octet(self): self.assertRaises(ValidationError, validate_mac, '00:1:22:33:44:55') def test_rejects_long_octet(self): self.assertRaises(ValidationError, validate_mac, '00:11:222:33:44:55') class TestJSONObjectField(TestModelMixin, MAASServerTestCase): app = 'maasserver.tests' def test_stores_types(self): values = [ None, True, False, 3.33, "A simple string", [1, 2.43, "3"], {"not": 5, "another": "test"}, ] for value in values: name = factory.make_string() test_instance = JSONFieldModel(name=name, value=value) test_instance.save() test_instance = JSONFieldModel.objects.get(name=name) self.assertEqual(value, test_instance.value) def test_field_exact_lookup(self): # Value can be query via an 'exact' lookup. obj = [4, 6, {}] JSONFieldModel.objects.create(value=obj) test_instance = JSONFieldModel.objects.get(value=obj) self.assertEqual(obj, test_instance.value) def test_field_none_lookup(self): # Value can be queried via a 'isnull' lookup. 
        JSONFieldModel.objects.create(value=None)
        test_instance = JSONFieldModel.objects.get(value__isnull=True)
        self.assertIsNone(test_instance.value)

    def test_field_another_lookup_fails(self):
        # Other lookups are not allowed.
        self.assertRaises(TypeError, JSONFieldModel.objects.get, value__gte=3)


class TestXMLField(TestModelMixin, MAASServerTestCase):

    app = 'maasserver.tests'

    def test_loads_string(self):
        name = factory.make_string()
        value = "<test/>"
        XMLFieldModel.objects.create(name=name, value=value)
        instance = XMLFieldModel.objects.get(name=name)
        self.assertEqual(value, instance.value)

    def test_lookup_xpath_exists_result(self):
        name = factory.make_string()
        XMLFieldModel.objects.create(name=name, value="<test/>")
        result = XMLFieldModel.objects.raw(
            "SELECT * FROM docs WHERE xpath_exists(%s, value)", ["//test"])
        self.assertEqual(name, result[0].name)

    def test_lookup_xpath_exists_no_result(self):
        name = factory.make_string()
        XMLFieldModel.objects.create(name=name, value="<test/>")
        result = XMLFieldModel.objects.raw(
            "SELECT * FROM docs WHERE xpath_exists(%s, value)", ["//miss"])
        self.assertEqual([], list(result))

    def test_save_empty_rejected(self):
        self.assertRaises(
            DatabaseError, XMLFieldModel.objects.create, value="")

    def test_save_non_wellformed_rejected(self):
        self.assertRaises(
            DatabaseError, XMLFieldModel.objects.create, value="<bad>")

    def test_lookup_none(self):
        XMLFieldModel.objects.create(value=None)
        test_instance = XMLFieldModel.objects.get(value__isnull=True)
        self.assertIsNone(test_instance.value)

    def test_lookup_exact_unsupported(self):
        self.assertRaises(
            TypeError, XMLFieldModel.objects.get, value="<test/>")


class TestEditableBinaryField(MAASServerTestCase):

    def test_is_BinaryField(self):
        self.assertIsInstance(EditableBinaryField(), BinaryField)

    def test_is_editable(self):
        self.assertTrue(EditableBinaryField().editable)


class TestMAASIPAddressField(TestModelMixin, MAASServerTestCase):

    app = 'maasserver.tests'

    def test_uses_ip_comparison(self):
        ip_object = MAASIPAddressFieldModel.objects.create(
            ip_address='192.0.2.99')
        results = MAASIPAddressFieldModel.objects.filter(
            ip_address__lte='192.0.2.100')
        self.assertItemsEqual([ip_object], results)


class TestLargeObjectField(TestModelMixin, MAASServerTestCase):

    app = 'maasserver.tests'

    def test_stores_data(self):
        data = factory.make_string()
        test_name = factory.make_name('name')
        test_instance = LargeObjectFieldModel(name=test_name)
        large_object = LargeObjectFile()
        with large_object.open('wb') as stream:
            stream.write(data)
        test_instance.large_object = large_object
        test_instance.save()
        test_instance = LargeObjectFieldModel.objects.get(name=test_name)
        with test_instance.large_object.open('rb') as stream:
            saved_data = stream.read()
        self.assertEqual(data, saved_data)

    def test_with_exit_calls_close(self):
        data = factory.make_string()
        large_object = LargeObjectFile()
        with large_object.open('wb') as stream:
            self.addCleanup(large_object.close)
            mock_close = self.patch(large_object, 'close')
            stream.write(data)
        self.assertThat(mock_close, MockCalledOnceWith())

    def test_unlink(self):
        data = factory.make_string()
        large_object = LargeObjectFile()
        with large_object.open('wb') as stream:
            stream.write(data)
        oid = large_object.oid
        large_object.unlink()
        self.assertEqual(0, large_object.oid)
        self.assertRaises(
            OperationalError,
            connection.connection.lobject, oid)

    def test_iterates_on_block_size(self):
        # String size is a multiple of block_size in the testing model.
        data = factory.make_string(10 * 2)
        test_name = factory.make_name('name')
        test_instance = LargeObjectFieldModel(name=test_name)
large_object = LargeObjectFile() with large_object.open('wb') as stream: stream.write(data) test_instance.large_object = large_object test_instance.save() test_instance = LargeObjectFieldModel.objects.get(name=test_name) with test_instance.large_object.open('rb') as stream: offset = 0 for block in stream: self.assertEqual(data[offset:offset + 10], block) offset += 10 def test_get_db_prep_value_returns_None_when_value_None(self): field = LargeObjectField() self.assertEqual(None, field.get_db_prep_value(None)) def test_get_db_prep_value_returns_oid_when_value_LargeObjectFile(self): oid = randint(1, 100) field = LargeObjectField() obj_file = LargeObjectFile() obj_file.oid = oid self.assertEqual(oid, field.get_db_prep_value(obj_file)) def test_get_db_prep_value_raises_error_when_oid_less_than_zero(self): oid = randint(-100, 0) field = LargeObjectField() obj_file = LargeObjectFile() obj_file.oid = oid self.assertRaises(AssertionError, field.get_db_prep_value, obj_file) def test_get_db_prep_value_raises_error_when_not_LargeObjectFile(self): field = LargeObjectField() self.assertRaises( AssertionError, field.get_db_prep_value, factory.make_string()) def test_to_python_returns_None_when_value_None(self): field = LargeObjectField() self.assertEqual(None, field.to_python(None)) def test_to_python_returns_value_when_value_LargeObjectFile(self): field = LargeObjectField() obj_file = LargeObjectFile() self.assertEqual(obj_file, field.to_python(obj_file)) def test_to_python_returns_LargeObjectFile_when_value_int(self): oid = randint(1, 100) field = LargeObjectField() # South normally substitutes a FakeModel here, but with a baseline # schema, we can skip the migration that creates LargeObjectField. self.patch(field, 'model') obj_file = field.to_python(oid) self.assertEqual(oid, obj_file.oid) def test_to_python_returns_LargeObjectFile_when_value_long(self): oid = long(randint(1, 100)) field = LargeObjectField() # South normally substitutes a FakeModel here, but with a baseline # schema, we can skip the migration that creates LargeObjectField. 
self.patch(field, 'model') obj_file = field.to_python(oid) self.assertEqual(oid, obj_file.oid) def test_to_python_raises_error_when_not_valid_type(self): field = LargeObjectField() self.assertRaises( AssertionError, field.to_python, factory.make_string()) class TestCIDRField(TestModelMixin, MAASServerTestCase): app = 'maasserver.tests' def test_stores_cidr(self): cidr = '192.0.2.0/24' instance = CIDRTestModel.objects.create(cidr=cidr) self.assertEqual(cidr, reload_object(instance).cidr) def test_validates_cidr(self): cidr = 'invalid-cidr' error = self.assertRaises( ValidationError, CIDRTestModel.objects.create, cidr=cidr) self.assertEqual("invalid IPNetwork %s" % cidr, error.message) def test_stores_cidr_with_bit_set_in_host_part(self): cidr = '192.0.2.1/24' normalized_cidr = '192.0.2.0/24' instance = CIDRTestModel.objects.create(cidr=cidr) self.assertEqual(normalized_cidr, reload_object(instance).cidr) class TestIPv4CIDRField(TestModelMixin, MAASServerTestCase): app = 'maasserver.tests' def test_stores_cidr(self): cidr = '192.0.2.0/24' instance = IPv4CIDRTestModel.objects.create(cidr=cidr) self.assertEqual(cidr, reload_object(instance).cidr) def test_validates_cidr(self): cidr = 'invalid-cidr' error = self.assertRaises( ValidationError, IPv4CIDRTestModel.objects.create, cidr=cidr) self.assertEqual("Invalid network: %s" % cidr, error.message) def test_stores_cidr_with_bit_set_in_host_part(self): cidr = '192.0.2.1/24' normalized_cidr = '192.0.2.0/24' instance = IPv4CIDRTestModel.objects.create(cidr=cidr) self.assertEqual(normalized_cidr, reload_object(instance).cidr) def test_fails_to_store_ipv6_cidr(self): cidr = "2001:DB8::/32" self.assertRaises( ValidationError, IPv4CIDRTestModel.objects.create, cidr=cidr) class IPListFormFieldTest(MAASServerTestCase): def test_accepts_none(self): self.assertIsNone(IPListFormField().clean(None)) def test_accepts_single_ip(self): ip = factory.make_ip_address() self.assertEquals(ip, IPListFormField().clean(ip)) def test_accepts_space_separated_ips(self): ips = [factory.make_ip_address() for _ in range(5)] input = ' '.join(ips) self.assertEquals(input, IPListFormField().clean(input)) def test_accepts_comma_separated_ips(self): ips = [factory.make_ip_address() for _ in range(5)] input = ','.join(ips) self.assertEquals(' '.join(ips), IPListFormField().clean(input)) def test_rejects_invalid_input(self): invalid = factory.make_name('invalid') input = ' '.join([factory.make_ip_address(), invalid]) error = self.assertRaises( ValidationError, IPListFormField().clean, input) self.assertIn("Invalid IP address: %s" % invalid, error.message) def test_separators_dont_conflict_with_ipv4_address(self): self.assertIsNone(re.search( IPListFormField.separators, factory.make_ipv4_address())) def test_separators_dont_conflict_with_ipv6_address(self): self.assertIsNone(re.search( IPListFormField.separators, factory.make_ipv6_address())) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_bcache.py0000644000000000000000000003316613056115004023011 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
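# ---------------------------------------------------------------------------
# Editor's note: the IPListFormField tests above (at the end of
# test_fields.py) describe a field that accepts IP addresses separated by
# spaces and/or commas and normalises the result to a single
# space-separated string.  A minimal sketch of that clean() behaviour,
# assuming only what the tests assert -- the real field lives in
# maasserver.fields and also validates each address:

import re

# Matches the separators between addresses.  Note that it must not match
# anything inside an IPv4 or IPv6 address itself; see the two
# "separators_dont_conflict" tests above.
SEPARATORS = re.compile(r'[,\s]+')


def clean_ip_list(value):
    if value is None or value.strip() == '':
        return value
    ips = SEPARATORS.split(value.strip())
    # Each entry would be validated here; in the real field an invalid
    # entry raises ValidationError("Invalid IP address: %s" % ip).
    return ' '.join(ips)

# e.g. clean_ip_list('192.0.2.1,192.0.2.2') == '192.0.2.1 192.0.2.2'
# ---------------------------------------------------------------------------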
"""Tests for all forms that are used with `Bcache`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from uuid import uuid4 from maasserver.enum import ( CACHE_MODE_TYPE, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, ) from maasserver.forms import ( CreateBcacheForm, UpdateBcacheForm, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestCreateBcacheForm(MAASServerTestCase): def test_required_fields(self): node = factory.make_Node() form = CreateBcacheForm(node=node, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( {'cache_mode': [u'This field is required.']}, form.errors) def test_choices_are_being_populated_correctly(self): node = factory.make_Node(with_boot_disk=False) # Make 10 block devices. bds = [ factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) for _ in range(10) ] # Make 3 cache sets. cache_sets = [ factory.make_CacheSet(node=node) for _ in range(3) ] cache_set_choices = [ cache_set.id for cache_set in cache_sets ] + [ cache_set.name for cache_set in cache_sets ] # Partition the last 5 devices with a single partition. partitions = [ factory.make_PartitionTable(block_device=bd).add_partition() for bd in bds[5:] ] partition_choices = [ partition.id for partition in partitions ] + [ partition.name for partition in partitions ] # Get the IDs of the non-partitioned devices. block_devices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] + [ bd.name for bd in bds if bd.get_partitiontable() is None ] form = CreateBcacheForm(node=node, data={}) self.assertItemsEqual( cache_set_choices, [k for (k, v) in form.fields['cache_set'].choices]) self.assertItemsEqual( block_devices, [k for (k, v) in form.fields['backing_device'].choices]) self.assertItemsEqual( partition_choices, [k for (k, v) in form.fields['backing_partition'].choices]) def test_bcache_creation_on_save(self): node = factory.make_Node() backing_size = 10 * 1000 ** 4 cache_set = factory.make_CacheSet(node=node) backing_device = factory.make_PhysicalBlockDevice( node=node, size=backing_size) uuid = unicode(uuid4()) form = CreateBcacheForm(node=node, data={ 'name': 'bcache0', 'uuid': uuid, 'cache_set': cache_set.id, 'backing_device': backing_device.id, 'cache_mode': CACHE_MODE_TYPE.WRITEBACK, }) self.assertTrue(form.is_valid(), form.errors) bcache = form.save() self.assertEqual('bcache0', bcache.name) self.assertEqual(uuid, bcache.uuid) self.assertEqual(cache_set, bcache.cache_set) self.assertEqual( backing_device.get_effective_filesystem(), bcache.filesystems.get(fstype=FILESYSTEM_TYPE.BCACHE_BACKING)) self.assertEqual(backing_size, bcache.get_size()) self.assertEqual(FILESYSTEM_GROUP_TYPE.BCACHE, bcache.group_type) def test_bcache_creation_with_names(self): node = factory.make_Node() backing_size = 10 * 1000 ** 4 cache_set = factory.make_CacheSet(node=node) backing_device = factory.make_PhysicalBlockDevice( node=node, size=backing_size) backing_partition_table = factory.make_PartitionTable( block_device=backing_device) backing_partition = backing_partition_table.add_partition() uuid = unicode(uuid4()) form = CreateBcacheForm(node=node, data={ 'name': 'bcache0', 'uuid': uuid, 'cache_set': cache_set.name, 'backing_partition': backing_partition.name, 'cache_mode': CACHE_MODE_TYPE.WRITEBACK, }) self.assertTrue(form.is_valid(), form.errors) bcache = form.save() self.assertEqual('bcache0', bcache.name) self.assertEqual(uuid, bcache.uuid) 
self.assertEqual(cache_set, bcache.cache_set) self.assertEqual( backing_partition.get_effective_filesystem(), bcache.filesystems.get(fstype=FILESYSTEM_TYPE.BCACHE_BACKING)) self.assertEqual(FILESYSTEM_GROUP_TYPE.BCACHE, bcache.group_type) def test_bcache_creation_on_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) cache_set = factory.make_CacheSet(node=node) form = CreateBcacheForm(node=node, data={ 'name': 'bcache0', 'cache_set': cache_set.id, 'backing_device': boot_disk.id, 'cache_mode': CACHE_MODE_TYPE.WRITEBACK, }) self.assertTrue(form.is_valid(), form.errors) bcache = form.save() self.assertEqual('bcache0', bcache.name) self.assertEqual(cache_set, bcache.cache_set) self.assertEqual(FILESYSTEM_GROUP_TYPE.BCACHE, bcache.group_type) boot_partition = ( boot_disk.get_partitiontable().partitions.first()) self.assertEqual( boot_partition.get_effective_filesystem(), bcache.filesystems.get(fstype=FILESYSTEM_TYPE.BCACHE_BACKING)) def test_bcache_creation_with_invalid_names_fails(self): node = factory.make_Node() uuid = unicode(uuid4()) form = CreateBcacheForm(node=node, data={ 'name': 'bcache0', 'uuid': uuid, 'cache_set': "sdapart1", 'backing_partition': "sda-partXD", 'cache_mode': CACHE_MODE_TYPE.WRITEBACK, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "cache_set": [ "Select a valid choice. sdapart1 is not one of the " "available choices."], "backing_partition": [ "Select a valid choice. sda-partXD is not one of the " "available choices."], "__all__": [ "Bcache requires a cache_set."], }, form.errors) def test_bcache_creation_without_storage_fails(self): node = factory.make_Node() form = CreateBcacheForm(node=node, data={ 'cache_mode': CACHE_MODE_TYPE.WRITEAROUND }) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( { 'cache_set': [ 'This field is required.'] }, form.errors) def test_bcache_creation_without_cache_set_fails(self): node = factory.make_Node() backing_size = 10 * 1000 ** 4 backing_device = factory.make_PhysicalBlockDevice( node=node, size=backing_size) form = CreateBcacheForm(node=node, data={ 'cache_mode': CACHE_MODE_TYPE.WRITEAROUND, 'backing_device': backing_device.id }) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( { 'cache_set': [ 'This field is required.'] }, form.errors) def test_bcache_creation_without_backing_fails(self): node = factory.make_Node() cache_set = factory.make_CacheSet(node=node) form = CreateBcacheForm(node=node, data={ 'cache_mode': CACHE_MODE_TYPE.WRITEAROUND, 'cache_set': cache_set.id }) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( {'__all__': ['Either backing_device or backing_partition must be ' 'specified.']}, form.errors) class TestUpdateBcacheForm(MAASServerTestCase): def test_choices_are_being_populated_correctly(self): node = factory.make_Node(with_boot_disk=False) device_size = 1 * 1000 ** 4 # Make 10 block devices. bds = [ factory.make_PhysicalBlockDevice(node=node, size=device_size) for _ in range(10) ] # Make 3 cache sets. cache_sets = [ factory.make_CacheSet(node=node) for _ in range(3) ] cache_set_choices = [ cache_set.id for cache_set in cache_sets ] + [ cache_set.name for cache_set in cache_sets ] # Partition the last 5 devices with a single partition. 
        partitions = [
            factory.make_PartitionTable(block_device=bd).add_partition()
            for bd in bds[5:]
        ]
        partition_choices = [
            p.id
            for p in partitions
        ] + [
            p.name
            for p in partitions
        ]
        # Get the choices of the non-partitioned devices.
        block_device_choices = [
            bd.id
            for bd in bds
            if bd.get_partitiontable() is None
        ] + [
            bd.name
            for bd in bds
            if bd.get_partitiontable() is None
        ]
        # Use one of the cache sets and one of the backing devices.
        filesystems = [
            factory.make_Filesystem(
                partition=partitions[0],
                fstype=FILESYSTEM_TYPE.BCACHE_BACKING)
        ]
        bcache = factory.make_FilesystemGroup(
            group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
            cache_set=cache_sets[0],
            filesystems=filesystems)
        form = UpdateBcacheForm(bcache=bcache, data={})
        # Should allow all devices and partitions, including the ones
        # currently allocated for bcache.
        self.assertItemsEqual(
            cache_set_choices,
            [k for (k, v) in form.fields['cache_set'].choices])
        self.assertItemsEqual(
            block_device_choices,
            [k for (k, v) in form.fields['backing_device'].choices])
        self.assertItemsEqual(
            partition_choices,
            [k for (k, v) in form.fields['backing_partition'].choices])

    def test_bcache_update_with_invalid_mode(self):
        """Tests the mode field validation."""
        node = factory.make_Node()
        cache_set = factory.make_CacheSet(node=node)
        filesystems = [
            factory.make_Filesystem(
                partition=factory.make_PartitionTable(
                    block_device=factory.make_PhysicalBlockDevice(
                        node=node)).add_partition(),
                fstype=FILESYSTEM_TYPE.BCACHE_BACKING)
        ]
        bcache = factory.make_FilesystemGroup(
            group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
            cache_set=cache_set,
            filesystems=filesystems)
        form = UpdateBcacheForm(bcache=bcache, data={
            'cache_mode': 'Writeonly'
        })
        self.assertFalse(form.is_valid(), form.errors)
        self.assertIn(
            'Select a valid choice.', form.errors['cache_mode'][0])
        self.assertIn(
            'is not one of the available choices.',
            form.errors['cache_mode'][0])

    def test_bcache_with_invalid_block_device_fails(self):
        """Tests allowable device list validation."""
        node = factory.make_Node()
        cache_set = factory.make_CacheSet(node=node)
        filesystems = [
            factory.make_Filesystem(
                partition=factory.make_PartitionTable(
                    block_device=factory.make_PhysicalBlockDevice(
                        node=node)).add_partition(),
                fstype=FILESYSTEM_TYPE.BCACHE_BACKING)
        ]
        backing_device = factory.make_PhysicalBlockDevice()
        bcache = factory.make_FilesystemGroup(
            group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
            cache_set=cache_set,
            filesystems=filesystems)
        form = UpdateBcacheForm(bcache=bcache, data={
            'backing_device': backing_device.id
        })
        self.assertFalse(form.is_valid(), form.errors)
        self.assertIn(
            'Select a valid choice.', form.errors['backing_device'][0])
        self.assertIn(
            'is not one of the available choices.',
            form.errors['backing_device'][0])

    def test_bcache_update_with_boot_disk(self):
        node = factory.make_Node(with_boot_disk=False)
        boot_disk = factory.make_PhysicalBlockDevice(node=node)
        cache_set = factory.make_CacheSet(node=node)
        filesystems = [
            factory.make_Filesystem(
                partition=factory.make_PartitionTable(
                    block_device=factory.make_PhysicalBlockDevice(
                        node=node)).add_partition(),
                fstype=FILESYSTEM_TYPE.BCACHE_BACKING)
        ]
        bcache = factory.make_FilesystemGroup(
            group_type=FILESYSTEM_GROUP_TYPE.BCACHE,
            cache_set=cache_set,
            filesystems=filesystems)
        form = UpdateBcacheForm(bcache=bcache, data={
            'backing_device': boot_disk.id
        })
        self.assertTrue(form.is_valid(), form.errors)
        bcache = form.save()
        boot_partition = (
            boot_disk.get_partitiontable().partitions.first())
        self.assertEqual(
            boot_partition.get_effective_filesystem(),
bcache.filesystems.get(fstype=FILESYSTEM_TYPE.BCACHE_BACKING)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_blockdevice.py0000644000000000000000000003471413056115004024056 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for all forms that are used with `BlockDevice`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random import uuid from maasserver.enum import ( FILESYSTEM_FORMAT_TYPE_CHOICES, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, ) from maasserver.forms import ( CreatePhysicalBlockDeviceForm, FormatBlockDeviceForm, MountBlockDeviceForm, UpdatePhysicalBlockDeviceForm, UpdateVirtualBlockDeviceForm, ) from maasserver.models import Filesystem from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.partition import PARTITION_ALIGNMENT_SIZE from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.converters import round_size_to_nearest_block from maasserver.utils.orm import get_one from testtools.matchers import MatchesStructure class TestFormatBlockDeviceForm(MAASServerTestCase): def test_requires_fields(self): form = FormatBlockDeviceForm( block_device=factory.make_BlockDevice(), data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['fstype'], form.errors.keys()) def test_is_not_valid_if_block_device_has_partition_table(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() factory.make_PartitionTable(block_device=block_device) data = { 'fstype': fstype, } form = FormatBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because block device has a partition table.") self.assertEquals({ '__all__': [ "Cannot format block device with a partition table.", ]}, form._errors) def test_is_not_valid_if_invalid_format_fstype(self): block_device = factory.make_PhysicalBlockDevice() data = { 'fstype': FILESYSTEM_TYPE.LVM_PV, } form = FormatBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid fstype.") self.assertEquals({ 'fstype': [ "Select a valid choice. lvm-pv is not one of the " "available choices." 
], }, form._errors) def test_is_not_valid_if_invalid_uuid(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() data = { 'fstype': fstype, 'uuid': factory.make_string(size=32), } form = FormatBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_is_not_valid_if_invalid_uuid_append_XYZ(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() data = { 'fstype': fstype, 'uuid': "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaXYZ", } form = FormatBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_is_not_valid_if_invalid_uuid_prepend_XYZ(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() data = { 'fstype': fstype, 'uuid': "XYZaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } form = FormatBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_creates_filesystem(self): fsuuid = "%s" % uuid.uuid4() fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() data = { 'uuid': fsuuid, 'fstype': fstype, } form = FormatBlockDeviceForm(block_device, data=data) self.assertTrue(form.is_valid(), form._errors) form.save() filesystem = get_one( Filesystem.objects.filter(block_device=block_device)) self.assertIsNotNone(filesystem) self.assertEquals(fstype, filesystem.fstype) self.assertEquals(fsuuid, filesystem.uuid) def test_deletes_old_filesystem_and_creates_new_one(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) block_device = factory.make_PhysicalBlockDevice() prev_filesystem = factory.make_Filesystem(block_device=block_device) data = { 'fstype': fstype, } form = FormatBlockDeviceForm(block_device, data=data) self.assertTrue(form.is_valid(), form._errors) form.save() self.assertEquals( 1, Filesystem.objects.filter(block_device=block_device).count(), "Should only be one filesystem that exists for block device.") self.assertIsNone(reload_object(prev_filesystem)) filesystem = get_one( Filesystem.objects.filter(block_device=block_device)) self.assertIsNotNone(filesystem) self.assertEquals(fstype, filesystem.fstype) class TestMountBlockDeviceForm(MAASServerTestCase): def test_requires_fields(self): form = MountBlockDeviceForm( block_device=factory.make_BlockDevice(), data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['mount_point'], form.errors.keys()) def test_is_not_valid_if_block_device_has_no_filesystem(self): block_device = factory.make_PhysicalBlockDevice() data = { 'mount_point': factory.make_absolute_path(), } form = MountBlockDeviceForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because block device does " "not have a filesystem.") self.assertEquals({ '__all__': [ "Cannot mount an unformatted block device.", ]}, form._errors) def test_is_not_valid_if_block_device_in_filesystem_group(self): block_device = factory.make_PhysicalBlockDevice() filesystem = factory.make_Filesystem( block_device=block_device, fstype=FILESYSTEM_TYPE.LVM_PV) factory.make_FilesystemGroup( 
            group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, filesystems=[filesystem])
        data = {
            'mount_point': factory.make_absolute_path(),
        }
        form = MountBlockDeviceForm(block_device, data=data)
        self.assertFalse(
            form.is_valid(),
            "Should be invalid because block device is in a filesystem group.")
        self.assertEquals({
            '__all__': [
                "Filesystem is part of a filesystem group, and cannot be "
                "mounted.",
            ]}, form._errors)

    def test_is_not_valid_if_invalid_absolute_path(self):
        block_device = factory.make_PhysicalBlockDevice()
        factory.make_Filesystem(block_device=block_device)
        data = {
            'mount_point': factory.make_absolute_path()[1:],
        }
        form = MountBlockDeviceForm(block_device, data=data)
        self.assertFalse(
            form.is_valid(),
            "Should be invalid because it's not an absolute path.")
        self.assertEquals(
            {'mount_point': ["Enter a valid value."]}, form._errors)

    def test_is_not_valid_if_invalid_absolute_path_empty(self):
        block_device = factory.make_PhysicalBlockDevice()
        factory.make_Filesystem(block_device=block_device)
        data = {
            'mount_point': "",
        }
        form = MountBlockDeviceForm(block_device, data=data)
        self.assertFalse(
            form.is_valid(),
            "Should be invalid because it's not an absolute path.")
        self.assertEquals(
            {'mount_point': ["This field is required."]}, form._errors)

    def test_is_not_valid_if_invalid_absolute_path_too_long(self):
        block_device = factory.make_PhysicalBlockDevice()
        factory.make_Filesystem(block_device=block_device)
        mount_point = factory.make_absolute_path(directory_length=4096)
        data = {
            'mount_point': mount_point,
        }
        form = MountBlockDeviceForm(block_device, data=data)
        self.assertFalse(
            form.is_valid(),
            "Should be invalid because the path is too long.")
        self.assertEquals({
            'mount_point': [
                "Ensure this value has at most 4095 characters "
                "(it has %s)." % len(mount_point)
            ],
        }, form._errors)

    def test_sets_mount_point_on_filesystem(self):
        block_device = factory.make_PhysicalBlockDevice()
        filesystem = factory.make_Filesystem(block_device=block_device)
        mount_point = factory.make_absolute_path()
        data = {
            'mount_point': mount_point,
        }
        form = MountBlockDeviceForm(block_device, data=data)
        self.assertTrue(form.is_valid(), form._errors)
        form.save()
        filesystem = reload_object(filesystem)
        self.assertEquals(mount_point, filesystem.mount_point)


class TestCreatePhysicalBlockDeviceForm(MAASServerTestCase):

    def test_requires_fields(self):
        node = factory.make_Node()
        form = CreatePhysicalBlockDeviceForm(node, data={})
        self.assertFalse(form.is_valid(), form.errors)
        self.assertEquals({
            'name': ['This field is required.'],
            'size': ['This field is required.'],
            'block_size': ['This field is required.'],
            '__all__': [
                'serial/model are required if id_path is not provided.'],
            }, form.errors)

    def test_creates_physical_block_device_with_model_serial(self):
        node = factory.make_Node()
        name = factory.make_name("sd")
        model = factory.make_name("model")
        serial = factory.make_name("serial")
        size = random.randint(
            MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE * 10)
        block_size = 4096
        form = CreatePhysicalBlockDeviceForm(node, data={
            'name': name,
            'model': model,
            'serial': serial,
            'size': size,
            'block_size': block_size,
            })
        self.assertTrue(form.is_valid(), form.errors)
        block_device = form.save()
        self.assertThat(block_device, MatchesStructure.byEquality(
            name=name, model=model, serial=serial, size=size,
            block_size=block_size,
            ))

    def test_creates_physical_block_device_with_id_path(self):
        node = factory.make_Node()
        name = factory.make_name("sd")
        id_path = factory.make_absolute_path()
        size = random.randint(
            MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE * 10)
block_size = 4096 form = CreatePhysicalBlockDeviceForm(node, data={ 'name': name, 'id_path': id_path, 'size': size, 'block_size': block_size, }) self.assertTrue(form.is_valid(), form.errors) block_device = form.save() self.assertThat(block_device, MatchesStructure.byEquality( name=name, id_path=id_path, size=size, block_size=block_size, )) class TestUpdatePhysicalBlockDeviceForm(MAASServerTestCase): def test_requires_no_fields(self): block_device = factory.make_PhysicalBlockDevice() form = UpdatePhysicalBlockDeviceForm(instance=block_device, data={}) self.assertTrue(form.is_valid(), form.errors) self.assertItemsEqual([], form.errors.keys()) def test_updates_physical_block_device(self): block_device = factory.make_PhysicalBlockDevice() name = factory.make_name("sd") model = factory.make_name("model") serial = factory.make_name("serial") id_path = factory.make_absolute_path() size = random.randint( MIN_BLOCK_DEVICE_SIZE, MIN_BLOCK_DEVICE_SIZE * 10) block_size = 4096 form = UpdatePhysicalBlockDeviceForm(instance=block_device, data={ 'name': name, 'model': model, 'serial': serial, 'id_path': id_path, 'size': size, 'block_size': block_size, }) self.assertTrue(form.is_valid(), form.errors) block_device = form.save() self.assertThat(block_device, MatchesStructure.byEquality( name=name, model=model, serial=serial, id_path=id_path, size=size, block_size=block_size, )) class TestUpdateVirtualBlockDeviceForm(MAASServerTestCase): def test_requires_no_fields(self): block_device = factory.make_VirtualBlockDevice() form = UpdateVirtualBlockDeviceForm(instance=block_device, data={}) self.assertTrue(form.is_valid(), form.errors) self.assertItemsEqual([], form.errors.keys()) def test_updates_virtual_block_device(self): block_device = factory.make_VirtualBlockDevice() name = factory.make_name("lv") vguuid = "%s" % uuid.uuid4() size = random.randint( MIN_BLOCK_DEVICE_SIZE, block_device.filesystem_group.get_size()) form = UpdateVirtualBlockDeviceForm(instance=block_device, data={ 'name': name, 'uuid': vguuid, 'size': size, }) self.assertTrue(form.is_valid(), form.errors) block_device = form.save() expected_size = round_size_to_nearest_block( size, PARTITION_ALIGNMENT_SIZE, False) self.assertThat(block_device, MatchesStructure.byEquality( name=name, uuid=vguuid, size=expected_size, )) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_bootresource.py0000644000000000000000000001562213056115004024314 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
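# ---------------------------------------------------------------------------
# Editor's note: test_updates_virtual_block_device above expects the
# requested size to be rounded to a multiple of PARTITION_ALIGNMENT_SIZE
# via round_size_to_nearest_block(size, block_size, False).  The sketch
# below captures the semantics implied by that test -- an assumption based
# on how the helper is used here, not a copy of
# maasserver.utils.converters:

def round_size_to_nearest_block(size, block_size, round_up=True):
    # How many whole blocks fit, and whether anything is left over.
    blocks, remainder = divmod(size, block_size)
    if remainder > 0 and round_up:
        blocks += 1
    return blocks * block_size

# e.g. round_size_to_nearest_block(10000, 4096, False) == 8192, while
# round_size_to_nearest_block(10000, 4096, True) == 12288.
# ---------------------------------------------------------------------------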
"""Tests for `BootSourceForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from django.core.files.uploadedfile import SimpleUploadedFile from maasserver.enum import ( BOOT_RESOURCE_FILE_TYPE, BOOT_RESOURCE_TYPE, ) from maasserver.forms import BootResourceForm from maasserver.models import BootResource from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestBootResourceForm(MAASServerTestCase): def pick_filetype(self): upload_type = random.choice([ 'tgz', 'ddtgz']) if upload_type == 'tgz': filetype = BOOT_RESOURCE_FILE_TYPE.ROOT_TGZ elif upload_type == 'ddtgz': filetype = BOOT_RESOURCE_FILE_TYPE.ROOT_DD return upload_type, filetype def test_creates_boot_resource(self): name = factory.make_name('name') title = factory.make_name('title') architecture = make_usable_architecture(self) subarch = architecture.split('/')[1] upload_type, filetype = self.pick_filetype() size = random.randint(1024, 2048) content = factory.make_string(size).encode('utf-8') upload_name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=upload_name) data = { 'name': name, 'title': title, 'architecture': architecture, 'filetype': upload_type, } form = BootResourceForm(data=data, files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() resource = BootResource.objects.get( rtype=BOOT_RESOURCE_TYPE.UPLOADED, name=name, architecture=architecture) resource_set = resource.sets.first() rfile = resource_set.files.first() self.assertEqual(title, resource.extra['title']) self.assertEqual(subarch, resource.extra['subarches']) self.assertTrue(filetype, rfile.filetype) self.assertTrue(filetype, rfile.filename) self.assertTrue(size, rfile.largefile.total_size) with rfile.largefile.content.open('rb') as stream: written_content = stream.read() self.assertEqual(content, written_content) def test_adds_boot_resource_set_to_existing_boot_resource(self): name = factory.make_name('name') architecture = make_usable_architecture(self) resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.UPLOADED, name=name, architecture=architecture) upload_type, filetype = self.pick_filetype() size = random.randint(1024, 2048) content = factory.make_string(size).encode('utf-8') upload_name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=upload_name) data = { 'name': name, 'architecture': architecture, 'filetype': upload_type, } form = BootResourceForm(data=data, files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() resource = reload_object(resource) resource_set = resource.sets.order_by('id').last() rfile = resource_set.files.first() self.assertTrue(filetype, rfile.filetype) self.assertTrue(filetype, rfile.filename) self.assertTrue(size, rfile.largefile.total_size) with rfile.largefile.content.open('rb') as stream: written_content = stream.read() self.assertEqual(content, written_content) def test_creates_boot_resoures_with_generated_rtype(self): os = factory.make_name('os') series = factory.make_name('series') name = '%s/%s' % (os, series) architecture = make_usable_architecture(self) upload_type, filetype = self.pick_filetype() size = random.randint(1024, 2048) content = factory.make_string(size).encode('utf-8') 
upload_name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=upload_name) data = { 'name': name, 'architecture': architecture, 'filetype': upload_type, } form = BootResourceForm(data=data, files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() resource = BootResource.objects.get( rtype=BOOT_RESOURCE_TYPE.GENERATED, name=name, architecture=architecture) resource_set = resource.sets.first() rfile = resource_set.files.first() self.assertTrue(filetype, rfile.filetype) self.assertTrue(filetype, rfile.filename) self.assertTrue(size, rfile.largefile.total_size) with rfile.largefile.content.open('rb') as stream: written_content = stream.read() self.assertEqual(content, written_content) def test_adds_boot_resource_set_to_existing_generated_boot_resource(self): os = factory.make_name('os') series = factory.make_name('series') name = '%s/%s' % (os, series) architecture = make_usable_architecture(self) resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.GENERATED, name=name, architecture=architecture) upload_type, filetype = self.pick_filetype() size = random.randint(1024, 2048) content = factory.make_string(size).encode('utf-8') upload_name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=upload_name) data = { 'name': name, 'architecture': architecture, 'filetype': upload_type, } form = BootResourceForm(data=data, files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() resource = reload_object(resource) resource_set = resource.sets.order_by('id').last() rfile = resource_set.files.first() self.assertTrue(filetype, rfile.filetype) self.assertTrue(filetype, rfile.filename) self.assertTrue(size, rfile.largefile.total_size) with rfile.largefile.content.open('rb') as stream: written_content = stream.read() self.assertEqual(content, written_content) def test_requires_fields(self): form = BootResourceForm(data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual([ 'name', 'architecture', 'filetype', 'content', ], form.errors.keys()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_bootsource.py0000644000000000000000000000477613056115004023775 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
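# ---------------------------------------------------------------------------
# Editor's note: the "generated rtype" tests above rely on BootResourceForm
# deriving the resource type from the name: an "os/series" name is stored
# as BOOT_RESOURCE_TYPE.GENERATED, while a bare name becomes
# BOOT_RESOURCE_TYPE.UPLOADED.  A sketch of that dispatch; the literal
# enum values below are assumptions for illustration (the real values live
# in maasserver.enum.BOOT_RESOURCE_TYPE):

BOOT_RESOURCE_TYPE_GENERATED = 1   # assumed value
BOOT_RESOURCE_TYPE_UPLOADED = 2    # assumed value


def rtype_for_name(name):
    # A name like 'ubuntu/trusty' identifies a generated image;
    # anything else is treated as a custom upload.
    if '/' in name:
        return BOOT_RESOURCE_TYPE_GENERATED
    return BOOT_RESOURCE_TYPE_UPLOADED
# ---------------------------------------------------------------------------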
"""Tests for `BootSourceForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from cStringIO import StringIO from django.core.files.uploadedfile import InMemoryUploadedFile from maasserver.forms import BootSourceForm from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.utils import sample_binary_data class TestBootSourceForm(MAASServerTestCase): """Tests for `BootSourceForm`.""" def setUp(self): super(TestBootSourceForm, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def test_edits_boot_source_object(self): boot_source = factory.make_BootSource() params = { 'url': 'http://example.com/', 'keyring_filename': factory.make_name('keyring_filename'), } form = BootSourceForm(instance=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) form.save() boot_source = reload_object(boot_source) self.assertAttributes(boot_source, params) def test_creates_boot_source_object_with_keyring_filename(self): params = { 'url': 'http://example.com/', 'keyring_filename': factory.make_name('keyring_filename'), } form = BootSourceForm(data=params) self.assertTrue(form.is_valid(), form._errors) boot_source = form.save() self.assertAttributes(boot_source, params) def test_creates_boot_source_object_with_keyring_data(self): in_mem_file = InMemoryUploadedFile( StringIO(sample_binary_data), name=factory.make_name('name'), field_name=factory.make_name('field-name'), content_type='application/octet-stream', size=len(sample_binary_data), charset=None) params = {'url': 'http://example.com/'} form = BootSourceForm( data=params, files={'keyring_data': in_mem_file}) self.assertTrue(form.is_valid(), form._errors) boot_source = form.save() self.assertEqual(sample_binary_data, bytes(boot_source.keyring_data)) self.assertAttributes(boot_source, params) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_bootsourceselection.py0000644000000000000000000002502613056115004025672 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `BootSourceSelectionForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.exceptions import ValidationError from maasserver.forms import BootSourceSelectionForm from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestBootSourceSelectionForm(MAASServerTestCase): """Tests for `BootSourceSelectionForm`.""" def setUp(self): super(TestBootSourceSelectionForm, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def make_valid_source_selection_params(self, boot_source=None): # Helper that creates a valid BootSourceCache and parameters for # a BootSourceSelectionForm that will validate against the # cache. 
if boot_source is None: boot_source = factory.make_BootSource() arch = factory.make_name('arch') arch2 = factory.make_name('arch') subarch = factory.make_name('subarch') subarch2 = factory.make_name('subarch') label = factory.make_name('label') label2 = factory.make_name('label') params = { 'os': factory.make_name('os'), 'release': factory.make_name('release'), 'arches': [arch, arch2], 'subarches': [subarch, subarch2], 'labels': [label, label2], } factory.make_BootSourceCache( boot_source=boot_source, os=params['os'], release=params['release'], arch=arch, subarch=subarch, label=label, ) factory.make_BootSourceCache( boot_source=boot_source, os=params['os'], release=params['release'], arch=arch2, subarch=subarch2, label=label2, ) return params def test_edits_boot_source_selection_object(self): boot_source_selection = factory.make_BootSourceSelection() boot_source = boot_source_selection.boot_source params = self.make_valid_source_selection_params(boot_source) form = BootSourceSelectionForm( instance=boot_source_selection, data=params) self.assertTrue(form.is_valid(), form._errors) form.save() boot_source_selection = reload_object(boot_source_selection) self.assertAttributes(boot_source_selection, params) def test_creates_boot_source_selection_object(self): boot_source = factory.make_BootSource() params = self.make_valid_source_selection_params(boot_source) form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) boot_source_selection = form.save() self.assertAttributes(boot_source_selection, params) def test_cannot_create_duplicate_entry(self): boot_source = factory.make_BootSource() params = self.make_valid_source_selection_params(boot_source) form = BootSourceSelectionForm( boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) form.save() # Duplicates should be detected for the same boot_source, os and # release, the other fields are irrelevant. 
dup_params = { 'os': params['os'], 'release': params['release'], } form = BootSourceSelectionForm( boot_source=boot_source, data=dup_params) self.assertRaises(ValidationError, form.save) def test_validates_if_boot_source_cache_has_same_os_and_release(self): boot_source = factory.make_BootSource() boot_cache = factory.make_BootSourceCache(boot_source) params = { 'os': boot_cache.os, 'release': boot_cache.release, } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) def test_rejects_if_boot_source_cache_has_different_os(self): boot_source = factory.make_BootSource() boot_cache = factory.make_BootSourceCache(boot_source) params = { 'os': factory.make_name('os'), 'release': boot_cache.release, } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertFalse(form.is_valid()) self.assertEqual( { "os": [ "OS %s with release %s has no available images " "for download" % (params['os'], boot_cache.release) ] }, form._errors) def test_rejects_if_boot_source_cache_has_different_release(self): boot_source = factory.make_BootSource() boot_cache = factory.make_BootSourceCache(boot_source) params = { 'os': boot_cache.os, 'release': factory.make_name('release'), } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertFalse(form.is_valid()) self.assertEqual( { "os": [ "OS %s with release %s has no available images " "for download" % (boot_cache.os, params['release']) ] }, form._errors) def make_some_caches(self, boot_source, os, release): # Make a few BootSourceCache records that the following tests can use # to validate against when using BootSourceSelectionForm. return factory.make_many_BootSourceCaches( 3, boot_source=boot_source, os=os, release=release) def test_validates_if_boot_source_cache_has_arch(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') boot_caches = self.make_some_caches(boot_source, os, release) # Request arches that are in two of the cache records. params = { 'os': os, 'release': release, 'arches': [boot_caches[0].arch, boot_caches[2].arch], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) def test_rejects_if_boot_source_cache_does_not_have_arch(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') factory.make_BootSourceCache( boot_source, os=os, release=release) params = { 'os': os, 'release': release, 'arches': [factory.make_name('arch')], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertFalse(form.is_valid()) self.assertEqual( { "arches": [ "No available images to download for %s" % params['arches'] ] }, form._errors) def test_validates_if_boot_source_cache_has_subarch(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') boot_caches = self.make_some_caches(boot_source, os, release) # Request subarches that are in two of the cache records. 
params = { 'os': os, 'release': release, 'subarches': [boot_caches[0].subarch, boot_caches[2].subarch], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) def test_rejects_if_boot_source_cache_does_not_have_subarch(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') factory.make_BootSourceCache( boot_source, os=os, release=release) params = { 'os': os, 'release': release, 'subarches': [factory.make_name('subarch')], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertFalse(form.is_valid()) self.assertEqual( { "subarches": [ "No available images to download for %s" % params['subarches'] ] }, form._errors) def test_validates_if_boot_source_cache_has_label(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') boot_caches = self.make_some_caches(boot_source, os, release) # Request labels that are in two of the cache records. params = { 'os': os, 'release': release, 'labels': [boot_caches[0].label, boot_caches[2].label], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) def test_rejects_if_boot_source_cache_does_not_have_label(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') factory.make_BootSourceCache( boot_source, os=os, release=release) params = { 'os': os, 'release': release, 'labels': [factory.make_name('label')], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertFalse(form.is_valid()) self.assertEqual( { "labels": [ "No available images to download for %s" % params['labels'] ] }, form._errors) def test_star_values_in_request_validate_against_any_cache(self): boot_source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') factory.make_BootSourceCache( boot_source, os=os, release=release) params = { 'os': os, 'release': release, 'arches': ['*'], 'subarches': ['*'], 'labels': ['*'], } form = BootSourceSelectionForm(boot_source=boot_source, data=params) self.assertTrue(form.is_valid(), form._errors) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_bulknodeaction.py0000644000000000000000000002504513056115004024602 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `BulkNodeActionForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.db import transaction from maasserver.enum import NODE_STATUS from maasserver.exceptions import NodeActionError from maasserver.forms import ( BulkNodeActionForm, SetZoneBulkAction, ) from maasserver.models import Node from maasserver.node_action import ( Delete, PowerOff, PowerOn, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) class TestBulkNodeActionForm(MAASServerTestCase): """Tests for `BulkNodeActionForm`.""" def test_first_action_is_empty(self): form = BulkNodeActionForm(user=factory.make_admin()) action = form.fields['action'] default_action = action.choices[0][0] required = action.required # The default action is the empty string (i.e. 
no action) # and it's a required field. self.assertEqual(('', True), (default_action, required)) def test_admin_is_offered_bulk_node_change(self): form = BulkNodeActionForm(user=factory.make_admin()) choices = form.fields['action'].choices self.assertNotEqual( [], [choice for choice in choices if choice[0] == 'set_zone']) def test_nonadmin_is_not_offered_bulk_node_change(self): form = BulkNodeActionForm(user=factory.make_User()) choices = form.fields['action'].choices self.assertEqual( [], [choice for choice in choices if choice[0] == 'set_zone']) def test_rejects_empty_system_ids(self): form = BulkNodeActionForm( user=factory.make_admin(), data=dict(action=Delete.name, system_id=[])) self.assertFalse(form.is_valid(), form._errors) self.assertEqual( ["No node selected."], form._errors['system_id']) def test_rejects_invalid_system_ids(self): node = factory.make_Node() system_id_to_delete = [node.system_id, "wrong-system_id"] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=Delete.name, system_id=system_id_to_delete)) self.assertFalse(form.is_valid(), form._errors) self.assertEqual( ["Some of the given system ids are invalid system ids."], form._errors['system_id']) def test_rejects_if_no_action(self): form = BulkNodeActionForm( user=factory.make_admin(), data=dict(system_id=[factory.make_Node().system_id])) self.assertFalse(form.is_valid(), form._errors) def test_rejects_if_invalid_action(self): form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action="invalid-action", system_id=[factory.make_Node().system_id])) self.assertFalse(form.is_valid(), form._errors) def test_set_zone_does_not_work_if_not_admin(self): node = factory.make_Node() form = BulkNodeActionForm( user=factory.make_User(), data={ 'action': SetZoneBulkAction.name, 'zone': factory.make_Zone().name, 'system_id': [node.system_id], }) self.assertFalse(form.is_valid()) self.assertIn( "Select a valid choice. " "set_zone is not one of the available choices.", form._errors['action']) def test_zone_field_rejects_empty_zone(self): # If the field is present, the zone name has to be valid # and the empty string is not a valid zone name. form = BulkNodeActionForm( user=factory.make_admin(), data={ 'action': SetZoneBulkAction.name, 'zone': '', }) self.assertFalse(form.is_valid(), form._errors) self.assertEqual( ["This field is required."], form._errors['zone']) def test_zone_field_present_if_data_is_empty(self): form = BulkNodeActionForm( user=factory.make_admin(), data={}) self.assertIn('zone', form.fields) def test_zone_field_not_present_action_is_not_SetZoneBulkAction(self): form = BulkNodeActionForm( user=factory.make_admin(), data={'action': factory.make_name('action')}) self.assertNotIn('zone', form.fields) class TestBulkNodeActionFormSave(MAASTransactionServerTestCase): """Tests for `BulkNodeActionForm.save()`. These are transactional tests, meaning that commits to the database must be made to test behaviour. 
""" def test_performs_action(self): with transaction.atomic(): node1 = factory.make_Node() node2 = factory.make_Node() node3 = factory.make_Node() system_id_to_delete = [node1.system_id, node2.system_id] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=Delete.name, system_id=system_id_to_delete)) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [2, 0, 0], [done, not_actionable, not_permitted]) with transaction.atomic(): existing_nodes = list(Node.objects.filter( system_id__in=system_id_to_delete)) node3_system_id = reload_object(node3).system_id self.assertEqual( [[], node3.system_id], [existing_nodes, node3_system_id]) def test_perform_action_catches_start_action_errors(self): error_text = factory.make_string(prefix="NodeActionError") exc = NodeActionError(error_text) self.patch(PowerOn, "execute").side_effect = exc with transaction.atomic(): user = factory.make_User() factory.make_SSHKey(user) node = factory.make_Node(status=NODE_STATUS.READY, owner=user) form = BulkNodeActionForm( user=user, data=dict( action=PowerOn.name, system_id=[node.system_id])) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 1, 0], [done, not_actionable, not_permitted]) def test_gives_stat_when_not_applicable(self): with transaction.atomic(): node1 = factory.make_Node(status=NODE_STATUS.NEW) node2 = factory.make_Node(status=NODE_STATUS.FAILED_COMMISSIONING) system_id_for_action = [node1.system_id, node2.system_id] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=PowerOn.name, system_id=system_id_for_action)) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 2, 0], [done, not_actionable, not_permitted]) def test_gives_stat_when_no_permission(self): with transaction.atomic(): user = factory.make_User() node = factory.make_Node( status=NODE_STATUS.DEPLOYED, owner=factory.make_User()) system_id_for_action = [node.system_id] form = BulkNodeActionForm( user=user, data=dict( action=PowerOff.name, system_id=system_id_for_action)) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 0, 1], [done, not_actionable, not_permitted]) def test_gives_stat_when_action_is_inhibited(self): with transaction.atomic(): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=PowerOn.name, system_id=[node.system_id])) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 1, 0], [done, not_actionable, not_permitted]) def test_set_zone_sets_zone_on_node(self): with transaction.atomic(): node = factory.make_Node() zone = factory.make_Zone() form = BulkNodeActionForm( user=factory.make_admin(), data={ 'action': 'set_zone', 'zone': zone.name, 'system_id': [node.system_id], }) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [1, 0, 0], [done, not_actionable, not_permitted]) with transaction.atomic(): node = reload_object(node) self.assertEqual(zone, node.zone) def test_set_zone_leaves_unselected_nodes_alone(self): with transaction.atomic(): 
unselected_node = factory.make_Node() original_zone = unselected_node.zone form = BulkNodeActionForm( user=factory.make_admin(), data={ 'action': SetZoneBulkAction.name, 'zone': factory.make_Zone().name, 'system_id': [factory.make_Node().system_id], }) self.assertTrue(form.is_valid(), form._errors) with transaction.atomic(): done, not_actionable, not_permitted = form.save() self.assertEqual( [1, 0, 0], [done, not_actionable, not_permitted]) with transaction.atomic(): unselected_node = reload_object(unselected_node) self.assertEqual(original_zone, unselected_node.zone) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_cacheset.py0000644000000000000000000001736113056115004023362 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for all forms that are used with `CacheSet`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import ( CreateCacheSetForm, UpdateCacheSetForm, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestCreateCacheSetForm(MAASServerTestCase): def test_required_fields(self): node = factory.make_Node() form = CreateCacheSetForm(node=node, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( {'__all__': [ 'Either cache_device or cache_partition must be specified.']}, form.errors) def test_choices_are_being_populated_correctly(self): node = factory.make_Node(with_boot_disk=False) # Make 10 block devices. bds = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(10) ] # Partition the last 5 devices with a single partition. partitions = [ factory.make_PartitionTable(block_device=bd).add_partition() for bd in bds[5:] ] partition_choices = [ partition.id for partition in partitions ] + [ partition.name for partition in partitions ] # Get the IDs of the non-partitioned devices. 
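        # (Both the id and the name of each unpartitioned device are valid
        # choice keys, so a device may be referenced either way below.)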
block_devices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] + [ bd.name for bd in bds if bd.get_partitiontable() is None ] form = CreateCacheSetForm(node=node, data={}) self.assertItemsEqual( block_devices, [k for (k, v) in form.fields['cache_device'].choices]) self.assertItemsEqual( partition_choices, [k for (k, v) in form.fields['cache_partition'].choices]) def test_cache_set_creation_with_block_device(self): node = factory.make_Node() cache_device = factory.make_PhysicalBlockDevice(node=node) form = CreateCacheSetForm(node=node, data={ 'cache_device': cache_device.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() self.assertEquals(cache_device, cache_set.get_device()) def test_cache_set_creation_with_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) form = CreateCacheSetForm(node=node, data={ 'cache_device': boot_disk.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() self.assertEquals(boot_partition, cache_set.get_device()) def test_cache_set_creation_with_partition(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) form = CreateCacheSetForm(node=node, data={ 'cache_partition': partition.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() self.assertEquals(partition, cache_set.get_device()) def test_bcache_creation_fails_with_both_set(self): node = factory.make_Node() cache_device = factory.make_PhysicalBlockDevice(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) form = CreateCacheSetForm(node=node, data={ 'cache_device': cache_device.id, 'cache_partition': partition.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( { '__all__': [ 'Cannot set both cache_device and cache_partition.'], }, form.errors) class TestUpdateCacheSetForm(MAASServerTestCase): def test_choices_are_being_populated_correctly(self): node = factory.make_Node(with_boot_disk=False) # Make 10 block devices. bds = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(10) ] # Partition the last 5 devices with a single partition. partitions = [ factory.make_PartitionTable(block_device=bd).add_partition() for bd in bds[5:] ] partition_choices = [ p.id for p in partitions ] + [ p.name for p in partitions ] # Get the choices of the non-partitioned devices. block_device_choices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] + [ bd.name for bd in bds if bd.get_partitiontable() is None ] cache_set = factory.make_CacheSet(block_device=bds[1]) form = UpdateCacheSetForm(cache_set=cache_set, data={}) # Should allow all devices and partitions, including the one currently # in use on the cache set.
self.assertItemsEqual( block_device_choices, [k for (k, v) in form.fields['cache_device'].choices]) self.assertItemsEqual( partition_choices, [k for (k, v) in form.fields['cache_partition'].choices]) def test_save_updates_the_cache_set_with_block_device(self): node = factory.make_Node() partition = factory.make_Partition(node=node) cache_set = factory.make_CacheSet(partition=partition) new_cache_device = factory.make_PhysicalBlockDevice(node=node) form = UpdateCacheSetForm(cache_set=cache_set, data={ "cache_device": new_cache_device.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() self.assertEquals(new_cache_device, cache_set.get_device()) self.assertIsNone(partition.get_effective_filesystem()) def test_save_updates_the_cache_set_with_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) partition = factory.make_Partition(node=node) cache_set = factory.make_CacheSet(partition=partition) form = UpdateCacheSetForm(cache_set=cache_set, data={ "cache_device": boot_disk.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() self.assertEquals(boot_partition, cache_set.get_device()) self.assertIsNone(partition.get_effective_filesystem()) def test_save_updates_the_cache_set_with_partition(self): node = factory.make_Node() cache_device = factory.make_PhysicalBlockDevice(node=node) cache_set = factory.make_CacheSet(block_device=cache_device) new_partition = factory.make_Partition(node=node) form = UpdateCacheSetForm(cache_set=cache_set, data={ "cache_partition": new_partition.id, }) self.assertTrue(form.is_valid(), form.errors) cache_set = form.save() self.assertEquals(new_partition, cache_set.get_device()) self.assertIsNone(cache_device.get_effective_filesystem()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_commission.py0000644000000000000000000000523413056115004023757 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
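# The tests below drive CommissionForm end to end. As a minimal sketch of the
# usage pattern they exercise (the values and the `node`/`user` names are
# illustrative, built with the same factory helpers used throughout):
#
#     node = factory.make_Node(
#         status=NODE_STATUS.READY, power_state=POWER_STATE.OFF)
#     user = factory.make_admin()
#     form = CommissionForm(instance=node, user=user, data={
#         'enable_ssh': True,
#         'skip_networking': True,
#         'skip_storage': True,
#     })
#     if form.is_valid():
#         node = form.save()  # delegates to node.start_commissioning(user, ...)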
"""Tests for commission form.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.enum import ( NODE_STATUS, POWER_STATE, ) from maasserver.forms_commission import CommissionForm from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith class TestCommissionForm(MAASServerTestCase): def test__doesnt_require_anything(self): node = factory.make_Node( status=NODE_STATUS.READY, power_state=POWER_STATE.OFF) user = factory.make_admin() form = CommissionForm(instance=node, user=user, data={}) self.assertTrue(form.is_valid(), form.errors) def test__not_allowed_in_bad_state(self): node = factory.make_Node( status=NODE_STATUS.DEPLOYING, power_state=POWER_STATE.OFF) user = factory.make_admin() form = CommissionForm(instance=node, user=user, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ '__all__': [ "Commission is not available because of the current state " "of the node."], }, form.errors) def test__not_allowed_if_on(self): node = factory.make_Node( status=NODE_STATUS.READY, power_state=POWER_STATE.ON) user = factory.make_admin() form = CommissionForm(instance=node, user=user, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ '__all__': [ "Commission is not available because of the node is currently " "powered on."], }, form.errors) def test__calls_start_commissioning_with_options(self): node = factory.make_Node( status=NODE_STATUS.READY, power_state=POWER_STATE.OFF) user = factory.make_admin() mock_start_commissioning = self.patch_autospec( node, "start_commissioning") form = CommissionForm(instance=node, user=user, data={ "enable_ssh": True, "skip_networking": True, "skip_storage": True, }) self.assertTrue(form.is_valid(), form.errors) node = form.save() self.assertIsNotNone(node) self.assertThat( mock_start_commissioning, MockCalledOnceWith( user, enable_ssh=True, skip_networking=True, skip_storage=True)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_commissioning.py0000644000000000000000000001126513056115004024456 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for commissioning forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.files.uploadedfile import SimpleUploadedFile from maasserver.enum import BOOT_RESOURCE_TYPE from maasserver.forms import ( CommissioningForm, CommissioningScriptForm, ) from maasserver.models import ( BootSourceCache, Config, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.forms import compose_invalid_choice_text from maasserver.utils.orm import post_commit_hooks from metadataserver.models import CommissioningScript from testtools.matchers import MatchesStructure class TestCommissioningFormForm(MAASServerTestCase): def test_commissioningform_error_msg_lists_series_choices(self): form = CommissioningForm() field = form.fields['commissioning_distro_series'] self.assertEqual( compose_invalid_choice_text( 'commissioning_distro_series', field.choices), field.error_messages['invalid_choice']) def test_commissioningform_error_msg_lists_min_hwe_kernel_choices(self): form = CommissioningForm() field = form.fields['default_min_hwe_kernel'] self.assertEqual( compose_invalid_choice_text( 'default_min_hwe_kernel', field.choices), field.error_messages['invalid_choice']) def test_commissioningform_contains_real_and_ui_choice(self): release = factory.pick_ubuntu_release() name = "ubuntu/" + release kernel = 'hwe-' + release[0] # Stub out the post commit tasks otherwise the test fails due to # unrun post-commit tasks at the end of the test. self.patch(BootSourceCache, "post_commit_do") # Force run the post commit tasks as we make new boot sources with post_commit_hooks: factory.make_BootSourceCache( os=name, subarch=kernel, release=release) factory.make_usable_boot_resource( name=name, extra={'subarches': kernel}, rtype=BOOT_RESOURCE_TYPE.SYNCED) Config.objects.set_config( 'commissioning_distro_series', release) form = CommissioningForm() self.assertItemsEqual([ ('', '--- No minimum kernel ---'), (kernel, '%s (%s)' % (release, kernel))], form.fields['default_min_hwe_kernel'].choices) class TestCommissioningScriptForm(MAASServerTestCase): def test_creates_commissioning_script(self): content = factory.make_string().encode('ascii') name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() new_script = CommissioningScript.objects.get(name=name) self.assertThat( new_script, MatchesStructure.byEquality(name=name, content=content)) def test_raises_if_duplicated_name(self): content = factory.make_string().encode('ascii') name = factory.make_name('filename') factory.make_CommissioningScript(name=name) uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertEqual( (False, {'content': ["A script with that name already exists."]}), (form.is_valid(), form._errors)) def test_rejects_whitespace_in_name(self): name = factory.make_name('with space') content = factory.make_string().encode('ascii') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertFalse(form.is_valid()) self.assertEqual( ["Name contains disallowed characters (e.g. 
space or quotes)."], form._errors['content']) def test_rejects_quotes_in_name(self): name = factory.make_name("l'horreur") content = factory.make_string().encode('ascii') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertFalse(form.is_valid()) self.assertEqual( ["Name contains disallowed characters (e.g. space or quotes)."], form._errors['content']) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_config.py0000644000000000000000000000607313056115004023046 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `ConfigForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django import forms from maasserver.forms import ConfigForm from maasserver.models import Config from maasserver.models.config import DEFAULT_CONFIG from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestOptionForm(ConfigForm): field1 = forms.CharField(label="Field 1", max_length=10) field2 = forms.BooleanField(label="Field 2", required=False) class TestValidOptionForm(ConfigForm): maas_name = forms.CharField(label="Field 1", max_length=10) class TestCompositeForm(ConfigForm): config_fields = ['maas_name'] maas_name = forms.CharField(label="Field 1", max_length=10) non_config_field = forms.CharField(label="Field 2", max_length=10) class ConfigFormTest(MAASServerTestCase): def test_form_valid_saves_into_db(self): value = factory.make_string(10) form = TestValidOptionForm({'maas_name': value}) result = form.save() self.assertTrue(result, form._errors) self.assertEqual(value, Config.objects.get_config('maas_name')) def test_form_rejects_unknown_settings(self): value = factory.make_string(10) value2 = factory.make_string(10) form = TestOptionForm({'field1': value, 'field2': value2}) valid = form.is_valid() self.assertFalse(valid, form._errors) self.assertIn('field1', form._errors) self.assertIn('field2', form._errors) def test_form_invalid_does_not_save_into_db(self): value_too_long = factory.make_string(20) form = TestOptionForm({'field1': value_too_long, 'field2': False}) result = form.save() self.assertFalse(result, form._errors) self.assertIn('field1', form._errors) self.assertIsNone(Config.objects.get_config('field1')) self.assertIsNone(Config.objects.get_config('field2')) def test_form_loads_initial_values(self): value = factory.make_string() Config.objects.set_config('field1', value) form = TestOptionForm() self.assertItemsEqual(['field1'], form.initial) self.assertEqual(value, form.initial['field1']) def test_form_loads_initial_values_from_default_value(self): value = factory.make_string() DEFAULT_CONFIG['field1'] = value form = TestOptionForm() self.assertItemsEqual(['field1'], form.initial) self.assertEqual(value, form.initial['field1']) def test_validates_composite_form(self): value1 = factory.make_string(5) value2 = factory.make_string(5) form = TestCompositeForm( {'maas_name': value1, 'non_config_field': value2}) result = form.save() self.assertTrue(result, form._errors) self.assertEqual(value1, Config.objects.get_config('maas_name')) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_deploy.py0000644000000000000000000000317113056115004023071 0ustar 00000000000000# Copyright 2014 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `DeployForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import DeployForm from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase class TestDeployForm(MAASServerTestCase): """Tests for `DeployForm`.""" def test_uses_live_data(self): # The DeployForm uses the database rather than just relying on # hard-coded stuff. osystem = make_usable_osystem(self) os_name = osystem['name'] release_name = osystem['default_release'] release_name = "%s/%s" % (os_name, release_name) deploy_form = DeployForm() os_choices = deploy_form.fields['default_osystem'].choices os_names = [name for name, title in os_choices] release_choices = deploy_form.fields['default_distro_series'].choices release_names = [name for name, title in release_choices] self.assertIn(os_name, os_names) self.assertIn(release_name, release_names) def test_accepts_new_values(self): osystem = make_usable_osystem(self) os_name = osystem['name'] release_name = osystem['default_release'] params = { 'default_osystem': os_name, 'default_distro_series': "%s/%s" % (os_name, release_name), } form = DeployForm(data=params) self.assertTrue(form.is_valid()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_device.py0000644000000000000000000000274513056115004023042 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for device forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import DeviceForm from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestDeviceForm(MAASServerTestCase): def test_contains_limited_set_of_fields(self): form = DeviceForm() self.assertEqual( [ 'hostname', 'parent', ], list(form.fields)) def test_changes_device_hostname(self): device = factory.make_Device() hostname = factory.make_string() form = DeviceForm( data={ 'hostname': hostname, }, instance=device) form.save() reload_object(device) self.assertEqual(hostname, device.hostname) def test_changes_device_parent(self): device = factory.make_Device() parent = factory.make_Node() form = DeviceForm( data={ 'parent': parent.system_id, }, instance=device) form.save() reload_object(device) reload_object(parent) self.assertEqual(parent, device.parent) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_downloadprogress.py0000644000000000000000000000472713056115004025201 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
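# As a minimal sketch of the progress-reporting loop these tests cover (the
# numbers are illustrative; `get_download` resolves an ongoing download or
# records the start of a new one):
#
#     progress = DownloadProgressForm.get_download(nodegroup, filename, None)
#     form = DownloadProgressForm(
#         data={'size': 100, 'bytes_downloaded': 10, 'error': ''},
#         instance=progress)
#     if form.is_valid():
#         progress = form.save()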
"""Tests for `DownloadProgressForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import DownloadProgressForm from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestDownloadProgressForm(MAASServerTestCase): def test_updates_instance(self): progress = factory.make_DownloadProgress_incomplete(size=None) new_bytes_downloaded = progress.bytes_downloaded + 1 size = progress.bytes_downloaded + 2 error = factory.make_string() form = DownloadProgressForm( data={ 'size': size, 'bytes_downloaded': new_bytes_downloaded, 'error': error, }, instance=progress) new_progress = form.save() progress = reload_object(progress) self.assertEqual(progress, new_progress) self.assertEqual(size, progress.size) self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded) self.assertEqual(error, progress.error) def test_rejects_unknown_ongoing_download(self): form = DownloadProgressForm( data={'bytes_downloaded': 1}, instance=None) self.assertFalse(form.is_valid()) def test_get_download_returns_ongoing_download(self): progress = factory.make_DownloadProgress_incomplete() self.assertEqual( progress, DownloadProgressForm.get_download( progress.nodegroup, progress.filename, progress.bytes_downloaded + 1)) def test_get_download_recognises_start_of_new_download(self): nodegroup = factory.make_NodeGroup() filename = factory.make_string() progress = DownloadProgressForm.get_download(nodegroup, filename, None) self.assertIsNotNone(progress) self.assertEqual(nodegroup, progress.nodegroup) self.assertEqual(filename, progress.filename) self.assertIsNone(progress.bytes_downloaded) def test_get_download_returns_none_for_unknown_ongoing_download(self): self.assertIsNone( DownloadProgressForm.get_download( factory.make_NodeGroup(), factory.make_string(), 1)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_fabric.py0000644000000000000000000000332113056115004023020 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for Fabric forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms_fabric import FabricForm from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestFabricForm(MAASServerTestCase): def test__creates_fabric(self): fabric_name = factory.make_name("fabric") fabric_class_type = factory.make_name("class_type") form = FabricForm({ "name": fabric_name, "class_type": fabric_class_type, }) self.assertTrue(form.is_valid(), form.errors) fabric = form.save() self.assertEquals(fabric_name, fabric.name) self.assertEquals(fabric_class_type, fabric.class_type) def test__doest_require_name_on_update(self): fabric = factory.make_Fabric() form = FabricForm(instance=fabric, data={}) self.assertTrue(form.is_valid(), form.errors) def test__updates_fabric(self): new_name = factory.make_name("fabric") new_class_type = factory.make_name("class_type") fabric = factory.make_Fabric() form = FabricForm(instance=fabric, data={ "name": new_name, "class_type": new_class_type, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertEquals(new_name, reload_object(fabric).name) self.assertEquals(new_class_type, reload_object(fabric).class_type) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_fannetwork.py0000644000000000000000000000603613056115004023756 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for FanNetwork forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.forms_fannetwork import FanNetworkForm from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestFanNetworkForm(MAASServerTestCase): def test__requires_name(self): slash = random.randint(12, 28) underlay = factory.make_ipv4_network(slash=slash) overlay = factory.make_ipv4_network(slash=slash - 4) form = FanNetworkForm({ "overlay": unicode(overlay), "underlay": unicode(underlay), }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "name": ["This field is required."], }, form.errors) def test__requires_overlay(self): slash = random.randint(12, 28) underlay = factory.make_ipv4_network(slash=slash) form = FanNetworkForm({ "name": factory.make_name("fannetwork"), "underlay": unicode(underlay), }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "overlay": ["This field is required."], }, form.errors) def test__requires_underlay(self): slash = random.randint(12, 28) overlay = factory.make_ipv4_network(slash=slash - 4) form = FanNetworkForm({ "name": factory.make_name("fannetwork"), "overlay": unicode(overlay), }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "underlay": ["This field is required."], }, form.errors) def test__creates_fannetwork(self): fannetwork_name = factory.make_name("fannetwork") slash = random.randint(12, 28) underlay = factory.make_ipv4_network(slash=slash) overlay = factory.make_ipv4_network(slash=slash - 4) form = FanNetworkForm({ "name": fannetwork_name, "overlay": unicode(overlay), "underlay": unicode(underlay), }) self.assertTrue(form.is_valid(), form.errors) fannetwork = form.save() self.assertEquals(fannetwork_name, 
fannetwork.name) def test__doesnt_require_name_on_update(self): fannetwork = factory.make_FanNetwork() form = FanNetworkForm(instance=fannetwork, data={}) self.assertTrue(form.is_valid(), form.errors) def test__updates_fannetwork(self): new_name = factory.make_name("fannetwork") fannetwork = factory.make_FanNetwork() form = FanNetworkForm(instance=fannetwork, data={ "name": new_name, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertEquals(new_name, reload_object(fannetwork).name) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_helpers.py0000644000000000000000000001771113056115004023244 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for forms helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.forms import CharField from maasserver.enum import ( BOOT_RESOURCE_TYPE, NODE_STATUS, ) from maasserver.forms import ( AdminNodeForm, AdminNodeWithMACAddressesForm, get_node_create_form, get_node_edit_form, initialize_node_group, list_all_usable_architectures, MAASModelForm, NodeForm, NodeWithMACAddressesForm, pick_default_architecture, remove_None_values, ) from maasserver.models import ( Node, NodeGroup, ) from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.tests.models import GenericTestModel from maastesting.djangotestcase import TestModelMixin from testtools.matchers import Equals class TestHelpers(MAASServerTestCase): def make_usable_boot_resource(self, arch=None, subarch=None): """Create a set of boot resources, so the architecture becomes usable. This will make the resources' architecture show up in the list of usable architectures.
""" if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') for purpose in ['install', 'commissioning']: architecture = '%s/%s' % (arch, subarch) factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, architecture=architecture) def test_initialize_node_group_leaves_nodegroup_reference_intact(self): preselected_nodegroup = factory.make_NodeGroup() node = factory.make_Node(nodegroup=preselected_nodegroup) initialize_node_group(node) self.assertEqual(preselected_nodegroup, node.nodegroup) def test_initialize_node_group_initializes_nodegroup_to_form_value(self): node = Node( NODE_STATUS.NEW, architecture=make_usable_architecture(self)) nodegroup = factory.make_NodeGroup() initialize_node_group(node, nodegroup) self.assertEqual(nodegroup, node.nodegroup) def test_initialize_node_group_defaults_to_master(self): node = Node( NODE_STATUS.NEW, architecture=make_usable_architecture(self)) initialize_node_group(node) self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup) def test_list_all_usable_architectures_combines_nodegroups(self): arches = [ (factory.make_name('arch'), factory.make_name('subarch')) for _ in range(3)] for arch, subarch in arches: self.make_usable_boot_resource(arch=arch, subarch=subarch) expected = [ "%s/%s" % (arch, subarch) for arch, subarch in arches] self.assertItemsEqual(expected, list_all_usable_architectures()) def test_list_all_usable_architectures_sorts_output(self): arches = [ (factory.make_name('arch'), factory.make_name('subarch')) for _ in range(3)] for arch, subarch in arches: self.make_usable_boot_resource(arch=arch, subarch=subarch) expected = [ "%s/%s" % (arch, subarch) for arch, subarch in arches] self.assertEqual(sorted(expected), list_all_usable_architectures()) def test_list_all_usable_architectures_returns_no_duplicates(self): arch = factory.make_name('arch') subarch = factory.make_name('subarch') self.make_usable_boot_resource(arch=arch, subarch=subarch) self.make_usable_boot_resource(arch=arch, subarch=subarch) self.assertEqual( ["%s/%s" % (arch, subarch)], list_all_usable_architectures()) def test_pick_default_architecture_returns_empty_if_no_options(self): self.assertEqual('', pick_default_architecture([])) def test_pick_default_architecture_prefers_i386_generic_if_usable(self): self.assertEqual( 'i386/generic', pick_default_architecture( ['amd64/generic', 'i386/generic', 'mips/generic'])) def test_pick_default_architecture_falls_back_to_first_option(self): arches = [factory.make_name('arch') for _ in range(5)] self.assertEqual(arches[0], pick_default_architecture(arches)) def test_remove_None_values_removes_None_values_in_dict(self): random_input = factory.make_string() self.assertEqual( {random_input: random_input}, remove_None_values({ random_input: random_input, factory.make_string(): None, })) def test_remove_None_values_leaves_empty_dict_untouched(self): self.assertEqual({}, remove_None_values({})) def test_get_node_edit_form_returns_NodeForm_if_non_admin(self): user = factory.make_User() self.assertEqual(NodeForm, get_node_edit_form(user)) def test_get_node_edit_form_returns_APIAdminNodeEdit_if_admin(self): admin = factory.make_admin() self.assertEqual(AdminNodeForm, get_node_edit_form(admin)) def test_get_node_create_form_if_non_admin(self): user = factory.make_User() self.assertEqual( NodeWithMACAddressesForm, get_node_create_form(user)) def test_get_node_create_form_if_admin(self): admin = factory.make_admin() self.assertEqual( AdminNodeWithMACAddressesForm, 
get_node_create_form(admin)) class TestMAASModelForm(TestModelMixin, MAASServerTestCase): app = 'maasserver.tests' def test_model_class_from_UI_has_hidden_field(self): class TestClass(MAASModelForm): class Meta: model = GenericTestModel form = TestClass(ui_submission=True) self.assertIn('ui_submission', form.fields) self.assertTrue( form.fields['ui_submission'].widget.is_hidden, "ui_submission field is not 'hidden'") def test_model_class_from_API_doesnt_have_hidden_field(self): class TestClass(MAASModelForm): class Meta: model = GenericTestModel form = TestClass() self.assertNotIn('ui_submission', form.fields) def test_hidden_field_is_available_to_all_field_cleaning_methods(self): class EarlyFieldMixin: """Mixin to sneak a field into our form early. Proves that the `ui_submission` field is present for all field validators, regardless of the order in which the fields were added to the form. """ def __init__(self, *args, **kwargs): super(EarlyFieldMixin, self).__init__(*args, **kwargs) self.fields['early_field'] = CharField(required=False) class TestForm(EarlyFieldMixin, MAASModelForm): extra_field = CharField(required=False) def clean_early_field(self, *args, **kwargs): """Cleaner for `GenericTestModel.field`.""" self.while_early_field = ('ui_submission' in self.cleaned_data) def clean_field(self, *args, **kwargs): """Cleaner for `GenericTestModel.field`.""" self.while_field = ('ui_submission' in self.cleaned_data) def clean_extra_field(self, *args, **kwargs): """Cleaner for `TestForm.extra_field`.""" self.while_extra_field = ('ui_submission' in self.cleaned_data) class Meta: model = GenericTestModel fields = ('field', ) form = TestForm(ui_submission=True, data={}) self.assertTrue(form.is_valid(), form._errors) self.expectThat(form.while_early_field, Equals(True)) self.expectThat(form.while_field, Equals(True)) self.expectThat(form.while_extra_field, Equals(True)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_instancelistfield.py0000644000000000000000000000360013056115004025276 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `InstanceListField`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.exceptions import ValidationError from maasserver.forms import InstanceListField from maasserver.models import Node from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestInstanceListField(MAASServerTestCase): """Tests for `InstanceListField`.""" def test_field_validates_valid_data(self): nodes = [factory.make_Node() for _ in range(3)] # Create other nodes. [factory.make_Node() for _ in range(3)] field = InstanceListField(model_class=Node, field_name='system_id') input_data = [node.system_id for node in nodes] self.assertItemsEqual( input_data, [node.system_id for node in field.clean(input_data)]) def test_field_ignores_duplicates(self): nodes = [factory.make_Node() for _ in range(2)] # Create other nodes. 
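        # (These extra nodes are not part of the input below, so the field's
        # clean() must not return them.)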
[factory.make_Node() for _ in range(3)] field = InstanceListField(model_class=Node, field_name='system_id') input_data = [node.system_id for node in nodes] * 2 self.assertItemsEqual( set(input_data), [node.system_id for node in field.clean(input_data)]) def test_field_rejects_invalid_data(self): nodes = [factory.make_Node() for _ in range(3)] field = InstanceListField(model_class=Node, field_name='system_id') error = self.assertRaises( ValidationError, field.clean, [node.system_id for node in nodes] + ['unknown']) self.assertEquals(['Unknown node(s): unknown.'], error.messages) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_interface.py0000644000000000000000000011076313056115004023543 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for Interface forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from django.core.exceptions import ValidationError from maasserver.enum import ( INTERFACE_TYPE, IPADDRESS_TYPE, ) from maasserver.forms_interface import ( BOND_LACP_RATE_CHOICES, BOND_MODE_CHOICES, BOND_XMIT_HASH_POLICY_CHOICES, BondInterfaceForm, InterfaceForm, PhysicalInterfaceForm, VLANInterfaceForm, ) from maasserver.models.interface import build_vlan_interface_name from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.forms import compose_invalid_choice_text from testtools import ExpectedException from testtools.matchers import MatchesStructure class GetInterfaceFormTests(MAASServerTestCase): scenarios = [ ('physical', {'type': INTERFACE_TYPE.PHYSICAL, 'form': PhysicalInterfaceForm}), ('bond', {'type': INTERFACE_TYPE.BOND, 'form': BondInterfaceForm}), ('vlan', {'type': INTERFACE_TYPE.VLAN, 'form': VLANInterfaceForm}), ] def test_get_interface_form_returns_form(self): self.assertEqual( self.form, InterfaceForm.get_interface_form(self.type)) class GetInterfaceFormErrorTests(MAASServerTestCase): def test_get_interface_form_returns_form(self): with ExpectedException(ValidationError): InterfaceForm.get_interface_form(factory.make_name()) class PhysicalInterfaceFormTest(MAASServerTestCase): def test__creates_physical_interface(self): node = factory.make_Node() mac_address = factory.make_mac_address() interface_name = 'eth0' fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() tags = [ factory.make_name("tag") for _ in range(3) ] form = PhysicalInterfaceForm( node=node, data={ 'name': interface_name, 'mac_address': mac_address, 'vlan': vlan.id, 'tags': ",".join(tags), }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( node=node, mac_address=mac_address, name=interface_name, type=INTERFACE_TYPE.PHYSICAL, tags=tags)) self.assertItemsEqual([], interface.parents.all()) def test__create_ensures_link_up(self): node = factory.make_Node() mac_address = factory.make_mac_address() interface_name = 'eth0' fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() tags = [ factory.make_name("tag") for _ in range(3) ] form = PhysicalInterfaceForm( node=node, data={ 'name': interface_name, 'mac_address': mac_address, 'vlan': vlan.id, 'tags': ",".join(tags), }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertIsNotNone( 
interface.ip_addresses.filter(alloc_type=IPADDRESS_TYPE.STICKY)) def test__requires_mac_address(self): interface_name = 'eth0' fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() form = PhysicalInterfaceForm( node=factory.make_Node(), data={ 'name': interface_name, 'vlan': vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['mac_address'], form.errors.keys(), form.errors) self.assertIn( "This field is required.", form.errors['mac_address'][0]) def test_rejects_interface_with_duplicate_name(self): fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, vlan=vlan) mac_address = factory.make_mac_address() form = PhysicalInterfaceForm( node=interface.node, data={ 'name': interface.name, 'mac_address': mac_address, 'vlan': interface.vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['name'], form.errors.keys(), form.errors) self.assertIn( "already has an interface named '%s'." % interface.name, form.errors['name'][0]) def test_rejects_interface_on_tagged_vlan(self): fabric = factory.make_Fabric() interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, vlan=fabric.get_default_vlan()) vlan = factory.make_VLAN(fabric=fabric) mac_address = factory.make_mac_address() form = PhysicalInterfaceForm( node=interface.node, data={ 'name': factory.make_name("eth"), 'mac_address': mac_address, 'vlan': vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['vlan'], form.errors.keys(), form.errors) self.assertIn( "A physical interface can only belong to an untagged VLAN.", form.errors['vlan'][0]) def test_allows_interface_on_tagged_vlan_for_device(self): device = factory.make_Device() fabric = factory.make_Fabric() interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=device, vlan=fabric.get_default_vlan()) vlan = factory.make_VLAN(fabric=fabric) mac_address = factory.make_mac_address() form = PhysicalInterfaceForm( node=device, data={ 'name': factory.make_name("eth"), 'mac_address': mac_address, 'vlan': vlan.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals(vlan, interface.vlan) def test__rejects_parents(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() form = PhysicalInterfaceForm( node=parent.node, data={ 'name': factory.make_name("eth"), 'mac_address': factory.make_mac_address(), 'vlan': vlan.id, 'parents': [parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['parents'], form.errors.keys(), form.errors) self.assertIn( "A physical interface cannot have parents.", form.errors['parents'][0]) def test__edits_interface(self): interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name='eth0') new_name = 'eth1' new_fabric = factory.make_Fabric() new_vlan = new_fabric.get_default_vlan() form = PhysicalInterfaceForm( instance=interface, data={ 'name': new_name, 'vlan': new_vlan.id, 'enabled': False, 'tags': "", }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( name=new_name, vlan=new_vlan, enabled=False, tags=[])) self.assertItemsEqual([], interface.parents.all()) def test__create_sets_interface_parameters(self): node = factory.make_Node() mac_address = factory.make_mac_address() interface_name = 'eth0' fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() tags = [ 
factory.make_name("tag") for _ in range(3) ] mtu = random.randint(1000, 2000) accept_ra = factory.pick_bool() autoconf = factory.pick_bool() form = PhysicalInterfaceForm( node=node, data={ 'name': interface_name, 'mac_address': mac_address, 'vlan': vlan.id, 'tags': ",".join(tags), 'mtu': mtu, 'accept_ra': accept_ra, 'autoconf': autoconf, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "mtu": mtu, "accept_ra": accept_ra, "autoconf": autoconf, }, interface.params) def test__update_doesnt_change_interface_parameters(self): interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name='eth0') mtu = random.randint(1000, 2000) accept_ra = factory.pick_bool() autoconf = factory.pick_bool() interface.params = { "mtu": mtu, "accept_ra": accept_ra, "autoconf": autoconf, } new_name = 'eth1' new_fabric = factory.make_Fabric() new_vlan = new_fabric.get_default_vlan() form = PhysicalInterfaceForm( instance=interface, data={ 'name': new_name, 'vlan': new_vlan.id, 'enabled': False, 'tags': "", }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "mtu": mtu, "accept_ra": accept_ra, "autoconf": autoconf, }, interface.params) def test__update_does_change_interface_parameters(self): interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name='eth0') mtu = random.randint(1000, 2000) accept_ra = factory.pick_bool() autoconf = factory.pick_bool() interface.params = { "mtu": mtu, "accept_ra": accept_ra, "autoconf": autoconf, } new_mtu = random.randint(1000, 2000) new_accept_ra = not accept_ra new_autoconf = not autoconf form = PhysicalInterfaceForm( instance=interface, data={ "mtu": new_mtu, "accept_ra": new_accept_ra, "autoconf": new_autoconf, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "mtu": new_mtu, "accept_ra": new_accept_ra, "autoconf": new_autoconf, }, interface.params) def test__update_allows_clearing_interface_parameters(self): interface = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name='eth0') mtu = random.randint(1000, 2000) accept_ra = factory.pick_bool() autoconf = factory.pick_bool() interface.params = { "mtu": mtu, "accept_ra": accept_ra, "autoconf": autoconf, } form = PhysicalInterfaceForm( instance=interface, data={ "mtu": "", "accept_ra": "", "autoconf": "", }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({}, interface.params) class VLANInterfaceFormTest(MAASServerTestCase): def test__creates_vlan_interface(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = factory.make_VLAN(fabric=parent.vlan.fabric, vid=10) form = VLANInterfaceForm( node=parent.node, data={ 'vlan': vlan.id, 'parents': [parent.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() interface_name = build_vlan_interface_name(parent, vlan) self.assertThat( interface, MatchesStructure.byEquality( name=interface_name, type=INTERFACE_TYPE.VLAN)) self.assertItemsEqual([parent], interface.parents.all()) def test__create_ensures_link_up(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = factory.make_VLAN(fabric=parent.vlan.fabric, vid=10) form = VLANInterfaceForm( node=parent.node, data={ 'vlan': vlan.id, 'parents': [parent.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertIsNotNone( interface.ip_addresses.filter(alloc_type=IPADDRESS_TYPE.STICKY)) def test_rejects_interface_with_duplicate_name(self): parent = 
factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = factory.make_VLAN(fabric=parent.vlan.fabric, vid=10) interface = factory.make_Interface( INTERFACE_TYPE.VLAN, vlan=vlan, parents=[parent]) form = VLANInterfaceForm( node=parent.node, data={ 'vlan': vlan.id, 'parents': [parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['name'], form.errors.keys(), form.errors) self.assertIn( "already has an interface named '%s'." % interface.name, form.errors['name'][0]) def test_rejects_interface_on_default_fabric(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = parent.vlan.fabric.get_default_vlan() form = VLANInterfaceForm( node=parent.node, data={ 'vlan': vlan.id, 'parents': [parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual( ['vlan'], form.errors.keys(), form.errors) self.assertIn( "A VLAN interface can only belong to a tagged VLAN.", form.errors['vlan'][0]) def test__rejects_no_parents(self): vlan = factory.make_VLAN(vid=10) form = VLANInterfaceForm( node=factory.make_Node(), data={ 'vlan': vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['parents'], form.errors.keys()) self.assertIn( "A VLAN interface must have exactly one parent.", form.errors['parents'][0]) def test__rejects_vlan_parent(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = factory.make_VLAN(fabric=parent.vlan.fabric, vid=10) vlan_parent = factory.make_Interface( INTERFACE_TYPE.VLAN, vlan=vlan, parents=[parent]) other_vlan = factory.make_VLAN(fabric=parent.vlan.fabric, vid=11) form = VLANInterfaceForm( node=parent.node, data={ 'vlan': other_vlan.id, 'parents': [vlan_parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['parents'], form.errors.keys()) self.assertIn( "VLAN interface can't have another VLAN interface as parent.", form.errors['parents'][0]) def test__rejects_vlan_not_on_same_fabric(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) factory.make_VLAN(fabric=parent.vlan.fabric, vid=10) other_vlan = factory.make_VLAN() form = VLANInterfaceForm( node=parent.node, data={ 'vlan': other_vlan.id, 'parents': [parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['vlan'], form.errors.keys()) self.assertIn( "A VLAN interface can only belong to a tagged VLAN on " "the same fabric as its parent interface.", form.errors['vlan'][0]) def test__rejects_parent_on_bond(self): parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) bond = factory.make_Interface(INTERFACE_TYPE.BOND, parents=[parent]) vlan = factory.make_VLAN(fabric=bond.vlan.fabric, vid=10) form = VLANInterfaceForm( node=parent.node, data={ 'vlan': vlan.id, 'parents': [parent.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['parents'], form.errors.keys()) self.assertIn( "A VLAN interface can't have a parent that is already in a bond.", form.errors['parents'][0]) def test__rejects_more_than_one_parent(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node) vlan = factory.make_VLAN(vid=10) form = VLANInterfaceForm( node=parent1.node, data={ 'vlan': vlan.id, 'parents': [parent1.id, parent2.id], }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['parents'], form.errors.keys()) self.assertIn( "A VLAN interface must have exactly one parent.", form.errors['parents'][0]) def test__edits_interface(self): parent = 
factory.make_Interface(INTERFACE_TYPE.PHYSICAL) interface = factory.make_Interface( INTERFACE_TYPE.VLAN, parents=[parent]) new_vlan = factory.make_VLAN(fabric=interface.vlan.fabric, vid=33) form = VLANInterfaceForm( instance=interface, data={ 'vlan': new_vlan.id }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( name="%s.%d" % (parent.get_name(), new_vlan.vid), vlan=new_vlan, type=INTERFACE_TYPE.VLAN)) self.assertItemsEqual([parent], interface.parents.all()) class BondInterfaceFormTest(MAASServerTestCase): def test__error_with_invalid_bond_mode(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface_name = factory.make_name() bond_mode = factory.make_name("bond_mode") form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], 'bond_mode': bond_mode, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "bond_mode": [ compose_invalid_choice_text( "bond_mode", BOND_MODE_CHOICES) % {"value": bond_mode}], }, form.errors) def test__creates_bond_interface(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface_name = factory.make_name() form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( name=interface_name, type=INTERFACE_TYPE.BOND)) self.assertIn( interface.mac_address, [parent1.mac_address, parent2.mac_address]) self.assertItemsEqual([parent1, parent2], interface.parents.all()) def test__create_removes_parent_links_and_sets_link_up_on_bond(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent1.ensure_link_up() parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) parent2.ensure_link_up() interface_name = factory.make_name() form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals( 0, parent1.ip_addresses.exclude( alloc_type=IPADDRESS_TYPE.DISCOVERED).count()) self.assertEquals( 0, parent2.ip_addresses.exclude( alloc_type=IPADDRESS_TYPE.DISCOVERED).count()) self.assertIsNotNone( interface.ip_addresses.filter(alloc_type=IPADDRESS_TYPE.STICKY)) def test__creates_bond_interface_with_parent_mac_address(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface_name = factory.make_name() form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], 'mac_address': parent1.mac_address, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( name=interface_name, mac_address=parent1.mac_address, type=INTERFACE_TYPE.BOND)) self.assertItemsEqual([parent1, parent2], interface.parents.all()) def test__creates_bond_interface_with_default_bond_params(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) 
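        # No bond_* options are supplied here, so the assertion below checks
        # the defaults that BondInterfaceForm fills in on its own (balance-rr,
        # miimon=100, zero delays, slow LACP rate, layer2 hash policy).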
interface_name = factory.make_name() form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "bond_mode": "balance-rr", "bond_miimon": 100, "bond_downdelay": 0, "bond_updelay": 0, "bond_lacp_rate": "slow", "bond_xmit_hash_policy": "layer2", }, interface.params) def test__creates_bond_interface_with_bond_params(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface_name = factory.make_name() bond_mode = factory.pick_choice(BOND_MODE_CHOICES) bond_miimon = random.randint(0, 1000) bond_downdelay = random.randint(0, 1000) bond_updelay = random.randint(0, 1000) bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) form = BondInterfaceForm( node=parent1.node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id], 'bond_mode': bond_mode, 'bond_miimon': bond_miimon, 'bond_downdelay': bond_downdelay, 'bond_updelay': bond_updelay, 'bond_lacp_rate': bond_lacp_rate, 'bond_xmit_hash_policy': bond_xmit_hash_policy, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "bond_mode": bond_mode, "bond_miimon": bond_miimon, "bond_downdelay": bond_downdelay, "bond_updelay": bond_updelay, "bond_lacp_rate": bond_lacp_rate, "bond_xmit_hash_policy": bond_xmit_hash_policy, }, interface.params) def test__rejects_no_parents(self): interface_name = factory.make_name() form = BondInterfaceForm( node=factory.make_Node(), data={ 'name': interface_name, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['parents', 'mac_address'], form.errors.keys()) self.assertIn( "A Bond interface must have one or more parents.", form.errors['parents'][0]) def test__rejects_when_vlan_not_untagged(self): interface_name = factory.make_name() parent = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) vlan = factory.make_VLAN(fabric=parent.vlan.fabric) form = BondInterfaceForm( node=parent.node, data={ 'name': interface_name, 'parents': [parent.id], 'mac_address': parent.mac_address, 'vlan': vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['vlan'], form.errors.keys()) self.assertIn( "A bond interface can only belong to an untagged VLAN.", form.errors['vlan'][0]) def test__rejects_when_parents_already_have_children(self): node = factory.make_Node() parent1 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, name="eth0") factory.make_Interface(INTERFACE_TYPE.VLAN, parents=[parent1]) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, name="eth1", vlan=parent1.vlan) factory.make_Interface(INTERFACE_TYPE.VLAN, parents=[parent2]) interface_name = factory.make_name() form = BondInterfaceForm( node=node, data={ 'name': interface_name, 'parents': [parent1.id, parent2.id] }) self.assertFalse(form.is_valid(), form.errors) self.assertIn( "eth0, eth1 is already in-use by another interface.", form.errors['parents'][0]) def test__rejects_when_parents_not_in_same_vlan(self): node = factory.make_Node() parent1 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, name="eth0") parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, name="eth1") interface_name = factory.make_name() form = BondInterfaceForm( node=node, data={ 'name': interface_name, 'parents': 
[parent1.id, parent2.id] }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals( "All parents must belong to the same VLAN.", form.errors['parents'][0]) def test__edits_interface(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[parent1, parent2]) new_fabric = factory.make_Fabric() new_vlan = new_fabric.get_default_vlan() new_name = factory.make_name() new_parent = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) form = BondInterfaceForm( instance=interface, data={ 'vlan': new_vlan.id, 'name': new_name, 'parents': [parent1.id, parent2.id, new_parent.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( mac_address=interface.mac_address, name=new_name, vlan=new_vlan, type=INTERFACE_TYPE.BOND)) self.assertItemsEqual( [parent1, parent2, new_parent], interface.parents.all()) self.assertItemsEqual([new_vlan], set( reload_object(parent).vlan for parent in [parent1, parent2, new_parent] )) def test__edits_interface_removes_parents(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node) parent3 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node) interface = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[parent1, parent2, parent3]) new_name = factory.make_name() form = BondInterfaceForm( instance=interface, data={ 'name': new_name, 'parents': [parent1.id, parent2.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( mac_address=interface.mac_address, name=new_name, type=INTERFACE_TYPE.BOND)) self.assertItemsEqual( [parent1, parent2], interface.parents.all()) def test__edits_interface_updates_mac_address_when_parent_removed(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node) parent3 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node) interface = factory.make_Interface( INTERFACE_TYPE.BOND, mac_address=parent3.mac_address, parents=[parent1, parent2, parent3]) new_name = factory.make_name() form = BondInterfaceForm( instance=interface, data={ 'name': new_name, 'parents': [parent1.id, parent2.id], }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertThat( interface, MatchesStructure.byEquality( name=new_name, type=INTERFACE_TYPE.BOND)) self.assertItemsEqual( [parent1, parent2], interface.parents.all()) self.assertIn( interface.mac_address, [parent1.mac_address, parent2.mac_address]) def test__edit_doesnt_overwrite_params(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[parent1, parent2]) bond_mode = factory.pick_choice(BOND_MODE_CHOICES) bond_miimon = random.randint(0, 1000) bond_downdelay = random.randint(0, 1000) bond_updelay = random.randint(0, 1000) bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) interface.params = { "bond_mode": bond_mode, "bond_miimon": bond_miimon, "bond_downdelay": bond_downdelay, 
"bond_updelay": bond_updelay, "bond_lacp_rate": bond_lacp_rate, "bond_xmit_hash_policy": bond_xmit_hash_policy, } interface.save() new_name = factory.make_name() form = BondInterfaceForm( instance=interface, data={ 'name': new_name, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "bond_mode": bond_mode, "bond_miimon": bond_miimon, "bond_downdelay": bond_downdelay, "bond_updelay": bond_updelay, "bond_lacp_rate": bond_lacp_rate, "bond_xmit_hash_policy": bond_xmit_hash_policy, }, interface.params) def test__edit_does_overwrite_params(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[parent1, parent2]) bond_mode = factory.pick_choice(BOND_MODE_CHOICES) bond_miimon = random.randint(0, 1000) bond_downdelay = random.randint(0, 1000) bond_updelay = random.randint(0, 1000) bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) interface.params = { "bond_mode": bond_mode, "bond_miimon": bond_miimon, "bond_downdelay": bond_downdelay, "bond_updelay": bond_updelay, "bond_lacp_rate": bond_lacp_rate, "bond_xmit_hash_policy": bond_xmit_hash_policy, } interface.save() new_name = factory.make_name() new_bond_mode = factory.pick_choice(BOND_MODE_CHOICES) new_bond_miimon = random.randint(0, 1000) new_bond_downdelay = random.randint(0, 1000) new_bond_updelay = random.randint(0, 1000) new_bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) new_bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) form = BondInterfaceForm( instance=interface, data={ 'name': new_name, 'bond_mode': new_bond_mode, 'bond_miimon': new_bond_miimon, 'bond_downdelay': new_bond_downdelay, 'bond_updelay': new_bond_updelay, 'bond_lacp_rate': new_bond_lacp_rate, 'bond_xmit_hash_policy': new_bond_xmit_hash_policy, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "bond_mode": new_bond_mode, "bond_miimon": new_bond_miimon, "bond_downdelay": new_bond_downdelay, "bond_updelay": new_bond_updelay, "bond_lacp_rate": new_bond_lacp_rate, "bond_xmit_hash_policy": new_bond_xmit_hash_policy, }, interface.params) def test__edit_allows_zero_params(self): parent1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) parent2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=parent1.node, vlan=parent1.vlan) interface = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[parent1, parent2]) bond_mode = factory.pick_choice(BOND_MODE_CHOICES) bond_miimon = random.randint(0, 1000) bond_downdelay = random.randint(0, 1000) bond_updelay = random.randint(0, 1000) bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) interface.params = { "bond_mode": bond_mode, "bond_miimon": bond_miimon, "bond_downdelay": bond_downdelay, "bond_updelay": bond_updelay, "bond_lacp_rate": bond_lacp_rate, "bond_xmit_hash_policy": bond_xmit_hash_policy, } interface.save() new_name = factory.make_name() new_bond_mode = factory.pick_choice(BOND_MODE_CHOICES) new_bond_miimon = 0 new_bond_downdelay = 0 new_bond_updelay = 0 new_bond_lacp_rate = factory.pick_choice(BOND_LACP_RATE_CHOICES) new_bond_xmit_hash_policy = factory.pick_choice( BOND_XMIT_HASH_POLICY_CHOICES) form = BondInterfaceForm( instance=interface, data={ 'name': new_name, 
'bond_mode': new_bond_mode, 'bond_miimon': new_bond_miimon, 'bond_downdelay': new_bond_downdelay, 'bond_updelay': new_bond_updelay, 'bond_lacp_rate': new_bond_lacp_rate, 'bond_xmit_hash_policy': new_bond_xmit_hash_policy, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEquals({ "bond_mode": new_bond_mode, "bond_miimon": new_bond_miimon, "bond_downdelay": new_bond_downdelay, "bond_updelay": new_bond_updelay, "bond_lacp_rate": new_bond_lacp_rate, "bond_xmit_hash_policy": new_bond_xmit_hash_policy, }, interface.params) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_interface_link.py0000644000000000000000000007364313056115004024565 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for interface link form.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.enum import ( INTERFACE_LINK_TYPE, INTERFACE_TYPE, IPADDRESS_TYPE, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.forms_interface_link import ( InterfaceLinkForm, InterfaceSetDefaultGatwayForm, InterfaceUnlinkForm, ) from maasserver.models import interface as interface_module from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import get_one from netaddr import IPAddress class TestInterfaceLinkForm(MAASServerTestCase): def test__requires_mode(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "mode": ["This field is required."], }, form.errors) def test__mode_is_case_insensitive(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP.upper(), }) self.assertTrue(form.is_valid(), form.errors) def test__sets_subnet_queryset_to_subnets_on_interface_vlan(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnets = [ factory.make_Subnet(vlan=interface.vlan) for _ in range(3) ] form = InterfaceLinkForm(instance=interface, data={}) self.assertItemsEqual(subnets, form.fields["subnet"].queryset) def test__AUTO_requires_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "subnet": ["This field is required."], }, form.errors) def test__AUTO_creates_link_to_AUTO_with_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) auto_subnet = factory.make_Subnet(vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": auto_subnet.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() auto_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.AUTO) self.assertEquals(auto_subnet, auto_ip.subnet) def test__AUTO_sets_node_gateway_link_v4(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) network = factory.make_ipv4_network() auto_subnet = factory.make_Subnet( cidr=unicode(network.cidr), vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": auto_subnet.id, 
"default_gateway": True, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() auto_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.AUTO) node = interface.get_node() self.assertEquals(auto_ip, node.gateway_link_ipv4) def test__AUTO_sets_node_gateway_link_v6(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) network = factory.make_ipv6_network() auto_subnet = factory.make_Subnet( cidr=unicode(network.cidr), vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": auto_subnet.id, "default_gateway": True, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() auto_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.AUTO) node = interface.get_node() self.assertEquals(auto_ip, node.gateway_link_ipv6) def test__AUTO_default_gateway_requires_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, "default_gateway": True, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "default_gateway": [ "Subnet is required when default_gateway is True."], "subnet": ["This field is required."], }, form.errors) def test__AUTO_default_gateway_requires_subnet_with_gateway_ip(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) auto_subnet = factory.make_Subnet(vlan=interface.vlan) auto_subnet.gateway_ip = None auto_subnet.save() form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": auto_subnet.id, "default_gateway": True, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "default_gateway": [ "Cannot set as default gateway because subnet " "%s doesn't provide a gateway IP address." % auto_subnet], }, form.errors) def test__DHCP_not_allowed_if_already_DHCP_with_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) dhcp_subnet = factory.make_Subnet() factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", subnet=dhcp_subnet, interface=interface) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "mode": [ "Interface is already set to DHCP from '%s'." % ( dhcp_subnet)] }, form.errors) def test__DHCP_not_allowed_if_already_DHCP_without_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) static_ip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=interface) static_ip.subnet = None static_ip.save() form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "mode": [ "Interface is already set to DHCP."] }, form.errors) def test__DHCP_not_allowed_default_gateway(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP, "default_gateway": True, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "default_gateway": [ "Cannot use in mode '%s'." 
% (INTERFACE_LINK_TYPE.DHCP)] }, form.errors) def test__DHCP_creates_link_to_DHCP_with_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) dhcp_subnet = factory.make_Subnet(vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP, "subnet": dhcp_subnet.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() dhcp_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.DHCP) self.assertEquals(dhcp_subnet, dhcp_ip.subnet) def test__DHCP_creates_link_to_DHCP_without_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.DHCP, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertIsNotNone( get_one( interface.ip_addresses.filter(alloc_type=IPADDRESS_TYPE.DHCP))) def test__STATIC_requires_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "subnet": ["This field is required."], }, form.errors) def test__STATIC_not_allowed_if_ip_address_not_in_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) network = factory.make_ipv4_network() subnet = factory.make_Subnet( vlan=interface.vlan, cidr=unicode(network.cidr)) ip_not_in_subnet = factory.make_ipv6_address() form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "ip_address": ip_not_in_subnet, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "ip_address": [ "IP address is not in the given subnet '%s'." % subnet] }, form.errors) def test__STATIC_not_allowed_if_ip_address_in_dynamic_range(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) ngi = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) ip_in_dynamic = IPAddress(ngi.get_dynamic_ip_range().first) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "ip_address": "%s" % ip_in_dynamic, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "ip_address": [ "IP address is inside a managed dynamic range %s to %s." 
% ( ngi.ip_range_low, ngi.ip_range_high)] }, form.errors) def test__STATIC_sets_ip_in_unmanaged_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "ip_address": ip, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertIsNotNone( get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet))) def test__STATIC_sets_ip_for_unmanaged_subnet_specifier(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": "%s" % subnet.name, "ip_address": ip, }) self.assertTrue(form.is_valid(), dict(form.errors)) interface = form.save() self.assertIsNotNone( get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet))) def test__STATIC_sets_ip_for_unmanaged_subnet_cidr_specifier(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": "cidr:%s" % subnet.cidr, "ip_address": ip, }) self.assertTrue(form.is_valid(), dict(form.errors)) interface = form.save() self.assertIsNotNone( get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet))) def test__STATIC_sets_ip_in_managed_subnet(self): # Silence update_host_maps. self.patch_autospec(interface_module, "update_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) ngi = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) ip_in_static = IPAddress(ngi.get_static_ip_range().first) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "ip_address": "%s" % ip_in_static, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertIsNotNone( get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip="%s" % ip_in_static, subnet=subnet))) def test__STATIC_picks_ip_in_unmanaged_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() ip_address = get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, subnet=subnet)) self.assertIsNotNone(ip_address) self.assertIn(IPAddress(ip_address.ip), subnet.get_ipnetwork()) def test__STATIC_picks_ip_in_managed_subnet(self): # Silence update_host_maps. 
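        # Illustrative aside (not part of the original test): patch_autospec
        # is roughly equivalent to the following use of the standalone
        # `mock` package, an assumption about the helper shown for context;
        # it swaps the real function for an autospecced mock so that no
        # DHCP host-map RPCs reach the cluster during the test.
        import mock
        _patcher = mock.patch.object(
            interface_module, "update_host_maps", autospec=True)
        _patcher.start()
        self.addCleanup(_patcher.stop)  # undo the patch after the test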
self.patch_autospec(interface_module, "update_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) ngi = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() ip_address = get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, subnet=subnet)) self.assertIsNotNone(ip_address) self.assertIn(IPAddress(ip_address.ip), ngi.get_static_ip_range()) def test__STATIC_sets_node_gateway_link_ipv4(self): # Silence update_host_maps. self.patch_autospec(interface_module, "update_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) network = factory.make_ipv4_network() subnet = factory.make_Subnet( cidr=unicode(network.cidr), vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "default_gateway": True, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() ip_address = get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, subnet=subnet)) node = interface.get_node() self.assertEquals(ip_address, node.gateway_link_ipv4) def test__STATIC_sets_node_gateway_link_ipv6(self): # Silence update_host_maps. self.patch_autospec(interface_module, "update_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) network = factory.make_ipv6_network() subnet = factory.make_Subnet( cidr=unicode(network.cidr), vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "default_gateway": True, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() ip_address = get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, subnet=subnet)) node = interface.get_node() self.assertEquals(ip_address, node.gateway_link_ipv6) def test__LINK_UP_not_allowed_with_other_ip_addresses(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=interface) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.LINK_UP, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "mode": [ "Cannot configure interface to link up (with no IP address) " "while other links are already configured."] }, form.errors) def test__LINK_UP_creates_link_STICKY_with_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) link_subnet = factory.make_Subnet(vlan=interface.vlan) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.LINK_UP, "subnet": link_subnet.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() link_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.STICKY) self.assertIsNone(link_ip.ip) self.assertEquals(link_subnet, link_ip.subnet) def 
test__LINK_UP_creates_link_STICKY_without_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.LINK_UP, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() link_ip = get_one( interface.ip_addresses.filter(alloc_type=IPADDRESS_TYPE.STICKY)) self.assertIsNotNone(link_ip) self.assertIsNone(link_ip.ip) def test__LINK_UP_not_allowed_default_gateway(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceLinkForm(instance=interface, data={ "mode": INTERFACE_LINK_TYPE.LINK_UP, "default_gateway": True, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "default_gateway": [ "Cannot use in mode '%s'." % (INTERFACE_LINK_TYPE.LINK_UP)] }, form.errors) def test_linking_when_no_bond_not_allowed(self): node = factory.make_Node() eth0 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) eth1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) bond0 = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[eth0, eth1], node=node) subnet = factory.make_Subnet(vlan=eth0.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) ngi = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) ip_in_static = IPAddress(ngi.get_static_ip_range().first) form = InterfaceLinkForm(instance=eth0, data={ "mode": INTERFACE_LINK_TYPE.STATIC, "subnet": subnet.id, "ip_address": "%s" % ip_in_static, }) self.assertFalse(form.is_valid()) self.assertEquals({ "bond": [("Cannot link interface(%s) when interface is in a " "bond(%s)." % (eth0.name, bond0.name))]}, form.errors) class TestInterfaceUnlinkForm(MAASServerTestCase): def test__requires_id(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceUnlinkForm(instance=interface, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "id": ["This field is required."], }, form.errors) def test__must_be_valid_id(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) link_id = random.randint(100, 1000) form = InterfaceUnlinkForm(instance=interface, data={ "id": link_id, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "id": ["'%s' is not a valid id. It should be one of: ." 
% ( link_id)], }, form.errors) def test__DHCP_deletes_link_with_unmanaged_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) dhcp_subnet = factory.make_Subnet(vlan=interface.vlan) interface.link_subnet(INTERFACE_LINK_TYPE.DHCP, dhcp_subnet) interface = reload_object(interface) dhcp_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.DHCP) form = InterfaceUnlinkForm(instance=interface, data={ "id": dhcp_ip.id, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertIsNone(reload_object(dhcp_ip)) def test__DHCP_deletes_link_with_managed_subnet(self): self.patch_autospec(interface_module, "remove_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) dhcp_subnet = factory.make_Subnet(vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=dhcp_subnet) interface.link_subnet(INTERFACE_LINK_TYPE.DHCP, dhcp_subnet) interface = reload_object(interface) dhcp_ip = interface.ip_addresses.get(alloc_type=IPADDRESS_TYPE.DHCP) assigned_ip = factory.pick_ip_in_network(dhcp_subnet.get_ipnetwork()) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=assigned_ip, subnet=dhcp_subnet, interface=interface) form = InterfaceUnlinkForm(instance=interface, data={ "id": dhcp_ip.id, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertIsNone(reload_object(dhcp_ip)) def test__STATIC_deletes_link_in_unmanaged_subnet(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) interface.link_subnet( INTERFACE_LINK_TYPE.STATIC, subnet, ip_address=ip) interface = reload_object(interface) static_ip = get_one( interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)) form = InterfaceUnlinkForm(instance=interface, data={ "id": static_ip.id, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertIsNone(reload_object(static_ip)) def test__STATIC_deletes_link_in_managed_subnet(self): self.patch_autospec(interface_module, "remove_host_maps") interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) static_ip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet, interface=interface) form = InterfaceUnlinkForm(instance=interface, data={ "id": static_ip.id, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertIsNone(reload_object(static_ip)) def test__LINK_UP_deletes_link(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) subnet = factory.make_Subnet(vlan=interface.vlan) link_ip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=subnet, interface=interface) form = InterfaceUnlinkForm(instance=interface, data={ "id": link_ip.id, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertIsNone(reload_object(link_ip)) class TestInterfaceSetDefaultGatwayForm(MAASServerTestCase): def make_ip_family_link( self, interface, network, alloc_type=IPADDRESS_TYPE.STICKY): subnet = factory.make_Subnet( cidr=unicode(network.cidr), vlan=interface.vlan) if alloc_type == 
IPADDRESS_TYPE.STICKY: ip = factory.pick_ip_in_network(network) else: ip = "" return factory.make_StaticIPAddress( alloc_type=alloc_type, ip=ip, subnet=subnet, interface=interface) def test__interface_needs_gateways(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) form = InterfaceSetDefaultGatwayForm(instance=interface, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "__all__": ["This interface has no usable gateways."], }, form.errors) def test__doesnt_require_link_id_if_only_one_gateway_per_family(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) self.make_ip_family_link(interface, factory.make_ipv4_network()) self.make_ip_family_link(interface, factory.make_ipv6_network()) form = InterfaceSetDefaultGatwayForm(instance=interface, data={}) self.assertTrue(form.is_valid(), form.errors) def test__requires_link_id_if_more_than_one_gateway_per_family(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) self.make_ip_family_link(interface, factory.make_ipv4_network()) self.make_ip_family_link(interface, factory.make_ipv6_network()) self.make_ip_family_link(interface, factory.make_ipv4_network()) self.make_ip_family_link(interface, factory.make_ipv6_network()) form = InterfaceSetDefaultGatwayForm(instance=interface, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "link_id": [ "This field is required; Interface has more than one " "usable IPv4 and IPv6 gateways."], }, form.errors) def test__link_id_fields_setup_correctly(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) links = [] for _ in range(2): links.append( self.make_ip_family_link( interface, factory.make_ipv4_network())) for _ in range(2): links.append( self.make_ip_family_link( interface, factory.make_ipv6_network())) link_ids = [ link.id for link in links ] form = InterfaceSetDefaultGatwayForm(instance=interface, data={}) choice_ids = [ choice[0] for choice in form.fields["link_id"].choices ] self.assertItemsEqual(link_ids, choice_ids) def test__sets_gateway_links_on_node_when_no_link_id(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) ipv4_link = self.make_ip_family_link( interface, factory.make_ipv4_network()) ipv6_link = self.make_ip_family_link( interface, factory.make_ipv6_network()) form = InterfaceSetDefaultGatwayForm(instance=interface, data={}) self.assertTrue(form.is_valid(), form.errors) interface = form.save() node = interface.get_node() self.assertEquals(ipv4_link, node.gateway_link_ipv4) self.assertEquals(ipv6_link, node.gateway_link_ipv6) def test__sets_gateway_link_v4_on_node_when_link_id(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) ipv4_link = self.make_ip_family_link( interface, factory.make_ipv4_network()) self.make_ip_family_link(interface, factory.make_ipv4_network()) form = InterfaceSetDefaultGatwayForm(instance=interface, data={ "link_id": ipv4_link.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() node = interface.get_node() self.assertEquals(ipv4_link, node.gateway_link_ipv4) def test__sets_gateway_link_v6_on_node_when_link_id(self): interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL) ipv6_link = self.make_ip_family_link( interface, factory.make_ipv6_network()) self.make_ip_family_link(interface, factory.make_ipv6_network()) form = InterfaceSetDefaultGatwayForm(instance=interface, data={ "link_id": ipv6_link.id, }) self.assertTrue(form.is_valid(), form.errors) interface = form.save() node = interface.get_node() 
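        # Illustrative note: a node tracks at most one default-gateway link
        # per address family (gateway_link_ipv4 and gateway_link_ipv6);
        # passing a link_id updates only the family of the chosen link,
        # which is what the assertion below checks on the IPv6 side.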
self.assertEquals(ipv6_link, node.gateway_link_ipv6) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_licensekey.py0000644000000000000000000001746113056115004023737 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `LicenseKeyForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from operator import itemgetter from maasserver import forms from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.forms import LicenseKeyForm from maasserver.models import LicenseKey from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import patch_usable_osystems from maasserver.testing.testcase import MAASServerTestCase class TestLicenseKeyForm(MAASServerTestCase): """Tests for `LicenseKeyForm`.""" def make_os_with_license_key(self): """Makes a fake operating system that has a release that requires a license key.""" release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) return osystem, release def test_creates_license_key(self): osystem, release = self.make_os_with_license_key() key = factory.make_name('key') self.patch_autospec(forms, 'validate_license_key').return_value = True definition = { 'osystem': osystem['name'], 'distro_series': release['name'], 'license_key': key, } data = definition.copy() data['distro_series'] = '%s/%s' % (osystem['name'], release['name']) form = LicenseKeyForm(data=data) form.save() license_key_obj = LicenseKey.objects.get( osystem=osystem['name'], distro_series=release['name']) self.assertAttributes(license_key_obj, definition) def test_updates_license_key(self): osystem, release = self.make_os_with_license_key() self.patch_autospec(forms, 'validate_license_key').return_value = True license_key = factory.make_LicenseKey( osystem=osystem['name'], distro_series=release['name'], license_key=factory.make_name('key')) new_key = factory.make_name('key') form = LicenseKeyForm( data={'license_key': new_key}, instance=license_key) form.save() license_key = reload_object(license_key) self.assertEqual(new_key, license_key.license_key) def test_validates_license_key(self): osystem, release = self.make_os_with_license_key() self.patch_autospec(forms, 'validate_license_key').return_value = False license_key = factory.make_LicenseKey( osystem=osystem['name'], distro_series=release['name'], license_key=factory.make_name('key')) new_key = factory.make_name('key') form = LicenseKeyForm( data={'license_key': new_key}, instance=license_key) self.assertFalse(form.is_valid(), form.errors) self.assertEqual( {'__all__': ['Invalid license key.']}, form.errors) def test_handles_missing_osystem_in_distro_series(self): osystem, release = self.make_os_with_license_key() self.patch_autospec(forms, 'validate_license_key').return_value = True key = factory.make_name('key') definition = { 'osystem': osystem['name'], 'distro_series': release['name'], 'license_key': key, } form = LicenseKeyForm(data=definition.copy()) form.save() license_key_obj = LicenseKey.objects.get( osystem=osystem['name'], distro_series=release['name']) self.assertAttributes(license_key_obj, definition) def test_requires_all_fields(self): form = LicenseKeyForm(data={}) self.assertFalse(form.is_valid(), form.errors) 
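        # Illustrative note: with an empty payload every field fails
        # validation; a minimal valid payload, per the creation tests
        # above, has the shape {'osystem': ..., 'distro_series':
        # '<osystem>/<release>', 'license_key': ...} (placeholder values).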
        self.assertItemsEqual(
            ['osystem', 'distro_series', 'license_key'], form.errors.keys())

    def test_errors_on_not_unique(self):
        osystem, release = self.make_os_with_license_key()
        self.patch_autospec(forms, 'validate_license_key').return_value = True
        key = factory.make_name('key')
        factory.make_LicenseKey(
            osystem=osystem['name'], distro_series=release['name'],
            license_key=key)
        definition = {
            'osystem': osystem['name'],
            'distro_series': release['name'],
            'license_key': key,
            }
        form = LicenseKeyForm(data=definition)
        self.assertFalse(form.is_valid(), form.errors)
        self.assertEqual({
            '__all__': ['%s %s' % (
                "License key with this operating system and distro series",
                "already exists.")]},
            form.errors)

    def test_doesnt_include_default_osystem(self):
        form = LicenseKeyForm()
        self.assertNotIn(('', 'Default OS'), form.fields['osystem'].choices)

    def test_includes_osystem_in_choices(self):
        osystems = []
        for _ in range(3):
            release = make_rpc_release(requires_license_key=True)
            osystems.append(make_rpc_osystem(releases=[release]))
        patch_usable_osystems(self, osystems=osystems)
        choices = [
            (osystem['name'], osystem['title'])
            for osystem in osystems
            ]
        form = LicenseKeyForm()
        self.assertItemsEqual(choices, form.fields['osystem'].choices)

    def test_includes_all_osystems_sorted(self):
        osystems = []
        for _ in range(3):
            release = make_rpc_release(requires_license_key=True)
            osystems.append(make_rpc_osystem(releases=[release]))
        patch_usable_osystems(self, osystems=osystems)
        choices = [
            (osystem['name'], osystem['title'])
            for osystem in sorted(osystems, key=itemgetter('title'))
            ]
        form = LicenseKeyForm()
        self.assertEqual(choices, form.fields['osystem'].choices)

    def test_includes_only_osystems_that_require_license_keys(self):
        osystems = []
        for _ in range(2):
            release = make_rpc_release(requires_license_key=True)
            osystems.append(make_rpc_osystem(releases=[release]))
        patch_usable_osystems(self, osystems=osystems + [make_rpc_osystem()])
        choices = [
            (osystem['name'], osystem['title'])
            for osystem in sorted(osystems, key=itemgetter('title'))
            ]
        form = LicenseKeyForm()
        self.assertEquals(choices, form.fields['osystem'].choices)

    def test_doesnt_include_default_distro_series(self):
        form = LicenseKeyForm()
        self.assertNotIn(
            ('', 'Default OS Release'), form.fields['distro_series'].choices)

    def test_includes_all_distro_series(self):
        releases = [
            make_rpc_release(requires_license_key=True) for _ in range(3)]
        osystem = make_rpc_osystem(releases=releases)
        patch_usable_osystems(self, osystems=[osystem])
        choices = [
            ('%s/%s' % (osystem['name'], release['name']), release['title'])
            for release in releases
            ]
        form = LicenseKeyForm()
        self.assertItemsEqual(choices, form.fields['distro_series'].choices)

    def test_includes_only_distro_series_that_require_license_keys(self):
        releases = [
            make_rpc_release(requires_license_key=True) for _ in range(3)]
        no_key_release = make_rpc_release()
        osystem = make_rpc_osystem(releases=releases + [no_key_release])
        patch_usable_osystems(self, osystems=[osystem])
        choices = [
            ('%s/%s' % (osystem['name'], release['name']), release['title'])
            for release in releases
            ]
        form = LicenseKeyForm()
        self.assertItemsEqual(choices, form.fields['distro_series'].choices)
maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_merge_error_messages.py0000644000000000000000000000332713056115004025777 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
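# Illustrative sketch of the behaviour pinned down by the tests below.
# This is an assumption about merge_error_messages, not the real code;
# the actual MAX_MESSAGES value lives in maasserver.forms (3 is assumed
# here purely for illustration).
def _sketch_merge_error_messages(summary, errors, limit=3):
    shown = u" \u2014 ".join(errors[:limit])  # em-dash separated details
    extra = len(errors) - limit  # messages beyond the display limit
    if extra > 0:
        suffix = u" and %d more error%s" % (extra, u"s" if extra > 1 else u"")
    else:
        suffix = u""
    return u"%s (%s%s)" % (summary, shown, suffix)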
"""Tests for `merge_error_messages`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import ( MAX_MESSAGES, merge_error_messages, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestMergeErrorMessages(MAASServerTestCase): def test_merge_error_messages_returns_summary_message(self): summary = factory.make_name('summary') errors = [factory.make_name('error') for _ in range(2)] result = merge_error_messages(summary, errors, 5) self.assertEqual( "%s (%s)" % (summary, ' \u2014 '.join(errors)), result) def test_merge_error_messages_includes_limited_number_of_msgs(self): summary = factory.make_name('summary') errors = [ factory.make_name('error') for _ in range(MAX_MESSAGES + 2)] result = merge_error_messages(summary, errors) self.assertEqual( "%s (%s and 2 more errors)" % ( summary, ' \u2014 '.join(errors[:MAX_MESSAGES])), result) def test_merge_error_messages_with_one_more_error(self): summary = factory.make_name('summary') errors = [ factory.make_name('error') for _ in range(MAX_MESSAGES + 1)] result = merge_error_messages(summary, errors) self.assertEqual( "%s (%s and 1 more error)" % ( summary, ' \u2014 '.join(errors[:MAX_MESSAGES])), result) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_multiplechoicefield.py0000644000000000000000000000262713056115004025614 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for multiple-choice fields.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.exceptions import ValidationError from django.core.validators import validate_email from maasserver.forms import ( UnconstrainedMultipleChoiceField, ValidatorMultipleChoiceField, ) from maasserver.testing.testcase import MAASServerTestCase class TestUnconstrainedMultipleChoiceField(MAASServerTestCase): def test_accepts_list(self): value = ['a', 'b'] instance = UnconstrainedMultipleChoiceField() self.assertEqual(value, instance.clean(value)) class TestValidatorMultipleChoiceField(MAASServerTestCase): def test_field_validates_valid_data(self): value = ['test@example.com', 'me@example.com'] field = ValidatorMultipleChoiceField(validator=validate_email) self.assertEqual(value, field.clean(value)) def test_field_uses_validator(self): value = ['test@example.com', 'invalid-email'] field = ValidatorMultipleChoiceField(validator=validate_email) error = self.assertRaises(ValidationError, field.clean, value) self.assertEquals(['Enter a valid email address.'], error.messages) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_node.py0000644000000000000000000005720513056115004022531 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for node forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from crochet import TimeoutError from django.forms import ( CheckboxInput, HiddenInput, ) from maasserver import forms from maasserver.clusterrpc.power_parameters import get_power_type_choices from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.enum import ( NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.forms import ( AdminNodeForm, BLANK_CHOICE, NodeForm, pick_default_architecture, ) from maasserver.testing.architecture import ( make_usable_architecture, patch_usable_architectures, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import ( make_osystem_with_releases, make_usable_osystem, patch_usable_osystems, ) from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith from provisioningserver.rpc.exceptions import ( NoConnectionsAvailable, NoSuchOperatingSystem, ) class TestNodeForm(MAASServerTestCase): def test_contains_limited_set_of_fields(self): form = NodeForm() self.assertEqual( [ 'hostname', 'architecture', 'osystem', 'distro_series', 'license_key', 'disable_ipv4', 'swap_size', 'boot_type', 'min_hwe_kernel', 'hwe_kernel', 'nodegroup', ], list(form.fields)) def test_changes_node(self): node = factory.make_Node() hostname = factory.make_string() patch_usable_architectures(self, [node.architecture]) form = NodeForm( data={ 'hostname': hostname, 'architecture': make_usable_architecture(self), }, instance=node) form.save() self.assertEqual(hostname, node.hostname) def test_accepts_usable_architecture(self): arch = make_usable_architecture(self) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': arch, }) self.assertTrue(form.is_valid(), form._errors) def test_rejects_unusable_architecture(self): patch_usable_architectures(self) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': factory.make_name('arch'), }) self.assertFalse(form.is_valid()) self.assertItemsEqual(['architecture'], form._errors.keys()) def test_starts_with_default_architecture(self): arches = sorted([factory.make_name('arch') for _ in range(5)]) patch_usable_architectures(self, arches) form = NodeForm() self.assertEqual( pick_default_architecture(arches), form.fields['architecture'].initial) def test_form_validates_hwe_kernel_by_passing_invalid_config(self): self.client_log_in() node = factory.make_Node( owner=self.logged_in_user) osystem = make_usable_osystem(self) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'min_hwe_kernel': 'hwe-t', 'hwe_kernel': 'hwe-p', }, instance=node) self.assertEqual(form.is_valid(), False) def test_form_validates_min_hwe_kernel_by_passing_invalid_config(self): node = factory.make_Node(min_hwe_kernel='hwe-t') form = NodeForm(instance=node) self.assertEqual(form.is_valid(), False) def test_adds_blank_default_when_no_arches_available(self): patch_usable_architectures(self, []) form = NodeForm() self.assertEqual( [BLANK_CHOICE], form.fields['architecture'].choices) def test_accepts_osystem(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystem = make_usable_osystem(self) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': 
make_usable_architecture(self), 'osystem': osystem['name'], }, instance=node) self.assertTrue(form.is_valid(), form._errors) def test_rejects_invalid_osystem(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) patch_usable_osystems(self) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': factory.make_name('os'), }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['osystem'], form._errors.keys()) def test_starts_with_default_osystem(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystems = [make_osystem_with_releases(self) for _ in range(5)] patch_usable_osystems(self, osystems) form = NodeForm(instance=node) self.assertEqual( '', form.fields['osystem'].initial) def test_accepts_osystem_distro_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystem = make_usable_osystem(self) release = osystem['default_release'] form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s' % (osystem['name'], release), }, instance=node) self.assertTrue(form.is_valid(), form._errors) def test_rejects_invalid_osystem_distro_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystem = make_usable_osystem(self) release = factory.make_name('release') form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s' % (osystem['name'], release), }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['distro_series'], form._errors.keys()) def test_set_distro_series_accepts_short_distro_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = factory.make_name('release') make_usable_osystem( self, releases=[release + '6', release + '0', release + '3']) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), }, instance=node) form.set_distro_series(release) form.save() self.assertEquals(release + '6', node.distro_series) def test_set_distro_series_doesnt_allow_short_ubuntu_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) make_usable_osystem( self, osystem_name='ubuntu', releases=['trusty']) form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), }, instance=node) form.set_distro_series('trust') self.assertFalse(form.is_valid()) def test_starts_with_default_distro_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystems = [make_osystem_with_releases(self) for _ in range(5)] patch_usable_osystems(self, osystems) form = NodeForm(instance=node) self.assertEqual( '', form.fields['distro_series'].initial) def test_rejects_mismatch_osystem_distro_series(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) osystem = make_usable_osystem(self) release = osystem['default_release'] invalid = factory.make_name('invalid_os') form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s' % (invalid, release), }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['distro_series'], form._errors.keys()) def 
test_rejects_when_validate_license_key_returns_False(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate = self.patch(forms, 'validate_license_key') mock_validate.return_value = False form = NodeForm(data={ 'hostname': factory.make_name('host'), 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['license_key'], form._errors.keys()) def test_calls_validate_license_key_for_with_nodegroup(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate_for = self.patch(forms, 'validate_license_key_for') mock_validate_for.return_value = True form = NodeForm(data={ 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertTrue(form.is_valid()) self.assertThat( mock_validate_for, MockCalledOnceWith( node.nodegroup, osystem['name'], release['name'], license_key)) def test_rejects_when_validate_license_key_for_returns_False(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate_for = self.patch(forms, 'validate_license_key_for') mock_validate_for.return_value = False form = NodeForm(data={ 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['license_key'], form._errors.keys()) def test_rejects_when_validate_license_key_for_raise_no_connection(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate_for = self.patch(forms, 'validate_license_key_for') mock_validate_for.side_effect = NoConnectionsAvailable() form = NodeForm(data={ 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['license_key'], form._errors.keys()) def test_rejects_when_validate_license_key_for_raise_timeout(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate_for = self.patch(forms, 'validate_license_key_for') mock_validate_for.side_effect = TimeoutError() form = NodeForm(data={ 'architecture': 
make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['license_key'], form._errors.keys()) def test_rejects_when_validate_license_key_for_raise_no_os(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) patch_usable_osystems(self, osystems=[osystem]) license_key = factory.make_name('key') mock_validate_for = self.patch(forms, 'validate_license_key_for') mock_validate_for.side_effect = NoSuchOperatingSystem() form = NodeForm(data={ 'architecture': make_usable_architecture(self), 'osystem': osystem['name'], 'distro_series': '%s/%s*' % (osystem['name'], release['name']), 'license_key': license_key, }, instance=node) self.assertFalse(form.is_valid()) self.assertItemsEqual(['license_key'], form._errors.keys()) def test_rejects_duplicate_fqdn_with_unmanaged_dns_on_one_nodegroup(self): # If a host with a given hostname exists on a managed nodegroup, # new nodes on unmanaged nodegroups with hostnames that match # that FQDN will be rejected. nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) node = factory.make_Node( hostname=factory.make_name("hostname"), nodegroup=nodegroup) other_nodegroup = factory.make_NodeGroup() form = NodeForm(data={ 'nodegroup': other_nodegroup, 'hostname': node.fqdn, 'architecture': make_usable_architecture(self), }) form.instance.nodegroup = other_nodegroup self.assertFalse(form.is_valid()) def test_rejects_duplicate_fqdn_on_same_nodegroup(self): # If a node with a given FQDN exists on a managed nodegroup, new # nodes on that nodegroup with duplicate FQDNs will be rejected. 
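        # Illustrative note: a node's FQDN is its short hostname qualified
        # by the DNS zone its cluster manages (for example, hostname "web1"
        # in zone "maas" yields "web1.maas"; example values only), so two
        # nodes collide only when both parts match.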
nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) node = factory.make_Node( hostname=factory.make_name("hostname"), nodegroup=nodegroup) form = NodeForm(data={ 'nodegroup': nodegroup, 'hostname': node.fqdn, 'architecture': make_usable_architecture(self), }) form.instance.nodegroup = nodegroup self.assertFalse(form.is_valid()) def test_obeys_disable_ipv4_if_given(self): setting = factory.pick_bool() cluster = factory.make_NodeGroup(default_disable_ipv4=(not setting)) form = NodeForm( data={ 'nodegroup': cluster, 'architecture': make_usable_architecture(self), 'disable_ipv4': setting, }) form.instance.nodegroup = cluster node = form.save() self.assertEqual(setting, node.disable_ipv4) def test_takes_missing_disable_ipv4_as_False_in_UI(self): form = NodeForm( instance=factory.make_Node(disable_ipv4=True), data={ 'architecture': make_usable_architecture(self), 'ui_submission': True, }) node = form.save() self.assertFalse(node.disable_ipv4) def test_takes_missing_disable_ipv4_as_Unchanged_in_API(self): form = NodeForm( instance=factory.make_Node(disable_ipv4=True), data={ 'architecture': make_usable_architecture(self), }) node = form.save() self.assertTrue(node.disable_ipv4) def test_takes_True_disable_ipv4_from_cluster_by_default(self): setting = True cluster = factory.make_NodeGroup(default_disable_ipv4=setting) form = NodeForm( data={ 'nodegroup': cluster, 'architecture': make_usable_architecture(self), }) form.instance.nodegroup = cluster node = form.save() self.assertEqual(setting, node.disable_ipv4) def test_takes_False_disable_ipv4_from_cluster_by_default(self): setting = False cluster = factory.make_NodeGroup(default_disable_ipv4=setting) form = NodeForm( data={ 'nodegroup': cluster, 'architecture': make_usable_architecture(self), }) form.instance.nodegroup = cluster node = form.save() self.assertEqual(setting, node.disable_ipv4) def test_shows_disable_ipv4_if_IPv6_configured(self): node = factory.make_Node_with_Interface_on_Subnet( cidr=unicode(factory.make_ipv6_network().cidr)) form = NodeForm( instance=node, data={'architecture': make_usable_architecture(self)}) self.assertIsInstance( form.fields['disable_ipv4'].widget, CheckboxInput) def test_hides_disable_ipv4_if_IPv6_not_configured(self): node = factory.make_Node_with_Interface_on_Subnet( cidr=unicode(factory.make_ipv4_network().cidr)) form = NodeForm( instance=node, data={'architecture': make_usable_architecture(self)}) self.assertIsInstance(form.fields['disable_ipv4'].widget, HiddenInput) def test_shows_disable_ipv4_on_new_node_if_any_cluster_supports_it(self): factory.make_Node_with_Interface_on_Subnet( cidr=unicode(factory.make_ipv6_network().cidr)) form = NodeForm(data={'architecture': make_usable_architecture(self)}) self.assertIsInstance( form.fields['disable_ipv4'].widget, CheckboxInput) def test_hides_disable_ipv4_on_new_node_if_no_cluster_supports_it(self): factory.make_NodeGroupInterface( factory.make_NodeGroup(), network=factory.make_ipv6_network(), management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) form = NodeForm(data={'architecture': make_usable_architecture(self)}) self.assertIsInstance(form.fields['disable_ipv4'].widget, HiddenInput) class TestAdminNodeForm(MAASServerTestCase): def test_AdminNodeForm_contains_limited_set_of_fields(self): self.client_log_in() node = factory.make_Node(owner=self.logged_in_user) form = AdminNodeForm(instance=node) self.assertEqual( [ 'hostname', 'architecture', 'osystem', 'distro_series', 'license_key', 
'disable_ipv4', 'swap_size', 'boot_type', 'min_hwe_kernel', 'hwe_kernel', 'power_type', 'power_parameters', 'cpu_count', 'memory', 'zone', ], list(form.fields)) def test_AdminNodeForm_initialises_zone(self): # The zone field uses "to_field_name", so that it can refer to a zone # by name instead of by ID. A bug in Django breaks initialisation # from an instance: the field tries to initialise the field using a # zone's ID instead of its name, and ends up reverting to the default. # The code must work around this bug. zone = factory.make_Zone() node = factory.make_Node(zone=zone) # We'll create a form that makes a change, but not to the zone. data = {'hostname': factory.make_name('host')} form = AdminNodeForm(instance=node, data=data) # The Django bug would stop the initial field value from being set, # but the workaround ensures that it is initialised. self.assertEqual(zone.name, form.initial['zone']) def test_AdminNodeForm_changes_node(self): node = factory.make_Node() zone = factory.make_Zone() hostname = factory.make_string() power_type = factory.pick_power_type() form = AdminNodeForm( data={ 'hostname': hostname, 'power_type': power_type, 'architecture': make_usable_architecture(self), 'zone': zone.name, }, instance=node) form.save() node = reload_object(node) self.assertEqual( (node.hostname, node.power_type, node.zone), (hostname, power_type, zone)) def test_AdminNodeForm_populates_power_type_choices(self): form = AdminNodeForm() self.assertEqual( [''] + [choice[0] for choice in get_power_type_choices()], [choice[0] for choice in form.fields['power_type'].choices]) def test_AdminNodeForm_populates_power_type_initial(self): node = factory.make_Node() form = AdminNodeForm(instance=node) self.assertEqual(node.power_type, form.fields['power_type'].initial) def test_AdminNodeForm_changes_node_with_skip_check(self): node = factory.make_Node() hostname = factory.make_string() power_type = factory.pick_power_type() power_parameters_field = factory.make_string() arch = make_usable_architecture(self) form = AdminNodeForm( data={ 'hostname': hostname, 'architecture': arch, 'power_type': power_type, 'power_parameters_field': power_parameters_field, 'power_parameters_skip_check': True, }, instance=node) form.save() self.assertEqual( (hostname, power_type, {'field': power_parameters_field}), (node.hostname, node.power_type, node.power_parameters)) def test_AdminForm_does_not_permit_nodegroup_change(self): # We had to make Node.nodegroup editable to get Django to # validate it as non-blankable, but that doesn't mean that we # actually want to allow people to edit it through API or UI. old_nodegroup = factory.make_NodeGroup() node = factory.make_Node( nodegroup=old_nodegroup, architecture=make_usable_architecture(self)) new_nodegroup = factory.make_NodeGroup() AdminNodeForm(data={'nodegroup': new_nodegroup}, instance=node).save() # The form saved without error, but the nodegroup change was ignored. self.assertEqual(old_nodegroup, node.nodegroup) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_nodegroup.py0000644000000000000000000004536113056115004023606 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for nodegroup forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import json from random import randint from unittest import skip from django.forms import ( CheckboxInput, HiddenInput, ) from maasserver.enum import ( NODE_STATUS, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.forms import ( INTERFACES_VALIDATION_ERROR_MESSAGE, NodeGroupDefineForm, NodeGroupEdit, ) from maasserver.models import ( NodeGroup, NodeGroupInterface, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPNetwork from provisioningserver.utils.enum import map_enum from testtools.matchers import ( HasLength, MatchesStructure, StartsWith, ) class TestNodeGroupDefineForm(MAASServerTestCase): def test_creates_pending_nodegroup(self): name = factory.make_name('name') uuid = factory.make_UUID() form = NodeGroupDefineForm(data={'name': name, 'uuid': uuid}) self.assertTrue(form.is_valid(), form._errors) nodegroup = form.save() self.assertEqual( (uuid, name, NODEGROUP_STATUS.ENABLED, 0), ( nodegroup.uuid, nodegroup.name, nodegroup.status, nodegroup.nodegroupinterface_set.count(), )) def test_creates_nodegroup_with_status(self): name = factory.make_name('name') uuid = factory.make_UUID() form = NodeGroupDefineForm( status=NODEGROUP_STATUS.ENABLED, data={'name': name, 'uuid': uuid}) self.assertTrue(form.is_valid(), form._errors) nodegroup = form.save() self.assertEqual(NODEGROUP_STATUS.ENABLED, nodegroup.status) def test_validates_parameters(self): name = factory.make_name('name') too_long_uuid = 'test' * 30 form = NodeGroupDefineForm( data={'name': name, 'uuid': too_long_uuid}) self.assertFalse(form.is_valid()) self.assertEquals( {'uuid': ['Ensure this value has at most 36 characters (it has 120).']}, form._errors) def test_rejects_invalid_json_interfaces(self): name = factory.make_name('name') uuid = factory.make_UUID() invalid_interfaces = factory.make_name('invalid_json_interfaces') form = NodeGroupDefineForm( data={ 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) self.assertFalse(form.is_valid()) self.assertEquals( {'interfaces': ['Invalid json value.']}, form._errors) def test_rejects_invalid_list_interfaces(self): name = factory.make_name('name') uuid = factory.make_UUID() invalid_interfaces = json.dumps('invalid interface list') form = NodeGroupDefineForm( data={ 'name': name, 'uuid': uuid, 'interfaces': invalid_interfaces}) self.assertFalse(form.is_valid()) self.assertEquals( {'interfaces': [INTERFACES_VALIDATION_ERROR_MESSAGE]}, form._errors) @skip( "XXX: GavinPanella 2015-10-30 bug=1511689: This test keeps failing " "when landing unrelated branches, so has been disabled.") def test_rejects_invalid_interface(self): name = factory.make_name('name') uuid = factory.make_UUID() interface = factory.get_interface_fields() # Make the interface invalid. 
interface['ip_range_high'] = 'invalid IP address' interfaces = json.dumps([interface]) form = NodeGroupDefineForm( data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) self.assertFalse(form.is_valid()) self.assertIn( "Enter a valid IPv4 or IPv6 address", form._errors['interfaces'][0]) def test_creates_interface_from_params(self): name = factory.make_name('name') uuid = factory.make_UUID() interface = factory.get_interface_fields() interfaces = json.dumps([interface]) form = NodeGroupDefineForm( data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) self.assertTrue(form.is_valid(), form._errors) nodegroup = form.save() # Replace empty strings with None as empty strings are converted into # None for fields with null=True. expected_result = { key: (value if value != '' else None) for key, value in interface.items() } del expected_result["subnet"] self.assertThat( nodegroup.nodegroupinterface_set.all()[0], MatchesStructure.byEquality(**expected_result)) def test_accepts_unnamed_cluster_interface(self): uuid = factory.make_UUID() interface = factory.get_interface_fields() del interface['name'] interfaces = json.dumps([interface]) form = NodeGroupDefineForm( data={ 'name': factory.make_name('cluster'), 'uuid': uuid, 'interfaces': interfaces, }) self.assertTrue(form.is_valid(), form._errors) cluster = form.save() [cluster_interface] = cluster.nodegroupinterface_set.all() self.assertEqual(interface['interface'], cluster_interface.name) self.assertEqual(interface['interface'], cluster_interface.interface) def test_checks_against_conflicting_managed_networks(self): big_network = IPNetwork('10.0.0.0/255.255.0.0') nested_network = IPNetwork('10.0.100.0/255.255.255.0') managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP form = NodeGroupDefineForm( data={ 'name': factory.make_name('cluster'), 'uuid': factory.make_UUID(), 'interfaces': json.dumps([ factory.get_interface_fields( network=big_network, management=managed), factory.get_interface_fields( network=nested_network, management=managed), ]), }) self.assertFalse(form.is_valid()) self.assertNotEqual([], form._errors['interfaces']) self.assertThat( form._errors['interfaces'][0], StartsWith("Conflicting networks")) def test_ignores_conflicts_on_unmanaged_interfaces(self): big_network = IPNetwork('10.0.0.0/255.255.0.0') nested_network = IPNetwork('10.100.100.0/255.255.255.0') managed = NODEGROUPINTERFACE_MANAGEMENT.DHCP unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED form = NodeGroupDefineForm( data={ 'name': factory.make_name('cluster'), 'uuid': factory.make_UUID(), 'interfaces': json.dumps([ factory.get_interface_fields( network=big_network, management=managed), factory.get_interface_fields( network=nested_network, management=unmanaged), ]), }) is_valid = form.is_valid() self.assertEqual( (True, None), (is_valid, form._errors.get('interfaces'))) def test_creates_multiple_interfaces(self): name = factory.make_name('name') uuid = factory.make_UUID() interfaces = [ factory.get_interface_fields(management=management) for management in map_enum(NODEGROUPINTERFACE_MANAGEMENT).values() ] form = NodeGroupDefineForm( data={ 'name': name, 'uuid': uuid, 'interfaces': json.dumps(interfaces), }) self.assertTrue(form.is_valid(), form._errors) form.save() nodegroup = NodeGroup.objects.get(uuid=uuid) self.assertEqual( len(interfaces), nodegroup.nodegroupinterface_set.count()) def test_filters_interface_by_type_when_json_provided(self): name = factory.make_name('name') uuid = factory.make_UUID() interfaces = [ {u'interface': 'pci0', u'ip': u'92.140.219.3', 
u'subnet_mask': u'255.255.255.255'}, {u'interface': u'eth0', u'ip': u'78.146.80.12', u'subnet_mask': u'255.255.255.255'} ] input_json = { "pci0": {"type": "ethernet.physical"}, } form = NodeGroupDefineForm( data={ 'name': name, 'uuid': uuid, 'interfaces': json.dumps(interfaces), 'ip_addr_json': json.dumps(input_json), }) self.assertTrue(form.is_valid(), form._errors) form.save() nodegroup = NodeGroup.objects.get(uuid=uuid) self.assertEqual( 1, nodegroup.nodegroupinterface_set.count()) ngi = nodegroup.nodegroupinterface_set.first() self.assertEqual('pci0', ngi.interface) def test_filters_interface_by_name_when_json_not_provided(self): name = factory.make_name('name') uuid = factory.make_UUID() interfaces = [ {u'interface': 'pci0', u'ip': u'92.140.219.3', u'subnet_mask': u'255.255.255.255'}, {u'interface': u'eth0', u'ip': u'78.146.80.12', u'subnet_mask': u'255.255.255.255'} ] form = NodeGroupDefineForm( data={ 'name': name, 'uuid': uuid, 'interfaces': json.dumps(interfaces), }) self.assertTrue(form.is_valid(), form._errors) form.save() nodegroup = NodeGroup.objects.get(uuid=uuid) self.assertEqual( 1, nodegroup.nodegroupinterface_set.count()) ngi = nodegroup.nodegroupinterface_set.first() self.assertEqual('eth0', ngi.interface) def test_populates_cluster_name_default(self): name = factory.make_name('name') uuid = factory.make_UUID() form = NodeGroupDefineForm( status=NODEGROUP_STATUS.ENABLED, data={'name': name, 'uuid': uuid}) self.assertTrue(form.is_valid(), form._errors) nodegroup = form.save() self.assertIn(uuid, nodegroup.cluster_name) def test_populates_cluster_name(self): cluster_name = factory.make_name('cluster_name') uuid = factory.make_UUID() form = NodeGroupDefineForm( status=NODEGROUP_STATUS.ENABLED, data={'cluster_name': cluster_name, 'uuid': uuid}) self.assertTrue(form.is_valid(), form._errors) nodegroup = form.save() self.assertEqual(cluster_name, nodegroup.cluster_name) def test_creates_unmanaged_interfaces(self): name = factory.make_name('name') uuid = factory.make_UUID() interface = factory.get_interface_fields() del interface['management'] interfaces = json.dumps([interface]) form = NodeGroupDefineForm( data={'name': name, 'uuid': uuid, 'interfaces': interfaces}) self.assertTrue(form.is_valid(), form._errors) form.save() uuid_nodegroup = NodeGroup.objects.get(uuid=uuid) self.assertEqual( [NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED], [ nodegroup.management for nodegroup in uuid_nodegroup.nodegroupinterface_set.all() ]) def test_gives_disambiguation_preference_to_IPv4(self): network_interface = factory.make_name('eth', sep='') ipv4_network = factory.make_ipv4_network() # We'll be creating a cluster with two interfaces, both using the same # network interface: an IPv4 one and an IPv6 one. # We randomise the ordering of this list to rule out special treatment # based on definition order. interfaces = sorted( [ factory.get_interface_fields( network=factory.make_ipv6_network(slash=64), interface=network_interface), factory.get_interface_fields( network=ipv4_network, interface=network_interface), ], cmp=lambda left, right: randint(-1, 1)) # We're not going to pass names for these cluster interfaces, so the # form will have to make some up based on the network interface name. 
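# --- Editor's sketch (not part of the test suite): one way the naming rule
# --- asserted below could work; the form's real disambiguation logic lives
# --- in maasserver.forms and may differ. The dicts here are stand-ins, not
# --- factory-generated interface definitions.
def sketch_assign_names(nic_name, definitions):
    """Give the IPv4 definition the bare NIC name; suffix the IPv6 one."""
    names = {}
    for index, definition in enumerate(definitions):
        if definition['ip_version'] == 4:
            names[nic_name] = definition
        else:
            names['%s-ipv6-%d' % (nic_name, index)] = definition
    return names

sketch_names = sketch_assign_names(
    'eth0', [{'ip_version': 6}, {'ip_version': 4}])
assert sketch_names['eth0'] == {'ip_version': 4}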
for definition in interfaces: del definition['name'] form = NodeGroupDefineForm( data={ 'name': factory.make_name('cluster'), 'uuid': factory.make_UUID(), 'interfaces': json.dumps(interfaces), }) self.assertTrue(form.is_valid(), form._errors) cluster = form.save() # All of the cluster interfaces' names are unique and based on the # network interface name, but the IPv4 one gets the unadorned name. interfaces_by_name = { interface.name: interface for interface in cluster.nodegroupinterface_set.all() } self.expectThat(interfaces_by_name, HasLength(len(interfaces))) self.assertIn(network_interface, interfaces_by_name) self.assertEqual( ipv4_network, interfaces_by_name[network_interface].network) class TestNodeGroupEdit(MAASServerTestCase): def make_form_data(self, nodegroup): """Create `NodeGroupEdit` form data based on `nodegroup`.""" return { 'name': nodegroup.name, 'cluster_name': nodegroup.cluster_name, 'status': nodegroup.status, } def test_changes_name(self): nodegroup = factory.make_NodeGroup(name=factory.make_name('old-name')) new_name = factory.make_name('new-name') data = self.make_form_data(nodegroup) data['name'] = new_name form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(new_name, reload_object(nodegroup).name) def test_refuses_name_change_if_dns_managed_and_nodes_in_use(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertFalse(form.is_valid()) def test_accepts_unchanged_name(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() original_name = nodegroup.name form = NodeGroupEdit( instance=nodegroup, data=self.make_form_data(nodegroup)) self.assertTrue(form.is_valid()) form.save() self.assertEqual(original_name, reload_object(nodegroup).name) def test_accepts_omitted_name(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() original_name = nodegroup.name data = self.make_form_data(nodegroup) del data['name'] form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(original_name, reload_object(nodegroup).name) def test_accepts_name_change_if_nodegroup_not_accepted(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() nodegroup.status = NODEGROUP_STATUS.DISABLED data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) def test_accepts_name_change_if_dns_managed_but_no_nodes_in_use(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() node.status = NODE_STATUS.READY node.save() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(data['name'], reload_object(nodegroup).name) def test_accepts_name_change_if_nodes_in_use_but_dns_not_managed(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() [interface] = nodegroup.get_managed_interfaces() interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP interface.save() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(data['name'], reload_object(nodegroup).name) def 
test_accepts_name_change_if_nodegroup_has_no_interface(self): nodegroup, node = factory.make_unrenamable_NodeGroup_with_Node() NodeGroupInterface.objects.filter(nodegroup=nodegroup).delete() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(data['name'], reload_object(nodegroup).name) def test_shows_default_disable_ipv4_if_managed_ipv6_configured(self): nodegroup = factory.make_NodeGroup() factory.make_NodeGroupInterface( nodegroup, network=factory.make_ipv6_network(), management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) form = NodeGroupEdit(instance=nodegroup) self.assertIsInstance( form.fields['default_disable_ipv4'].widget, CheckboxInput) def test_hides_default_disable_ipv4_if_no_managed_ipv6_configured(self): nodegroup = factory.make_NodeGroup() eth = factory.make_name('eth') factory.make_NodeGroupInterface( nodegroup, network=factory.make_ipv4_network(), interface=eth, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) factory.make_NodeGroupInterface( nodegroup, network=factory.make_ipv6_network(), interface=eth, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) form = NodeGroupEdit(instance=nodegroup) self.assertIsInstance( form.fields['default_disable_ipv4'].widget, HiddenInput) def test_default_disable_ipv4_field_ignores_other_nodegroups(self): factory.make_NodeGroupInterface( factory.make_NodeGroup(), network=factory.make_ipv6_network(), management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) nodegroup = factory.make_NodeGroup() form = NodeGroupEdit(instance=nodegroup) self.assertIsInstance( form.fields['default_disable_ipv4'].widget, HiddenInput) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_nodegroupinterface.py0000644000000000000000000006606513056115004025473 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.enum import ( NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.forms import ( ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S, ERROR_MESSAGE_STATIC_RANGE_IN_USE, NodeGroupInterfaceForm, ) from maasserver.models import ( Fabric, NodeGroupInterface, VLAN, ) from maasserver.models.staticipaddress import StaticIPAddress from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith from netaddr import ( IPAddress, IPNetwork, ) from testtools.matchers import ( AllMatch, Contains, Equals, StartsWith, ) nullable_fields = [ 'subnet_mask', 'router_ip', 'ip_range_low', 'ip_range_high', 'static_ip_range_low', 'static_ip_range_high', ] def make_ngi_instance(nodegroup=None): """Create a `NodeGroupInterface` with nothing set but `nodegroup`. This is used by tests to instantiate the cluster interface form for a given cluster. We create an initial cluster interface object just to tell it which cluster that is. 
""" if nodegroup is None: nodegroup = factory.make_NodeGroup() return NodeGroupInterface(nodegroup=nodegroup) class TestNodeGroupInterfaceForm(MAASServerTestCase): def test__validates_parameters(self): form = NodeGroupInterfaceForm( data={'ip': factory.make_string()}, instance=make_ngi_instance()) self.assertFalse(form.is_valid()) self.assertEquals( {'ip': ['Enter a valid IPv4 or IPv6 address.']}, form._errors) def test__can_save_fields_being_None(self): int_settings = factory.get_interface_fields() int_settings['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED for field_name in nullable_fields: del int_settings[field_name] form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form.is_valid()) interface = form.save() field_values = [ getattr(interface, field_name) for field_name in nullable_fields] self.assertThat(field_values, AllMatch(Equals(''))) def test__uses_name_if_given(self): name = factory.make_name('explicit-name') int_settings = factory.get_interface_fields() int_settings['name'] = name form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form.is_valid(), form.errors) interface = form.save() self.assertEqual(name, interface.name) def test__lets_name_default_to_network_interface_name(self): int_settings = factory.get_interface_fields() int_settings['interface'] = factory.make_name('ether') del int_settings['name'] form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form.is_valid()) interface = form.save() self.assertEqual(int_settings['interface'], interface.name) def test__escapes_interface_name(self): int_settings = factory.get_interface_fields() int_settings['interface'] = 'eth1+1' del int_settings['name'] form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form.is_valid()) interface = form.save() self.assertEqual('eth1--1', interface.name) def test__populates_subnet_mask_from_instance(self): network = factory._make_random_network() nodegroup = factory.make_NodeGroup() ngi = factory.make_NodeGroupInterface( nodegroup, network=network, ip=unicode(IPAddress(network.first)), management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) form = NodeGroupInterfaceForm(data={}, instance=ngi) self.assertEqual( unicode(IPAddress(network.netmask)), form.initial.get('subnet_mask')) self.assertTrue(form.is_valid(), dict(form.errors)) self.assertEqual( unicode(IPAddress(network.netmask)), form.cleaned_data.get('subnet_mask')) def test__rejects_missing_subnet_mask_if_managed(self): int_settings = factory.get_interface_fields( management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) del int_settings['subnet_mask'] form = NodeGroupInterfaceForm(data=int_settings) self.assertFalse(form.is_valid()) message = ( "That field cannot be empty (unless that interface is " "'unmanaged')") self.assertEqual({'subnet_mask': [message]}, form.errors) def test__defaults_to_unique_name_if_no_name_or_interface_given(self): int_settings = factory.get_interface_fields( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) del int_settings['name'] del int_settings['interface'] form1 = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form1.is_valid()) interface1 = form1.save() form2 = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form2.is_valid()) interface2 = form2.save() self.assertNotIn(interface1.name, [None, '']) self.assertNotIn(interface2.name, [None, '']) 
self.assertNotEqual(interface1.name, interface2.name) def test__disambiguates_default_name(self): cluster = factory.make_NodeGroup() existing_interface = factory.make_NodeGroupInterface(cluster) int_settings = factory.get_interface_fields() del int_settings['name'] int_settings['interface'] = existing_interface.name form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance(cluster)) self.assertTrue(form.is_valid()) interface = form.save() self.assertThat(interface.name, StartsWith(int_settings['interface'])) self.assertNotEqual(int_settings['interface'], interface.name) def test__disambiguates_IPv4_interface_with_ipv4_suffix(self): cluster = factory.make_NodeGroup() existing_interface = factory.make_NodeGroupInterface( cluster, network=factory.make_ipv4_network()) int_settings = factory.get_interface_fields() del int_settings['name'] int_settings['interface'] = existing_interface.name form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance(cluster)) self.assertTrue(form.is_valid()) interface = form.save() self.assertThat( interface.name, StartsWith('%s-ipv4-' % int_settings['interface'])) def test__disambiguates_IPv6_interface_with_ipv6_suffix(self): cluster = factory.make_NodeGroup() existing_interface = factory.make_NodeGroupInterface(cluster) int_settings = factory.get_interface_fields( network=factory.make_ipv6_network(slash=64)) del int_settings['name'] int_settings['interface'] = existing_interface.name form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance(cluster)) if 'name' in form.data: del form.data['name'] self.assertTrue(form.is_valid(), form._errors) interface = form.save() self.assertThat( interface.name, StartsWith('%s-ipv6-' % int_settings['interface'])) def test__requires_netmask_on_managed_IPv4_interface(self): network = factory.make_ipv4_network() int_settings = factory.get_interface_fields( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) del int_settings['subnet_mask'] form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertFalse(form.is_valid()) def test__lets_netmask_default_to_64_bits_on_IPv6(self): network = factory.make_ipv6_network() int_settings = factory.get_interface_fields( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) form.data.update({'subnet_mask': ""}) self.assertTrue(form.is_valid()) interface = form.save() self.assertEqual( IPAddress('ffff:ffff:ffff:ffff::'), IPAddress(interface.subnet_mask)) def test__accepts_netmasks_other_than_64_bits_on_IPv6(self): netmask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff8' network = factory.make_ipv6_network(slash=125) int_settings = factory.get_interface_fields( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, netmask=netmask) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertTrue(form.is_valid()) interface = form.save() self.assertEqual( IPAddress(netmask), IPAddress(interface.subnet_mask)) def test__rejects_126_bit_netmask_on_IPv6(self): netmask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc' network = factory.make_ipv6_network(slash=126) int_settings = factory.get_interface_fields( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, netmask=netmask) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertFalse(form.is_valid()) def test__rejects_short_netmask_on_IPv4(self): slash = random.randint(30, 32) if slash == 
30: netmask = '255.255.255.252' elif slash == 31: netmask = '255.255.255.254' else: netmask = '255.255.255.255' network = factory.make_ipv4_network(slash=slash) int_settings = factory.get_interface_fields( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, netmask=netmask) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) self.assertFalse(form.is_valid()) def test_validates_new_static_ip_ranges(self): network = IPNetwork("10.1.0.0/24") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network) [interface] = nodegroup.get_managed_interfaces() StaticIPAddress.objects.allocate_new( interface.network, interface.static_ip_range_low, interface.static_ip_range_high, interface.ip_range_low, interface.ip_range_high) form = NodeGroupInterfaceForm( data={'static_ip_range_low': '', 'static_ip_range_high': ''}, instance=interface) self.assertFalse(form.is_valid()) self.assertEqual( [ERROR_MESSAGE_STATIC_RANGE_IN_USE], form._errors['static_ip_range_low']) self.assertEqual( [ERROR_MESSAGE_STATIC_RANGE_IN_USE], form._errors['static_ip_range_high']) def test_rejects_ipv4_dynamic_ranges_across_multiple_slash_16s(self): # Even if a dynamic range is < 65536 addresses, it can't cross # two /16 networks. network = IPNetwork("10.1.0.0/8") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network, static_ip_range_low=None, static_ip_range_high=None) [interface] = nodegroup.get_managed_interfaces() form = NodeGroupInterfaceForm( data={ 'ip_range_low': '10.1.255.255', 'ip_range_high': '10.2.0.1', }, instance=interface) self.assertFalse(form.is_valid()) self.assertEqual( [ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S], form._errors['ip_range_low']) self.assertEqual( [ERROR_MESSAGE_DYNAMIC_RANGE_SPANS_SLASH_16S], form._errors['ip_range_high']) def test_allows_sane_ipv4_dynamic_range_size(self): network = IPNetwork("10.1.0.0/8") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network, static_ip_range_low=None, static_ip_range_high=None) [interface] = nodegroup.get_managed_interfaces() form = NodeGroupInterfaceForm( data={ 'ip_range_low': '10.0.0.1', 'ip_range_high': '10.0.1.255', }, instance=interface) self.assertTrue(form.is_valid(), form.errors) def test_rejects_missing_ip_range_high(self): network = IPNetwork("10.0.0.0/8") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network, static_ip_range_low=None, static_ip_range_high=None) [interface] = nodegroup.get_managed_interfaces() data = {'ip_range_low': '10.0.0.1'} form = NodeGroupInterfaceForm( data=data, instance=interface) self.assertFalse(form.is_valid()) def test_rejects_missing_ip_range_low(self): network = IPNetwork("10.2.0.0/8") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network, static_ip_range_low=None, static_ip_range_high=None) [interface] = nodegroup.get_managed_interfaces() data = { 'ip_range_low': '', 'ip_range_high': '10.0.1.255' } form = NodeGroupInterfaceForm( data=data, instance=interface) self.assertFalse(form.is_valid()) def test_allows_any_size_ipv6_dynamic_range(self): network = factory.make_ipv6_network(slash=64) nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED,
management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network) [interface] = nodegroup.get_managed_interfaces() form = NodeGroupInterfaceForm( data={ 'ip_range_low': IPAddress(network.first).format(), 'ip_range_high': IPAddress(network.last).format(), 'static_ip_range_low': '', 'static_ip_range_high': '', }, instance=interface) self.assertTrue(form.is_valid(), form._errors) def test_calls_get_duplicate_fqdns_when_appropriate(self): # Check for duplicate FQDNs if the NodeGroupInterface has a # NodeGroup and is managing DNS. int_settings = factory.get_interface_fields( management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) mock = self.patch(form, "get_duplicate_fqdns") self.assertTrue(form.is_valid(), form.errors) self.assertThat(mock, MockCalledOnceWith()) def test_reports_error_if_fqdns_duplicated(self): int_settings = factory.get_interface_fields( management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) form = NodeGroupInterfaceForm( data=int_settings, instance=make_ngi_instance()) mock = self.patch(form, "get_duplicate_fqdns") hostnames = [ factory.make_hostname("duplicate") for _ in range(0, 3)] mock.return_value = hostnames self.assertFalse(form.is_valid()) message = "Enabling DNS management creates duplicate FQDN(s): %s." % ( ", ".join(set(hostnames))) self.assertEqual( {'management': [message]}, form.errors) def test_reports_ip_outside_network(self): network = IPNetwork('192.168.0.3/24') ip_outside_network = '192.168.2.1' checked_fields = [ 'router_ip', 'ip_range_low', 'ip_range_high', 'static_ip_range_low', 'static_ip_range_high', ] for field in checked_fields: nodegroup = factory.make_NodeGroup( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() form = NodeGroupInterfaceForm( data={field: ip_outside_network}, instance=interface) message = "%s not in the %s network" % ( ip_outside_network, '192.168.0.0/24', ) self.assertFalse(form.is_valid()) self.assertThat(form.errors[field], Contains(message)) def test_reports_invalid_ip(self): network = IPNetwork('192.168.0.3/24') invalid_ip = '192.168.0.9/24' checked_fields = [ 'router_ip', 'ip_range_low', 'ip_range_high', 'static_ip_range_low', 'static_ip_range_high', ] for field in checked_fields: nodegroup = factory.make_NodeGroup( network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) [interface] = nodegroup.get_managed_interfaces() form = NodeGroupInterfaceForm( data={field: invalid_ip}, instance=interface) if field == 'router_ip': message = "%s (%s) is not a valid address" % ( field, invalid_ip, ) else: message = "Enter a valid IPv4 or IPv6 address." self.assertFalse(form.is_valid()) self.assertThat(form.errors[field], Contains(message)) def test_identifies_duplicate_fqdns_in_nodegroup(self): # Don't allow DNS management to be enabled when it would # cause more than one node on the nodegroup to have the # same FQDN. 
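# --- Editor's sketch (not part of the test suite): the kind of duplicate
# --- detection get_duplicate_fqdns() is being tested for. This is an
# --- illustrative reimplementation, not the form's actual code.
from collections import Counter

def sketch_duplicate_fqdns(fqdns):
    """Return the set of FQDNs that occur more than once."""
    return set(
        fqdn for fqdn, count in Counter(fqdns).items() if count > 1)

assert sketch_duplicate_fqdns(
    ['a.maas', 'b.maas', 'a.maas']) == set(['a.maas'])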
nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) base_hostname = factory.make_hostname("host") full_hostnames = [ "%s.%s" % (base_hostname, factory.make_hostname("domain")) for _ in range(0, 2)] for hostname in full_hostnames: factory.make_Node(hostname=hostname, nodegroup=nodegroup) [interface] = nodegroup.get_managed_interfaces() data = {"management": NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS} form = NodeGroupInterfaceForm(data=data, instance=interface) duplicates = form.get_duplicate_fqdns() expected_duplicates = set(["%s.%s" % (base_hostname, nodegroup.name)]) self.assertEqual(expected_duplicates, duplicates) def test_identifies_duplicate_fqdns_across_nodegroups(self): # Don't allow DNS management to be enabled when it would # cause a node in this nodegroup to have the same FQDN # as a node in another nodegroup. conflicting_domain = factory.make_hostname("conflicting-domain") nodegroup_a = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, name=conflicting_domain) conflicting_hostname = factory.make_hostname("conflicting-hostname") factory.make_Node( hostname="%s.%s" % (conflicting_hostname, conflicting_domain), nodegroup=nodegroup_a) nodegroup_b = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, name=conflicting_domain) factory.make_Node( hostname="%s.%s" % ( conflicting_hostname, factory.make_hostname("other-domain")), nodegroup=nodegroup_b) [interface] = nodegroup_b.get_managed_interfaces() data = {"management": NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS} form = NodeGroupInterfaceForm(data=data, instance=interface) duplicates = form.get_duplicate_fqdns() expected_duplicates = set( ["%s.%s" % (conflicting_hostname, conflicting_domain)]) self.assertEqual(expected_duplicates, duplicates) def test_creates_subnet_for_interface(self): int_settings = factory.get_interface_fields() int_settings['interface'] = 'eth0:1' interface = make_ngi_instance() form = NodeGroupInterfaceForm(data=int_settings, instance=interface) self.assertTrue(form.is_valid()) ngi = form.save() self.assertIsNotNone(ngi.subnet) def test_updates_subnet_cidr_and_name_if_subnet_mask_changed(self): network = factory._make_random_network(slash=24) nodegroup = factory.make_NodeGroup() subnet = factory.make_Subnet(name=unicode(network.cidr), cidr=network) ngi = factory.make_NodeGroupInterface(nodegroup, subnet=subnet) # Update the network from a /24 to a /16 form = NodeGroupInterfaceForm( data=dict(subnet_mask='255.255.0.0'), instance=ngi) self.assertTrue(form.is_valid()) ngi = form.save() new_network = IPNetwork(unicode(network.ip) + "/16") self.assertThat(ngi.network, Equals(new_network.cidr)) self.assertThat(ngi.subnet.name, Equals(unicode(new_network.cidr))) def test_updating_cidr_does_not_create_new_subnet(self): network = factory._make_random_network(slash=24) nodegroup = factory.make_NodeGroup() subnet = factory.make_Subnet(cidr=network) ngi = factory.make_NodeGroupInterface(nodegroup, subnet=subnet) # Update the network from a /24 to a /16 form = NodeGroupInterfaceForm( data=dict(subnet_mask='255.255.0.0'), instance=ngi) self.assertTrue(form.is_valid()) form.save() from maasserver.models import Subnet
self.assertThat(Subnet.objects.count(), Equals(1)) def test_multiple_subnets_on_single_interface_uses_existing_vlan(self): ng = factory.make_NodeGroup() ngi1 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.0.1', subnet_mask='255.255.255.0'), instance=ngi1) self.assertThat(form.is_valid(), Equals(True)) ngi1 = form.save() self.assertIsNotNone(ngi1) ngi2 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.1.1', subnet_mask='255.255.255.0'), instance=ngi2) self.assertThat(form.is_valid(), Equals(True)) ngi2 = form.save() self.assertIsNotNone(ngi2) self.assertThat(VLAN.objects.all().count(), Equals(1)) self.assertThat(ngi1.vlan, Equals(ngi2.vlan)) def test_subnet_vlan_creation_uses_default_fabric_if_empty(self): ng = factory.make_NodeGroup() ngi1 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.0.1', subnet_mask='255.255.255.0'), instance=ngi1) self.assertThat(form.is_valid(), Equals(True)) ngi1 = form.save() self.assertIsNotNone(ngi1) self.assertThat(Fabric.objects.all().count(), Equals(1)) self.assertThat(ngi1.vlan.fabric.id, Equals(0)) def test_creates_new_fabric_if_alt_subnet_exists_in_default_fabric(self): ng = factory.make_NodeGroup() ngi1 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.0.1', subnet_mask='255.255.255.0'), instance=ngi1) self.assertThat(form.is_valid(), Equals(True)) ngi1 = form.save() self.assertIsNotNone(ngi1) self.assertThat(Fabric.objects.all().count(), Equals(1)) self.assertThat(ngi1.vlan.fabric.id, Equals(0)) ngi2 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth1', ip='192.168.1.1', subnet_mask='255.255.255.0'), instance=ngi2) self.assertThat(form.is_valid(), Equals(True)) ngi2 = form.save() self.assertIsNotNone(ngi2) self.assertThat(Fabric.objects.all().count(), Equals(2)) # The first NodeGroupInterface we saved should be using the default # Fabric self.assertThat(ngi1.vlan.fabric.id, Equals(0)) self.assertIsNotNone(ngi2.vlan.fabric) def test_creates_vlan_interface_if_interface_type_and_parent_known(self): ng = factory.make_NodeGroup() ngi1 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.0.1', subnet_mask='255.255.255.0'), instance=ngi1) self.assertThat(form.is_valid(), Equals(True)) ngi1 = form.save() self.assertIsNotNone(ngi1) self.assertThat(Fabric.objects.all().count(), Equals(1)) self.assertThat(ngi1.vlan.fabric.id, Equals(0)) ngi2 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='vlan12', ip='192.168.1.1', subnet_mask='255.255.255.0', parent='eth0', type='ethernet.vlan'), instance=ngi2) self.assertThat(form.is_valid(), Equals(True)) ngi2 = form.save() self.assertIsNotNone(ngi2) self.assertThat(Fabric.objects.all().count(), Equals(1)) self.assertThat(VLAN.objects.filter(vid=12).count(), Equals(1)) def test_creates_vlan_plus_new_fabric_if_no_parent_untagged_exists(self): ng = factory.make_NodeGroup() ngi1 = NodeGroupInterface(nodegroup=ng) form = NodeGroupInterfaceForm(data=dict( interface='eth0', ip='192.168.0.1', subnet_mask='255.255.255.0'), instance=ngi1) self.assertThat(form.is_valid(), Equals(True)) ngi1 = form.save() self.assertIsNotNone(ngi1) self.assertThat(Fabric.objects.all().count(), Equals(1)) self.assertThat(ngi1.vlan.fabric.id, Equals(0)) ngi2 = NodeGroupInterface(nodegroup=ng) form = 
NodeGroupInterfaceForm(data=dict( interface='eth0.12', ip='192.168.1.1', subnet_mask='255.255.255.0', type='ethernet.vlan'), instance=ngi2) self.assertThat(form.is_valid(), Equals(True)) ngi2 = form.save() self.assertIsNotNone(ngi2) self.assertThat(Fabric.objects.all().count(), Equals(2)) # Check that VLAN 12 was created on the non-default VLAN. self.assertThat(VLAN.objects.filter( vid=12, fabric__id__gt=0).count(), Equals(1)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_nodewithmacaddresses.py0000644000000000000000000001776713056115004026015 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `NodeWithMACAddressesForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.http import QueryDict from maasserver.enum import INTERFACE_TYPE from maasserver.forms import NodeWithMACAddressesForm from maasserver.models import NodeGroup from maasserver.testing.architecture import ( make_usable_architecture, patch_usable_architectures, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPNetwork from testtools.matchers import Contains class NodeWithMACAddressesFormTest(MAASServerTestCase): def get_QueryDict(self, params): query_dict = QueryDict('', mutable=True) for k, v in params.items(): if isinstance(v, list): query_dict.setlist(k, v) else: query_dict[k] = v return query_dict def make_params(self, mac_addresses=None, architecture=None, hostname=None, nodegroup=None): if mac_addresses is None: mac_addresses = [factory.make_mac_address()] if architecture is None: architecture = factory.make_name('arch') if hostname is None: hostname = factory.make_name('hostname') params = { 'mac_addresses': mac_addresses, 'architecture': architecture, 'hostname': hostname, } if nodegroup is not None: params['nodegroup'] = nodegroup # Make sure that the architecture parameter is acceptable. patch_usable_architectures(self, [architecture]) return self.get_QueryDict(params) def test__valid(self): architecture = make_usable_architecture(self) form = NodeWithMACAddressesForm( data=self.make_params( mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], architecture=architecture)) self.assertTrue(form.is_valid(), form.errors) self.assertEqual( ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], form.cleaned_data['mac_addresses']) self.assertEqual(architecture, form.cleaned_data['architecture']) def test__simple_invalid(self): # If the form only has one (invalid) MAC address field to validate, # the error message in form.errors['mac_addresses'] is the # message from the field's validation error. form = NodeWithMACAddressesForm( data=self.make_params(mac_addresses=['invalid'])) self.assertFalse(form.is_valid()) self.assertEqual(['mac_addresses'], list(form.errors)) self.assertEqual( ["'invalid' is not a valid MAC address."], form.errors['mac_addresses']) def test__multiple_invalid(self): # If the form has multiple MAC address fields to validate, # if one or more fields are invalid, a single error message is # present in form.errors['mac_addresses'] after validation. 
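# --- Editor's sketch (not part of the test suite): building the single
# --- merged message asserted below from the per-address errors. Whether
# --- the form merges exactly this way is an assumption; only the expected
# --- output format is taken from the test itself.
def sketch_merge_mac_errors(messages):
    """Collapse per-address messages into one combined error string."""
    return u"One or more MAC addresses is invalid. (%s)" % (
        u" \u2014 ".join(messages))

sketch_merged = sketch_merge_mac_errors([
    u"'invalid_1' is not a valid MAC address.",
    u"'invalid_2' is not a valid MAC address.",
])
assert sketch_merged.endswith(u"address.)")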
form = NodeWithMACAddressesForm( data=self.make_params(mac_addresses=['invalid_1', 'invalid_2'])) self.assertFalse(form.is_valid()) self.assertEqual(['mac_addresses'], list(form.errors)) self.assertEqual( [ "One or more MAC addresses is invalid. " "('invalid_1' is not a valid MAC address. \u2014" " 'invalid_2' is not a valid MAC address.)" ], form.errors['mac_addresses']) def test__mac_in_use_on_current_node_passes(self): node = factory.make_Node_with_Interface_on_Subnet( address='aa:bb:cc:dd:ee:ff') architecture = make_usable_architecture(self) form = NodeWithMACAddressesForm( data=self.make_params( mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], architecture=architecture), instance=node) self.assertTrue(form.is_valid(), dict(form.errors)) self.assertEqual( ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], form.cleaned_data['mac_addresses']) self.assertEqual(architecture, form.cleaned_data['architecture']) def test__with_mac_in_use_on_another_node_fails(self): factory.make_Node_with_Interface_on_Subnet(address='aa:bb:cc:dd:ee:ff') architecture = make_usable_architecture(self) node = factory.make_Node_with_Interface_on_Subnet() form = NodeWithMACAddressesForm( data=self.make_params( mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], architecture=architecture), instance=node) self.assertFalse(form.is_valid(), dict(form.errors)) self.assertThat(dict(form.errors), Contains('mac_addresses')) def test__with_mac_in_use_on_unknown_interface_passes(self): factory.make_Interface( INTERFACE_TYPE.UNKNOWN, mac_address='aa:bb:cc:dd:ee:ff') architecture = make_usable_architecture(self) form = NodeWithMACAddressesForm( data=self.make_params( mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], architecture=architecture)) self.assertTrue(form.is_valid(), dict(form.errors)) self.assertEqual( ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'], form.cleaned_data['mac_addresses']) self.assertEqual(architecture, form.cleaned_data['architecture']) def test__empty(self): # Empty values in the list of MAC addresses are simply ignored. form = NodeWithMACAddressesForm( data=self.make_params( mac_addresses=[factory.make_mac_address(), ''])) self.assertTrue(form.is_valid()) def test__save(self): macs = ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'] form = NodeWithMACAddressesForm( data=self.make_params(mac_addresses=macs)) node = form.save() self.assertIsNotNone(node.id) # The node is persisted. self.assertItemsEqual( macs, [nic.mac_address for nic in node.interface_set.all()]) def test_includes_nodegroup_field_for_new_node(self): self.assertIn( 'nodegroup', NodeWithMACAddressesForm(data=self.make_params()).fields) def test_does_not_include_nodegroup_field_for_existing_node(self): params = self.make_params() node = factory.make_Node() self.assertNotIn( 'nodegroup', NodeWithMACAddressesForm(data=params, instance=node).fields) def test_sets_nodegroup_to_master_by_default(self): self.assertEqual( NodeGroup.objects.ensure_master(), NodeWithMACAddressesForm(data=self.make_params()).save().nodegroup) def test_leaves_nodegroup_alone_if_unset_on_existing_node(self): # Selecting a node group for a node is only supported on new # nodes. You can't change it later.
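# --- Editor's sketch (not part of the test suite): a common Django pattern
# --- for a creation-only field, matching the behaviour tested here. That
# --- NodeWithMACAddressesForm uses exactly this mechanism is an assumption.
from django import forms as django_forms

class SketchCreateOnlyForm(django_forms.Form):
    nodegroup = django_forms.CharField(required=False)

    def __init__(self, *args, **kwargs):
        instance = kwargs.pop('instance', None)
        super(SketchCreateOnlyForm, self).__init__(*args, **kwargs)
        if instance is not None:
            # Editing an existing node: drop the creation-only field, so
            # any submitted value for it is simply ignored.
            del self.fields['nodegroup']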
original_nodegroup = factory.make_NodeGroup() node = factory.make_Node(nodegroup=original_nodegroup) factory.make_NodeGroup(network=IPNetwork("192.168.1.0/24")) form = NodeWithMACAddressesForm( data=self.make_params(nodegroup='192.168.1.0'), instance=node) form.save() self.assertEqual(original_nodegroup, reload_object(node).nodegroup) def test_form_without_hostname_generates_hostname(self): form = NodeWithMACAddressesForm(data=self.make_params(hostname='')) node = form.save() self.assertTrue(len(node.hostname) > 0) def test_form_with_ip_based_hostname_generates_hostname(self): ip_based_hostname = '192-168-12-10.domain' form = NodeWithMACAddressesForm( data=self.make_params(hostname=ip_based_hostname)) node = form.save() self.assertNotEqual(ip_based_hostname, node.hostname) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_partition.py0000644000000000000000000001602513056115004023610 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for all forms that are used with `Partition`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import uuid from maasserver.enum import ( FILESYSTEM_FORMAT_TYPE_CHOICES, FILESYSTEM_TYPE, ) from maasserver.forms import ( AddPartitionForm, FormatPartitionForm, ) from maasserver.models import Filesystem from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.partition import PARTITION_ALIGNMENT_SIZE from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.converters import round_size_to_nearest_block from maasserver.utils.orm import get_one class TestAddPartitionForm(MAASServerTestCase): def test_requires_fields(self): form = AddPartitionForm( block_device=factory.make_BlockDevice(), data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['size'], form.errors.keys()) def test_is_not_valid_if_size_less_than_min_size(self): block_device = factory.make_PhysicalBlockDevice() data = { 'size': MIN_BLOCK_DEVICE_SIZE - 1, } form = AddPartitionForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because size is below the minimum.") self.assertEquals({ 'size': [ "Ensure this value is greater than or equal to %s." % ( MIN_BLOCK_DEVICE_SIZE), ]}, form._errors) def test_is_not_valid_if_size_greater_than_block_size(self): block_device = factory.make_PhysicalBlockDevice() data = { 'size': block_device.size + 1, } form = AddPartitionForm(block_device, data=data) self.assertFalse( form.is_valid(), "Should be invalid because size is too large.") self.assertEquals({ 'size': [ "Ensure this value is less than or equal to %s."
% ( block_device.size), ]}, form._errors) def test_is_valid_if_size_a_string(self): block_device = factory.make_PhysicalBlockDevice() k_size = (MIN_BLOCK_DEVICE_SIZE / 1000) + 1 size = "%sk" % k_size data = { 'size': size, } form = AddPartitionForm(block_device, data=data) self.assertTrue( form.is_valid(), "Should be valid because size is large enough and a string.") def test_size_rounded_down_and_placed_on_alignment_boundary(self): block_size = 4096 block_device = factory.make_PhysicalBlockDevice(block_size=block_size) k_size = (MIN_BLOCK_DEVICE_SIZE / 1000) + 1 size = "%sk" % k_size rounded_size = round_size_to_nearest_block( k_size * 1000, PARTITION_ALIGNMENT_SIZE, False) data = { 'size': size, } form = AddPartitionForm(block_device, data=data) self.assertTrue(form.is_valid()) partition = form.save() self.assertEquals(rounded_size, partition.size) def test_uuid_is_set_on_partition(self): block_device = factory.make_PhysicalBlockDevice() part_uuid = "%s" % uuid.uuid4() data = { 'size': MIN_BLOCK_DEVICE_SIZE, 'uuid': part_uuid, } form = AddPartitionForm(block_device, data=data) self.assertTrue(form.is_valid()) partition = form.save() self.assertEquals(part_uuid, partition.uuid) def test_bootable_is_set_on_partition(self): block_device = factory.make_PhysicalBlockDevice() data = { 'size': MIN_BLOCK_DEVICE_SIZE, 'bootable': True, } form = AddPartitionForm(block_device, data=data) self.assertTrue(form.is_valid()) partition = form.save() self.assertTrue(partition.bootable, "Partition should be bootable.") class TestFormatPartitionForm(MAASServerTestCase): def test_requires_fields(self): form = FormatPartitionForm( partition=factory.make_Partition(), data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['fstype'], form.errors.keys()) def test_is_not_valid_if_invalid_uuid(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) partition = factory.make_Partition() data = { 'fstype': fstype, 'uuid': factory.make_string(size=32), } form = FormatPartitionForm(partition, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_is_not_valid_if_invalid_format_fstype(self): partition = factory.make_Partition() data = { 'fstype': FILESYSTEM_TYPE.LVM_PV, } form = FormatPartitionForm(partition, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid fstype.") self.assertEquals({ 'fstype': [ "Select a valid choice. lvm-pv is not one of the " "available choices."
], }, form._errors) def test_creates_filesystem(self): fsuuid = "%s" % uuid.uuid4() fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) partition = factory.make_Partition() data = { 'uuid': fsuuid, 'fstype': fstype, } form = FormatPartitionForm(partition, data=data) self.assertTrue(form.is_valid(), form._errors) form.save() filesystem = get_one( Filesystem.objects.filter(partition=partition)) self.assertIsNotNone(filesystem) self.assertEquals(fstype, filesystem.fstype) self.assertEquals(fsuuid, filesystem.uuid) def test_deletes_old_filesystem_and_creates_new_one(self): fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) partition = factory.make_Partition() prev_filesystem = factory.make_Filesystem(partition=partition) data = { 'fstype': fstype, } form = FormatPartitionForm(partition, data=data) self.assertTrue(form.is_valid(), form._errors) form.save() self.assertEquals( 1, Filesystem.objects.filter(partition=partition).count(), "Should only be one filesystem that exists for partition.") self.assertIsNone(reload_object(prev_filesystem)) filesystem = get_one( Filesystem.objects.filter(partition=partition)) self.assertIsNotNone(filesystem) self.assertEquals(fstype, filesystem.fstype) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_raid.py0000644000000000000000000004460513056115004022523 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for all forms that are used with `RAID`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from uuid import uuid4 from maasserver.enum import ( FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, ) from maasserver.forms import ( CreateRaidForm, UpdateRaidForm, ) from maasserver.models.filesystemgroup import RAID from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase def _make_interesting_RAID( node, level=FILESYSTEM_GROUP_TYPE.RAID_6, num_devices=10, num_partitions=10, num_spare_devices=2, num_spare_partitions=2): """Returns a RAID that is interesting for our tests.""" size = 1000 ** 4 # A Terabyte. 
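# --- Editor's note (not part of the test suite): the RAID 6 capacity
# --- arithmetic these tests rely on. With N equal-sized members, two
# --- members' worth of space goes to parity, so the usable size is
# --- (N - 2) * member_size; the 10-member arrays asserted below therefore
# --- yield 8 * member_size.
def sketch_raid6_size(num_members, member_size):
    """Usable capacity of a RAID 6 array of equal-sized members."""
    assert num_members >= 4, "RAID 6 needs at least four members"
    return (num_members - 2) * member_size

assert sketch_raid6_size(10, 1000 ** 4) == 8 * (1000 ** 4)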
block_devices = [factory.make_BlockDevice(node=node, size=size) for _ in range(num_devices)] partitions = [factory.make_Partition(node=node, block_device_size=size) for _ in range(num_partitions)] spare_devices = [factory.make_BlockDevice(node=node, size=size) for _ in range(num_spare_devices)] spare_partitions = [ factory.make_Partition(node=node, block_device_size=size) for _ in range(num_spare_partitions) ] return RAID.objects.create_raid( name='md%d' % random.randint(1, 1000), level=level, uuid=uuid4(), block_devices=block_devices, partitions=partitions, spare_devices=spare_devices, spare_partitions=spare_partitions ) class TestCreateRaidForm(MAASServerTestCase): def test_requires_fields(self): node = factory.make_Node() form = CreateRaidForm(node=node, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertDictContainsSubset( { 'level': ['This field is required.'], }, form.errors) def test_choices_are_being_populated_correctly(self): node = factory.make_Node(with_boot_disk=False) bds = [ factory.make_PhysicalBlockDevice(node=node, size=10 * 1000 ** 4) for _ in range(10) ] for bd in bds[5:]: factory.make_PartitionTable(block_device=bd) block_devices_choices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] + [ bd.name for bd in bds if bd.get_partitiontable() is None ] partitions = [ bd.get_partitiontable().add_partition() for bd in bds[5:] ] partitions_choices = [ part.id for part in partitions ] + [ part.name for part in partitions ] form = CreateRaidForm(node=node, data={}) self.assertItemsEqual( block_devices_choices, [k for (k, v) in form.fields['block_devices'].choices]) self.assertItemsEqual( partitions_choices, [k for (k, v) in form.fields['partitions'].choices]) self.assertItemsEqual( block_devices_choices, [k for (k, v) in form.fields['spare_devices'].choices]) self.assertItemsEqual( partitions_choices, [k for (k, v) in form.fields['spare_partitions'].choices]) def test_raid_creation_on_save(self): node = factory.make_Node() device_size = 10 * 1000 ** 4 bds = [ factory.make_PhysicalBlockDevice(node=node, size=device_size) for _ in range(10) ] for bd in bds[5:]: factory.make_PartitionTable(block_device=bd) block_devices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] partition_objs = [ bd.get_partitiontable().add_partition() for bd in bds[5:] ] partitions = [partition.id for partition in partition_objs] form = CreateRaidForm(node=node, data={ 'name': 'md1', 'level': FILESYSTEM_GROUP_TYPE.RAID_6, 'block_devices': block_devices, 'partitions': partitions, }) self.assertTrue(form.is_valid(), form.errors) raid = form.save() self.assertEqual('md1', raid.name) self.assertEqual(8 * partition_objs[0].size, raid.get_size()) self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_6, raid.group_type) self.assertItemsEqual( block_devices, [fs.block_device.id for fs in raid.filesystems.exclude(block_device=None)]) self.assertItemsEqual( partitions, [fs.partition.id for fs in raid.filesystems.exclude(partition=None)]) def test_raid_creation_with_names(self): node = factory.make_Node() device_size = 10 * 1000 ** 4 bds = [ factory.make_PhysicalBlockDevice(node=node, size=device_size) for _ in range(10) ] for bd in bds[5:]: factory.make_PartitionTable(block_device=bd) block_devices_ids = [ bd.id for bd in bds if bd.get_partitiontable() is None ] block_device_names = [ bd.name for bd in bds if bd.get_partitiontable() is None ] partitions = [ bd.get_partitiontable().add_partition() for bd in bds[5:] ] partition_ids = [ part.id for part in partitions ] partition_names = [ 
part.name for part in partitions ] form = CreateRaidForm(node=node, data={ 'name': 'md1', 'level': FILESYSTEM_GROUP_TYPE.RAID_6, 'block_devices': block_device_names, 'partitions': partition_names, }) self.assertTrue(form.is_valid(), form.errors) raid = form.save() self.assertEqual('md1', raid.name) self.assertEqual(8 * partitions[0].size, raid.get_size()) self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_6, raid.group_type) self.assertItemsEqual( block_devices_ids, [ fs.block_device.id for fs in raid.filesystems.exclude(block_device=None) ]) self.assertItemsEqual( partition_ids, [ fs.partition.id for fs in raid.filesystems.exclude(partition=None) ]) def test_raid_creation_on_boot_disk(self): node = factory.make_Node(with_boot_disk=False) bds = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(10) ] for bd in bds[5:]: factory.make_PartitionTable(block_device=bd) block_devices = [ bd.id for bd in bds if bd.get_partitiontable() is None ] partitions = [ bd.get_partitiontable().add_partition().id for bd in bds[5:] ] form = CreateRaidForm(node=node, data={ 'name': 'md1', 'level': FILESYSTEM_GROUP_TYPE.RAID_6, 'block_devices': block_devices, 'partitions': partitions, }) self.assertTrue(form.is_valid(), form.errors) raid = form.save() self.assertEqual('md1', raid.name) self.assertEqual(FILESYSTEM_GROUP_TYPE.RAID_6, raid.group_type) block_devices = [ bd.id for bd in bds if bd.get_partitiontable() is None and not bd.is_boot_disk() ] self.assertItemsEqual( block_devices, [fs.block_device.id for fs in raid.filesystems.exclude(block_device=None)]) partitions = [ bd.get_partitiontable().partitions.first().id for bd in [bds[0]] + bds[5:] ] self.assertItemsEqual( partitions, [fs.partition.id for fs in raid.filesystems.exclude(partition=None)]) def test_raid_creation_without_storage_fails(self): node = factory.make_Node() for level in [ FILESYSTEM_GROUP_TYPE.RAID_0, FILESYSTEM_GROUP_TYPE.RAID_1, FILESYSTEM_GROUP_TYPE.RAID_5, FILESYSTEM_GROUP_TYPE.RAID_6, FILESYSTEM_GROUP_TYPE.RAID_10, ]: form = CreateRaidForm(node=node, data={ 'name': 'md1', 'level': level, 'block_devices': [], 'partitions': [], }) self.assertFalse(form.is_valid()) self.assertDictContainsSubset( { u'__all__': ['At least one block device or partition must ' 'be added to the array.'] }, form.errors) class TestUpdateRaidForm(MAASServerTestCase): # Add devices and partitions def test_add_valid_blockdevice(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new blockdevices to the node. bd_ids = [factory.make_BlockDevice(node=raid.get_node()).id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_block_devices': bd_ids}) self.assertTrue(form.is_valid(), form.errors) def test_add_valid_blockdevice_by_name(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new blockdevices to the node. 
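        # As with the id-based test above, UpdateRaidForm also resolves
        # devices given by name; both spellings refer to the same
        # underlying block devices.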
bd_names = [ factory.make_BlockDevice(node=raid.get_node()).name for _ in range(5) ] form = UpdateRaidForm(raid, data={'add_block_devices': bd_names}) self.assertTrue(form.is_valid(), form.errors) def test_add_valid_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6, node=node) raid = RAID.objects.get(id=raid.id) form = UpdateRaidForm(raid, data={'add_block_devices': [boot_disk.id]}) self.assertTrue(form.is_valid(), form.errors) raid = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() self.assertEquals( boot_partition.get_effective_filesystem().filesystem_group.id, raid.id) def test_add_valid_partition(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new partitions to the node. part_ids = [factory.make_Partition(node=raid.get_node()).id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_partitions': part_ids}) self.assertTrue(form.is_valid(), form.errors) def test_add_valid_spare_device(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new blockdevices to the node. bd_ids = [factory.make_BlockDevice(node=raid.get_node()).id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_spare_devices': bd_ids}) self.assertTrue(form.is_valid(), form.errors) def test_add_valid_spare_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6, node=node) raid = RAID.objects.get(id=raid.id) form = UpdateRaidForm(raid, data={'add_spare_devices': [boot_disk.id]}) self.assertTrue(form.is_valid(), form.errors) raid = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() self.assertEquals( boot_partition.get_effective_filesystem().filesystem_group.id, raid.id) def test_add_valid_spare_partition(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new partitions to the node. part_ids = [factory.make_Partition(node=raid.get_node()).id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_spare_partitions': part_ids}) self.assertTrue(form.is_valid(), form.errors) def test_add_invalid_blockdevice_fails(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new blockdevices to other nodes. bd_ids = [factory.make_BlockDevice().id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_block_devices': bd_ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('add_block_devices', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['add_block_devices'][0]) def test_add_invalid_spare_blockdevice_fails(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new blockdevices to other nodes. bd_ids = [factory.make_BlockDevice().id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_spare_devices': bd_ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('add_spare_devices', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['add_spare_devices'][0]) def test_add_invalid_partition_fails(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new partitions to other nodes. 
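        # make_Partition() without a node argument puts each partition on
        # a fresh node, which is what makes these choices invalid for this
        # RAID's node.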
part_ids = [factory.make_Partition().id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_partitions': part_ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('add_partitions', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['add_partitions'][0]) def test_add_invalid_spare_partition_fails(self): raid = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.RAID_6) # Add 5 new partitions to other nodes. part_ids = [factory.make_Partition().id for _ in range(5)] form = UpdateRaidForm(raid, data={'add_spare_partitions': part_ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('add_spare_partitions', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['add_spare_partitions'][0]) # Removal tests def test_remove_valid_blockdevice(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [ fs.block_device.id for fs in raid.filesystems.filter( fstype=FILESYSTEM_TYPE.RAID).exclude(block_device=None)[:2] ] # Select 2 items for removal form = UpdateRaidForm(raid, data={'remove_block_devices': ids}) self.assertTrue(form.is_valid(), form.errors) def test_remove_valid_partition(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [ fs.partition.id for fs in raid.filesystems.filter( fstype=FILESYSTEM_TYPE.RAID).exclude(partition=None)[:2] ] # Select 2 items for removal form = UpdateRaidForm(raid, data={'remove_partitions': ids}) self.assertTrue(form.is_valid(), form.errors) def test_remove_valid_spare_device(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [ fs.block_device.id for fs in raid.filesystems.filter( fstype=FILESYSTEM_TYPE.RAID_SPARE) .exclude(block_device=None)[:2] ] # Select 2 items for removal form = UpdateRaidForm(raid, data={'remove_block_devices': ids}) self.assertTrue(form.is_valid(), form.errors) def test_remove_valid_spare_partition(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [ fs.partition.id for fs in raid.filesystems.filter( fstype=FILESYSTEM_TYPE.RAID_SPARE).exclude(partition=None)[:2] ] # Select 2 items for removal form = UpdateRaidForm(raid, data={'remove_partitions': ids}) self.assertTrue(form.is_valid(), form.errors) def test_remove_invalid_blockdevice_fails(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [factory.make_BlockDevice().id for _ in range(2)] form = UpdateRaidForm(raid, data={'remove_block_devices': ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('remove_block_devices', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['remove_block_devices'][0]) def test_remove_invalid_spare_blockdevice_fails(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [factory.make_BlockDevice().id for _ in range(2)] form = UpdateRaidForm(raid, data={'remove_spare_devices': ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('remove_spare_devices', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['remove_spare_devices'][0]) def test_remove_invalid_partition_fails(self): raid = _make_interesting_RAID(node=factory.make_Node()) ids = [factory.make_Partition().id for _ in range(2)] form = UpdateRaidForm(raid, data={'remove_partitions': ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('remove_partitions', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['remove_partitions'][0]) def test_remove_invalid_spare_partition_fails(self): raid = 
_make_interesting_RAID(node=factory.make_Node()) ids = [factory.make_Partition().id for _ in range(2)] form = UpdateRaidForm(raid, data={'remove_spare_partitions': ids}) self.assertFalse(form.is_valid(), form.errors) self.assertIn('remove_spare_partitions', form.errors) self.assertIn( 'is not one of the available choices.', form.errors['remove_spare_partitions'][0]) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_settings.py0000644000000000000000000001002713056115004023433 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test forms settings.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django import forms from maasserver.forms import BootSourceSettingsForm from maasserver.forms_settings import ( CONFIG_ITEMS, get_config_doc, get_config_field, get_config_form, ) from maasserver.models import Config from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.forms import compose_invalid_choice_text class TestGetConfigField(MAASServerTestCase): def test_get_config_field_validates_config_name(self): config_name = factory.make_string() self.assertRaises( forms.ValidationError, get_config_field, config_name) def test_get_config_field_creates_field(self): field = get_config_field('maas_name') label = CONFIG_ITEMS['maas_name']['form_kwargs']['label'] self.assertEqual(label, field.label) class TestGetConfigForm(MAASServerTestCase): def test_get_config_form_returns_initialized_form(self): maas_name = factory.make_string() Config.objects.set_config('maas_name', maas_name) form = get_config_form('maas_name') # The form contains only one field. self.assertItemsEqual(['maas_name'], form.fields) # The form is populated with the value of the 'maas_name' # config item. self.assertEqual( {'maas_name': maas_name}, form.initial) class TestGetConfigDoc(MAASServerTestCase): def test_get_config_doc(self): doc = get_config_doc() # Just make sure that the doc looks okay. 
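        # get_config_doc() renders CONFIG_ITEMS into human-readable help,
        # so spotting one well-known key is a cheap smoke test; comparing
        # the full text would only duplicate the CONFIG_ITEMS table.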
        self.assertIn('maas_name', doc)


class TestSpecificConfigSettings(MAASServerTestCase):

    def test_commissioning_distro_series_config(self):
        field = get_config_field('commissioning_distro_series')
        self.assertEqual(
            compose_invalid_choice_text(
                'commissioning_distro_series', field.choices),
            field.error_messages['invalid_choice'])

    def test_upstream_dns_accepts_ip_list(self):
        field = get_config_field('upstream_dns')
        ips1 = [factory.make_ip_address() for _ in range(3)]
        ips2 = [factory.make_ip_address() for _ in range(3)]
        input = ' '.join(ips1) + ' ' + ','.join(ips2)
        self.assertEqual(' '.join(ips1 + ips2), field.clean(input))


class TestBootSourceSettingsForm(MAASServerTestCase):

    def setUp(self):
        super(TestBootSourceSettingsForm, self).setUp()
        self.form_data = {
            'boot_source_url': 'http://example.com/good',
            'boot_source_keyring': '/a/path'}

    def test_happy_with_good_data(self):
        form = BootSourceSettingsForm(data=self.form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(
            "http://example.com/good", form.cleaned_data['boot_source_url'])
        self.assertEqual("/a/path", form.cleaned_data['boot_source_keyring'])

    def test_unhappy_by_default(self):
        form = BootSourceSettingsForm()
        self.assertFalse(form.is_valid())

    def test_reject_leading_spaces_in_boot_source_url(self):
        # https://bugs.launchpad.net/maas/+bug/1499062
        self.form_data['boot_source_url'] = ' http://example.com/leadingspace'
        form = BootSourceSettingsForm(data=self.form_data)
        self.assertFalse(form.is_valid())

    def test_reject_non_url_in_boot_source_url(self):
        self.form_data['boot_source_url'] = 'not_a_URL'
        form = BootSourceSettingsForm(data=self.form_data)
        self.assertFalse(form.is_valid())

    def test_strips_boot_source_keyring(self):
        self.form_data['boot_source_keyring'] = ' /a/path '
        form = BootSourceSettingsForm(data=self.form_data)
        self.assertTrue(form.is_valid())
        self.assertEqual("/a/path", form.cleaned_data['boot_source_keyring'])
maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_space.py0000644000000000000000000000273713056115004022677 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for Space forms."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from maasserver.forms_space import SpaceForm
from maasserver.testing.factory import factory
from maasserver.testing.orm import reload_object
from maasserver.testing.testcase import MAASServerTestCase


class TestSpaceForm(MAASServerTestCase):

    def test__doesnt_require_name(self):
        form = SpaceForm({})
        self.assertTrue(form.is_valid(), form.errors)

    def test__creates_space(self):
        space_name = factory.make_name("space")
        form = SpaceForm({
            "name": space_name,
        })
        self.assertTrue(form.is_valid(), form.errors)
        space = form.save()
        self.assertEquals(space_name, space.get_name())

    def test__doesnt_require_name_on_update(self):
        space = factory.make_Space()
        form = SpaceForm(instance=space, data={})
        self.assertTrue(form.is_valid(), form.errors)

    def test__updates_space(self):
        new_name = factory.make_name("space")
        space = factory.make_Space()
        form = SpaceForm(instance=space, data={
            "name": new_name,
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        self.assertEquals(new_name, reload_object(space).name)
maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_subnet.py0000644000000000000000000002713113056115004023077 0ustar 00000000000000# Copyright 2015 Canonical Ltd.
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for Subnet forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.forms_subnet import SubnetForm from maasserver.models.fabric import Fabric from maasserver.models.space import Space from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from testtools.matchers import MatchesStructure class TestSubnetForm(MAASServerTestCase): def test__requires_cidr(self): form = SubnetForm({}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "cidr": ["This field is required."], }, form.errors) def test__creates_subnet(self): subnet_name = factory.make_name("subnet") vlan = factory.make_VLAN() space = factory.make_Space() network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) gateway_ip = factory.pick_ip_in_network(network) dns_servers = [] for _ in range(2): dns_servers.append( factory.pick_ip_in_network( network, but_not=[gateway_ip] + dns_servers)) form = SubnetForm({ "name": subnet_name, "vlan": vlan.id, "space": space.id, "cidr": cidr, "gateway_ip": gateway_ip, "dns_servers": ','.join(dns_servers), }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=subnet_name, vlan=vlan, space=space, cidr=cidr, gateway_ip=gateway_ip, dns_servers=dns_servers)) def test__creates_subnet_name_equal_to_cidr(self): vlan = factory.make_VLAN() space = factory.make_Space() network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "vlan": vlan.id, "space": space.id, "cidr": cidr, }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, vlan=vlan, space=space, cidr=cidr)) def test__creates_subnet_in_default_space(self): vlan = factory.make_VLAN() network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "vlan": vlan.id, "cidr": cidr, }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, vlan=vlan, cidr=cidr, space=Space.objects.get_default_space())) def test__creates_subnet_in_default_fabric_and_vlan(self): network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, cidr=cidr, vlan=Fabric.objects.get_default_fabric().get_default_vlan(), space=Space.objects.get_default_space())) def test__creates_subnet_in_default_vlan_in_fabric(self): fabric = factory.make_Fabric() network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "fabric": fabric.id, }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, cidr=cidr, vlan=fabric.get_default_vlan(), space=Space.objects.get_default_space())) def test__creates_subnet_in_default_fabric_with_vid(self): vlan = factory.make_VLAN(fabric=Fabric.objects.get_default_fabric()) network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "vid": vlan.vid, }) self.assertTrue(form.is_valid(), form.errors) subnet 
= form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, cidr=cidr, vlan=vlan, space=Space.objects.get_default_space())) def test__creates_subnet_in_fabric_with_vid(self): fabric = factory.make_Fabric() vlan = factory.make_VLAN(fabric=fabric) network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "fabric": fabric.id, "vid": vlan.vid, }) self.assertTrue(form.is_valid(), form.errors) subnet = form.save() self.assertThat( subnet, MatchesStructure.byEquality( name=cidr, cidr=cidr, vlan=vlan, space=Space.objects.get_default_space())) def test__error_for_unknown_vid_in_default_fabric(self): fabric = factory.make_Fabric() vlan = factory.make_VLAN(fabric=fabric) network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "vid": vlan.vid, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "vid": ["No VLAN with vid %s in default fabric." % vlan.vid] }, form.errors) def test__error_for_unknown_vid_in_fabric(self): fabric = factory.make_Fabric() vlan = factory.make_VLAN(fabric=Fabric.objects.get_default_fabric()) network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "fabric": fabric.id, "vid": vlan.vid, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "vid": ["No VLAN with vid %s in fabric %s." % (vlan.vid, fabric)] }, form.errors) def test__error_for_vlan_not_in_fabric(self): fabric = factory.make_Fabric() vlan = factory.make_VLAN(fabric=Fabric.objects.get_default_fabric()) network = factory.make_ip4_or_6_network() cidr = unicode(network.cidr) form = SubnetForm({ "cidr": cidr, "fabric": fabric.id, "vlan": vlan.id, }) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "vlan": ["VLAN %s is not in fabric %s." 
                % (vlan, fabric)]
            }, form.errors)

    def test__doesnt_require_vlan_space_or_cidr_on_update(self):
        subnet = factory.make_Subnet()
        form = SubnetForm(instance=subnet, data={})
        self.assertTrue(form.is_valid(), form.errors)

    def test__updates_subnet(self):
        new_name = factory.make_name("subnet")
        subnet = factory.make_Subnet()
        new_vlan = factory.make_VLAN()
        new_space = factory.make_Space()
        new_network = factory.make_ip4_or_6_network()
        new_cidr = unicode(new_network.cidr)
        new_gateway_ip = factory.pick_ip_in_network(new_network)
        new_dns_servers = []
        for _ in range(2):
            new_dns_servers.append(
                factory.pick_ip_in_network(
                    new_network,
                    but_not=[new_gateway_ip] + new_dns_servers))
        form = SubnetForm(instance=subnet, data={
            "name": new_name,
            "vlan": new_vlan.id,
            "space": new_space.id,
            "cidr": new_cidr,
            "gateway_ip": new_gateway_ip,
            "dns_servers": ','.join(new_dns_servers),
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertThat(
            subnet, MatchesStructure.byEquality(
                name=new_name, vlan=new_vlan, space=new_space,
                cidr=new_cidr, gateway_ip=new_gateway_ip,
                dns_servers=new_dns_servers))

    def test__updates_subnet_name_to_cidr(self):
        subnet = factory.make_Subnet()
        subnet.name = subnet.cidr
        subnet.save()
        new_network = factory.make_ip4_or_6_network()
        new_cidr = unicode(new_network.cidr)
        new_gateway_ip = factory.pick_ip_in_network(new_network)
        form = SubnetForm(instance=subnet, data={
            "cidr": new_cidr,
            "gateway_ip": new_gateway_ip,
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertThat(
            subnet, MatchesStructure.byEquality(
                name=new_cidr, cidr=new_cidr, gateway_ip=new_gateway_ip))

    def test__updates_subnet_name_doesnt_remove_dns_server(self):
        # Regression test for lp:1521833
        dns_servers = [
            factory.make_ip_address()
            for _ in range(random.randint(2, 10))]
        subnet = factory.make_Subnet(dns_servers=dns_servers)
        form = SubnetForm(instance=subnet, data={
            "name": factory.make_name("subnet")
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertEquals(dns_servers, subnet.dns_servers)

    def test__doesnt_overwrite_other_fields(self):
        new_name = factory.make_name("subnet")
        subnet = factory.make_Subnet()
        form = SubnetForm(instance=subnet, data={
            "name": new_name,
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertThat(
            subnet, MatchesStructure.byEquality(
                name=new_name, vlan=subnet.vlan, space=subnet.space,
                cidr=subnet.cidr, gateway_ip=subnet.gateway_ip,
                dns_servers=subnet.dns_servers))

    def test__clears_gateway_and_dns_servers(self):
        subnet = factory.make_Subnet()
        form = SubnetForm(instance=subnet, data={
            "gateway_ip": "",
            "dns_servers": "",
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertThat(
            subnet, MatchesStructure.byEquality(
                gateway_ip=None, dns_servers=[]))

    def test__clean_dns_servers_accepts_comma_separated_list(self):
        subnet = factory.make_Subnet()
        dns_servers = [
            factory.make_ip_address()
            for _ in range(random.randint(2, 10))]
        form = SubnetForm(instance=subnet, data={
            "dns_servers": ','.join(dns_servers)
        })
        self.assertTrue(form.is_valid(), form.errors)
        form.save()
        subnet = reload_object(subnet)
        self.assertEquals(dns_servers, subnet.dns_servers)

    def test__clean_dns_servers_accepts_space_separated_list(self):
        subnet = factory.make_Subnet()
        dns_servers = [
            factory.make_ip_address()
            for _ in range(random.randint(2, 10))]
        form = SubnetForm(instance=subnet, data={
            "dns_servers": " 
".join(dns_servers) }) self.assertTrue(form.is_valid(), form.errors) form.save() subnet = reload_object(subnet) self.assertEquals(dns_servers, subnet.dns_servers) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_user.py0000644000000000000000000001127413056115004022556 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for user-creation forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib.auth.models import User from maasserver.forms import ( EditUserForm, NewUserCreationForm, ProfileForm, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from testtools.matchers import MatchesRegex class TestUniqueEmailForms(MAASServerTestCase): def assertFormFailsValidationBecauseEmailNotUnique(self, form): self.assertFalse(form.is_valid()) self.assertIn('email', form._errors) self.assertEquals(1, len(form._errors['email'])) # Cope with 'Email' and 'E-mail' in error message. self.assertThat( form._errors['email'][0], MatchesRegex( r'User with this E-{0,1}mail address already exists.')) def test_ProfileForm_fails_validation_if_email_taken(self): another_email = '%s@example.com' % factory.make_string() factory.make_User(email=another_email) email = '%s@example.com' % factory.make_string() user = factory.make_User(email=email) form = ProfileForm(instance=user, data={'email': another_email}) self.assertFormFailsValidationBecauseEmailNotUnique(form) def test_ProfileForm_validates_if_email_unchanged(self): email = '%s@example.com' % factory.make_string() user = factory.make_User(email=email) form = ProfileForm(instance=user, data={'email': email}) self.assertTrue(form.is_valid()) def test_NewUserCreationForm_fails_validation_if_email_taken(self): email = '%s@example.com' % factory.make_string() username = factory.make_string() password = factory.make_string() factory.make_User(email=email) form = NewUserCreationForm( { 'email': email, 'username': username, 'password1': password, 'password2': password, }) self.assertFormFailsValidationBecauseEmailNotUnique(form) def test_EditUserForm_fails_validation_if_email_taken(self): another_email = '%s@example.com' % factory.make_string() factory.make_User(email=another_email) email = '%s@example.com' % factory.make_string() user = factory.make_User(email=email) form = EditUserForm(instance=user, data={'email': another_email}) self.assertFormFailsValidationBecauseEmailNotUnique(form) def test_EditUserForm_validates_if_email_unchanged(self): email = '%s@example.com' % factory.make_string() user = factory.make_User(email=email) form = EditUserForm( instance=user, data={ 'email': email, 'username': factory.make_string(), }) self.assertTrue(form.is_valid()) class TestNewUserCreationForm(MAASServerTestCase): def test_saves_to_db_by_default(self): password = factory.make_name('password') params = { 'email': '%s@example.com' % factory.make_string(), 'username': factory.make_name('user'), 'password1': password, 'password2': password, } form = NewUserCreationForm(params) form.save() self.assertIsNotNone(User.objects.get(username=params['username'])) def test_email_is_required(self): password = factory.make_name('password') params = { 'email': '', 'username': factory.make_name('user'), 'password1': password, 'password2': password, } form = NewUserCreationForm(params) self.assertFalse(form.is_valid()) 
self.assertEquals( {'email': ['This field is required.']}, form._errors) def test_does_not_save_to_db_if_commit_is_False(self): password = factory.make_name('password') params = { 'email': '%s@example.com' % factory.make_string(), 'username': factory.make_name('user'), 'password1': password, 'password2': password, } form = NewUserCreationForm(params) form.save(commit=False) self.assertItemsEqual( [], User.objects.filter(username=params['username'])) def test_fields_order(self): form = NewUserCreationForm() self.assertEqual( ['username', 'last_name', 'email', 'password1', 'password2', 'is_superuser'], list(form.fields)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_validate_new_static_ip_range.py0000644000000000000000000001335413056115004027456 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `validate_new_static_ip_ranges`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.exceptions import ValidationError from maasserver.enum import ( NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.forms import ( ERROR_MESSAGE_STATIC_IPS_OUTSIDE_RANGE, ERROR_MESSAGE_STATIC_RANGE_IN_USE, validate_new_static_ip_ranges, ) from maasserver.models.staticipaddress import StaticIPAddress from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPNetwork class TestValidateNewStaticIPRanges(MAASServerTestCase): """Tests for `validate_new_static_ip_ranges`().""" def make_cluster_interface(self): network = IPNetwork("10.1.0.0/24") nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, network=network) [interface] = nodegroup.get_managed_interfaces() interface.ip_range_low = '10.1.0.1' interface.ip_range_high = '10.1.0.10' interface.static_ip_range_low = '10.1.0.50' interface.static_ip_range_high = '10.1.0.60' interface.save() return interface def test_raises_error_when_allocated_ips_fall_outside_new_range(self): interface = self.make_cluster_interface() StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.56', '10.1.0.60', '10.1.0.1', '10.1.0.10') error = self.assertRaises( ValidationError, validate_new_static_ip_ranges, instance=interface, management=interface.management, static_ip_range_low='10.1.0.50', static_ip_range_high='10.1.0.55') self.assertEqual( ERROR_MESSAGE_STATIC_IPS_OUTSIDE_RANGE, error.message) def test_removing_static_range_raises_error_if_ips_allocated(self): interface = self.make_cluster_interface() StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.56', '10.1.0.60', '10.1.0.1', '10.1.0.10') error = self.assertRaises( ValidationError, validate_new_static_ip_ranges, instance=interface, management=interface.management, static_ip_range_low='', static_ip_range_high='') self.assertEqual( ERROR_MESSAGE_STATIC_RANGE_IN_USE, error.message) def test_allows_range_expansion(self): interface = self.make_cluster_interface() StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.56', '10.1.0.60', '10.1.0.1', '10.1.0.10') is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low='10.1.0.40', static_ip_range_high='10.1.0.100') self.assertTrue(is_valid) def test_allows_allocated_ip_as_upper_bound(self): interface = self.make_cluster_interface() 
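        # A note on the call below (inferred from its use throughout this
        # test case): allocate_new() takes positional ranges in the order
        #   (network, static_low, static_high, dynamic_low, dynamic_high)
        # and collapsing the static range to a single address ('10.1.0.55'
        # twice) forces the allocation to land exactly on that IP.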
StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.55', '10.1.0.55', '10.1.0.1', '10.1.0.10') is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low=interface.static_ip_range_low, static_ip_range_high='10.1.0.55') self.assertTrue(is_valid) def test_allows_allocated_ip_as_lower_bound(self): interface = self.make_cluster_interface() StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.55', '10.1.0.55', '10.1.0.1', '10.1.0.10') is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low='10.1.0.55', static_ip_range_high=interface.static_ip_range_high) self.assertTrue(is_valid) def test_ignores_unmanaged_interfaces(self): interface = self.make_cluster_interface() interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED interface.save() StaticIPAddress.objects.allocate_new( interface.network, interface.static_ip_range_low, interface.static_ip_range_high, interface.ip_range_low, interface.ip_range_high, subnet=interface.subnet) is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low='10.1.0.57', static_ip_range_high='10.1.0.58') self.assertTrue(is_valid) def test_ignores_interfaces_with_no_static_range(self): interface = self.make_cluster_interface() interface.static_ip_range_low = None interface.static_ip_range_high = None interface.save() StaticIPAddress.objects.allocate_new( '10.1.0.0/16', '10.1.0.56', '10.1.0.60', '10.1.0.1', '10.1.0.10') is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low='10.1.0.57', static_ip_range_high='10.1.0.58') self.assertTrue(is_valid) def test_ignores_unchanged_static_range(self): interface = self.make_cluster_interface() StaticIPAddress.objects.allocate_new( interface.network, interface.static_ip_range_low, interface.static_ip_range_high, interface.ip_range_low, interface.ip_range_high) is_valid = validate_new_static_ip_ranges( interface, interface.management, static_ip_range_low=interface.static_ip_range_low, static_ip_range_high=interface.static_ip_range_high) self.assertTrue(is_valid) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_validate_nonoverlapping_networks.py0000644000000000000000000000750513056115004030450 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `validate_nonoverlapping_networks`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.core.exceptions import ValidationError from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT from maasserver.forms import validate_nonoverlapping_networks from maastesting.factory import factory from testtools import TestCase from testtools.matchers import ( Contains, MatchesAll, MatchesRegex, StartsWith, ) class TestValidateNonoverlappingNetworks(TestCase): """Tests for `validate_nonoverlapping_networks`.""" def make_interface_definition(self, ip, netmask, name=None): """Return a minimal imitation of an interface definition.""" if name is None: name = factory.make_name('itf') return { 'interface': name, 'ip': ip, 'subnet_mask': netmask, 'management': NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS, } def test_accepts_zero_interfaces(self): validate_nonoverlapping_networks([]) # Success is getting here without error. 
        pass

    def test_accepts_single_interface(self):
        validate_nonoverlapping_networks(
            [self.make_interface_definition('10.1.1.1', '255.255.0.0')])
        # Success is getting here without error.
        pass

    def test_accepts_disparate_ranges(self):
        validate_nonoverlapping_networks([
            self.make_interface_definition('10.1.0.0', '255.255.0.0'),
            self.make_interface_definition('192.168.0.0', '255.255.255.0'),
            ])
        # Success is getting here without error.
        pass

    def test_accepts_near_neighbours(self):
        validate_nonoverlapping_networks([
            self.make_interface_definition('10.1.0.0', '255.255.0.0'),
            self.make_interface_definition('10.2.0.0', '255.255.0.0'),
            ])
        # Success is getting here without error.
        pass

    def test_rejects_identical_ranges(self):
        definitions = [
            self.make_interface_definition('192.168.0.0', '255.255.255.0'),
            self.make_interface_definition('192.168.0.0', '255.255.255.0'),
            ]
        error = self.assertRaises(
            ValidationError, validate_nonoverlapping_networks, definitions)
        error_text = error.messages[0]
        self.assertThat(
            error_text, MatchesRegex(
                "Conflicting networks on [^\\s]+ and [^\\s]+: "
                "address ranges overlap."))
        self.assertThat(
            error_text, MatchesAll(
                *(
                    Contains(definition['interface'])
                    for definition in definitions
                )))

    def test_rejects_nested_ranges(self):
        definitions = [
            self.make_interface_definition('192.168.0.0', '255.255.0.0'),
            self.make_interface_definition('192.168.100.0', '255.255.255.0'),
            ]
        error = self.assertRaises(
            ValidationError, validate_nonoverlapping_networks, definitions)
        self.assertIn("Conflicting networks", unicode(error))

    def test_detects_conflict_regardless_of_order(self):
        definitions = [
            self.make_interface_definition('192.168.100.0', '255.255.255.0'),
            self.make_interface_definition('192.168.1.0', '255.255.255.0'),
            self.make_interface_definition('192.168.64.0', '255.255.192.0'),
            ]
        error = self.assertRaises(
            ValidationError, validate_nonoverlapping_networks, definitions)
        self.assertThat(error.messages[0], StartsWith("Conflicting networks"))
maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_vlan.py0000644000000000000000000000633413056115004022541 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
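# A minimal usage sketch of the form exercised below (illustrative only;
# the vid and mtu bounds are the ones asserted by these tests):
#
#   form = VLANForm(fabric=fabric, data={'vid': 100, 'mtu': 1500})
#   if form.is_valid():      # vid must be 0-4095
#       vlan = form.save()   # mtu defaults to DEFAULT_MTU when omitted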
"""Tests for VLAN forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maasserver.forms_vlan import VLANForm from maasserver.models.vlan import DEFAULT_MTU from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestVLANForm(MAASServerTestCase): def test__requires_vid(self): fabric = factory.make_Fabric() form = VLANForm(fabric=fabric, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ "vid": [ "This field is required.", "Vid must be between 0 and 4095.", ], }, form.errors) def test__creates_vlan(self): fabric = factory.make_Fabric() vlan_name = factory.make_name("vlan") vid = random.randint(1, 1000) mtu = random.randint(552, 4096) form = VLANForm(fabric=fabric, data={ "name": vlan_name, "vid": vid, "mtu": mtu, }) self.assertTrue(form.is_valid(), form.errors) vlan = form.save() self.assertEquals(vlan_name, vlan.name) self.assertEquals(vid, vlan.vid) self.assertEquals(fabric, vlan.fabric) self.assertEquals(mtu, vlan.mtu) def test__creates_vlan_with_default_mtu(self): fabric = factory.make_Fabric() vlan_name = factory.make_name("vlan") vid = random.randint(1, 1000) form = VLANForm(fabric=fabric, data={ "name": vlan_name, "vid": vid, }) self.assertTrue(form.is_valid(), form.errors) vlan = form.save() self.assertEquals(vlan_name, vlan.name) self.assertEquals(vid, vlan.vid) self.assertEquals(fabric, vlan.fabric) self.assertEquals(DEFAULT_MTU, vlan.mtu) def test__doest_require_name_vid_or_mtu_on_update(self): vlan = factory.make_VLAN() form = VLANForm(instance=vlan, data={}) self.assertTrue(form.is_valid(), form.errors) def test__can_edit_default_vlan_mtu(self): fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() new_mtu = random.randint(552, 9000) form = VLANForm(instance=vlan, data={'mtu': new_mtu}) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertEquals(new_mtu, reload_object(vlan).mtu) def test__updates_vlan(self): vlan = factory.make_VLAN() new_name = factory.make_name("vlan") new_vid = random.randint(1, 1000) new_mtu = random.randint(552, 4096) form = VLANForm(instance=vlan, data={ "name": new_name, "vid": new_vid, "mtu": new_mtu, }) self.assertTrue(form.is_valid(), form.errors) form.save() self.assertEquals(new_name, reload_object(vlan).name) self.assertEquals(new_vid, reload_object(vlan).vid) self.assertEquals(new_mtu, reload_object(vlan).mtu) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_volume_group.py0000644000000000000000000005313713056115004024327 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for all forms that are used with `VolumeGroup`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random import uuid from maasserver.enum import FILESYSTEM_TYPE from maasserver.forms import ( CreateLogicalVolumeForm, CreateVolumeGroupForm, UpdateVolumeGroupForm, ) from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.partition import PARTITION_ALIGNMENT_SIZE from maasserver.models.partitiontable import PARTITION_TABLE_EXTRA_SPACE from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.converters import round_size_to_nearest_block from testtools.matchers import MatchesStructure class TestCreateVolumeGroupForm(MAASServerTestCase): def test_requires_fields(self): node = factory.make_Node() form = CreateVolumeGroupForm(node, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['name'], form.errors.keys()) def test_is_not_valid_if_invalid_uuid(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice(node=node) data = { 'name': factory.make_name("name"), 'uuid': factory.make_string(size=32), 'block_devices': [block_device.id], } form = CreateVolumeGroupForm(node, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_is_not_valid_missing_block_devices_and_partitions(self): node = factory.make_Node() vguuid = "%s" % uuid.uuid4() data = { 'name': factory.make_name("name"), 'uuid': vguuid, } form = CreateVolumeGroupForm(node, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of missing block_devices and " "partitions.") self.assertEquals({ '__all__': [ "At least one valid block device or partition is required.", ]}, form._errors) def test_is_not_valid_if_block_device_does_not_belong_to_node(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice() data = { 'name': factory.make_name("name"), 'block_devices': [block_device.id], } form = CreateVolumeGroupForm(node, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of block device does not " "belonging to node.") self.assertEquals({ 'block_devices': [ "Select a valid choice. %s is not one of the available " "choices." % block_device.id, ]}, form._errors) def test_is_not_valid_if_partition_does_not_belong_to_node(self): node = factory.make_Node() partition = factory.make_Partition() data = { 'name': factory.make_name("name"), 'partitions': [partition.id], } form = CreateVolumeGroupForm(node, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of partition does not " "belonging to node.") self.assertEquals({ 'partitions': [ "Select a valid choice. %s is not one of the available " "choices." 
% partition.id, ]}, form._errors) def test_creates_volume_group_with_name_and_uuid(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice(node=node) name = factory.make_name("vg") vguuid = "%s" % uuid.uuid4() data = { 'name': name, 'uuid': vguuid, 'block_devices': [block_device.id], } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals(name, volume_group.name) self.assertEquals(vguuid, volume_group.uuid) def test_creates_volume_group_with_block_devices(self): node = factory.make_Node() block_devices = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(3) ] block_device_ids = [ block_device.id for block_device in block_devices ] data = { 'name': factory.make_name("vg"), 'block_devices': block_device_ids, } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() block_devices_in_vg = [ filesystem.block_device.actual_instance for filesystem in volume_group.filesystems.all() ] self.assertItemsEqual(block_devices, block_devices_in_vg) def test_creates_volume_group_with_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) data = { 'name': factory.make_name("vg"), 'block_devices': [boot_disk.id], } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() self.assertEquals( boot_partition.get_effective_filesystem().filesystem_group.id, volume_group.id) def test_creates_volume_group_with_block_devices_by_name(self): node = factory.make_Node() block_devices = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(3) ] block_device_names = [ block_device.name for block_device in block_devices ] data = { 'name': factory.make_name("vg"), 'block_devices': block_device_names, } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() block_devices_in_vg = [ filesystem.block_device.actual_instance for filesystem in volume_group.filesystems.all() ] self.assertItemsEqual(block_devices, block_devices_in_vg) def test_creates_volume_group_with_partitions(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice( node=node, size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE) partition_table = factory.make_PartitionTable( block_device=block_device) partitions = [ partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE) for _ in range(2) ] partition_ids = [ partition.id for partition in partitions ] data = { 'name': factory.make_name("vg"), 'partitions': partition_ids, } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() partitions_in_vg = [ filesystem.partition for filesystem in volume_group.filesystems.all() ] self.assertItemsEqual(partitions, partitions_in_vg) def test_creates_volume_group_with_partitions_by_name(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice( node=node, size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE) partition_table = factory.make_PartitionTable( block_device=block_device) partitions = [ partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE) for _ in range(2) ] partition_names = [ partition.name for partition in partitions ] data = { 'name': factory.make_name("vg"), 'partitions': 
partition_names, } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() partitions_in_vg = [ filesystem.partition for filesystem in volume_group.filesystems.all() ] self.assertItemsEqual(partitions, partitions_in_vg) def test_creates_volume_group_with_block_devices_and_partitions(self): node = factory.make_Node() block_devices = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(3) ] block_device_ids = [ block_device.id for block_device in block_devices ] block_device = factory.make_PhysicalBlockDevice( node=node, size=(MIN_BLOCK_DEVICE_SIZE * 3) + PARTITION_TABLE_EXTRA_SPACE) partition_table = factory.make_PartitionTable( block_device=block_device) partitions = [ partition_table.add_partition(size=MIN_BLOCK_DEVICE_SIZE) for _ in range(2) ] partition_ids = [ partition.id for partition in partitions ] data = { 'name': factory.make_name("vg"), 'block_devices': block_device_ids, 'partitions': partition_ids, } form = CreateVolumeGroupForm(node, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() block_devices_in_vg = [ filesystem.block_device.actual_instance for filesystem in volume_group.filesystems.all() if filesystem.block_device is not None ] partitions_in_vg = [ filesystem.partition for filesystem in volume_group.filesystems.all() if filesystem.partition is not None ] self.assertItemsEqual(block_devices, block_devices_in_vg) self.assertItemsEqual(partitions, partitions_in_vg) class TestUpdateVolumeGroupForm(MAASServerTestCase): def test_requires_no_fields(self): volume_group = factory.make_VolumeGroup() form = UpdateVolumeGroupForm(volume_group, data={}) self.assertTrue(form.is_valid(), form.errors) def test_updates_name(self): volume_group = factory.make_VolumeGroup() name = factory.make_name("vg") data = { 'name': name, } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals(name, volume_group.name) def test_is_not_valid_if_invalid_uuid(self): volume_group = factory.make_VolumeGroup() data = { 'uuid': factory.make_string(size=32), } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_updates_uuid(self): volume_group = factory.make_VolumeGroup() vguuid = "%s" % uuid.uuid4() data = { 'uuid': vguuid, } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals(vguuid, volume_group.uuid) def test_adds_block_device(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) data = { 'add_block_devices': [block_device.id], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals( volume_group.id, block_device.get_effective_filesystem().filesystem_group.id) def test_adds_boot_disk(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice(node=node) volume_group = factory.make_VolumeGroup(node=node) data = { 'add_block_devices': [boot_disk.id], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() boot_partition = boot_disk.get_partitiontable().partitions.first() 
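        # The check below leans on boot-disk handling verified elsewhere in
        # this file: a boot disk is never added to the group raw; MAAS
        # partitions it and the partition joins the volume group, so
        # membership is asserted through the partition's filesystem.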
self.assertEquals( boot_partition.get_effective_filesystem().filesystem_group.id, volume_group.id) def test_adds_block_device_by_name(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) data = { 'add_block_devices': [block_device.name], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals( volume_group.id, block_device.get_effective_filesystem().filesystem_group.id) def test_removes_block_device(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) factory.make_Filesystem( fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device, filesystem_group=volume_group) data = { 'remove_block_devices': [block_device.id], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertIsNone(block_device.get_effective_filesystem()) def test_removes_block_device_by_name(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) factory.make_Filesystem( fstype=FILESYSTEM_TYPE.LVM_PV, block_device=block_device, filesystem_group=volume_group) data = { 'remove_block_devices': [block_device.name], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertIsNone(block_device.get_effective_filesystem()) def test_adds_partition(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) data = { 'add_partitions': [partition.id], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals( volume_group.id, partition.get_effective_filesystem().filesystem_group.id) def test_adds_partition_by_name(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) data = { 'add_partitions': [partition.name], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertEquals( volume_group.id, partition.get_effective_filesystem().filesystem_group.id) def test_removes_partition(self): node = factory.make_Node() volume_group = factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) factory.make_Filesystem( fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition, filesystem_group=volume_group) data = { 'remove_partitions': [partition.id], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertIsNone(partition.get_effective_filesystem()) def test_removes_partition_by_name(self): node = factory.make_Node() volume_group = 
factory.make_VolumeGroup(node=node) block_device = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device) partition = factory.make_Partition(partition_table=partition_table) factory.make_Filesystem( fstype=FILESYSTEM_TYPE.LVM_PV, partition=partition, filesystem_group=volume_group) data = { 'remove_partitions': [partition.name], } form = UpdateVolumeGroupForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) volume_group = form.save() self.assertIsNone(partition.get_effective_filesystem()) class TestCreateLogicalVolumeForm(MAASServerTestCase): def test_requires_no_fields(self): volume_group = factory.make_VolumeGroup() form = CreateLogicalVolumeForm(volume_group, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertItemsEqual(['name', 'size'], form.errors.keys()) def test_is_not_valid_if_invalid_uuid(self): volume_group = factory.make_VolumeGroup() name = factory.make_name("lv") data = { 'name': name, 'uuid': factory.make_string(size=32), 'size': volume_group.get_size() - 1, } form = CreateLogicalVolumeForm(volume_group, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid uuid.") self.assertEquals({'uuid': ["Enter a valid value."]}, form._errors) def test_is_not_valid_if_size_less_than_minimum_block_size(self): volume_group = factory.make_VolumeGroup() name = factory.make_name("lv") data = { 'name': name, 'size': MIN_BLOCK_DEVICE_SIZE - 1, } form = CreateLogicalVolumeForm(volume_group, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid size.") self.assertEquals({ 'size': [ "Ensure this value is greater than or equal to %s." % ( MIN_BLOCK_DEVICE_SIZE), ]}, form._errors) def test_is_not_valid_if_size_greater_than_free_space(self): volume_group = factory.make_VolumeGroup() volume_group.create_logical_volume( factory.make_name("lv"), size=volume_group.get_size() - MIN_BLOCK_DEVICE_SIZE - 1) name = factory.make_name("lv") free_space = volume_group.get_lvm_free_space() data = { 'name': name, 'size': free_space + 2, } form = CreateLogicalVolumeForm(volume_group, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an invalid size.") self.assertEquals({ 'size': [ "Ensure this value is less than or equal to %s." % ( volume_group.get_lvm_free_space()), ]}, form._errors) def test_is_not_valid_if_free_space_less_than_min_size(self): volume_group = factory.make_VolumeGroup() volume_group.create_logical_volume( factory.make_name("lv"), size=volume_group.get_size()) name = factory.make_name("lv") data = { 'name': name, 'size': MIN_BLOCK_DEVICE_SIZE, } form = CreateLogicalVolumeForm(volume_group, data=data) self.assertFalse( form.is_valid(), "Should be invalid because of an no free space.") self.assertEquals({ '__all__': [ "Volume group (%s) cannot hold any more logical volumes, " "because it doesn't have enough free space." 
% ( volume_group.name), ]}, form._errors) def test_creates_logical_volume(self): volume_group = factory.make_VolumeGroup() name = factory.make_name("lv") vguuid = "%s" % uuid.uuid4() size = random.randint(MIN_BLOCK_DEVICE_SIZE, volume_group.get_size()) data = { 'name': name, 'uuid': vguuid, 'size': size, } form = CreateLogicalVolumeForm(volume_group, data=data) self.assertTrue(form.is_valid(), form._errors) logical_volume = form.save() expected_size = round_size_to_nearest_block( size, PARTITION_ALIGNMENT_SIZE, False) self.assertThat( logical_volume, MatchesStructure.byEquality( name=name, uuid=vguuid, size=expected_size, )) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_forms_zone.py0000644000000000000000000000453113056115004022551 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `ZoneForm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.forms import ZoneForm from maasserver.models import Zone from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase class TestZoneForm(MAASServerTestCase): """Tests for `ZoneForm`.""" def test_creates_zone(self): name = factory.make_name('zone') description = factory.make_string() form = ZoneForm(data={'name': name, 'description': description}) form.save() zone = Zone.objects.get(name=name) self.assertIsNotNone(zone) self.assertEqual(description, zone.description) def test_updates_zone(self): zone = factory.make_Zone() new_description = factory.make_string() form = ZoneForm(data={'description': new_description}, instance=zone) form.save() zone = reload_object(zone) self.assertEqual(new_description, zone.description) def test_renames_zone(self): zone = factory.make_Zone() new_name = factory.make_name('zone') form = ZoneForm(data={'name': new_name}, instance=zone) form.save() zone = reload_object(zone) self.assertEqual(new_name, zone.name) self.assertEqual(zone, Zone.objects.get(name=new_name)) def test_update_default_zone_description_works(self): zone = Zone.objects.get_default_zone() new_description = factory.make_string() form = ZoneForm(data={'description': new_description}, instance=zone) self.assertTrue(form.is_valid(), form._errors) form.save() zone = reload_object(zone) self.assertEqual(new_description, zone.description) def test_disallows_renaming_default_zone(self): zone = Zone.objects.get_default_zone() form = ZoneForm( data={'name': factory.make_name('zone')}, instance=zone) self.assertFalse(form.is_valid()) self.assertEqual( {'name': ["This zone is the default zone, it cannot be renamed."]}, form.errors) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_js.py0000644000000000000000000001144313056115004021004 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
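# A usage sketch for choosing which browsers these tests run against
# (MAAS_TEST_BROWSERS is read by get_browser_names_from_env below; it is
# assumed the matching Selenium drivers are on PATH):
#
#   $ export MAAS_TEST_BROWSERS="Firefox, Chrome"
#   $ ./bin/test.region src/maasserver/tests/test_js.py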
"""Run YUI3 unit tests with Selenium.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from abc import ( ABCMeta, abstractmethod, ) from glob import glob import json import os from os.path import ( abspath, join, relpath, ) import sys from time import sleep from maastesting import ( root, yui3, ) from maastesting.fixtures import ( DisplayFixture, ProxiesDisabledFixture, SeleniumFixture, ) from maastesting.testcase import MAASTestCase from maastesting.utils import extract_word_list from nose.tools import nottest from provisioningserver.utils.twisted import retries from testtools import clone_test_with_new_id # Nose is over-zealous. nottest(clone_test_with_new_id) def get_browser_names_from_env(): """Parse the environment variable ``MAAS_TEST_BROWSERS`` to get a list of the browsers to use for the JavaScript tests. Returns ['Chrome', 'PhantomJS'] if the environment variable is not set. """ names = os.environ.get('MAAS_TEST_BROWSERS', 'Chrome, PhantomJS') return extract_word_list(names) class YUIUnitTestsBase: """Base class for running YUI3 tests in a variety of browsers. Calls to instance of this class are intercepted. If the call is to a clone the superclass is called, and thus the test executes as normal. Otherwise the `multiply` method is called. This method can then arrange for the testcase to be run in multiple environments, cloning the test for each. In this way it can efficiently set-up and tear-down resources for the tests, and also report on a per-test basis. If test resources were fully working for MAAS tests this might not be necessary, but at the time of implementation this was a solution with the lowest friction (at least, lower than ripping nose out, or teaching it about test resources). """ __metaclass__ = ABCMeta test_paths = glob(join(root, "src/maasserver/static/js/tests/*.html")) assert test_paths != [], "No JavaScript unit test pages found." # Indicates if this test has been cloned. cloned = False def clone(self, suffix): # Clone this test with a new suffix. test = clone_test_with_new_id( self, "%s#%s" % (self.id(), suffix)) test.cloned = True return test @abstractmethod def multiply(self, result): """Run the test for each of a specified range of browsers. This method should sort out shared fixtures. """ def __call__(self, result=None): if self.cloned: # This test has been cloned; just call-up to run the test. super(YUIUnitTestsBase, self).__call__(result) else: try: with ProxiesDisabledFixture(): self.multiply(result) except KeyboardInterrupt: raise except: if result is None: raise else: result.addError(self, sys.exc_info()) def test_YUI3_unit_tests(self): # Load the page and then wait for #suite to contain # 'done'. Read the results in '#test_results'. self.browser.get(self.test_url) for elapsed, remaining, wait in retries(intervals=0.2): suite = self.browser.find_element_by_id("suite") if suite.text == "done": results = self.browser.find_element_by_id("test_results") results = json.loads(results.text) break else: sleep(wait) else: self.fail("Timed-out after %ds" % elapsed) if results['failed'] != 0: message = '%d test(s) failed.\n\n%s' % ( results['failed'], yui3.get_failed_tests_message(results)) self.fail(message) class YUIUnitTestsLocal(YUIUnitTestsBase, MAASTestCase): scenarios = tuple( (relpath(path, root), {"test_url": "file://%s" % abspath(path)}) for path in YUIUnitTestsBase.test_paths) def multiply(self, result): # Run this test locally for each browser requested. 
Use the same # display fixture for all browsers. This is done here so that all # scenarios are played out for each browser in turn; starting and # stopping browsers is costly. with DisplayFixture(): for browser_name in get_browser_names_from_env(): browser_test = self.clone("local:%s" % browser_name) with SeleniumFixture(browser_name) as selenium: browser_test.browser = selenium.browser browser_test(result) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_middleware.py0000644000000000000000000005524613056115004022516 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver middleware classes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib import json import logging import random from crochet import TimeoutError from django.conf import settings from django.contrib.messages import constants from django.core.exceptions import ( PermissionDenied, ValidationError, ) from django.core.urlresolvers import reverse from django.http import HttpResponse from django.http.request import build_request_repr from fixtures import FakeLogger from maasserver import middleware as middleware_module from maasserver.components import ( get_persistent_error, register_persistent_error, ) from maasserver.enum import ( COMPONENT, NODEGROUP_STATUS, ) from maasserver.exceptions import ( MAASAPIException, MAASAPINotFound, ) from maasserver.middleware import ( APIErrorsMiddleware, APIRPCErrorsMiddleware, CSRFHelperMiddleware, DebuggingLoggerMiddleware, ExceptionMiddleware, ExternalComponentsMiddleware, RPCErrorsMiddleware, ) from maasserver.models import nodegroup as nodegroup_module from maasserver.testing import extract_redirect from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import make_serialization_failure from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.utils import sample_binary_data from mock import Mock from provisioningserver.rpc.exceptions import ( NoConnectionsAvailable, PowerActionAlreadyInProgress, ) from provisioningserver.utils.shell import ExternalProcessError from testtools.matchers import ( Contains, Equals, Not, ) class ExceptionMiddlewareTest(MAASServerTestCase): def make_base_path(self): """Return a path to handle exceptions for.""" return "/%s" % factory.make_string() def make_middleware(self, base_path): """Create an ExceptionMiddleware for base_path.""" class TestingExceptionMiddleware(ExceptionMiddleware): path_regex = base_path return TestingExceptionMiddleware() def process_exception(self, exception): """Run a given exception through a fake ExceptionMiddleware. :param exception: The exception to simulate. :type exception: Exception :return: The response as returned by the ExceptionMiddleware. :rtype: HttpResponse or None. 
""" base_path = self.make_base_path() middleware = self.make_middleware(base_path) request = factory.make_fake_request(base_path) return middleware.process_exception(request, exception) def test_ignores_paths_outside_path_regex(self): middleware = self.make_middleware(self.make_base_path()) request = factory.make_fake_request(self.make_base_path()) exception = MAASAPINotFound("Huh?") self.assertIsNone(middleware.process_exception(request, exception)) def test_ignores_serialization_failures(self): base_path = self.make_base_path() middleware = self.make_middleware(base_path) request = factory.make_fake_request(base_path) exception = make_serialization_failure() self.assertIsNone(middleware.process_exception(request, exception)) def test_unknown_exception_generates_internal_server_error(self): # An unknown exception generates an internal server error with the # exception message. error_message = factory.make_string() response = self.process_exception(RuntimeError(error_message)) self.assertEqual( (httplib.INTERNAL_SERVER_ERROR, error_message), (response.status_code, response.content)) def test_reports_MAASAPIException_with_appropriate_api_error(self): class MyException(MAASAPIException): api_error = httplib.UNAUTHORIZED error_message = factory.make_string() exception = MyException(error_message) response = self.process_exception(exception) self.assertEqual( (httplib.UNAUTHORIZED, error_message), (response.status_code, response.content)) def test_renders_MAASAPIException_as_unicode(self): class MyException(MAASAPIException): api_error = httplib.UNAUTHORIZED error_message = "Error %s" % unichr(233) response = self.process_exception(MyException(error_message)) self.assertEqual( (httplib.UNAUTHORIZED, error_message), (response.status_code, response.content.decode('utf-8'))) def test_reports_ValidationError_as_Bad_Request(self): error_message = factory.make_string() response = self.process_exception(ValidationError(error_message)) self.assertEqual( (httplib.BAD_REQUEST, error_message), (response.status_code, response.content)) def test_returns_ValidationError_message_dict_as_json(self): exception_dict = {'hostname': ['invalid']} exception = ValidationError(exception_dict) response = self.process_exception(exception) self.assertEqual(exception_dict, json.loads(response.content)) self.assertIn('application/json', response['Content-Type']) def test_reports_PermissionDenied_as_Forbidden(self): error_message = factory.make_string() response = self.process_exception(PermissionDenied(error_message)) self.assertEqual( (httplib.FORBIDDEN, error_message), (response.status_code, response.content)) def test_api_500_error_is_logged(self): logger = self.useFixture(FakeLogger('maasserver')) error_text = factory.make_string() self.process_exception(MAASAPIException(error_text)) self.assertThat(logger.output, Contains(error_text)) def test_generic_500_error_is_logged(self): logger = self.useFixture(FakeLogger('maasserver')) error_text = factory.make_string() self.process_exception(Exception(error_text)) self.assertThat(logger.output, Contains(error_text)) def test_reports_ExternalProcessError_as_ServiceUnavailable(self): error_text = factory.make_string() exception = ExternalProcessError(1, ["cmd"], error_text) retry_after = random.randint(0, 10) self.patch( middleware_module, 'RETRY_AFTER_SERVICE_UNAVAILABLE', retry_after) response = self.process_exception(exception) self.expectThat( response.status_code, Equals(httplib.SERVICE_UNAVAILABLE)) self.expectThat(response.content, Equals(unicode(exception))) 
self.expectThat(response['Retry-After'], Equals("%s" % retry_after)) class APIErrorsMiddlewareTest(MAASServerTestCase): def test_handles_error_on_API(self): middleware = APIErrorsMiddleware() api_request = factory.make_fake_request("/api/1.0/hello") error_message = factory.make_string() exception = MAASAPINotFound(error_message) response = middleware.process_exception(api_request, exception) self.assertEqual( (httplib.NOT_FOUND, error_message), (response.status_code, response.content)) def test_ignores_error_outside_API(self): middleware = APIErrorsMiddleware() non_api_request = factory.make_fake_request("/middleware/api/hello") exception = MAASAPINotFound(factory.make_string()) self.assertIsNone( middleware.process_exception(non_api_request, exception)) def test_503_response_includes_retry_after_header(self): middleware = APIErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') error = ExternalProcessError(returncode=-1, cmd="foo-bar") response = middleware.process_exception(request, error) self.assertEqual( ( httplib.SERVICE_UNAVAILABLE, '%s' % middleware_module.RETRY_AFTER_SERVICE_UNAVAILABLE, ), (response.status_code, response['Retry-after'])) class DebuggingLoggerMiddlewareTest(MAASServerTestCase): def test_debugging_logger_does_not_log_request_if_info_level(self): logger = self.useFixture(FakeLogger('maasserver', logging.INFO)) request = factory.make_fake_request("/api/1.0/nodes/") DebuggingLoggerMiddleware().process_request(request) self.assertThat( logger.output, Not(Contains(build_request_repr(request)))) def test_debugging_logger_does_not_log_response_if_info_level(self): logger = self.useFixture(FakeLogger('maasserver', logging.INFO)) request = factory.make_fake_request("/api/1.0/nodes/") response = HttpResponse( content="test content", status=httplib.OK, mimetype=b"text/plain; charset=utf-8") DebuggingLoggerMiddleware().process_response(request, response) self.assertThat( logger.output, Not(Contains(build_request_repr(request)))) def test_debugging_logger_logs_request(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) request = factory.make_fake_request("/api/1.0/nodes/") request.content = "test content" DebuggingLoggerMiddleware().process_request(request) self.assertThat(logger.output, Contains(build_request_repr(request))) def test_debugging_logger_logs_response(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) request = factory.make_fake_request("foo") response = HttpResponse( content="test content", status=httplib.OK, mimetype=b"text/plain; charset=utf-8") DebuggingLoggerMiddleware().process_response(request, response) self.assertThat( logger.output, Contains(response.content)) def test_debugging_logger_logs_binary_response(self): logger = self.useFixture(FakeLogger('maasserver', logging.DEBUG)) request = factory.make_fake_request("foo") response = HttpResponse( content=sample_binary_data, status=httplib.OK, mimetype=b"application/octet-stream") DebuggingLoggerMiddleware().process_response(request, response) self.assertThat( logger.output, Contains("non-utf-8 (binary?) 
content")) class RPCErrorsMiddlewareTest(MAASServerTestCase): def test_handles_PowerActionAlreadyInProgress(self): middleware = RPCErrorsMiddleware() request = factory.make_fake_request(factory.make_string(), 'POST') error_message = ( "Unable to execute power action: another action is " "already in progress for node %s" % factory.make_name('node')) error = PowerActionAlreadyInProgress(error_message) response = middleware.process_exception(request, error) # The response is a redirect. self.assertEqual(request.path, extract_redirect(response)) # An error message has been published. self.assertEqual( [(constants.ERROR, "Error: %s" % error_message, '')], request._messages.messages) def test_handles_NoConnectionsAvailable(self): middleware = RPCErrorsMiddleware() request = factory.make_fake_request(factory.make_string(), 'POST') error_message = ( "No connections available for cluster %s" % factory.make_name('cluster')) error = NoConnectionsAvailable(error_message) response = middleware.process_exception(request, error) # The response is a redirect. self.assertEqual(request.path, extract_redirect(response)) # An error message has been published. self.assertEqual( [(constants.ERROR, "Error: " + error_message, '')], request._messages.messages) def test_handles_TimeoutError(self): middleware = RPCErrorsMiddleware() request = factory.make_fake_request(factory.make_string(), 'POST') error_message = "Here, have a picture of Queen Victoria!" error = TimeoutError(error_message) response = middleware.process_exception(request, error) # The response is a redirect. self.assertEqual(request.path, extract_redirect(response)) # An error message has been published. self.assertEqual( [(constants.ERROR, "Error: " + error_message, '')], request._messages.messages) def test_ignores_non_rpc_errors(self): middleware = RPCErrorsMiddleware() request = factory.make_fake_request(factory.make_string(), 'POST') exception = ZeroDivisionError( "You may think it's a long walk down the street to the chemist " "but that's just peanuts to space!") response = middleware.process_exception(request, exception) self.assertIsNone(response) def test_ignores_error_on_API(self): middleware = RPCErrorsMiddleware() non_api_request = factory.make_fake_request("/api/1.0/ohai") exception_class = random.choice( (NoConnectionsAvailable, PowerActionAlreadyInProgress)) exception = exception_class(factory.make_string()) self.assertIsNone( middleware.process_exception(non_api_request, exception)) def test_no_connections_available_has_usable_cluster_name_in_msg(self): # If a NoConnectionsAvailable exception carries a reference to # the cluster UUID, RPCErrorsMiddleware will look up the # cluster's name and make the error message it displays more # useful. middleware = RPCErrorsMiddleware() request = factory.make_fake_request(factory.make_string(), 'POST') cluster = factory.make_NodeGroup() error = NoConnectionsAvailable( factory.make_name('msg'), uuid=cluster.uuid) middleware.process_exception(request, error) expected_error_message = ( "Error: Unable to connect to cluster '%s' (%s); no connections " "available." 
% (cluster.cluster_name, cluster.uuid)) self.assertEqual( [(constants.ERROR, expected_error_message, '')], request._messages.messages) class APIRPCErrorsMiddlewareTest(MAASServerTestCase): def test_handles_error_on_API(self): middleware = APIRPCErrorsMiddleware() api_request = factory.make_fake_request("/api/1.0/hello") error_message = factory.make_string() exception_class = random.choice( (NoConnectionsAvailable, PowerActionAlreadyInProgress)) exception = exception_class(error_message) response = middleware.process_exception(api_request, exception) self.assertEqual( (middleware.handled_exceptions[exception_class], error_message), (response.status_code, response.content)) def test_ignores_error_outside_API(self): middleware = APIRPCErrorsMiddleware() non_api_request = factory.make_fake_request("/middleware/api/hello") exception_class = random.choice( (NoConnectionsAvailable, PowerActionAlreadyInProgress)) exception = exception_class(factory.make_string()) self.assertIsNone( middleware.process_exception(non_api_request, exception)) def test_no_connections_available_returned_as_503(self): middleware = APIRPCErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') error_message = ( "Unable to connect to cluster '%s'; no connections available" % factory.make_name('cluster')) error = NoConnectionsAvailable(error_message) response = middleware.process_exception(request, error) self.assertEqual( (httplib.SERVICE_UNAVAILABLE, error_message), (response.status_code, response.content)) def test_503_response_includes_retry_after_header_by_default(self): middleware = APIRPCErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') error = NoConnectionsAvailable(factory.make_name()) response = middleware.process_exception(request, error) self.assertEqual( ( httplib.SERVICE_UNAVAILABLE, '%s' % middleware_module.RETRY_AFTER_SERVICE_UNAVAILABLE, ), (response.status_code, response['Retry-after'])) def test_power_action_already_in_progress_returned_as_503(self): middleware = APIRPCErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') error_message = ( "Unable to execute power action: another action is already in " "progress for node %s" % factory.make_name('node')) error = PowerActionAlreadyInProgress(error_message) response = middleware.process_exception(request, error) self.assertEqual( (httplib.SERVICE_UNAVAILABLE, error_message), (response.status_code, response.content)) def test_handles_TimeoutError(self): middleware = APIRPCErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') error_message = "No thanks, I'm trying to give them up." 
error = TimeoutError(error_message) response = middleware.process_exception(request, error) self.assertEqual( (httplib.GATEWAY_TIMEOUT, error_message), (response.status_code, response.content)) def test_ignores_non_rpc_errors(self): middleware = APIRPCErrorsMiddleware() request = factory.make_fake_request( "/api/1.0/" + factory.make_string(), 'POST') exception = ZeroDivisionError( "You may think it's a long walk down the street to the chemist " "but that's just peanuts to space!") response = middleware.process_exception(request, exception) self.assertIsNone(response) class ExternalComponentsMiddlewareTest(MAASServerTestCase): """Tests for the ExternalComponentsMiddleware.""" def test__checks_connectivity_of_accepted_clusters(self): getAllClients = self.patch(middleware_module, 'getAllClients') request = factory.make_fake_request(factory.make_string(), 'GET') middleware = ExternalComponentsMiddleware() middleware.process_request(request) self.assertThat(getAllClients, MockCalledOnceWith()) def test__ignores_non_accepted_clusters(self): factory.make_NodeGroup(status=factory.pick_enum( NODEGROUP_STATUS, but_not=[NODEGROUP_STATUS.ENABLED])) getAllClients = self.patch(nodegroup_module, 'getAllClients') request = factory.make_fake_request(factory.make_string(), 'GET') middleware = ExternalComponentsMiddleware() middleware.process_request(request) self.assertThat(getAllClients, MockNotCalled()) def test__registers_error_if_all_clusters_are_disconnected(self): factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) getAllClients = self.patch(nodegroup_module, 'getAllClients') getAllClients.return_value = [] request = factory.make_fake_request(factory.make_string(), 'GET') middleware = ExternalComponentsMiddleware() middleware.process_request(request) error = get_persistent_error(COMPONENT.CLUSTERS) self.assertEqual( "One cluster is not yet connected to the region. Visit the " "<a href=\"%s\">clusters page</a> for more information." % reverse('cluster-list'), error) def test__registers_error_if_any_clusters_are_disconnected(self): clusters = [ factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED), factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED), factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED), ] getAllClients = self.patch(middleware_module, 'getAllClients') getAllClients.return_value = [Mock(ident=clusters[0].uuid)] request = factory.make_fake_request(factory.make_string(), 'GET') middleware = ExternalComponentsMiddleware() middleware.process_request(request) error = get_persistent_error(COMPONENT.CLUSTERS) self.assertEqual( "2 clusters are not yet connected to the region. Visit the " "<a href=\"%s\">clusters page</a> for more information."
% reverse('cluster-list'), error) def test__removes_error_once_all_clusters_are_connected(self): clusters = [ factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED), factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED), ] getAllClients = self.patch(middleware_module, 'getAllClients') getAllClients.return_value = [ Mock(ident=cluster.uuid) for cluster in clusters ] register_persistent_error( COMPONENT.CLUSTERS, "Who flung that batter pudding?") request = factory.make_fake_request(factory.make_string(), 'GET') middleware = ExternalComponentsMiddleware() middleware.process_request(request) error = get_persistent_error(COMPONENT.CLUSTERS) self.assertIsNone(error) def test__does_not_suppress_exceptions_from_connectivity_checks(self): middleware = ExternalComponentsMiddleware() error_type = factory.make_exception_type() check_cluster_connectivity = self.patch( middleware, "_check_cluster_connectivity") check_cluster_connectivity.side_effect = error_type self.assertRaises(error_type, middleware.process_request, None) self.assertThat(check_cluster_connectivity, MockCalledOnceWith()) class CSRFHelperMiddlewareTest(MAASServerTestCase): """Tests for the CSRFHelperMiddleware.""" def test_sets_csrf_exception_if_no_session_cookie(self): middleware = CSRFHelperMiddleware() cookies = {} request = factory.make_fake_request( factory.make_string(), 'GET', cookies=cookies) self.assertIsNone(middleware.process_request(request)) self.assertTrue(getattr(request, 'csrf_processing_done', None)) def test_doesnt_set_csrf_exception_if_session_cookie(self): middleware = CSRFHelperMiddleware() cookies = { settings.SESSION_COOKIE_NAME: factory.make_name('session') } request = factory.make_fake_request( factory.make_string(), 'GET', cookies=cookies) self.assertIsNone(middleware.process_request(request)) self.assertIsNone(getattr(request, 'csrf_processing_done', None)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_migrations.py0000644000000000000000000000252413056115004022544 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Sanity checks for database migrations. These tests need to be included in each of the MAAS applications that has South-managed database migrations. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.db_migrations import detect_sequence_clashes from maastesting.testcase import MAASTestCase EXISTING_DUPES = [ (2, '0002_add_token_to_node'), (2, '0002_macaddress_unique'), (39, '0039_add_filestorage_content'), (39, '0039_add_nodegroup_to_bootimage'), (88, '0088_ip_to_custom_field'), (88, '0088_z_backport_trunk_0099'), (100, '0100_remove_cluster_from_bootsrouce'), (100, '0100_remove_duplicate_bootsource_urls'), (139, '0139_power_parameters_and_state_updated_field'), (139, '0139_z_add_vlan_fabric'), ] class TestMigrations(MAASTestCase): def test_migrations_mostly_have_unique_numbers(self): # Apart from some duplicates that predate this test and had to # be grandfathered in, database migrations have unique numbers. self.assertEqual( EXISTING_DUPES, detect_sequence_clashes('maasserver')) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_node_action.py0000644000000000000000000010007513056115004022652 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
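# (Editor's aside) detect_sequence_clashes, exercised just above in
# test_migrations.py, reports South migrations whose numeric prefixes
# collide. A minimal standalone sketch of such a check -- the helper
# below is illustrative, not MAAS's actual implementation in
# maasserver.testing.db_migrations:
from collections import Counter

def _detect_clashes(migration_names):
    """Return sorted (number, name) tuples for duplicated prefixes."""
    numbers = [int(name.split('_', 1)[0]) for name in migration_names]
    dupes = set(
        number for number, count in Counter(numbers).items() if count > 1)
    return sorted(
        (number, name) for number, name in zip(numbers, migration_names)
        if number in dupes)

# _detect_clashes(['0002_a', '0002_b', '0003_c'])
#   -> [(2, '0002_a'), (2, '0002_b')]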
"""Tests for node actions.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.db import transaction from maasserver import locks from maasserver.clusterrpc.utils import get_error_message_for_exception from maasserver.enum import ( NODE_PERMISSION, NODE_STATUS, NODE_STATUS_CHOICES, NODE_STATUS_CHOICES_DICT, POWER_STATE, ) from maasserver.exceptions import NodeActionError from maasserver.models import ( signals, StaticIPAddress, ) from maasserver.node_action import ( Abort, Acquire, Commission, compile_node_actions, Delete, Deploy, MarkBroken, MarkFixed, NodeAction, PowerOff, PowerOn, Release, RPC_EXCEPTIONS, SetZone, ) from maasserver.node_status import ( MONITORED_STATUSES, NON_MONITORED_STATUSES, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import ( make_osystem_with_releases, make_usable_osystem, ) from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils.orm import ( post_commit, post_commit_hooks, ) from maastesting.matchers import MockCalledOnceWith from metadataserver.enum import RESULT_TYPE from metadataserver.models.noderesult import NodeResult from mock import ANY from provisioningserver.utils.shell import ExternalProcessError from testtools.matchers import Equals ALL_STATUSES = NODE_STATUS_CHOICES_DICT.keys() class FakeNodeAction(NodeAction): name = "fake" display = "Action label" actionable_statuses = ALL_STATUSES permission = NODE_PERMISSION.VIEW installable_only = False # For testing: an inhibition for inhibit() to return. fake_inhibition = None def inhibit(self): return self.fake_inhibition def execute(self): pass class TestNodeAction(MAASServerTestCase): def test_compile_node_actions_returns_available_actions(self): class MyAction(FakeNodeAction): name = factory.make_string() actions = compile_node_actions( factory.make_Node(), factory.make_admin(), classes=[MyAction]) self.assertEqual([MyAction.name], actions.keys()) def test_compile_node_actions_checks_node_status(self): class MyAction(FakeNodeAction): actionable_statuses = (NODE_STATUS.READY, ) node = factory.make_Node(status=NODE_STATUS.NEW) actions = compile_node_actions( node, factory.make_admin(), classes=[MyAction]) self.assertEqual({}, actions) def test_compile_node_actions_checks_permission(self): class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) actions = compile_node_actions( node, factory.make_User(), classes=[MyAction]) self.assertEqual({}, actions) def test_compile_node_actions_includes_inhibited_actions(self): class MyAction(FakeNodeAction): fake_inhibition = factory.make_string() actions = compile_node_actions( factory.make_Node(), factory.make_admin(), classes=[MyAction]) self.assertEqual([MyAction.name], actions.keys()) def test_compile_node_actions_maps_names(self): class Action1(FakeNodeAction): name = factory.make_string() class Action2(FakeNodeAction): name = factory.make_string() actions = compile_node_actions( factory.make_Node(), factory.make_admin(), classes=[Action1, Action2]) for name, action in actions.items(): self.assertEqual(name, action.name) def test_compile_node_actions_maintains_order(self): names = [factory.make_string() for counter in range(4)] classes = [ type(b"Action%d" % counter, (FakeNodeAction,), {'name': name}) for counter, name in enumerate(names)] actions = 
compile_node_actions( factory.make_Node(), factory.make_admin(), classes=classes) self.assertSequenceEqual(names, actions.keys()) self.assertSequenceEqual( names, [action.name for action in actions.values()]) def test_is_permitted_allows_if_user_has_permission(self): class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT node = factory.make_Node( status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) self.assertTrue(MyAction(node, node.owner).is_permitted()) def test_is_permitted_disallows_if_user_lacks_permission(self): class MyAction(FakeNodeAction): permission = NODE_PERMISSION.EDIT node = factory.make_Node( status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) self.assertFalse(MyAction(node, factory.make_User()).is_permitted()) def test_is_permitted_uses_installable_permission(self): class MyAction(FakeNodeAction): permission = NODE_PERMISSION.VIEW installable_permission = NODE_PERMISSION.EDIT node = factory.make_Node( status=NODE_STATUS.ALLOCATED, owner=factory.make_User()) self.assertFalse(MyAction(node, factory.make_User()).is_permitted()) def test_is_permitted_doesnt_use_installable_permission_if_device(self): class MyAction(FakeNodeAction): permission = NODE_PERMISSION.VIEW installable_permission = NODE_PERMISSION.EDIT node = factory.make_Node( status=NODE_STATUS.ALLOCATED, owner=factory.make_User(), installable=False) self.assertTrue(MyAction(node, factory.make_User()).is_permitted()) def test_inhibition_wraps_inhibit(self): inhibition = factory.make_string() action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = inhibition self.assertEqual(inhibition, action.inhibition) def test_inhibition_caches_inhibition(self): # The inhibition property will call inhibit() only once. We can # prove this by changing the string inhibit() returns; it won't # affect the value of the property. inhibition = factory.make_string() action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = inhibition self.assertEqual(inhibition, action.inhibition) action.fake_inhibition = factory.make_string() self.assertEqual(inhibition, action.inhibition) def test_inhibition_caches_None(self): # An inhibition of None is also faithfully cached. In other # words, it doesn't get mistaken for an uninitialized cache or # anything.
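# (Editor's aside) Caching a result that may legitimately be None calls
# for a sentinel, not a None check; a sketch of the assumed shape (the
# real property lives in maasserver.node_action and may differ):
#
#     _UNSET = object()
#     _cached_inhibition = _UNSET
#
#     @property
#     def inhibition(self):
#         if self._cached_inhibition is self._UNSET:
#             self._cached_inhibition = self.inhibit()
#         return self._cached_inhibition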
action = FakeNodeAction(factory.make_Node(), factory.make_User()) action.fake_inhibition = None self.assertIsNone(action.inhibition) action.fake_inhibition = factory.make_string() self.assertIsNone(action.inhibition) def test_installable_only_is_not_actionable_if_node_isnt_installable(self): status = NODE_STATUS.NEW owner = factory.make_User() node = factory.make_Node( owner=owner, status=status, installable=False) action = FakeNodeAction(node, owner) action.installable_only = True self.assertFalse(action.is_actionable()) def test_installable_only_is_actionable_if_node_is_installable(self): status = NODE_STATUS.NEW owner = factory.make_User() node = factory.make_Node( owner=owner, status=status, installable=True) action = FakeNodeAction(node, owner) action.installable_only = True self.assertTrue(action.is_actionable()) def test_is_actionable_checks_node_status_in_actionable_status(self): class MyAction(FakeNodeAction): actionable_statuses = [NODE_STATUS.ALLOCATED] node = factory.make_Node(status=NODE_STATUS.BROKEN) self.assertFalse(MyAction(node, factory.make_User()).is_actionable()) class TestDeleteAction(MAASServerTestCase): def test__deletes_node(self): node = factory.make_Node() action = Delete(node, factory.make_admin()) action.execute() self.assertIsNone(reload_object(node)) class TestCommissionAction(MAASServerTestCase): scenarios = ( ("NEW", {"status": NODE_STATUS.NEW}), ("FAILED_COMMISSIONING", { "status": NODE_STATUS.FAILED_COMMISSIONING}), ("READY", {"status": NODE_STATUS.READY}), ) def test_raise_NodeActionError_if_on(self): node = factory.make_Node( status=self.status, power_state=POWER_STATE.ON) user = factory.make_admin() action = Commission(node, user) self.assertTrue(action.is_permitted()) self.assertRaises(NodeActionError, action.execute) def test_Commission_starts_commissioning(self): node = factory.make_Node( interface=True, status=self.status, power_type='ether_wake', power_state=POWER_STATE.OFF) self.patch_autospec(node, 'start_transition_monitor') node_start = self.patch(node, '_start') node_start.side_effect = lambda user, user_data: post_commit() admin = factory.make_admin() action = Commission(node, admin) with post_commit_hooks: action.execute() self.assertEqual(NODE_STATUS.COMMISSIONING, node.status) self.assertThat( node_start, MockCalledOnceWith(admin, ANY)) class TestAbortAction(MAASTransactionServerTestCase): def test_Abort_aborts_disk_erasing(self): with transaction.atomic(): owner = factory.make_User() node = factory.make_Node( status=NODE_STATUS.DISK_ERASING, owner=owner) node_stop = self.patch_autospec(node, '_stop') # Return a post-commit hook from Node.stop(). node_stop.side_effect = lambda user: post_commit() with post_commit_hooks: with transaction.atomic(): Abort(node, owner).execute() with transaction.atomic(): node = reload_object(node) self.assertEqual(NODE_STATUS.FAILED_DISK_ERASING, node.status) self.assertThat(node_stop, MockCalledOnceWith(owner)) def test_Abort_aborts_commissioning(self): """Makes sure a COMMISSIONING node is returned to NEW status after an abort. """ with transaction.atomic(): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING, power_type='virsh') admin = factory.make_admin() self.patch_autospec(node, 'stop_transition_monitor') node_stop = self.patch_autospec(node, '_stop') # Return a post-commit hook from Node.stop(). 
node_stop.side_effect = lambda user: post_commit() with post_commit_hooks: with transaction.atomic(): Abort(node, admin).execute() with transaction.atomic(): node = reload_object(node) self.assertEqual(NODE_STATUS.NEW, node.status) self.assertThat(node_stop, MockCalledOnceWith(admin)) def test_Abort_aborts_deployment(self): """Makes sure a DEPLOYING node is returned to ALLOCATED status after an abort. """ with transaction.atomic(): node = factory.make_Node( interface=True, status=NODE_STATUS.DEPLOYING, power_type='virsh') admin = factory.make_admin() self.patch_autospec(node, 'stop_transition_monitor') node_stop = self.patch_autospec(node, '_stop') # Return a post-commit hook from Node.stop(). node_stop.side_effect = lambda user: post_commit() with post_commit_hooks: with transaction.atomic(): Abort(node, admin).execute() with transaction.atomic(): node = reload_object(node) self.assertEqual(NODE_STATUS.ALLOCATED, node.status) self.assertThat(node_stop, MockCalledOnceWith(admin)) class TestAcquireNodeAction(MAASServerTestCase): def test_Acquire_acquires_node(self): node = factory.make_Node( interface=True, status=NODE_STATUS.READY, power_type='ether_wake', with_boot_disk=True) user = factory.make_User() Acquire(node, user).execute() self.assertEqual(NODE_STATUS.ALLOCATED, node.status) self.assertEqual(user, node.owner) def test_Acquire_uses_node_acquire_lock(self): node = factory.make_Node( interface=True, status=NODE_STATUS.READY, power_type='ether_wake', with_boot_disk=True) user = factory.make_User() node_acquire = self.patch(locks, 'node_acquire') Acquire(node, user).execute() self.assertThat(node_acquire.__enter__, MockCalledOnceWith()) self.assertThat( node_acquire.__exit__, MockCalledOnceWith(None, None, None)) class TestDeployAction(MAASServerTestCase): def test_Deploy_inhibit_allows_user_with_SSH_key(self): user_with_key = factory.make_User() factory.make_SSHKey(user_with_key) self.assertIsNone( Deploy(factory.make_Node(), user_with_key).inhibit()) def test_Deploy_inhibit_allows_user_without_SSH_key(self): user_without_key = factory.make_User() action = Deploy(factory.make_Node(), user_without_key) inhibition = action.inhibit() self.assertIsNone(inhibition) def test_Deploy_is_actionable_if_user_doesnt_have_ssh_keys(self): owner = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=owner) self.assertTrue(Deploy(node, owner).is_actionable()) def test_Deploy_is_actionable_if_user_has_ssh_keys(self): owner = factory.make_User() factory.make_SSHKey(owner) node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=owner) self.assertTrue(Deploy(node, owner).is_actionable()) def test_Deploy_starts_node(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) node_start = self.patch(node, 'start') Deploy(node, user).execute() self.assertThat( node_start, MockCalledOnceWith(user)) def test_Deploy_raises_NodeActionError_for_invalid_os(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) self.patch(node, 'start') os_name = factory.make_name("os") release_name = factory.make_name("release") extra = { "osystem": os_name, "distro_series": release_name, } error = self.assertRaises( NodeActionError, Deploy(node, user).execute, **extra) self.assertEquals( "%s is not a support operating system." 
% os_name, error.message) def test_Deploy_sets_osystem_and_series(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) self.patch(node, 'start') osystem = make_usable_osystem(self) os_name = osystem["name"] release_name = osystem["releases"][0]["name"] extra = { "osystem": os_name, "distro_series": release_name } Deploy(node, user).execute(**extra) self.expectThat(node.osystem, Equals(os_name)) self.expectThat( node.distro_series, Equals(release_name)) def test_Deploy_sets_osystem_and_series_strips_license_key_token(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) self.patch(node, 'start') osystem = make_usable_osystem(self) os_name = osystem["name"] release_name = osystem["releases"][0]["name"] extra = { "osystem": os_name, "distro_series": release_name + '*' } Deploy(node, user).execute(**extra) self.expectThat(node.osystem, Equals(os_name)) self.expectThat( node.distro_series, Equals(release_name)) def test_Deploy_doesnt_set_osystem_and_series_if_os_missing(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) self.patch(node, 'start') osystem = make_osystem_with_releases(self) extra = { "distro_series": osystem["releases"][0]["name"], } Deploy(node, user).execute(**extra) self.expectThat(node.osystem, Equals("")) self.expectThat(node.distro_series, Equals("")) def test_Deploy_doesnt_set_osystem_and_series_if_series_missing(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) self.patch(node, 'start') osystem = make_osystem_with_releases(self) extra = { "osystem": osystem["name"], } Deploy(node, user).execute(**extra) self.expectThat(node.osystem, Equals("")) self.expectThat(node.distro_series, Equals("")) def test_Deploy_allocates_node_if_node_not_already_allocated(self): user = factory.make_User() node = factory.make_Node(status=NODE_STATUS.READY, with_boot_disk=True) self.patch(node, 'start') action = Deploy(node, user) action.execute() self.assertEqual(user, node.owner) self.assertEqual(NODE_STATUS.ALLOCATED, node.status) class TestDeployActionTransactional(MAASTransactionServerTestCase): '''The following TestDeployAction tests require MAASTransactionServerTestCase, and thus, have been separated from the TestDeployAction above. ''' def test_Deploy_returns_error_when_no_more_static_IPs(self): user = factory.make_User() node = factory.make_Node_with_Interface_on_Subnet( status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user, power_state=POWER_STATE.OFF) boot_interface = node.get_boot_interface() ip_address = boot_interface.ip_addresses.first() subnet = ip_address.subnet ngi = subnet.nodegroupinterface_set.first() # Narrow the available IP range and pre-claim the only address. ngi.static_ip_range_high = ngi.static_ip_range_low ngi.save() with transaction.atomic(): StaticIPAddress.objects.allocate_new( ngi.network, ngi.static_ip_range_low, ngi.static_ip_range_high, ngi.ip_range_low, ngi.ip_range_high) e = self.assertRaises(NodeActionError, Deploy(node, user).execute) self.expectThat( e.message, Equals( "%s: Failed to start, static IP addresses are exhausted." 
% node.hostname)) self.assertEqual(NODE_STATUS.ALLOCATED, node.status) class TestSetZoneAction(MAASServerTestCase): def test_SetZone_sets_zone(self): user = factory.make_User() zone1 = factory.make_Zone() zone2 = factory.make_Zone() node = factory.make_Node(status=NODE_STATUS.NEW, zone=zone1) action = SetZone(node, user) action.execute(zone_id=zone2.id) self.assertEqual(node.zone.id, zone2.id) class TestPowerOnAction(MAASServerTestCase): def test_PowerOn_starts_node(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.ALLOCATED, power_type='ether_wake', owner=user) node_start = self.patch(node, 'start') PowerOn(node, user).execute() self.assertThat( node_start, MockCalledOnceWith(user)) def test_PowerOn_requires_edit_permission(self): user = factory.make_User() node = factory.make_Node() self.assertFalse( user.has_perm(NODE_PERMISSION.EDIT, node)) self.assertFalse(PowerOn(node, user).is_permitted()) def test_PowerOn_is_actionable_if_node_doesnt_have_an_owner(self): owner = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.DEPLOYED, power_type='ether_wake') self.assertTrue(PowerOn(node, owner).is_actionable()) def test_PowerOn_is_actionable_if_node_does_have_an_owner(self): owner = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.DEPLOYED, power_type='ether_wake', owner=owner) self.assertTrue(PowerOn(node, owner).is_actionable()) class TestPowerOffAction(MAASServerTestCase): def test__stops_deployed_node(self): user = factory.make_User() params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) node = factory.make_Node( interface=True, status=NODE_STATUS.DEPLOYED, power_type='ipmi', owner=user, power_parameters=params) self.patch(node, 'start_transition_monitor') node_stop = self.patch_autospec(node, 'stop') PowerOff(node, user).execute() self.assertThat(node_stop, MockCalledOnceWith(user)) def test__stops_Ready_node(self): admin = factory.make_admin() params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) node = factory.make_Node( interface=True, status=NODE_STATUS.READY, power_type='ipmi', power_parameters=params) node_stop = self.patch_autospec(node, 'stop') PowerOff(node, admin).execute() self.assertThat(node_stop, MockCalledOnceWith(admin)) def test__actionable_for_non_monitored_states(self): all_statuses = NON_MONITORED_STATUSES results = {} for status in all_statuses: node = factory.make_Node( status=status, power_type='ipmi', power_state=POWER_STATE.ON) actions = compile_node_actions( node, factory.make_admin(), classes=[PowerOff]) results[status] = actions.keys() expected_results = {status: [PowerOff.name] for status in all_statuses} self.assertEqual( expected_results, results, "Nodes with certain statuses could not be powered off.") def test__non_actionable_for_monitored_states(self): all_statuses = MONITORED_STATUSES results = {} for status in all_statuses: node = factory.make_Node( status=status, power_type='ipmi', power_state=POWER_STATE.ON) actions = compile_node_actions( node, factory.make_admin(), classes=[PowerOff]) results[status] = actions.keys() expected_results = {status: [] for status in all_statuses} self.assertEqual( expected_results, results, "Nodes with certain statuses could be powered off.") def test__non_actionable_if_node_already_off(self): all_statuses = NON_MONITORED_STATUSES results = {} for status in all_statuses: node = 
factory.make_Node( status=status, power_type='ipmi', power_state=POWER_STATE.OFF) actions = compile_node_actions( node, factory.make_admin(), classes=[PowerOff]) results[status] = actions.keys() expected_results = {status: [] for status in all_statuses} self.assertEqual( expected_results, results, "Nodes already powered off can be powered off.") ACTIONABLE_STATUSES = [ NODE_STATUS.DEPLOYING, NODE_STATUS.FAILED_DEPLOYMENT, NODE_STATUS.FAILED_DISK_ERASING, ] class TestReleaseAction(MAASServerTestCase): scenarios = [ (NODE_STATUS_CHOICES_DICT[status], dict(actionable_status=status)) for status in ACTIONABLE_STATUSES ] def test_Release_stops_and_releases_node(self): user = factory.make_User() params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) node = factory.make_Node( interface=True, status=self.actionable_status, power_type='ipmi', power_state=POWER_STATE.ON, owner=user, power_parameters=params) self.patch(node, 'start_transition_monitor') node_stop = self.patch_autospec(node, '_stop') Release(node, user).execute() self.expectThat(node.status, Equals(NODE_STATUS.RELEASING)) self.assertThat( node_stop, MockCalledOnceWith(user)) class TestMarkBrokenAction(MAASServerTestCase): def test_changes_status(self): user = factory.make_User() node = factory.make_Node(owner=user, status=NODE_STATUS.COMMISSIONING) action = MarkBroken(node, user) self.assertTrue(action.is_permitted()) action.execute() self.assertEqual(NODE_STATUS.BROKEN, reload_object(node).status) def test_updates_error_description(self): user = factory.make_User() node = factory.make_Node(owner=user, status=NODE_STATUS.COMMISSIONING) action = MarkBroken(node, user) self.assertTrue(action.is_permitted()) action.execute() self.assertEqual( "via web interface", reload_object(node).error_description ) def test_requires_edit_permission(self): user = factory.make_User() node = factory.make_Node() self.assertFalse(MarkBroken(node, user).is_permitted()) class TestMarkFixedAction(MAASServerTestCase): def make_commissioning_data(self, node, result=0, count=3): return [ NodeResult.objects.create( node=node, name=factory.make_name(), script_result=result, result_type=RESULT_TYPE.COMMISSIONING) for _ in range(count) ] def test_changes_status(self): node = factory.make_Node( status=NODE_STATUS.BROKEN, power_state=POWER_STATE.OFF) self.make_commissioning_data(node) user = factory.make_admin() action = MarkFixed(node, user) self.assertTrue(action.is_permitted()) action.execute() self.assertEqual(NODE_STATUS.READY, reload_object(node).status) def test_raise_NodeActionError_if_on(self): node = factory.make_Node( status=NODE_STATUS.BROKEN, power_state=POWER_STATE.ON) user = factory.make_admin() action = MarkFixed(node, user) self.assertTrue(action.is_permitted()) self.assertRaises(NodeActionError, action.execute) def test_raise_NodeActionError_if_no_commissioning_results(self): node = factory.make_Node( status=NODE_STATUS.BROKEN, power_state=POWER_STATE.OFF) user = factory.make_admin() action = MarkFixed(node, user) self.assertTrue(action.is_permitted()) self.assertRaises(NodeActionError, action.execute) def test_raise_NodeActionError_if_one_commissioning_result_fails(self): node = factory.make_Node( status=NODE_STATUS.BROKEN, power_state=POWER_STATE.OFF) self.make_commissioning_data(node) self.make_commissioning_data(node, result=1, count=1) user = factory.make_admin() action = MarkFixed(node, user) self.assertTrue(action.is_permitted()) self.assertRaises(NodeActionError, action.execute) 
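# (Editor's aside) Taken together, the MarkFixed failure tests above and
# below pin down two preconditions: the node must be powered off, and it
# must have commissioning results that all passed. A sketch of the
# assumed guard (illustrative; the real checks live in
# maasserver.node_action.MarkFixed):
#
#     if node.power_state == POWER_STATE.ON:
#         raise NodeActionError(...)      # still powered on
#     results = NodeResult.objects.filter(
#         node=node, result_type=RESULT_TYPE.COMMISSIONING)
#     if not results.exists():
#         raise NodeActionError(...)      # never commissioned
#     if results.exclude(script_result=0).exists():
#         raise NodeActionError(...)      # a commissioning script failed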
def test_raise_NodeActionError_if_multi_commissioning_result_fails(self): node = factory.make_Node( status=NODE_STATUS.BROKEN, power_state=POWER_STATE.OFF) self.make_commissioning_data(node) self.make_commissioning_data(node, result=1) user = factory.make_admin() action = MarkFixed(node, user) self.assertTrue(action.is_permitted()) self.assertRaises(NodeActionError, action.execute) def test_requires_admin_permission(self): user = factory.make_User() node = factory.make_Node() self.assertFalse(MarkFixed(node, user).is_permitted()) def test_not_enabled_if_not_broken(self): status = factory.pick_choice( NODE_STATUS_CHOICES, but_not=[NODE_STATUS.BROKEN]) node = factory.make_Node(status=status) actions = compile_node_actions( node, factory.make_admin(), classes=[MarkFixed]) self.assertItemsEqual([], actions) class TestActionsErrorHandling(MAASServerTestCase): """Tests for error handling in actions. This covers RPC exceptions and `ExternalProcessError`s. """ exceptions = RPC_EXCEPTIONS + (ExternalProcessError,) scenarios = [ (exception_class.__name__, {"exception_class": exception_class}) for exception_class in exceptions ] def make_exception(self): if self.exception_class is ExternalProcessError: exception = self.exception_class( 1, ["cmd"], factory.make_name("exception")) else: exception = self.exception_class(factory.make_name("exception")) return exception def patch_rpc_methods(self, node): exception = self.make_exception() self.patch(node, '_start').side_effect = exception self.patch(node, '_stop').side_effect = exception self.patch_autospec(node, 'start_transition_monitor') self.patch_autospec(node, 'stop_transition_monitor') def make_action(self, action_class, node_status, power_state=None): node = factory.make_Node( interface=True, status=node_status, power_type='ether_wake', power_state=power_state) admin = factory.make_admin() return action_class(node, admin) def test_Commission_handles_rpc_errors(self): self.addCleanup(signals.power.enable) signals.power.disable() action = self.make_action( Commission, NODE_STATUS.READY, POWER_STATE.OFF) self.patch_rpc_methods(action.node) exception = self.assertRaises(NodeActionError, action.execute) self.assertEqual( get_error_message_for_exception(action.node._start.side_effect), unicode(exception)) def test_Abort_handles_rpc_errors(self): action = self.make_action( Abort, NODE_STATUS.DISK_ERASING) self.patch_rpc_methods(action.node) exception = self.assertRaises(NodeActionError, action.execute) self.assertEqual( get_error_message_for_exception(action.node._stop.side_effect), unicode(exception)) def test_PowerOn_handles_rpc_errors(self): action = self.make_action(PowerOn, NODE_STATUS.READY) self.patch_rpc_methods(action.node) exception = self.assertRaises(NodeActionError, action.execute) self.assertEqual( get_error_message_for_exception(action.node._start.side_effect), unicode(exception)) def test_PowerOff_handles_rpc_errors(self): action = self.make_action(PowerOff, NODE_STATUS.DEPLOYED) self.patch_rpc_methods(action.node) exception = self.assertRaises(NodeActionError, action.execute) self.assertEqual( get_error_message_for_exception(action.node._stop.side_effect), unicode(exception)) def test_Release_handles_rpc_errors(self): action = self.make_action( Release, NODE_STATUS.ALLOCATED, power_state=POWER_STATE.ON) self.patch_rpc_methods(action.node) exception = self.assertRaises(NodeActionError, action.execute) self.assertEqual( get_error_message_for_exception(action.node._stop.side_effect), unicode(exception)) 
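# (Editor's aside) The TestActionsErrorHandling scenarios above pin down
# a single pattern: transport-level failures (RPC_EXCEPTIONS plus
# ExternalProcessError) surface as NodeActionError carrying the message
# from get_error_message_for_exception. A sketch of that wrapping, using
# the names imported at the top of this file; the decorator itself is
# illustrative, not MAAS's actual helper:
def _convert_rpc_errors(method):
    """Re-raise RPC and process failures as NodeActionError."""
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except RPC_EXCEPTIONS + (ExternalProcessError,) as exc:
            raise NodeActionError(get_error_message_for_exception(exc))
    return wrapper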
maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_node_constraint_filter_forms.py0000644000000000000000000014204313056115004026335 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test node constraint forms.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from unittest import skip from django import forms from django.core.exceptions import ValidationError from maasserver.enum import ( INTERFACE_TYPE, IPADDRESS_TYPE, ) from maasserver.fields import MAC from maasserver.models import Node from maasserver.node_constraint_filter_forms import ( AcquireNodeForm, detect_nonexistent_zone_names, generate_architecture_wildcards, get_architecture_wildcards, get_storage_constraints_from_string, JUJU_ACQUIRE_FORM_FIELDS_MAPPING, nodes_by_storage, parse_legacy_tags, RenamableFieldsForm, ) from maasserver.testing.architecture import patch_usable_architectures from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import ignore_unused from testtools.matchers import ( Contains, ContainsAll, ) class TestUtils(MAASServerTestCase): def test_generate_architecture_wildcards(self): # Create a test architecture choice list of one architecture that only # has one available subarch (single_subarch) and two architectures that # have a matching primary architecture (double_subarch_{1,2}) single_subarch = factory.make_name('arch'), factory.make_name('arch') double_subarch_1 = factory.make_name('arch'), factory.make_name('arch') double_subarch_2 = double_subarch_1[0], factory.make_name('arch') arches = [ '/'.join(single_subarch), '/'.join(double_subarch_1), '/'.join(double_subarch_2), ] # single_subarch should end up in the dict essentially unchanged, and # the double_subarchs should have been flattened into a single dict # element with a list of them. 
self.assertEquals( { single_subarch[0]: frozenset([arches[0]]), double_subarch_1[0]: frozenset([arches[1], arches[2]]), }, generate_architecture_wildcards(arches) ) def test_get_architecture_wildcards_aliases_armhf_as_arm(self): subarch = factory.make_name('sub') arches = ['armhf/%s' % subarch] self.assertEqual( { 'arm': frozenset(arches), 'armhf': frozenset(arches), }, get_architecture_wildcards(arches)) def test_get_architecture_wildcards_does_not_overwrite_existing_arm(self): arm = 'arm/%s' % factory.make_name('armsub') armhf = 'armhf/%s' % factory.make_name('armhfsub') self.assertEqual( { 'arm': frozenset([arm]), 'armhf': frozenset([armhf]), }, get_architecture_wildcards([arm, armhf])) def test_parse_legacy_tags(self): self.assertEquals([], parse_legacy_tags([])) self.assertEquals(['a', 'b'], parse_legacy_tags(['a', 'b'])) self.assertEquals(['a', 'b'], parse_legacy_tags(['a b'])) self.assertEquals(['a', 'b'], parse_legacy_tags(['a, b'])) self.assertEquals(['a', 'b', 'c'], parse_legacy_tags(['a, b c'])) self.assertEquals(['a', 'b'], parse_legacy_tags(['a,b'])) self.assertEquals( ['a', 'b', 'c', 'd'], parse_legacy_tags(['a,b', 'c d'])) def test_JUJU_ACQUIRE_FORM_FIELDS_MAPPING_fields(self): self.assertThat( list(AcquireNodeForm().fields), ContainsAll(JUJU_ACQUIRE_FORM_FIELDS_MAPPING)) def test_detect_nonexistent_zone_names_returns_empty_if_no_names(self): self.assertEqual([], detect_nonexistent_zone_names([])) def test_detect_nonexistent_zone_names_returns_empty_if_all_OK(self): zones = [factory.make_Zone() for _ in range(3)] self.assertEqual( [], detect_nonexistent_zone_names([zone.name for zone in zones])) def test_detect_nonexistent_zone_names_reports_unknown_zone_names(self): non_zone = factory.make_name('nonzone') self.assertEqual([non_zone], detect_nonexistent_zone_names([non_zone])) def test_detect_nonexistent_zone_names_is_consistent(self): names = [factory.make_name('nonzone') for _ in range(3)] self.assertEqual( detect_nonexistent_zone_names(names), detect_nonexistent_zone_names(names)) def test_detect_nonexistent_zone_names_combines_good_and_bad_names(self): zone = factory.make_Zone().name non_zone = factory.make_name('nonzone') self.assertEqual( [non_zone], detect_nonexistent_zone_names([zone, non_zone])) def test_detect_nonexistent_zone_names_asserts_parameter_type(self): self.assertRaises( AssertionError, detect_nonexistent_zone_names, "text") def test_get_storage_constraints_from_string_returns_None_for_empty(self): self.assertEquals(None, get_storage_constraints_from_string("")) def test_get_storage_constraints_from_string_None_for_empty_tags(self): self.assertEquals( [None, None, None], [tags for _, _, tags in get_storage_constraints_from_string("0,0,0")]) def test_get_storage_constraints_from_string_returns_size_in_bytes(self): self.assertEquals( [int(1.5 * (1000 ** 3)), 3 * (1000 ** 3), int(6.75 * (1000 ** 3))], [ size for _, size, _ in get_storage_constraints_from_string( "1.5,3,6.75") ]) def test_get_storage_constraints_from_string_sorts_more_tags_first(self): """Ensure first tag set remains first, all others are sorted""" self.assertEquals( [[u'ssd'], [u'ssd', u'sata', u'removable'], [u'ssd', u'sata'], [u'ssd']], [ tags for _, _, tags in get_storage_constraints_from_string( "0(ssd),0(ssd,sata),0(ssd),0(ssd,sata,removable)") ]) def test_nodes_by_storage_returns_None_when_storage_string_is_empty(self): self.assertEquals(None, nodes_by_storage("")) class TestRenamableForm(RenamableFieldsForm): field1 = forms.CharField(label="A field which is forced to contain 'foo'.") 
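# (Editor's aside) A worked example of the storage-constraint grammar
# exercised by the tests above: sizes are decimal gigabytes (multiplied
# by 1000 ** 3) and parenthesised tags attach to the preceding size, so
#
#     get_storage_constraints_from_string("1.5,3(ssd,sata)")
#
# describes a 1500000000-byte constraint with no tags followed by a
# 3000000000-byte constraint tagged ['ssd', 'sata'].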
field2 = forms.CharField(label="Field 2", required=False) def clean_field1(self): name = self.get_field_name('field1') value = self.cleaned_data[name] if value != "foo": raise ValidationError("The value should be 'foo'") return value class TestRenamableFieldsForm(MAASServerTestCase): def test_rename_field_renames_field(self): form = TestRenamableForm() form.rename_field('field1', 'new_field') self.assertItemsEqual(form.fields, ['new_field', 'field2']) def test_rename_field_updates_mapping(self): form = TestRenamableForm() form.rename_field('field1', 'new_field') self.assertEquals('new_field', form.get_field_name('field1')) def test_rename_field_renames_validation_method(self): form = TestRenamableForm(data={'new_field': 'not foo', 'field2': 'a'}) form.rename_field('field1', 'new_field') self.assertEquals( (False, {'new_field': ["The value should be 'foo'"]}), (form.is_valid(), form.errors)) class TestAcquireNodeForm(MAASServerTestCase): def set_usable_arch(self): """Produce an arbitrary, valid, architecture name.""" arch = '%s/%s' % (factory.make_name('arch'), factory.make_name('sub')) patch_usable_architectures(self, [arch]) return arch def create_node_on_subnets(self, subnets): node = factory.make_Node() for subnet in subnets: nic = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=nic, subnet=subnet) return node def test_strict_form_checks_unknown_constraints(self): data = {'unknown_constraint': 'boo'} form = AcquireNodeForm.Strict(data=data) self.assertEquals( (False, {'unknown_constraint': ["No such constraint."]}), (form.is_valid(), form.errors)) def test_not_strict_does_not_check_unknown_constraints(self): data = {'unknown_constraint': 'boo'} form = AcquireNodeForm(data=data) self.assertTrue(form.is_valid()) def assertConstrainedNodes(self, nodes, data): form = AcquireNodeForm(data=data) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.objects.all()) self.assertItemsEqual(nodes, filtered_nodes) def test_no_constraints(self): nodes = [factory.make_Node() for _ in range(3)] form = AcquireNodeForm(data={}) self.assertTrue(form.is_valid()) self.assertItemsEqual(nodes, Node.objects.all()) def test_hostname(self): nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes([nodes[0]], {'name': nodes[0].hostname}) self.assertConstrainedNodes([], {'name': 'unknown-name'}) def test_hostname_with_domain_part(self): nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes( [nodes[0]], {'name': '%s.%s' % (nodes[0].hostname, nodes[0].nodegroup.name)}) self.assertConstrainedNodes( [], {'name': '%s.%s' % (nodes[0].hostname, 'unknown-domain')}) self.assertConstrainedNodes( [], {'name': '%s.%s' % (nodes[0].hostname, nodes[1].nodegroup.name)}) node = factory.make_Node(hostname="host21.mydomain") self.assertConstrainedNodes( [node], {'name': 'host21.mydomain'}) self.assertConstrainedNodes( [node], {'name': 'host21.%s' % node.nodegroup.name}) def test_cpu_count(self): node1 = factory.make_Node(cpu_count=1) node2 = factory.make_Node(cpu_count=2) nodes = [node1, node2] self.assertConstrainedNodes(nodes, {'cpu_count': '0'}) self.assertConstrainedNodes(nodes, {'cpu_count': '1.0'}) self.assertConstrainedNodes([node2], {'cpu_count': '2'}) self.assertConstrainedNodes([], {'cpu_count': '4'}) def test_invalid_cpu_count(self): form = AcquireNodeForm(data={'cpu_count': 'invalid'}) self.assertEquals( (False, {'cpu_count': ["Invalid CPU count: 
number required."]}), (form.is_valid(), form.errors)) def test_memory(self): node1 = factory.make_Node(memory=1024) node2 = factory.make_Node(memory=4096) self.assertConstrainedNodes([node1, node2], {'mem': '512'}) self.assertConstrainedNodes([node1, node2], {'mem': '1024'}) self.assertConstrainedNodes([node2], {'mem': '2048'}) self.assertConstrainedNodes([node2], {'mem': '4096'}) self.assertConstrainedNodes([], {'mem': '8192'}) self.assertConstrainedNodes([node2], {'mem': '4096.0'}) def test_invalid_memory(self): form = AcquireNodeForm(data={'mem': 'invalid'}) self.assertEquals( (False, {'mem': ["Invalid memory: number of MiB required."]}), (form.is_valid(), form.errors)) def test_legacy_networks_field_falls_back_to_subnets_query(self): subnets = [ factory.make_Subnet() for _ in range(3) ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] # Filter for this subnet. Take one in the middle to avoid # coincidental success based on ordering. pick = 1 self.assertConstrainedNodes( {nodes[pick]}, {'networks': [subnets[pick].name]}) def test_networks_filters_by_name(self): subnets = [ factory.make_Subnet() for _ in range(3) ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] # Filter for this subnet. Take one in the middle to avoid # coincidental success based on ordering. pick = 1 self.assertConstrainedNodes( {nodes[pick]}, {'networks': [subnets[pick].name]}) def test_networks_filters_by_space(self): subnets = [ factory.make_Subnet() for _ in range(3) ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] # Filter for this subnet. Take one in the middle to avoid # coincidental success based on ordering. pick = 1 self.assertConstrainedNodes( {nodes[pick]}, {'networks': ["space:%s" % subnets[pick].space.name]}) def test_networks_filters_by_ip(self): subnets = [ factory.make_Subnet() for _ in range(3) ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] # Filter for this subnet. Take one in the middle to avoid # coincidental success based on ordering. pick = 1 self.assertConstrainedNodes( {nodes[pick]}, {'networks': [ 'ip:%s' % factory.pick_ip_in_network( subnets[pick].get_ipnetwork())]}) def test_networks_filters_by_vlan_tag(self): vlan_tags = list(range(1, 6)) subnets = [ factory.make_Subnet(vlan=factory.make_VLAN(vid=tag)) for tag in vlan_tags ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] # Filter for this network. Take one in the middle to avoid # coincidental success based on ordering. 
pick = 1 self.assertConstrainedNodes( {nodes[pick]}, {'networks': ['vlan:%d' % vlan_tags[pick]]}) def test_networks_filter_ignores_macs_on_other_subnets(self): subnet = factory.make_Subnet() node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet) factory.make_Node_with_Interface_on_Subnet() self.assertConstrainedNodes({node}, {'networks': [subnet.name]}) def test_networks_filter_ignores_other_subnets_on_mac(self): subnets = [ factory.make_Subnet() for _ in range(3) ] node = factory.make_Node() for subnet in subnets: nic = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=nic, subnet=subnet) self.assertConstrainedNodes( {node}, {'networks': [subnets[1].name]}) def test_invalid_subnets(self): form = AcquireNodeForm(data={'networks': 'ip:10.0.0.0'}) self.assertEquals( ( False, { 'networks': [ "Invalid parameter: " "list of subnet specifiers required.", ], }, ), (form.is_valid(), form.errors)) # The validator is unit-tested separately. This just verifies that it # is being consulted. form = AcquireNodeForm(data={'networks': ['vlan:-1']}) self.assertEquals( (False, {'networks': [ "VLAN tag (VID) out of range (0-4094; 0 for untagged.)"]}), (form.is_valid(), form.errors)) def test_networks_combines_filters(self): subnets = [ factory.make_Subnet() for _ in range(3) ] [ subnet_by_name, subnet_by_ip, subnet_by_vlan, ] = subnets self.create_node_on_subnets([subnet_by_name, subnet_by_ip]) self.create_node_on_subnets([subnet_by_name, subnet_by_vlan]) node = self.create_node_on_subnets( [subnet_by_name, subnet_by_ip, subnet_by_vlan]) self.create_node_on_subnets([subnet_by_ip, subnet_by_vlan]) self.create_node_on_subnets([]) self.assertConstrainedNodes( {node}, { 'networks': [ subnet_by_name.name, 'ip:%s' % factory.pick_ip_in_network( subnet_by_ip.get_ipnetwork()), 'vlan:%d' % subnet_by_vlan.vlan.vid, ], }) def test_networks_ignores_other_subnets(self): [this_subnet, other_subnet] = [ factory.make_Subnet() for _ in range(2) ] node = self.create_node_on_subnets([this_subnet, other_subnet]) self.assertConstrainedNodes( [node], {'networks': [this_subnet.name]}) def test_legacy_not_networks_falls_back_to_not_networks_query(self): [not_subnet, subnet] = [ factory.make_Subnet() for _ in range(2) ] factory.make_Node_with_Interface_on_Subnet(subnet=not_subnet) node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet) self.assertConstrainedNodes( {node}, {'not_networks': [not_subnet.name]}) def test_not_networks_filters_by_name(self): [subnet, not_subnet] = [ factory.make_Subnet() for _ in range(2) ] node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet) self.assertConstrainedNodes( {node}, {'not_networks': [not_subnet.name]}) def test_not_networks_filters_by_ip(self): [subnet, not_subnet] = [ factory.make_Subnet() for _ in range(2) ] node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet) self.assertConstrainedNodes( {node}, {'not_networks': ['ip:%s' % factory.pick_ip_in_network( not_subnet.get_ipnetwork())]}) def test_not_networks_filters_by_vlan_tag(self): vlan_tags = list(range(1, 3)) subnets = [ factory.make_Subnet(vlan=factory.make_VLAN(vid=tag)) for tag in vlan_tags ] nodes = [ factory.make_Node_with_Interface_on_Subnet(subnet=subnet) for subnet in subnets ] self.assertConstrainedNodes( {nodes[0]}, {'not_networks': ['vlan:%d' % vlan_tags[1]]}) def test_not_networks_accepts_nodes_without_subnet_connections(self): interfaceless_node = factory.make_Node() unconnected_node = 
factory.make_Node() factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=unconnected_node) self.assertConstrainedNodes( {interfaceless_node, unconnected_node}, {'not_networks': [factory.make_Subnet().name]}) def test_not_networks_exclude_node_with_any_interface(self): subnet = factory.make_Subnet() node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet) other_subnet = factory.make_Subnet() other_nic = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=other_nic, subnet=other_subnet) self.assertConstrainedNodes([], {'not_networks': [subnet.name]}) def test_not_networks_excludes_node_with_interface_on_any_not_subnet(self): factory.make_Subnet() not_subnet = factory.make_Subnet() factory.make_Node_with_Interface_on_Subnet(subnet=not_subnet) self.assertConstrainedNodes([], {'not_networks': [not_subnet.name]}) def test_invalid_not_networks(self): form = AcquireNodeForm(data={'not_networks': 'ip:10.0.0.0'}) self.assertEquals( ( False, { 'not_networks': [ "Invalid parameter: " "list of subnet specifiers required.", ], }, ), (form.is_valid(), form.errors)) # The validator is unit-tested separately. This just verifies that it # is being consulted. form = AcquireNodeForm(data={'not_networks': ['vlan:-1']}) self.assertEquals( (False, {'not_networks': [ "VLAN tag (VID) out of range (0-4094; 0 for untagged.)"]}), (form.is_valid(), form.errors)) def test_not_networks_combines_filters(self): subnets = [ factory.make_Subnet() for _ in range(5) ] [ subnet_by_name, subnet_by_ip, subnet_by_vlan, other_subnet, remaining_subnet, ] = subnets self.create_node_on_subnets([subnet_by_name]) self.create_node_on_subnets([subnet_by_name, subnet_by_ip]) self.create_node_on_subnets([subnet_by_name, subnet_by_vlan]) self.create_node_on_subnets([subnet_by_vlan]) self.create_node_on_subnets([subnet_by_vlan, other_subnet]) node = self.create_node_on_subnets([remaining_subnet]) self.assertConstrainedNodes( {node}, { 'not_networks': [ subnet_by_name.name, 'ip:%s' % factory.pick_ip_in_network( subnet_by_ip.get_ipnetwork()), 'vlan:%d' % subnet_by_vlan.vlan.vid, ], }) def test_connected_to(self): mac1 = MAC('aa:bb:cc:dd:ee:ff') mac2 = MAC('00:11:22:33:44:55') node1 = factory.make_Node(routers=[mac1, mac2]) node2 = factory.make_Node(routers=[mac1]) factory.make_Node() self.assertConstrainedNodes( [node1], {'connected_to': [ mac1.get_raw(), mac2.get_raw()]}) self.assertConstrainedNodes( [node1, node2], {'connected_to': [mac1.get_raw()]}) def test_invalid_connected_to(self): form = AcquireNodeForm(data={'connected_to': 'invalid'}) self.assertEquals( (False, { 'connected_to': ["Invalid parameter: list of MAC addresses required."]}), (form.is_valid(), form.errors)) def test_not_connected_to(self): mac1 = MAC('aa:bb:cc:dd:ee:ff') mac2 = MAC('00:11:22:33:44:55') node1 = factory.make_Node(routers=[mac1, mac2]) node2 = factory.make_Node(routers=[mac1]) node3 = factory.make_Node() self.assertConstrainedNodes( [node3], {'not_connected_to': [ mac1.get_raw(), mac2.get_raw()]}) self.assertConstrainedNodes( [node2, node3], {'not_connected_to': [mac2.get_raw()]}) self.assertConstrainedNodes( [node1, node2, node3], {'not_connected_to': ["b1:b1:b1:b1:b1:b1"]}) def test_invalid_not_connected_to(self): form = AcquireNodeForm(data={'not_connected_to': 'invalid'}) self.assertEquals( (False, { 'not_connected_to': ["Invalid parameter: list of MAC addresses required."]}), (form.is_valid(), form.errors)) def test_zone(self): node1 = factory.make_Node() node2 
= factory.make_Node() node3 = factory.make_Node() zone1 = factory.make_Zone(nodes=[node1, node2]) zone2 = factory.make_Zone() self.assertConstrainedNodes( [node1, node2], {'zone': zone1.name}) self.assertConstrainedNodes( [node1, node2, node3], {'zone': ''}) self.assertConstrainedNodes( [node1, node2, node3], {}) self.assertConstrainedNodes( [], {'zone': zone2.name}) def test_invalid_zone(self): form = AcquireNodeForm(data={'zone': 'unknown'}) self.assertEquals( (False, {'zone': ["No such zone: 'unknown'."]}), (form.is_valid(), form.errors)) def test_not_in_zone_excludes_given_zones(self): ineligible_nodes = [factory.make_Node() for _ in range(2)] eligible_nodes = [factory.make_Node() for _ in range(2)] self.assertConstrainedNodes( eligible_nodes, {'not_in_zone': [node.zone.name for node in ineligible_nodes]}) def test_not_in_zone_with_required_zone_yields_no_nodes(self): zone = factory.make_Zone() factory.make_Node(zone=zone) self.assertConstrainedNodes([], {'zone': zone, 'not_in_zone': [zone]}) def test_validates_not_in_zone(self): bad_zone_name = '#$&*!' form = AcquireNodeForm(data={'not_in_zone': [bad_zone_name]}) self.assertFalse(form.is_valid()) self.assertEqual(['not_in_zone'], form.errors.keys()) def test_not_in_zone_must_be_zone_name(self): non_zone = factory.make_name('nonzone') form = AcquireNodeForm(data={'not_in_zone': [non_zone]}) self.assertFalse(form.is_valid()) self.assertEqual( {'not_in_zone': ["No such zone(s): %s." % non_zone]}, form.errors) def test_not_in_zone_can_exclude_multiple_zones(self): # Three nodes, all in different physical zones. If we say we don't # want the first node's zone or the second node's zone, we get the node # in the remaining zone. nodes = [factory.make_Node() for _ in range(3)] self.assertConstrainedNodes( [nodes[2]], {'not_in_zone': [nodes[0].zone.name, nodes[1].zone.name]}) def test_tags(self): tag_big = factory.make_Tag(name='big') tag_burly = factory.make_Tag(name='burly') node_big = factory.make_Node() node_big.tags.add(tag_big) node_burly = factory.make_Node() node_burly.tags.add(tag_burly) node_bignburly = factory.make_Node() node_bignburly.tags.add(tag_big) node_bignburly.tags.add(tag_burly) self.assertConstrainedNodes( [node_big, node_bignburly], {'tags': ['big']}) self.assertConstrainedNodes( [node_burly, node_bignburly], {'tags': ['burly']}) self.assertConstrainedNodes( [node_bignburly], {'tags': ['big', 'burly']}) def test_not_tags_negates_individual_tags(self): tag = factory.make_Tag() tagged_node = factory.make_Node() tagged_node.tags.add(tag) untagged_node = factory.make_Node() self.assertConstrainedNodes( [untagged_node], {'not_tags': [tag.name]}) def test_not_tags_negates_multiple_tags(self): tagged_node = factory.make_Node() tags = [ factory.make_Tag('spam'), factory.make_Tag('eggs'), factory.make_Tag('ham'), ] tagged_node.tags = tags partially_tagged_node = factory.make_Node() partially_tagged_node.tags.add(tags[0]) self.assertConstrainedNodes( [partially_tagged_node], {'not_tags': ['eggs', 'ham']}) def test_invalid_tags(self): form = AcquireNodeForm(data={'tags': ['big', 'unknown']}) self.assertEquals( (False, { 'tags': ["No such tag(s): 'big', 'unknown'."]}), (form.is_valid(), form.errors)) def test_storage_invalid_constraint(self): form = AcquireNodeForm(data={'storage': '10(ssd,20'}) self.assertEquals( (False, { 'storage': ['Malformed storage constraint, "10(ssd,20".']}), (form.is_valid(), form.errors)) def test_storage_invalid_size_constraint(self): form = AcquireNodeForm(data={'storage': 'abc'}) self.assertEquals( 
            (False, {
                'storage': ['Malformed storage constraint, "abc".']}),
            (form.is_valid(), form.errors))

    def test_storage_single_constraint_only_matches_physical_devices(self):
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node1)
        node2 = factory.make_Node(with_boot_disk=False)
        factory.make_BlockDevice(node=node2)
        self.assertConstrainedNodes([node1], {'storage': '0'})

    def test_storage_single_constraint_matches_all_sizes_larger(self):
        node1 = factory.make_Node(with_boot_disk=False)
        # 1gb block device
        factory.make_PhysicalBlockDevice(
            node=node1, size=1 * (1000 ** 3))
        node2 = factory.make_Node(with_boot_disk=False)
        # 4gb block device
        factory.make_PhysicalBlockDevice(
            node=node2, size=4 * (1000 ** 3))
        node3 = factory.make_Node(with_boot_disk=False)
        # 8gb block device
        factory.make_PhysicalBlockDevice(
            node=node3, size=8 * (1000 ** 3))
        # all nodes with physical devices larger than 2gb
        self.assertConstrainedNodes([node2, node3], {'storage': '2'})

    def test_storage_single_constraint_matches_on_tags(self):
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node1, tags=['ssd'])
        node2 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node2, tags=['rotary'])
        self.assertConstrainedNodes([node1], {'storage': '0(ssd)'})

    def test_storage_single_constraint_matches_decimal_size(self):
        node1 = factory.make_Node(with_boot_disk=False)
        # 2gb, 4gb block device
        factory.make_PhysicalBlockDevice(
            node=node1, size=2 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node1, size=4 * (1000 ** 3))
        node2 = factory.make_Node(with_boot_disk=False)
        # 1gb block device
        factory.make_PhysicalBlockDevice(
            node=node2, size=1 * (1000 ** 3))
        self.assertConstrainedNodes([node1], {'storage': '1.5'})

    def test_storage_multi_constraint_only_matches_physical_devices(self):
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node1)
        factory.make_PhysicalBlockDevice(node=node1)
        node2 = factory.make_Node(with_boot_disk=False)
        factory.make_BlockDevice(node=node2)
        factory.make_BlockDevice(node=node2)
        self.assertConstrainedNodes([node1], {'storage': '0,0'})

    def test_storage_multi_constraint_matches_all_sizes_larger(self):
        node1 = factory.make_Node(with_boot_disk=False)
        # 1gb, 2gb, 3gb block device
        factory.make_PhysicalBlockDevice(
            node=node1, size=1 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node1, size=2 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node1, size=3 * (1000 ** 3))
        node2 = factory.make_Node(with_boot_disk=False)
        # 5gb, 6gb, 7gb block device
        factory.make_PhysicalBlockDevice(
            node=node2, size=5 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node2, size=6 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node2, size=7 * (1000 ** 3))
        node3 = factory.make_Node(with_boot_disk=False)
        # 8gb, 9gb, 10gb block device
        factory.make_PhysicalBlockDevice(
            node=node3, size=8 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node3, size=9 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node3, size=10 * (1000 ** 3))
        # all nodes with three physical devices larger than 4gb
        self.assertConstrainedNodes([node2, node3], {'storage': '4,4,4'})

    def test_storage_multi_constraint_matches_on_tags(self):
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node1, tags=['ssd'])
        factory.make_PhysicalBlockDevice(node=node1, tags=['ssd', 'removable'])
        node2 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node2, tags=['ssd'])
        factory.make_PhysicalBlockDevice(node=node2, tags=['ssd', 'sata'])
        self.assertConstrainedNodes(
            [node1], {'storage': '0(ssd),0(ssd,removable)'})

    def test_storage_multi_constraint_matches_on_size_and_tags(self):
        node1 = factory.make_Node(with_boot_disk=False)
        # 1gb, 2gb block device
        factory.make_PhysicalBlockDevice(
            node=node1, size=1 * (1000 ** 3), tags=['ssd'])
        factory.make_PhysicalBlockDevice(
            node=node1, size=2 * (1000 ** 3), tags=['ssd'])
        node2 = factory.make_Node(with_boot_disk=False)
        # 4gb, 5gb block device
        factory.make_PhysicalBlockDevice(
            node=node2, size=4 * (1000 ** 3), tags=['ssd'])
        factory.make_PhysicalBlockDevice(
            node=node2, size=5 * (1000 ** 3), tags=['ssd'])
        self.assertConstrainedNodes(
            [node2], {'storage': '3(ssd),3(ssd)'})

    def test_storage_first_constraint_matches_first_blockdevice(self):
        """Ensure the first constraint is tied to the first block device.

        A constraint like 10(ssd),5,20 must match a node whose *first*
        device is an 11gb ssd (with 6gb and 21gb devices elsewhere), but
        not a node carrying the same devices in a different order.
        """
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(node=node1, size=6 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(node=node1, size=21 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(
            node=node1, size=11 * (1000 ** 3), tags=['ssd'])
        node2 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(
            node=node2, size=11 * (1000 ** 3), tags=['ssd'])
        factory.make_PhysicalBlockDevice(node=node2, size=6 * (1000 ** 3))
        factory.make_PhysicalBlockDevice(node=node2, size=21 * (1000 ** 3))
        self.assertConstrainedNodes(
            [node2], {'storage': '10(ssd),5,20'})

    def test_storage_multi_constraint_matches_large_disk_count(self):
        node1 = factory.make_Node(with_boot_disk=False)
        for _ in range(10):
            factory.make_PhysicalBlockDevice(node=node1)
        node2 = factory.make_Node(with_boot_disk=False)
        for _ in range(5):
            factory.make_PhysicalBlockDevice(node=node2)
        self.assertConstrainedNodes(
            [node1], {'storage': '0,0,0,0,0,0,0,0,0,0'})

    @skip(
        "XXX: allenap 2015-03-17 bug=1433012: This test keeps failing when "
        "landing unrelated branches, so has been disabled.")
    def test_storage_with_named_constraints(self):
        node1 = factory.make_Node(with_boot_disk=False)
        factory.make_PhysicalBlockDevice(
            node=node1, size=11 * (1000 ** 3), tags=['ssd'])
        factory.make_PhysicalBlockDevice(
            node=node1, size=6 * (1000 ** 3), tags=['rotary', '5400rpm'])
        factory.make_PhysicalBlockDevice(node=node1, size=21 * (1000 ** 3))
        form = AcquireNodeForm(
            {u'storage': u'root:10(ssd),data:5(rotary,5400rpm),20'})
        self.assertTrue(form.is_valid(), form.errors)
        filtered_nodes, constraint_map = form.filter_nodes(Node.objects.all())
        node = filtered_nodes[0]
        # 1st constraint with name
        disk0 = node.physicalblockdevice_set.get(
            id=constraint_map[node.id].keys()[0])
        self.assertGreaterEqual(disk0.size, 10 * 1000 ** 3)
        # 2nd constraint with name
        disk1 = node.physicalblockdevice_set.get(
            id=constraint_map[node.id].keys()[1])
        self.assertGreaterEqual(disk1.size, 5 * 1000 ** 3)

    def test_fabrics_constraint(self):
        fabric1 = factory.make_Fabric(name="fabric1")
        fabric2 = factory.make_Fabric(name="fabric2")
        factory.make_Node_with_Interface_on_Subnet(fabric=fabric1)
        node2 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric2)
        form = AcquireNodeForm({
            u'fabrics': [u'fabric2']})
        self.assertTrue(form.is_valid(), dict(form.errors))
        filtered_nodes, _, _ = form.filter_nodes(Node.nodes)
        self.assertItemsEqual([node2], filtered_nodes)

    def test_not_fabrics_constraint(self):
        fabric1 = factory.make_Fabric(name="fabric1")
        fabric2 = factory.make_Fabric(name="fabric2")
factory.make_Node_with_Interface_on_Subnet(fabric=fabric1) node2 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric2) form = AcquireNodeForm({ u'not_fabrics': [u'fabric1']}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) def test_fabric_classes_constraint(self): fabric1 = factory.make_Fabric(class_type="10g") fabric2 = factory.make_Fabric(class_type="1g") factory.make_Node_with_Interface_on_Subnet(fabric=fabric1) node2 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric2) form = AcquireNodeForm({ u'fabric_classes': [u'1g']}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) def test_not_fabric_classes_constraint(self): fabric1 = factory.make_Fabric(class_type="10g") fabric2 = factory.make_Fabric(class_type="1g") factory.make_Node_with_Interface_on_Subnet(fabric=fabric1) node2 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric2) form = AcquireNodeForm({ u'not_fabric_classes': [u'10g']}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) def test_interfaces_constraint_rejected_if_syntax_is_invalid(self): factory.make_Node_with_Interface_on_Subnet() form = AcquireNodeForm({ u'interfaces': u'label:x'}) self.assertFalse(form.is_valid(), dict(form.errors)) self.assertThat(form.errors, Contains('interfaces')) def test_interfaces_constraint_rejected_if_key_is_invalid(self): factory.make_Node_with_Interface_on_Subnet() form = AcquireNodeForm({ u'interfaces': u'label:chirp_chirp_thing=silenced'}) self.assertFalse(form.is_valid(), dict(form.errors)) self.assertThat(form.errors, Contains('interfaces')) def test_interfaces_constraint_validated(self): factory.make_Node_with_Interface_on_Subnet() form = AcquireNodeForm({ u'interfaces': u'label:fabric=fabric-0'}) self.assertTrue(form.is_valid(), dict(form.errors)) def test_interfaces_constraint_with_multiple_labels_and_values_validated( self): factory.make_Node_with_Interface_on_Subnet() form = AcquireNodeForm({ u'interfaces': u'label:fabric=fabric-0,fabric=fabric-1,space=default;' u'label2:fabric=fabric-3,fabric=fabric-4,space=foo'}) self.assertTrue(form.is_valid(), dict(form.errors)) def test_interfaces_filters_by_fabric_class(self): fabric1 = factory.make_Fabric(class_type="1g") fabric2 = factory.make_Fabric(class_type="10g") node1 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric1) node2 = factory.make_Node_with_Interface_on_Subnet(fabric=fabric2) form = AcquireNodeForm({ u'interfaces': u'label:fabric_class=10g'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) form = AcquireNodeForm({ u'interfaces': u'label:fabric_class=1g'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node1], filtered_nodes) def test_interfaces_filters_work_with_multiple_labels(self): fabric1 = factory.make_Fabric(class_type="1g") fabric2 = factory.make_Fabric(class_type="10g") vlan1 = factory.make_VLAN(vid=1, fabric=fabric1) vlan2 = factory.make_VLAN(vid=2, fabric=fabric2) node1 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric1, vlan=vlan1) node2 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric2, vlan=vlan2) form = AcquireNodeForm({ 
u'interfaces': u'fabric:fabric_class=1g;vlan:vid=1'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node1], filtered_nodes) form = AcquireNodeForm({ u'interfaces': u'label:fabric_class=10g;vlan:vid=2'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) def test_interfaces_filters_same_key_treated_as_OR_operation(self): fabric1 = factory.make_Fabric(class_type="1g") fabric2 = factory.make_Fabric(class_type="10g") vlan1 = factory.make_VLAN(vid=1, fabric=fabric1) vlan2 = factory.make_VLAN(vid=2, fabric=fabric2) node1 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric1, vlan=vlan1) node2 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric2, vlan=vlan2) form = AcquireNodeForm({ u'interfaces': u'fabric:fabric_class=1g,fabric_class=10g;vlan:vid=1'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node1], filtered_nodes) form = AcquireNodeForm({ u'interfaces': u'label:fabric_class=10g,fabric_class=1g;vlan:vid=2'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node2], filtered_nodes) def test_interfaces_filters_different_key_treated_as_AND_operation(self): fabric1 = factory.make_Fabric(class_type="1g") fabric2 = factory.make_Fabric(class_type="10g") vlan1 = factory.make_VLAN(vid=1, fabric=fabric1) vlan2 = factory.make_VLAN(vid=2, fabric=fabric2) node1 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric1, vlan=vlan1) node2 = factory.make_Node_with_Interface_on_Subnet( fabric=fabric2, vlan=vlan2) form = AcquireNodeForm({ u'interfaces': u'none:fabric_class=1g,vid=2'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([], filtered_nodes) form = AcquireNodeForm({ u'interfaces': u'any:fabric_class=10g,fabric_class=1g,vid=1,vid=2'}) self.assertTrue(form.is_valid(), dict(form.errors)) filtered_nodes, _, _ = form.filter_nodes(Node.nodes) self.assertItemsEqual([node1, node2], filtered_nodes) def test_combined_constraints(self): tag_big = factory.make_Tag(name='big') arch = '%s/generic' % factory.make_name('arch') wrong_arch = '%s/generic' % factory.make_name('arch') patch_usable_architectures(self, [arch, wrong_arch]) node_big = factory.make_Node(architecture=arch) node_big.tags.add(tag_big) node_small = factory.make_Node(architecture=arch) ignore_unused(node_small) node_big_other_arch = factory.make_Node(architecture=wrong_arch) node_big_other_arch.tags.add(tag_big) self.assertConstrainedNodes( [node_big, node_big_other_arch], {'tags': ['big']}) self.assertConstrainedNodes( [node_big], {'arch': arch, 'tags': ['big']}) def test_invalid_combined_constraints(self): form = AcquireNodeForm( data={'tags': ['unknown'], 'mem': 'invalid'}) self.assertEquals( (False, { 'tags': ["No such tag(s): 'unknown'."], 'mem': ["Invalid memory: number of MiB required."], }), (form.is_valid(), form.errors)) def test_returns_distinct_nodes(self): node = factory.make_Node() subnet = factory.make_Subnet() nic1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=nic1, subnet=subnet) nic2 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, 
ip="", interface=nic2, subnet=subnet) self.assertConstrainedNodes( {node}, {'networks': [subnet.name]}) def test_describe_constraints_returns_empty_if_no_constraints(self): form = AcquireNodeForm(data={}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual('', form.describe_constraints()) def test_describe_constraints_shows_simple_constraint(self): hostname = factory.make_name('host') form = AcquireNodeForm(data={'name': hostname}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual('name=%s' % hostname, form.describe_constraints()) def test_describe_constraints_shows_arch_as_special_case(self): # The "arch" field is technically a single-valued string field # on the form, but its "cleaning" produces a list of strings. arch = self.set_usable_arch() form = AcquireNodeForm(data={'arch': arch}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual('arch=%s' % arch, form.describe_constraints()) def test_describe_constraints_shows_multi_constraint(self): tag = factory.make_Tag() form = AcquireNodeForm(data={'tags': [tag.name]}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual('tags=%s' % tag.name, form.describe_constraints()) def test_describe_constraints_sorts_constraints(self): hostname = factory.make_name('host') zone = factory.make_Zone() form = AcquireNodeForm(data={'name': hostname, 'zone': zone}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual( 'name=%s zone=%s' % (hostname, zone), form.describe_constraints()) def test_describe_constraints_combines_constraint_values(self): tag1 = factory.make_Tag() tag2 = factory.make_Tag() form = AcquireNodeForm(data={'tags': [tag1.name, tag2.name]}) self.assertTrue(form.is_valid(), form.errors) self.assertEqual( 'tags=%s,%s' % tuple(sorted([tag1.name, tag2.name])), form.describe_constraints()) def test_describe_constraints_shows_all_constraints(self): constraints = { 'name': factory.make_name('host'), 'arch': self.set_usable_arch(), 'cpu_count': randint(1, 32), 'mem': randint(1024, 256 * 1024), 'tags': [factory.make_Tag().name], 'not_tags': [factory.make_Tag().name], 'networks': [factory.make_Subnet().name], 'not_networks': [factory.make_Subnet().name], 'connected_to': [factory.make_mac_address()], 'not_connected_to': [factory.make_mac_address()], 'zone': factory.make_Zone(), 'not_in_zone': [factory.make_Zone().name], 'storage': '0(ssd),10(ssd)', 'interfaces': 'label:fabric=fabric-0', 'fabrics': [factory.make_Fabric().name], 'not_fabrics': [factory.make_Fabric().name], 'fabric_classes': [ factory.make_Fabric(class_type="10g").class_type], 'not_fabric_classes': [ factory.make_Fabric(class_type="1g").class_type], } form = AcquireNodeForm(data=constraints) self.assertTrue(form.is_valid(), form.errors) # Check first: we didn't forget to test any attributes. When we add # a constraint to the form, we'll have to add it here as well. self.assertItemsEqual(form.fields.keys(), constraints.keys()) described_constraints = { constraint.split('=', 1)[0] for constraint in form.describe_constraints().split() } self.assertItemsEqual(constraints.keys(), described_constraints) class TestAcquireNodeFormOrdersResults(MAASServerTestCase): def test_describe_constraints_shows_all_constraints(self): nodes = [ factory.make_Node( cpu_count=randint(5, 32), memory=randint(1024, 256 * 1024) ) for _ in range(4)] sorted_nodes = sorted( nodes, key=lambda n: n.cpu_count + n.memory / 1024) # The form should select all the nodes. All we're interested # in here is the ordering. 
form = AcquireNodeForm(data={'cpu_count': 4}) self.assertTrue(form.is_valid(), form.errors) filtered_nodes, _, _ = form.filter_nodes(Node.objects.all()) self.assertEqual( sorted_nodes, list(filtered_nodes)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_nonces_cleanup.py0000644000000000000000000001450413056115004023365 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the nonces cleanup module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import time from maasserver import nonces_cleanup from maasserver.nonces_cleanup import ( cleanup_old_nonces, create_checkpoint_nonce, delete_old_nonces, find_checkpoint_nonce, get_time_string, key_prefix, NonceCleanupService, time as module_time, timestamp_threshold, ) from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from mock import call from piston.models import Nonce from testtools.matchers import ( ContainsAll, StartsWith, ) from twisted.internet.defer import maybeDeferred from twisted.internet.task import Clock class TestCleanupOldNonces(MAASServerTestCase): def test_cleanup_old_nonces_returns_0_if_no_checkpoint(self): self.assertEquals(0, cleanup_old_nonces()) def test_cleanup_old_nonces_cleans_up_old_nonces(self): now = time.time() # Patch the module's time module so that the nonces appear as if # they were created now - timestamp_threshold seconds ago. timemod = self.patch(module_time, "time") timemod.return_value = now - timestamp_threshold old_nonces = [Nonce.objects.create() for _ in range(3)] self.assertEquals(0, cleanup_old_nonces()) # Patch the module's time module back. timemod.return_value = now new_nonces = [Nonce.objects.create() for _ in range(3)] cleanup_count = cleanup_old_nonces() # The old nonces plus the checkpoint nonce are deleted. self.assertEquals(len(old_nonces) + 1, cleanup_count) self.assertThat(Nonce.objects.all(), ContainsAll(new_nonces)) self.assertEqual(len(new_nonces) + 1, Nonce.objects.all().count()) class TestUtilities(MAASServerTestCase): def test_create_checkpoint_nonce_creates_checkpoint_nonce(self): before = time.time() create_checkpoint_nonce() checkpoint = Nonce.objects.get(token_key='', consumer_key='') after = time.time() checkpoint_time = checkpoint.key[len(key_prefix):] self.assertLessEqual(before, float(checkpoint_time)) self.assertGreaterEqual(after, float(checkpoint_time)) def test_create_checkpoint_nonce_gets_checkpoint_if_exists(self): now = time.time() self.patch(module_time, "time").return_value = now create_checkpoint_nonce() nonce1 = Nonce.objects.filter( token_key='', consumer_key='').latest('id') create_checkpoint_nonce() nonce2 = Nonce.objects.filter( token_key='', consumer_key='').latest('id') self.assertEqual(nonce1.id, nonce2.id) def test_delete_old_nonces_delete_nonces(self): # Create old nonces. 
[Nonce.objects.create() for _ in range(3)] checkpoint = Nonce.objects.create() new_nonces = [Nonce.objects.create() for _ in range(3)] delete_old_nonces(checkpoint) self.assertItemsEqual(new_nonces, Nonce.objects.all()) def test_find_checkpoint_nonce_returns_None_if_no_checkpoint(self): self.assertIsNone(find_checkpoint_nonce()) def test_find_checkpoint_nonce_returns_most_recent_checkpoint(self): now = time.time() self.patch(module_time, "time").return_value = now # Create a "checkpoint" nonce created timestamp_threshold + 5 # seconds ago. Nonce.objects.create( token_key='', consumer_key='', key=get_time_string(now - 5 - timestamp_threshold)) # Create a "checkpoint" nonce created timestamp_threshold # seconds ago. checkpoint = Nonce.objects.create( token_key='', consumer_key='', key=get_time_string(now - timestamp_threshold)) # Create a "checkpoint" nonce created 1 second ago. Nonce.objects.create( token_key='', consumer_key='', key=get_time_string(now - 1)) self.assertEqual(checkpoint, find_checkpoint_nonce()) def test_get_time_string_returns_comparable_string(self): now = time.time() self.assertGreater(get_time_string(now + 1), get_time_string(now)) def test_get_time_string_ends_with_suffix(self): now = time.time() self.assertThat(get_time_string(now), StartsWith(key_prefix)) class TestNonceCleanupService(MAASServerTestCase): def test_init_with_default_interval(self): # The service itself calls `cleanup_old_nonces` in a thread, via # a couple of decorators. This indirection makes it clearer to # mock `cleanup_old_nonces` here and track calls to it. cleanup_old_nonces = self.patch(nonces_cleanup, "cleanup_old_nonces") # Making `deferToDatabase` use the current thread helps testing. self.patch(nonces_cleanup, "deferToDatabase", maybeDeferred) service = NonceCleanupService() # Use a deterministic clock instead of the reactor for testing. service.clock = Clock() # The interval is stored as `step` by TimerService, # NonceCleanupService's parent class. interval = 24 * 60 * 60 # seconds. self.assertEqual(service.step, interval) # `cleanup_old_nonces` is not called before the service is # started. self.assertThat(cleanup_old_nonces, MockNotCalled()) # `cleanup_old_nonces` is called the moment the service is # started. service.startService() self.assertThat(cleanup_old_nonces, MockCalledOnceWith()) # Advancing the clock by `interval - 1` means that # `cleanup_old_nonces` has still only been called once. service.clock.advance(interval - 1) self.assertThat(cleanup_old_nonces, MockCalledOnceWith()) # Advancing the clock one more second causes another call to # `cleanup_old_nonces`. service.clock.advance(1) self.assertThat(cleanup_old_nonces, MockCallsMatch(call(), call())) def test_interval_can_be_set(self): interval = self.getUniqueInteger() service = NonceCleanupService(interval) self.assertEqual(interval, service.step) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_plugin.py0000644000000000000000000002111413056115004021662 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
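# Background, in brief: RegionServiceMaker is a twistd service-maker plugin;
# twistd looks it up by its tapname and calls makeService(Options()), and the
# returned MultiService is what the tests below assert on. Sketch (the names
# are placeholders, as in the tests themselves):
#   service = RegionServiceMaker("Harry", "Hill").makeService(Options())
#   "rpc" in service.namedServices  # -> True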
"""Tests for the ``maasregiond`` TAP.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from operator import setitem import random import crochet from django.db import connections from django.db.backends import BaseDatabaseWrapper from maasserver import eventloop from maasserver.plugin import ( Options, RegionServiceMaker, ) from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import ( disable_all_database_connections, DisabledDatabaseConnection, enable_all_database_connections, ) from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from provisioningserver import logger from provisioningserver.utils.twisted import ( asynchronous, ThreadPool, ) from south import migration from testtools import monkey from testtools.matchers import IsInstance from twisted.application.service import MultiService from twisted.internet import reactor def import_websocket_handlers(): # Import the websocket handlers for their side-effects: merely defining # DeviceHandler, e.g., causes a database access, which will crash if it # happens inside the reactor thread where database access is forbidden and # prevented. The most sensible solution to this might be to disallow # database access at import time. import maasserver.websockets.handlers # noqa class TestOptions(MAASTestCase): """Tests for `maasserver.plugin.Options`.""" def test_defaults(self): options = Options() self.assertEqual({"introspect": None}, options.defaults) def test_parse_minimal_options(self): options = Options() # The minimal set of options that must be provided. arguments = [] options.parseOptions(arguments) # No error. class TestRegionServiceMaker(MAASTestCase): """Tests for `maasserver.plugin.RegionServiceMaker`.""" def setUp(self): super(TestRegionServiceMaker, self).setUp() self.patch(eventloop.loop, "services", MultiService()) self.patch_autospec(crochet, "no_setup") self.patch_autospec(logger, "basicConfig") # Enable database access in the reactor just for these tests. asynchronous(enable_all_database_connections, 5)() # _checkDatabase() is tested separately; see later. self.patch_autospec(RegionServiceMaker, "_checkDatabase") import_websocket_handlers() def tearDown(self): super(TestRegionServiceMaker, self).tearDown() # Disable database access in the reactor again. asynchronous(disable_all_database_connections, 5)() def test_init(self): service_maker = RegionServiceMaker("Harry", "Hill") self.assertEqual("Harry", service_maker.tapname) self.assertEqual("Hill", service_maker.description) @asynchronous(timeout=5) def test_makeService(self): options = Options() service_maker = RegionServiceMaker("Harry", "Hill") # Disable _configureThreads() as it's too invasive right now. 
self.patch_autospec(service_maker, "_configureThreads") service = service_maker.makeService(options) self.assertIsInstance(service, MultiService) expected_services = [ "database-tasks", "import-resources", "import-resources-progress", "nonce-cleanup", "rpc", "rpc-advertise", "web", ] self.assertItemsEqual(expected_services, service.namedServices) self.assertEqual( len(service.namedServices), len(service.services), "Not all services are named.") self.assertThat(logger.basicConfig, MockCalledOnceWith()) self.assertThat(crochet.no_setup, MockCalledOnceWith()) self.assertThat( RegionServiceMaker._checkDatabase, MockCalledOnceWith(service_maker)) @asynchronous(timeout=5) def test_configures_thread_pool(self): # Patch and restore where it's visible because patching a running # reactor is potentially fairly harmful. patcher = monkey.MonkeyPatcher() patcher.add_patch(reactor, "threadpool", None) patcher.add_patch(reactor, "threadpoolForDatabase", None) patcher.patch() try: service_maker = RegionServiceMaker("Harry", "Hill") service_maker.makeService(Options()) threadpool = reactor.getThreadPool() self.assertThat(threadpool, IsInstance(ThreadPool)) finally: patcher.restore() def assertConnectionsEnabled(self): for alias in connections: self.assertThat( connections[alias], IsInstance(BaseDatabaseWrapper)) def assertConnectionsDisabled(self): for alias in connections: self.assertEqual( DisabledDatabaseConnection, type(connections[alias])) @asynchronous(timeout=5) def test_disables_database_connections_in_reactor(self): self.assertConnectionsEnabled() service_maker = RegionServiceMaker("Harry", "Hill") # Disable _configureThreads() as it's too invasive right now. self.patch_autospec(service_maker, "_configureThreads") service_maker.makeService(Options()) self.assertConnectionsDisabled() class TestRegionServiceMakerDatabaseChecks(MAASServerTestCase): """Tests for `maasserver.plugin.RegionServiceMaker._checkDatabase`.""" def setUp(self): super(TestRegionServiceMakerDatabaseChecks, self).setUp() import_websocket_handlers() @asynchronous(timeout=5) def test__checks_database_connectivity_early(self): exception_type = factory.make_exception_type() service_maker = RegionServiceMaker("Harry", "Hill") _checkDatabase = self.patch_autospec(service_maker, "_checkDatabase") _checkDatabase.side_effect = exception_type # Disable _configureThreads() as it's too invasive right now. self.patch_autospec(service_maker, "_configureThreads") self.patch_autospec(eventloop.loop, "populate") self.assertRaises(exception_type, service_maker.makeService, Options()) self.assertThat(_checkDatabase, MockCalledOnceWith()) self.assertThat(eventloop.loop.populate, MockNotCalled()) def test__completes_quietly_if_database_can_be_connected_to(self): service_maker = RegionServiceMaker("Harry", "Hill") try: service_maker._checkDatabase() except SystemExit as error: # Django/South sometimes declares that all migrations have been # applied, sometimes it declares that none have been applied, and # it appears to depend on what other tests have run or are due to # run. This is highly irritating. This workaround is ugly and # diminishes the value of this test, but it also avoids a long and # expensive diving expedition into Django's convoluted innards. self.assertDocTestMatches( "The MAAS database schema is not yet fully installed: " "... migration(s) are missing.", unicode(error)) else: # This is what was meant to happen. pass def test__complains_if_database_cannot_be_connected_to(self): # Disable all database connections in this thread. 
for alias in connections: self.addCleanup(setitem, connections, alias, connections[alias]) connections[alias] = DisabledDatabaseConnection() service_maker = RegionServiceMaker("Harry", "Hill") error = self.assertRaises(SystemExit, service_maker._checkDatabase) self.assertDocTestMatches( "The MAAS database cannot be used. Please investigate: ...", unicode(error)) def test__complains_if_not_all_migrations_have_been_applied(self): def random_unapplied(migrations, _): # Always declare that one migration has not been applied. return [random.choice(migrations)] self.patch(migration, "get_unapplied_migrations", random_unapplied) service_maker = RegionServiceMaker("Harry", "Hill") error = self.assertRaises(SystemExit, service_maker._checkDatabase) self.assertEqual( "The MAAS database schema is not yet fully installed: " "1 migration(s) are missing.", unicode(error)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_populate_tags.py0000644000000000000000000002772213056115004023246 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.populate_tags`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import izip from fixtures import FakeLogger from maasserver import populate_tags as populate_tags_module from maasserver.enum import NODEGROUP_STATUS from maasserver.models import Tag from maasserver.populate_tags import ( _do_populate_tags, _get_clients_for_populating_tags, populate_tags, populate_tags_for_single_node, ) from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture from maasserver.testing.eventloop import ( RegionEventLoopFixture, RunningEventLoopFixture, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledOnceWith from maastesting.twisted import ( always_fail_with, always_succeed_with, ) from metadataserver.models import commissioningscript from mock import ( ANY, create_autospec, sentinel, ) from provisioningserver.rpc.cluster import EvaluateTag from provisioningserver.rpc.common import Client from provisioningserver.utils.twisted import asynchronous from testtools.deferredruntest import extract_result from testtools.monkey import MonkeyPatcher from twisted.internet import defer def make_accepted_NodeGroup(): return factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) def make_Tag_without_populating(): # Create a tag but prevent evaluation when saving. 
dont_populate = MonkeyPatcher((Tag, "populate_nodes", lambda self: None)) return dont_populate.run_with_patches(factory.make_Tag) class TestGetClientsForPopulatingTags(MAASServerTestCase): def test__returns_no_clients_when_there_are_no_clusters(self): tag_name = factory.make_name("tag") clients = _get_clients_for_populating_tags([], tag_name) self.assertEqual([], clients) def patch_getClientFor(self): return self.patch_autospec(populate_tags_module, "getClientFor") def test__returns_no_clients_when_there_is_an_error(self): nodegroup_with_connection = make_accepted_NodeGroup() nodegroup_without_connection = make_accepted_NodeGroup() def getClientFor(uuid, timeout): if uuid == nodegroup_with_connection.uuid: return defer.succeed(sentinel.client) else: return defer.fail(ZeroDivisionError()) self.patch_getClientFor().side_effect = getClientFor tag_name = factory.make_name("tag") clusters = [ (nodegroup_with_connection.uuid, nodegroup_with_connection.cluster_name), (nodegroup_without_connection.uuid, nodegroup_without_connection.cluster_name), ] clients = _get_clients_for_populating_tags(clusters, tag_name) self.assertEqual([sentinel.client], clients) def test__logs_errors_obtaining_clients(self): getClientFor = self.patch_getClientFor() getClientFor.side_effect = always_fail_with( ZeroDivisionError("an error message one would surmise")) nodegroup = make_accepted_NodeGroup() tag_name = factory.make_name("tag") clusters = [(nodegroup.uuid, nodegroup.cluster_name)] with FakeLogger("maas") as log: _get_clients_for_populating_tags(clusters, tag_name) self.assertDocTestMatches( "Cannot evaluate tag ... on cluster ... (...): ... surmise", log.output) def test__waits_for_clients_for_30_seconds_by_default(self): getClientFor = self.patch_getClientFor() getClientFor.side_effect = always_succeed_with(sentinel.client) nodegroup = make_accepted_NodeGroup() tag_name = factory.make_name("tag") clusters = [(nodegroup.uuid, nodegroup.cluster_name)] clients = _get_clients_for_populating_tags(clusters, tag_name) self.assertEqual([sentinel.client], clients) self.assertThat( getClientFor, MockCalledOnceWith( nodegroup.uuid, timeout=30)) def test__obtains_multiple_clients(self): getClientFor = self.patch_getClientFor() # Return a 2-tuple as a stand-in for a real client. 
getClientFor.side_effect = lambda uuid, timeout: ( defer.succeed((sentinel.client, uuid))) nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] tag_name = factory.make_name("tag") clusters = [(ng.uuid, ng.cluster_name) for ng in nodegroups] clients = _get_clients_for_populating_tags(clusters, tag_name) self.assertItemsEqual( [(sentinel.client, nodegroup.uuid) for nodegroup in nodegroups], clients) class TestDoPopulateTags(MAASServerTestCase): def patch_clients(self, nodegroups): clients = [create_autospec(Client, instance=True) for _ in nodegroups] for nodegroup, client in izip(nodegroups, clients): client.side_effect = always_succeed_with(None) client.ident = nodegroup.uuid _get_clients = self.patch_autospec( populate_tags_module, "_get_clients_for_populating_tags") _get_clients.return_value = defer.succeed(clients) return clients def test__makes_calls_to_each_client_given(self): nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] clients = self.patch_clients(nodegroups) tag_name = factory.make_name("tag") tag_definition = factory.make_name("definition") tag_nsmap_prefix = factory.make_name("prefix") tag_nsmap_uri = factory.make_name("uri") tag_nsmap = {tag_nsmap_prefix: tag_nsmap_uri} clusters = list( (ng.uuid, ng.cluster_name, ng.api_credentials) for ng in nodegroups) [d] = _do_populate_tags( clusters, tag_name, tag_definition, tag_nsmap) self.assertIsNone(extract_result(d)) for nodegroup, client in izip(nodegroups, clients): self.expectThat(client, MockCalledOnceWith( EvaluateTag, tag_name=tag_name, tag_definition=tag_definition, tag_nsmap=[{"prefix": tag_nsmap_prefix, "uri": tag_nsmap_uri}], credentials=nodegroup.api_credentials)) def test__logs_successes(self): nodegroups = [make_accepted_NodeGroup()] self.patch_clients(nodegroups) tag_name = factory.make_name("tag") tag_definition = factory.make_name("definition") tag_nsmap = {} clusters = list( (ng.uuid, ng.cluster_name, ng.api_credentials) for ng in nodegroups) with FakeLogger("maas") as log: [d] = _do_populate_tags( clusters, tag_name, tag_definition, tag_nsmap) self.assertIsNone(extract_result(d)) self.assertDocTestMatches( "Tag tag-... (definition-...) evaluated on cluster ... (...)", log.output) def test__logs_failures(self): nodegroups = [make_accepted_NodeGroup()] [client] = self.patch_clients(nodegroups) client.side_effect = always_fail_with( ZeroDivisionError("splendid day for a spot of cricket")) tag_name = factory.make_name("tag") tag_definition = factory.make_name("definition") tag_nsmap = {} clusters = list( (ng.uuid, ng.cluster_name, ng.api_credentials) for ng in nodegroups) with FakeLogger("maas") as log: [d] = _do_populate_tags( clusters, tag_name, tag_definition, tag_nsmap) self.assertIsNone(extract_result(d)) self.assertDocTestMatches( "Tag tag-... (definition-...) could not be evaluated ... 
(...): " "splendid day for a spot of cricket", log.output) class TestPopulateTags(MAASServerTestCase): def patch_do_populate_tags(self): do_populate_tags = self.patch_autospec( populate_tags_module, "_do_populate_tags") do_populate_tags.return_value = [sentinel.d] return do_populate_tags def test__calls_do_populate_tags_with_no_clusters(self): do_populate_tags = self.patch_do_populate_tags() tag = make_Tag_without_populating() populate_tags(tag) self.assertThat(do_populate_tags, MockCalledOnceWith( (), tag.name, tag.definition, populate_tags_module.tag_nsmap)) def test__calls_do_populate_tags_with_clusters(self): do_populate_tags = self.patch_do_populate_tags() nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] tag = make_Tag_without_populating() populate_tags(tag) clusters_expected = tuple( (ng.uuid, ng.cluster_name, ng.api_credentials) for ng in nodegroups) self.assertThat(do_populate_tags, MockCalledOnceWith( clusters_expected, tag.name, tag.definition, populate_tags_module.tag_nsmap)) class TestPopulateTagsEndToNearlyEnd(MAASServerTestCase): def prepare_live_rpc(self): self.useFixture(RegionEventLoopFixture("rpc")) self.useFixture(RunningEventLoopFixture()) return self.useFixture(MockLiveRegionToClusterRPCFixture()) def test__calls_are_made_to_all_clusters(self): rpc_fixture = self.prepare_live_rpc() nodegroups = [make_accepted_NodeGroup() for _ in xrange(3)] protocols = [] for nodegroup in nodegroups: protocol = rpc_fixture.makeCluster(nodegroup, EvaluateTag) protocol.EvaluateTag.side_effect = always_succeed_with({}) protocols.append(protocol) tag = make_Tag_without_populating() d = populate_tags(tag) # `d` is a testing-only convenience. We must wait for it to fire, and # we must do that from the reactor thread. wait_for_populate = asynchronous(lambda: d) wait_for_populate().wait(10) for nodegroup, protocol in izip(nodegroups, protocols): self.expectThat(protocol.EvaluateTag, MockCalledOnceWith( protocol, tag_name=tag.name, tag_definition=tag.definition, tag_nsmap=ANY, credentials=nodegroup.api_credentials)) class TestPopulateTagsForSingleNode(MAASServerTestCase): def test_updates_node_with_all_applicable_tags(self): node = factory.make_Node() factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") factory.make_NodeResult_for_commissioning( node, commissioningscript.LLDP_OUTPUT_NAME, 0, b"") tags = [ factory.make_Tag("foo", "/foo"), factory.make_Tag("bar", "//lldp:bar"), factory.make_Tag("baz", "/foo/bar"), ] populate_tags_for_single_node(tags, node) self.assertItemsEqual( ["foo", "bar"], [tag.name for tag in node.tags.all()]) def test_ignores_tags_with_unrecognised_namespaces(self): node = factory.make_Node() factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") tags = [ factory.make_Tag("foo", "/foo"), factory.make_Tag("lou", "//nge:bar"), ] populate_tags_for_single_node(tags, node) # Look mom, no exception! self.assertSequenceEqual( ["foo"], [tag.name for tag in node.tags.all()]) def test_ignores_tags_without_definition(self): node = factory.make_Node() factory.make_NodeResult_for_commissioning( node, commissioningscript.LSHW_OUTPUT_NAME, 0, b"") tags = [ factory.make_Tag("foo", "/foo"), Tag(name="empty", definition=""), Tag(name="null", definition=None), ] populate_tags_for_single_node(tags, node) # Look mom, no exception! 
self.assertSequenceEqual( ["foo"], [tag.name for tag in node.tags.all()]) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_preseed.py0000644000000000000000000015324513056115004022026 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test `maasserver.preseed` and related bits and bobs.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib import os from pipes import quote import random from urlparse import urlparse from django.conf import settings from django.core.urlresolvers import reverse from maasserver import preseed as preseed_module from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.enum import ( NODE_BOOT, NODE_STATUS, NODEGROUPINTERFACE_MANAGEMENT, PRESEED_TYPE, ) from maasserver.exceptions import ( ClusterUnavailable, MissingBootImage, PreseedError, ) from maasserver.models import ( BootResource, Config, ) from maasserver.preseed import ( compose_curtin_kernel_preseed, compose_curtin_maas_reporter, compose_curtin_swap_preseed, compose_curtin_verbose_preseed, compose_enlistment_preseed_url, compose_preseed_url, curtin_maas_reporter, GENERIC_FILENAME, get_available_purpose_for_node, get_curtin_config, get_curtin_context, get_curtin_image, get_curtin_installer_url, get_curtin_merged_config, get_curtin_userdata, get_enlist_preseed, get_netloc_and_path, get_node_preseed_context, get_preseed, get_preseed_context, get_preseed_filenames, get_preseed_template, get_preseed_type_for, get_supported_purposes_for_node, load_preseed_template, PreseedTemplate, render_enlistment_preseed, render_preseed, split_subarch, TemplateNotFoundError, ) from maasserver.rpc.testing.mixins import PreseedRPCMixin from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.factory import factory from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import absolute_reverse from maasserver.utils.curtin import curtin_supports_webhook_events from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from metadataserver.models import NodeKey from mock import sentinel from provisioningserver.drivers.osystem.ubuntu import UbuntuOS from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.utils.enum import map_enum from testtools.matchers import ( AllMatch, Contains, ContainsAll, HasLength, IsInstance, MatchesAll, Not, StartsWith, ) from twisted.internet import defer import yaml class BootImageHelperMixin: def make_rpc_boot_image_for(self, node, purpose): osystem = node.get_osystem() series = node.get_distro_series() arch, subarch = node.split_arch() return make_rpc_boot_image( osystem=osystem, release=series, architecture=arch, subarchitecture=subarch, purpose=purpose) def configure_get_boot_images_for_node(self, node, purpose): boot_image = self.make_rpc_boot_image_for(node, purpose) self.patch( preseed_module, 'get_boot_images_for').return_value = [boot_image] class TestSplitSubArch(MAASServerTestCase): """Tests for `split_subarch`.""" def test_split_subarch_returns_list(self): self.assertEqual(['amd64'], split_subarch('amd64')) def test_split_subarch_splits_sub_architecture(self): self.assertEqual(['amd64', 'test'], split_subarch('amd64/test')) 
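# The two tests above fully pin down `split_subarch`: it is a plain '/'
# split. A minimal sketch, assuming the real maasserver.preseed function
# keeps this signature:
#
#     def split_subarch(architecture):
#         """Split an architecture string into [arch] or [arch, subarch]."""
#         return architecture.split('/')
#
# e.g. split_subarch('amd64') == ['amd64'] and
# split_subarch('amd64/test') == ['amd64', 'test'].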
class TestGetNetlocAndPath(MAASServerTestCase): """Tests for `get_netloc_and_path`.""" def test_get_netloc_and_path(self): input_and_results = [ ('http://name.domain:66/my/path', ('name.domain:66', '/my/path')), ('http://name.domain:80/my/path', ('name.domain:80', '/my/path')), ('http://name.domain/my/path', ('name.domain', '/my/path')), ('https://domain/path', ('domain', '/path')), ('http://domain:12', ('domain:12', '')), ('http://domain/', ('domain', '/')), ('http://domain', ('domain', '')), ] inputs = [input for input, _ in input_and_results] results = [result for _, result in input_and_results] self.assertEqual(results, map(get_netloc_and_path, inputs)) class TestGetPreseedFilenames(MAASServerTestCase): """Tests for `get_preseed_filenames`.""" def test__returns_filenames(self): hostname = factory.make_string() prefix = factory.make_string() osystem = factory.make_string() release = factory.make_string() node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') self.assertSequenceEqual( [ '%s_%s_%s_%s_%s_%s' % ( prefix, osystem, arch, subarch, release, hostname), '%s_%s_%s_%s_%s' % (prefix, osystem, arch, subarch, release), '%s_%s_%s_%s' % (prefix, osystem, arch, subarch), '%s_%s_%s' % (prefix, osystem, arch), '%s_%s' % (prefix, osystem), '%s' % prefix, 'generic', ], list(get_preseed_filenames( node, prefix, osystem, release, default=True))) def test__returns_limited_filenames_if_node_is_None(self): osystem = factory.make_string() release = factory.make_string() prefix = factory.make_string() self.assertSequenceEqual( [ '%s_%s_%s' % (prefix, osystem, release), '%s_%s' % (prefix, osystem), '%s' % prefix, ], list(get_preseed_filenames(None, prefix, osystem, release))) def test__supports_empty_prefix(self): hostname = factory.make_string() osystem = factory.make_string() release = factory.make_string() node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') self.assertSequenceEqual( [ '%s_%s_%s_%s_%s' % (osystem, arch, subarch, release, hostname), '%s_%s_%s_%s' % (osystem, arch, subarch, release), '%s_%s_%s' % (osystem, arch, subarch), '%s_%s' % (osystem, arch), '%s' % osystem, ], list(get_preseed_filenames(node, '', osystem, release))) def test__returns_list_without_default(self): # If default=False is passed to get_preseed_filenames, the # returned list won't include the default template name as a # last resort template. hostname = factory.make_string() prefix = factory.make_string() release = factory.make_string() node = factory.make_Node(hostname=hostname) self.assertSequenceEqual( 'generic', list(get_preseed_filenames( node, prefix, release, default=True))[-1]) def test__returns_list_with_default(self): # If default=True is passed to get_preseed_filenames, the # returned list will include the default template name as a # last resort template. hostname = factory.make_string() prefix = factory.make_string() release = factory.make_string() node = factory.make_Node(hostname=hostname) self.assertSequenceEqual( prefix, list(get_preseed_filenames( node, prefix, release, default=False))[-1]) def test__returns_backward_compatible_name_for_ubuntu_without_prefix(self): # If the OS is Ubuntu, also include backward-compatible filenames. # See bug 1439366 for details. 
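        # A concrete sketch of that interleaving (hostname and release are
        # hypothetical here): for architecture amd64/generic, release
        # "trusty" and hostname "host", the expected lookup order is
        # ubuntu_amd64_generic_trusty_host, amd64_generic_trusty_host,
        # ubuntu_amd64_generic_trusty, amd64_generic_trusty,
        # ubuntu_amd64_generic, amd64_generic, ubuntu_amd64, amd64, ubuntu.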
hostname = factory.make_string() osystem = UbuntuOS().name release = factory.make_string() node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') self.assertSequenceEqual( [ '%s_%s_%s_%s_%s' % (osystem, arch, subarch, release, hostname), '%s_%s_%s_%s' % (arch, subarch, release, hostname), '%s_%s_%s_%s' % (osystem, arch, subarch, release), '%s_%s_%s' % (arch, subarch, release), '%s_%s_%s' % (osystem, arch, subarch), '%s_%s' % (arch, subarch), '%s_%s' % (osystem, arch), '%s' % (arch), '%s' % osystem, ], list(get_preseed_filenames(node, '', osystem, release))) def test__returns_backward_compatible_name_for_ubuntu_with_prefix(self): # If the OS is Ubuntu, also include backward-compatible filenames. # See bug 1439366 for details. hostname = factory.make_string() osystem = UbuntuOS().name release = factory.make_string() node = factory.make_Node(hostname=hostname) arch, subarch = node.architecture.split('/') prefix = factory.make_string() self.assertSequenceEqual( [ '%s_%s_%s_%s_%s_%s' % ( prefix, osystem, arch, subarch, release, hostname), '%s_%s_%s_%s_%s' % (prefix, arch, subarch, release, hostname), '%s_%s_%s_%s_%s' % (prefix, osystem, arch, subarch, release), '%s_%s_%s_%s' % (prefix, arch, subarch, release), '%s_%s_%s_%s' % (prefix, osystem, arch, subarch), '%s_%s_%s' % (prefix, arch, subarch), '%s_%s_%s' % (prefix, osystem, arch), '%s_%s' % (prefix, arch), '%s_%s' % (prefix, osystem), '%s' % prefix, ], list(get_preseed_filenames(node, prefix, osystem, release))) class TestConfiguration(MAASServerTestCase): """Test for correct configuration of the preseed component.""" def test_setting_defined(self): self.assertThat( settings.PRESEED_TEMPLATE_LOCATIONS, AllMatch(IsInstance(unicode))) class TestGetPreseedTemplate(MAASServerTestCase): """Tests for `get_preseed_template`.""" def test_get_preseed_template_returns_None_if_no_template_locations(self): # get_preseed_template() returns None when no template locations are # defined. self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", []) self.assertEqual( (None, None), get_preseed_template( (factory.make_string(), factory.make_string()))) def test_get_preseed_template_returns_None_when_no_filenames(self): # get_preseed_template() returns None when no filenames are passed in. 
self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", [self.make_dir()]) self.assertEqual((None, None), get_preseed_template(())) def test_get_preseed_template_find_template_in_first_location(self): template_content = factory.make_string() template_path = self.make_file(contents=template_content) template_filename = os.path.basename(template_path) locations = [ os.path.dirname(template_path), self.make_dir(), ] self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", locations) self.assertEqual( (template_path, template_content), get_preseed_template([template_filename])) def test_get_preseed_template_find_template_in_last_location(self): template_content = factory.make_string() template_path = self.make_file(contents=template_content) template_filename = os.path.basename(template_path) locations = [ self.make_dir(), os.path.dirname(template_path), ] self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", locations) self.assertEqual( (template_path, template_content), get_preseed_template([template_filename])) class TestLoadPreseedTemplate(MAASServerTestCase): """Tests for `load_preseed_template`.""" def setUp(self): super(TestLoadPreseedTemplate, self).setUp() self.location = self.make_dir() self.patch( settings, "PRESEED_TEMPLATE_LOCATIONS", [self.location]) def create_template(self, location, name, content=None): # Create a tempita template in the given `self.location` with the # given `name`. If content is not provided, a random content # will be put inside the template. path = os.path.join(self.location, name) rendered_content = None if content is None: rendered_content = factory.make_string() content = b'{{def stuff}}%s{{enddef}}{{stuff}}' % rendered_content with open(path, "wb") as outf: outf.write(content) return rendered_content def test_load_preseed_template_returns_PreseedTemplate(self): name = factory.make_string() self.create_template(self.location, name) node = factory.make_Node() template = load_preseed_template(node, name) self.assertIsInstance(template, PreseedTemplate) def test_load_preseed_template_raises_if_no_template(self): node = factory.make_Node() unknown_template_name = factory.make_string() self.assertRaises( TemplateNotFoundError, load_preseed_template, node, unknown_template_name) def test_load_preseed_template_generic_lookup(self): # The template lookup method ends up picking up a template named # 'generic' if no more specific template exist. content = self.create_template(self.location, GENERIC_FILENAME) node = factory.make_Node(hostname=factory.make_string()) template = load_preseed_template(node, factory.make_string()) self.assertEqual(content, template.substitute()) def test_load_preseed_template_prefix_lookup(self): # 2nd last in the hierarchy is a template named 'prefix'. prefix = factory.make_string() # Create the generic template. This one will be ignored due to the # presence of a more specific template. self.create_template(self.location, GENERIC_FILENAME) # Create the 'prefix' template. This is the one which will be # picked up. content = self.create_template(self.location, prefix) node = factory.make_Node(hostname=factory.make_string()) template = load_preseed_template(node, prefix) self.assertEqual(content, template.substitute()) def test_load_preseed_template_node_specific_lookup(self): # At the top of the lookup hierarchy is a template specific to this # node. It will be used first if it's present. prefix = factory.make_string() osystem = factory.make_string() release = factory.make_string() # Create the generic and 'prefix' templates. 
They will be ignored # due to the presence of a more specific template. self.create_template(self.location, GENERIC_FILENAME) self.create_template(self.location, prefix) node = factory.make_Node(hostname=factory.make_string()) node_template_name = "%s_%s_%s_%s_%s" % ( prefix, osystem, node.architecture.replace('/', '_'), release, node.hostname) # Create the node-specific template. content = self.create_template(self.location, node_template_name) template = load_preseed_template(node, prefix, osystem, release) self.assertEqual(content, template.substitute()) def test_load_preseed_template_with_inherits(self): # A preseed file can "inherit" from another file. prefix = factory.make_string() # Create preseed template. master_template_name = factory.make_string() preseed_content = '{{inherit "%s"}}' % master_template_name self.create_template(self.location, prefix, preseed_content) master_content = self.create_template( self.location, master_template_name) node = factory.make_Node() template = load_preseed_template(node, prefix) self.assertEqual(master_content, template.substitute()) def test_load_preseed_template_parent_lookup_doesnt_include_default(self): # The lookup for parent templates does not include the default # 'generic' file. prefix = factory.make_string() # Create 'generic' template. It won't be used because the # lookup for parent templates does not use the 'generic' template. self.create_template(self.location, GENERIC_FILENAME) unknown_master_template_name = factory.make_string() # Create preseed template. preseed_content = '{{inherit "%s"}}' % unknown_master_template_name self.create_template(self.location, prefix, preseed_content) node = factory.make_Node() template = load_preseed_template(node, prefix) self.assertRaises( TemplateNotFoundError, template.substitute) class TestPreseedContext(MAASServerTestCase): """Tests for `get_preseed_context`.""" def test_get_preseed_context_contains_keys(self): release = factory.make_string() nodegroup = factory.make_NodeGroup(maas_url=factory.make_string()) context = get_preseed_context(release, nodegroup) self.assertItemsEqual( ['osystem', 'release', 'metadata_enlist_url', 'server_host', 'server_url', 'main_archive_hostname', 'main_archive_directory', 'ports_archive_hostname', 'ports_archive_directory', 'enable_http_proxy', 'http_proxy'], context) def test_get_preseed_context_archive_refs(self): # urlparse lowercases the hostnames. That should not have any # impact but for testing, create lower-case hostnames. 
main_archive = factory.make_url(netloc="main-archive.example.com") ports_archive = factory.make_url(netloc="ports-archive.example.com") Config.objects.set_config('main_archive', main_archive) Config.objects.set_config('ports_archive', ports_archive) nodegroup = factory.make_NodeGroup(maas_url=factory.make_string()) context = get_preseed_context(factory.make_Node(), nodegroup) parsed_main_archive = urlparse(main_archive) parsed_ports_archive = urlparse(ports_archive) self.assertEqual( ( parsed_main_archive.hostname, parsed_main_archive.path, parsed_ports_archive.hostname, parsed_ports_archive.path, ), ( context['main_archive_hostname'], context['main_archive_directory'], context['ports_archive_hostname'], context['ports_archive_directory'], )) class TestNodePreseedContext( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `get_node_preseed_context`.""" def test_get_node_preseed_context_contains_keys(self): node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') release = factory.make_string() context = get_node_preseed_context(node, release) self.assertItemsEqual( ['driver', 'driver_package', 'node', 'node_disable_pxe_data', 'node_disable_pxe_url', 'preseed_data', 'third_party_drivers', 'license_key', ], context) def test_context_contains_third_party_drivers(self): node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') release = factory.make_string() enable_third_party_drivers = factory.pick_bool() Config.objects.set_config( 'enable_third_party_drivers', enable_third_party_drivers) context = get_node_preseed_context(node, release) self.assertEqual( enable_third_party_drivers, context['third_party_drivers']) class TestPreseedTemplate(MAASServerTestCase): """Tests for class:`PreseedTemplate`.""" def test_escape_shell(self): template = PreseedTemplate("{{var|escape.shell}}") var = "$ ! ()" observed = template.substitute(var=var) self.assertEqual(quote(var), observed) class TestRenderPreseed( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `render_preseed`. These tests check that the templates render (i.e. that no variable is missing). """ # Create a scenario for each possible value of PRESEED_TYPE except # enlistment. Those have their own test case. scenarios = [ (name, {'preseed': value}) for name, value in map_enum(PRESEED_TYPE).items() if not value.startswith('enlist') ] def test_render_preseed(self): node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed(node, self.preseed, "precise") # The test really is that the preseed is rendered without an # error. self.assertIsInstance(preseed, bytes) def test_get_preseed_uses_nodegroup_maas_url(self): ng_url = 'http://%s' % factory.make_hostname() self.rpc_nodegroup.maas_url = ng_url self.rpc_nodegroup.save() maas_url = factory.make_simple_http_url() node = factory.make_Node( nodegroup=self.rpc_nodegroup, status=NODE_STATUS.COMMISSIONING) self.configure_get_boot_images_for_node(node, 'install') self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) preseed = render_preseed(node, self.preseed, "precise") self.assertThat( preseed, MatchesAll(*[Contains(ng_url), Not(Contains(maas_url))])) class TestRenderEnlistmentPreseed(MAASServerTestCase): """Tests for `render_enlistment_preseed`.""" # Create a scenario for each possible value of PRESEED_TYPE for # enlistment. The rest have their own test case. 
scenarios = [ (name, {'preseed': value}) for name, value in map_enum(PRESEED_TYPE).items() if value.startswith('enlist') ] def test_render_enlistment_preseed(self): preseed = render_enlistment_preseed(self.preseed, "precise") # The test really is that the preseed is rendered without an # error. self.assertIsInstance(preseed, bytes) def test_render_enlistment_preseed_valid_yaml(self): preseed = render_enlistment_preseed(self.preseed, "precise") self.assertTrue(yaml.safe_load(preseed)) def test_get_preseed_uses_nodegroup_maas_url(self): ng_url = 'http://%s' % factory.make_hostname() maas_url = factory.make_simple_http_url() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) nodegroup = factory.make_NodeGroup(maas_url=ng_url) preseed = render_enlistment_preseed( self.preseed, "precise", nodegroup=nodegroup) self.assertThat( preseed, MatchesAll(*[Contains(ng_url), Not(Contains(maas_url))])) class TestRenderPreseedWindows( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `render_preseed`. These tests check that the templates render (i.e. that no variable is missing). """ # Create a scenario for each possible windows release. scenarios = [ (release, {'release': release}) for release in ['win2012', 'win2012hv', 'win2012hvr2', 'win2012r2'] ] def return_windows_specific_preseed_data(self): rpc_get_preseed_data = self.rpc_cluster.GetPreseedData rpc_get_preseed_data.side_effect = None rpc_get_preseed_data.return_value = defer.succeed({"data": { 'maas_metadata_url': factory.make_name("metadata-url"), 'maas_oauth_consumer_secret': factory.make_name("consumer-secret"), 'maas_oauth_consumer_key': factory.make_name("consumer-key"), 'maas_oauth_token_key': factory.make_name("token-key"), 'maas_oauth_token_secret': factory.make_name("token-secret"), 'hostname': factory.make_name("hostname"), }}) def test_render_preseed(self): self.return_windows_specific_preseed_data() node = factory.make_Node( nodegroup=self.rpc_nodegroup, osystem='windows', architecture='amd64/generic', distro_series=self.release, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed( node, '', osystem='windows', release=self.release) # The test really is that the preseed is rendered without an # error. 
        self.assertIsInstance(preseed, bytes)


class TestComposeCurtinMAASReporter(MAASServerTestCase):

    def load_reporter(self, preseeds):
        [reporter_yaml] = preseeds
        return yaml.safe_load(reporter_yaml)

    def test__curtin_maas_reporter_with_events_support(self):
        node = factory.make_Node()
        token = NodeKey.objects.get_token_for_node(node)
        reporter = curtin_maas_reporter(node, True)
        self.assertEqual(['reporting', 'install'], list(reporter.keys()))
        self.assertEqual(
            absolute_reverse(
                'metadata-status', args=[node.system_id],
                base_url=node.nodegroup.maas_url),
            reporter['reporting']['maas']['endpoint'])
        self.assertEqual(
            'webhook', reporter['reporting']['maas']['type'])
        self.assertEqual(
            token.consumer.key,
            reporter['reporting']['maas']['consumer_key'])
        self.assertEqual(
            token.key, reporter['reporting']['maas']['token_key'])
        self.assertEqual(
            token.secret, reporter['reporting']['maas']['token_secret'])

    def test__curtin_maas_reporter_without_events_support(self):
        node = factory.make_Node()
        token = NodeKey.objects.get_token_for_node(node)
        reporter = curtin_maas_reporter(node, False)
        self.assertEqual(['reporter'], list(reporter.keys()))
        self.assertEqual(
            absolute_reverse(
                'curtin-metadata-version', args=['latest'],
                query={'op': 'signal'}, base_url=node.nodegroup.maas_url),
            reporter['reporter']['maas']['url'])
        self.assertEqual(
            token.consumer.key,
            reporter['reporter']['maas']['consumer_key'])
        self.assertEqual(
            token.key, reporter['reporter']['maas']['token_key'])
        self.assertEqual(
            token.secret, reporter['reporter']['maas']['token_secret'])

    def test__returns_list_of_yaml_strings_matching_curtin(self):
        preseeds = compose_curtin_maas_reporter(factory.make_Node())
        self.assertIsInstance(preseeds, list)
        self.assertThat(preseeds, HasLength(1))
        reporter = self.load_reporter(preseeds)
        self.assertIsInstance(reporter, dict)
        if curtin_supports_webhook_events():
            self.assertEqual(['reporting', 'install'], list(reporter.keys()))
        else:
            self.assertEqual(['reporter'], list(reporter.keys()))


class TestComposeCurtinSwapSpace(MAASServerTestCase):

    def test__returns_null_swap_size(self):
        node = factory.make_Node()
        self.assertEqual(node.swap_size, None)
        swap_preseed = compose_curtin_swap_preseed(node)
        self.assertEqual(swap_preseed, [])

    def test__returns_set_swap_size(self):
        node = factory.make_Node()
        node.swap_size = 10 * 1000 ** 3
        swap_preseed = compose_curtin_swap_preseed(node)
        self.assertEqual(swap_preseed, ['swap: {size: 10000000000B}\n'])


class TestComposeCurtinKernel(MAASServerTestCase):

    def test__returns_null_kernel(self):
        node = factory.make_Node()
        self.assertEqual(node.hwe_kernel, None)
        kernel_preseed = compose_curtin_kernel_preseed(node)
        self.assertEqual(kernel_preseed, [])

    def test__returns_set_kernel(self):
        self.patch(
            BootResource.objects,
            'get_kpackage_for_node').return_value = (
                'linux-image-generic-lts-vivid')
        node = factory.make_Node(hwe_kernel='hwe-v')
        self.assertEqual(node.hwe_kernel, 'hwe-v')
        kernel_preseed = compose_curtin_kernel_preseed(node)
        self.assertEqual(kernel_preseed,
                         ['kernel:\n' +
                          '  mapping: {}\n' +
                          '  package: linux-image-generic-lts-vivid\n'])


class TestComposeCurtinVerbose(MAASServerTestCase):

    def test__returns_empty_when_false(self):
        Config.objects.set_config("curtin_verbose", False)
        self.assertEqual([], compose_curtin_verbose_preseed())

    def test__returns_verbosity_config(self):
        Config.objects.set_config("curtin_verbose", True)
        preseed = compose_curtin_verbose_preseed()
        self.assertEquals({
            "verbosity": 3,
            "showtrace": True,
            }, yaml.load(preseed[0]))


class TestGetCurtinMergedConfig(MAASServerTestCase):

    def test__merges_configs_together(self):
        configs = [
            yaml.safe_dump({
                "maas": {
                    "test": "data"
                },
                "override": "data",
            }),
            yaml.safe_dump({
                "maas2": {
                    "test": "data2"
                },
                "override": "data2",
            }),
        ]
        mock_yaml_config = self.patch_autospec(
            preseed_module, "get_curtin_yaml_config")
        mock_yaml_config.return_value = configs
        self.assertEquals({
            "maas": {
                "test": "data"
            },
            "maas2": {
                "test": "data2"
            },
            "override": "data2",
        }, get_curtin_merged_config(sentinel.node))
        self.assertThat(mock_yaml_config, MockCalledOnceWith(sentinel.node))


class TestGetCurtinUserData(
        PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase):
    """Tests for `get_curtin_userdata`."""

    def test_get_curtin_userdata_calls_compose_curtin_config_on_ubuntu(self):
        node = factory.make_Node(
            nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH,
            interface=True)
        factory.make_NodeGroupInterface(
            node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        arch, subarch = node.architecture.split('/')
        self.configure_get_boot_images_for_node(node, 'xinstall')
        mock_compose_storage = self.patch(
            preseed_module, "compose_curtin_storage_config")
        mock_compose_network = self.patch(
            preseed_module, "compose_curtin_network_config")
        self.patch(
            preseed_module, "curtin_supports_custom_storage").value = True
        node.osystem = u'ubuntu'
        user_data = get_curtin_userdata(node)
        self.assertIn("PREFIX='curtin'", user_data)
        self.assertThat(mock_compose_storage, MockCalledOnceWith(node))
        self.assertThat(mock_compose_network, MockCalledOnceWith(node))

    def test_get_curtin_userdata_doesnt_call_compose_config_on_otheros(self):
        node = factory.make_Node(
            nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH,
            interface=True)
        factory.make_NodeGroupInterface(
            node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        arch, subarch = node.architecture.split('/')
        self.configure_get_boot_images_for_node(node, 'xinstall')
        mock_compose_storage = self.patch(
            preseed_module, "compose_curtin_storage_config")
        mock_compose_network = self.patch(
            preseed_module, "compose_curtin_network_config")
        self.patch(
            preseed_module, "curtin_supports_custom_storage").value = True
        node.osystem = factory.make_name("osystem")
        user_data = get_curtin_userdata(node)
        self.assertIn("PREFIX='curtin'", user_data)
        self.assertThat(mock_compose_storage, MockNotCalled())
        self.assertThat(mock_compose_network, MockNotCalled())

    def test_get_curtin_userdata_calls_curtin_supports_custom_storage(self):
        node = factory.make_Node(
            nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH,
            interface=True)
        factory.make_NodeGroupInterface(
            node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP)
        arch, subarch = node.architecture.split('/')
        self.configure_get_boot_images_for_node(node, 'xinstall')
        mock_supports_storage = self.patch(
            preseed_module, "curtin_supports_custom_storage")
        mock_supports_storage.return_value = False
        user_data = get_curtin_userdata(node)
        self.assertIn("PREFIX='curtin'", user_data)
        self.assertThat(mock_supports_storage, MockCalledOnceWith())


class TestGetCurtinUserDataOS(
        PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase):
    """Tests for `get_curtin_userdata` using os specific scenarios."""

    # Create a scenario for each possible os specific preseed.
scenarios = [ (name, {'os_name': name}) for name in ['centos', 'suse', 'windows'] ] def test_get_curtin_userdata(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, osystem=self.os_name, boot_type=NODE_BOOT.FASTPATH, interface=True) factory.make_NodeGroupInterface( node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = node.architecture.split('/') self.configure_get_boot_images_for_node(node, 'xinstall') user_data = get_curtin_userdata(node) # Just check that the user data looks good. self.assertIn("PREFIX='curtin'", user_data) class TestCurtinUtilities( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for the curtin-related utilities.""" def test_get_curtin_config(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH) self.configure_get_boot_images_for_node(node, 'xinstall') config = get_curtin_config(node) self.assertThat( config, ContainsAll( [ 'mode: reboot', "debconf_selections:", ] )) def make_fastpath_node(self, main_arch=None): """Return a `Node`, with FPI enabled, and the given main architecture. :param main_arch: A main architecture, such as `i386` or `armhf`. A subarchitecture will be made up. """ if main_arch is None: main_arch = factory.make_name('arch') arch = '%s/%s' % (main_arch, factory.make_name('subarch')) node = factory.make_Node( nodegroup=self.rpc_nodegroup, architecture=arch, boot_type=NODE_BOOT.FASTPATH) return node def extract_archive_setting(self, userdata): """Extract the `ubuntu_archive` setting from `userdata`.""" userdata_lines = [] for line in userdata.splitlines(): line = line.strip() if line.startswith('ubuntu_archive'): userdata_lines.append(line) self.assertThat(userdata_lines, HasLength(1)) [userdata_line] = userdata_lines key, value = userdata_line.split(':', 1) return value.strip() def summarise_url(self, url): """Return just the hostname and path from `url`, normalised.""" # This is needed because the userdata deliberately makes some minor # changes to the archive URLs, making it harder to recognise which # archive they use: slashes are added, schemes are hard-coded. 
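        # For instance (hypothetical URL):
        #     summarise_url('http://archive.ubuntu.com/ubuntu/')
        # returns ('archive.ubuntu.com', 'ubuntu'): the scheme is dropped
        # and surrounding slashes on the path are stripped.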
parsed_result = urlparse(url) return parsed_result.netloc, parsed_result.path.strip('/') def test_get_curtin_config_uses_main_archive_for_i386(self): node = self.make_fastpath_node('i386') self.configure_get_boot_images_for_node(node, 'xinstall') userdata = get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('main_archive')), self.summarise_url(self.extract_archive_setting(userdata))) def test_get_curtin_config_uses_main_archive_for_amd64(self): node = self.make_fastpath_node('amd64') self.configure_get_boot_images_for_node(node, 'xinstall') userdata = get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('main_archive')), self.summarise_url(self.extract_archive_setting(userdata))) def test_get_curtin_config_uses_ports_archive_for_other_arch(self): node = self.make_fastpath_node() self.configure_get_boot_images_for_node(node, 'xinstall') userdata = get_curtin_config(node) self.assertEqual( self.summarise_url(Config.objects.get_config('ports_archive')), self.summarise_url(self.extract_archive_setting(userdata))) def test_get_curtin_context(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH) context = get_curtin_context(node) self.assertItemsEqual( ['curtin_preseed'], context) self.assertIn('cloud-init', context['curtin_preseed']) def test_get_curtin_image_calls_get_boot_images_for(self): osystem = factory.make_name('os') series = factory.make_name('series') architecture = make_usable_architecture(self) arch, subarch = architecture.split('/') node = factory.make_Node( osystem=osystem, distro_series=series, architecture=architecture) mock_get_boot_images_for = self.patch( preseed_module, 'get_boot_images_for') mock_get_boot_images_for.return_value = [ make_rpc_boot_image(purpose='xinstall')] get_curtin_image(node) self.assertThat( mock_get_boot_images_for, MockCalledOnceWith(node.nodegroup, osystem, arch, subarch, series)) def test_get_curtin_image_raises_ClusterUnavailable(self): node = factory.make_Node() self.patch( preseed_module, 'get_boot_images_for').side_effect = NoConnectionsAvailable self.assertRaises(ClusterUnavailable, get_curtin_image, node) def test_get_curtin_image_raises_MissingBootImage(self): node = factory.make_Node() self.patch( preseed_module, 'get_boot_images_for').return_value = [] self.assertRaises(MissingBootImage, get_curtin_image, node) def test_get_curtin_image_returns_xinstall_image(self): node = factory.make_Node() other_images = [make_rpc_boot_image() for _ in range(3)] xinstall_image = make_rpc_boot_image(purpose='xinstall') images = other_images + [xinstall_image] self.patch( preseed_module, 'get_boot_images_for').return_value = images self.assertEqual(xinstall_image, get_curtin_image(node)) def test_get_curtin_installer_url_returns_url(self): osystem = make_usable_osystem(self) series = osystem['default_release'] architecture = make_usable_architecture(self) xinstall_path = factory.make_name('xi_path') xinstall_type = factory.make_name('xi_type') cluster_ip = factory.make_ipv4_address() node = factory.make_Node( nodegroup=self.rpc_nodegroup, osystem=osystem['name'], architecture=architecture, distro_series=series, boot_cluster_ip=cluster_ip) arch, subarch = architecture.split('/') boot_image = make_rpc_boot_image( osystem=osystem['name'], release=series, architecture=arch, subarchitecture=subarch, purpose='xinstall', xinstall_path=xinstall_path, xinstall_type=xinstall_type) self.patch( preseed_module, 'get_boot_images_for').return_value = [boot_image] 
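        # As the assertion below spells out, the installer URL is expected
        # to take the form
        # <xinstall_type>:http://<cluster-ip>:5248/images/
        #     <os>/<arch>/<subarch>/<series>/<label>/<xinstall_path>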
installer_url = get_curtin_installer_url(node) self.assertEqual( '%s:http://%s:5248/images/%s/%s/%s/%s/%s/%s' % ( xinstall_type, cluster_ip, osystem['name'], arch, subarch, series, boot_image['label'], xinstall_path), installer_url) def test_get_curtin_installer_url_fails_if_no_boot_image(self): osystem = make_usable_osystem(self) series = osystem['default_release'] architecture = make_usable_architecture(self) node = factory.make_Node( nodegroup=self.rpc_nodegroup, osystem=osystem['name'], architecture=architecture, distro_series=series) # Make boot image that is not xinstall arch, subarch = architecture.split('/') boot_image = make_rpc_boot_image( osystem=osystem['name'], release=series, architecture=arch, subarchitecture=subarch) self.patch( preseed_module, 'get_boot_images_for').return_value = [boot_image] error = self.assertRaises( MissingBootImage, get_curtin_installer_url, node) arch, subarch = architecture.split('/') msg = ( "No image could be found for the given selection: " "os=%s, arch=%s, subarch=%s, series=%s, purpose=xinstall." % ( osystem['name'], arch, subarch, node.get_distro_series(), )) self.assertIn(msg, "%s" % error) def test_get_curtin_installer_url_doesnt_append_on_tgz(self): osystem = make_usable_osystem(self) series = osystem['default_release'] architecture = make_usable_architecture(self) xinstall_path = factory.make_name('xi_path') xinstall_type = 'tgz' cluster_ip = factory.make_ipv4_address() node = factory.make_Node( nodegroup=self.rpc_nodegroup, osystem=osystem['name'], architecture=architecture, distro_series=series, boot_cluster_ip=cluster_ip) factory.make_NodeGroupInterface( node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = architecture.split('/') boot_image = make_rpc_boot_image( osystem=osystem['name'], release=series, architecture=arch, subarchitecture=subarch, purpose='xinstall', xinstall_path=xinstall_path, xinstall_type=xinstall_type) self.patch( preseed_module, 'get_boot_images_for').return_value = [boot_image] installer_url = get_curtin_installer_url(node) self.assertEqual( 'http://%s:5248/images/%s/%s/%s/%s/%s/%s' % ( cluster_ip, osystem['name'], arch, subarch, series, boot_image['label'], xinstall_path), installer_url) def test_get_supported_purposes_for_node_calls_get_boot_images_for(self): osystem = factory.make_name('os') series = factory.make_name('series') architecture = make_usable_architecture(self) arch, subarch = architecture.split('/') node = factory.make_Node( osystem=osystem, distro_series=series, architecture=architecture) mock_get_boot_images_for = self.patch( preseed_module, 'get_boot_images_for') mock_get_boot_images_for.return_value = [ make_rpc_boot_image(purpose='xinstall')] get_supported_purposes_for_node(node) self.assertThat( mock_get_boot_images_for, MockCalledOnceWith(node.nodegroup, osystem, arch, subarch, series)) def test_get_supported_purposes_for_node_raises_ClusterUnavailable(self): node = factory.make_Node() self.patch( preseed_module, 'get_boot_images_for').side_effect = NoConnectionsAvailable self.assertRaises( ClusterUnavailable, get_supported_purposes_for_node, node) def test_get_supported_purposes_for_node_returns_set_of_purposes(self): osystem = factory.make_name('os') series = factory.make_name('series') architecture = make_usable_architecture(self) arch, subarch = architecture.split('/') node = factory.make_Node( osystem=osystem, distro_series=series, architecture=architecture) mock_get_boot_images_for = self.patch( preseed_module, 'get_boot_images_for') 
mock_get_boot_images_for.return_value = [ make_rpc_boot_image(purpose='xinstall'), make_rpc_boot_image(purpose='xinstall'), make_rpc_boot_image(purpose='install')] self.assertItemsEqual( {'xinstall', 'install'}, get_supported_purposes_for_node(node)) def test_get_available_purpose_for_node_raises_PreseedError(self): node = factory.make_Node() self.patch( preseed_module, 'get_supported_purposes_for_node').return_value = set() self.assertRaises( PreseedError, get_available_purpose_for_node, [], node) def test_get_available_purpose_for_node_returns_best_purpose_match(self): node = factory.make_Node() purposes = [factory.make_name('purpose') for _ in range(3)] purpose = random.choice(purposes) self.patch( preseed_module, 'get_supported_purposes_for_node').return_value = [purpose] self.assertEqual( purpose, get_available_purpose_for_node(purposes, node)) def test_get_preseed_type_for_commissioning(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) self.assertEqual( PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) def test_get_preseed_type_for_disk_erasing(self): node = factory.make_Node(status=NODE_STATUS.DISK_ERASING) self.assertEqual( PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) def test_get_preseed_type_for_default(self): node = factory.make_Node( boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'install') self.assertEqual( PRESEED_TYPE.DEFAULT, get_preseed_type_for(node)) def test_get_preseed_type_for_curtin(self): node = factory.make_Node( boot_type=NODE_BOOT.FASTPATH, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'xinstall') self.assertEqual( PRESEED_TYPE.CURTIN, get_preseed_type_for(node)) def test_get_preseed_type_for_default_when_curtin_not_supported(self): node = factory.make_Node( boot_type=NODE_BOOT.FASTPATH, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'install') self.assertEqual( PRESEED_TYPE.DEFAULT, get_preseed_type_for(node)) def test_get_preseed_type_for_curtin_when_default_not_supported(self): node = factory.make_Node( boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'xinstall') self.assertEqual( PRESEED_TYPE.CURTIN, get_preseed_type_for(node)) def test_get_preseed_type_for_poweroff(self): # A 'ready' node isn't supposed to be powered on and thus # will get a 'commissioning' preseed in order to be powered # down. 
node = factory.make_Node( boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.READY) self.assertEqual( PRESEED_TYPE.COMMISSIONING, get_preseed_type_for(node)) class TestRenderPreseedArchives( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Test that the default preseed contains the default mirrors.""" def test_render_preseed_uses_default_archives_intel(self): nodes = [ factory.make_Node( nodegroup=self.rpc_nodegroup, status=NODE_STATUS.DEPLOYING, architecture=make_usable_architecture( self, arch_name="i386", subarch_name="generic")), factory.make_Node( nodegroup=self.rpc_nodegroup, status=NODE_STATUS.DEPLOYING, architecture=make_usable_architecture( self, arch_name="amd64", subarch_name="generic")), ] boot_images = [ self.make_rpc_boot_image_for(node, 'install') for node in nodes ] self.patch( preseed_module, 'get_boot_images_for').return_value = boot_images default_snippets = [ "d-i mirror/http/hostname string archive.ubuntu.com", "d-i mirror/http/directory string /ubuntu", ] for node in nodes: preseed = render_preseed(node, PRESEED_TYPE.DEFAULT, "precise") self.assertThat(preseed, ContainsAll(default_snippets)) def test_render_preseed_uses_default_archives_arm(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, architecture=make_usable_architecture( self, arch_name="armhf", subarch_name="generic")) self.configure_get_boot_images_for_node(node, 'install') default_snippets = [ "d-i mirror/http/hostname string ports.ubuntu.com", "d-i mirror/http/directory string /ubuntu-ports", ] preseed = render_preseed(node, PRESEED_TYPE.DEFAULT, "precise") self.assertThat(preseed, ContainsAll(default_snippets)) class TestPreseedProxy( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): def test_preseed_uses_default_proxy(self): server_host = "%s.example.com" % factory.make_hostname() url = factory.make_simple_http_url(netloc=server_host) self.useFixture(RegionConfigurationFixture(maas_url=url)) expected_proxy_statement = ( "mirror/http/proxy string http://%s:8000" % server_host) node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed( node, PRESEED_TYPE.DEFAULT, "precise") self.assertIn(expected_proxy_statement, preseed) def test_preseed_uses_configured_proxy(self): http_proxy = 'http://%s:%d/%s' % ( factory.make_string(), factory.pick_port(), factory.make_string()) Config.objects.set_config('http_proxy', http_proxy) expected_proxy_statement = ( "mirror/http/proxy string %s" % http_proxy) node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') preseed = render_preseed( node, PRESEED_TYPE.DEFAULT, "precise") self.assertIn(expected_proxy_statement, preseed) class TestPreseedMethods( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for `get_enlist_preseed` and `get_preseed`. These tests check that the preseed templates render and 'look right'. 
""" def test_get_preseed_returns_default_preseed(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.DEBIAN, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'install') preseed = get_preseed(node) self.assertIn('preseed/late_command', preseed) def test_get_preseed_returns_curtin_preseed(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, boot_type=NODE_BOOT.FASTPATH, status=NODE_STATUS.DEPLOYING) self.configure_get_boot_images_for_node(node, 'xinstall') preseed = get_preseed(node) curtin_url = reverse('curtin-metadata') self.assertIn(curtin_url, preseed) def test_get_enlist_preseed_returns_enlist_preseed(self): preseed = get_enlist_preseed() self.assertTrue(preseed.startswith('#cloud-config')) def test_get_preseed_returns_commissioning_preseed(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, status=NODE_STATUS.COMMISSIONING) preseed = get_preseed(node) self.assertIn('#cloud-config', preseed) def test_get_preseed_returns_commissioning_preseed_for_disk_erasing(self): node = factory.make_Node( nodegroup=self.rpc_nodegroup, status=NODE_STATUS.DISK_ERASING) preseed = get_preseed(node) self.assertIn('#cloud-config', preseed) class TestPreseedURLs( PreseedRPCMixin, BootImageHelperMixin, MAASServerTestCase): """Tests for functions that return preseed URLs.""" def test_compose_enlistment_preseed_url_links_to_enlistment_preseed(self): response = self.client.get(compose_enlistment_preseed_url()) self.assertEqual( (httplib.OK, get_enlist_preseed()), (response.status_code, response.content)) def test_compose_enlistment_preseed_url_returns_absolute_link(self): maas_url = factory.make_simple_http_url(path='') self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) self.assertThat( compose_enlistment_preseed_url(), StartsWith(maas_url)) def test_compose_enlistment_preseed_url_returns_abs_link_wth_nodegrp(self): maas_url = factory.make_simple_http_url(path='') self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) nodegroup = factory.make_NodeGroup(maas_url) self.assertThat( compose_enlistment_preseed_url(nodegroup=nodegroup), StartsWith(maas_url)) def test_compose_preseed_url_links_to_preseed_for_node(self): node = factory.make_Node(nodegroup=self.rpc_nodegroup) self.configure_get_boot_images_for_node(node, 'install') response = self.client.get(compose_preseed_url(node)) self.assertEqual( (httplib.OK, get_preseed(node)), (response.status_code, response.content)) def test_compose_preseed_url_returns_absolute_link(self): self.assertThat( compose_preseed_url(factory.make_Node()), StartsWith('http://')) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_preseed_network.py0000644000000000000000000003222713056115004023573 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test `maasserver.preseed_network`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from textwrap import dedent from maasserver.dns.zonegenerator import ( get_dns_search_paths, get_dns_server_address, ) from maasserver.enum import ( INTERFACE_TYPE, IPADDRESS_FAMILY, IPADDRESS_TYPE, ) from maasserver.preseed_network import compose_curtin_network_config from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPNetwork from testtools.matchers import ( ContainsDict, Equals, IsInstance, MatchesDict, MatchesListwise, ) import yaml class AssertNetworkConfigMixin: IFACE_CONFIG = dedent("""\ - id: %(name)s name: %(name)s type: physical mac_address: %(mac)s """) BOND_CONFIG = dedent("""\ - id: %(name)s name: %(name)s type: bond mac_address: %(mac)s bond_interfaces: """) BRIDGE_CONFIG = dedent("""\ - id: %(name)s name: %(name)s type: bridge mac_address: %(mac)s bridge_interfaces: """) VLAN_CONFIG = dedent("""\ - id: %(name)s name: %(name)s type: vlan vlan_link: %(parent)s vlan_id: %(vlan_id)s """) def assertNetworkConfig(self, expected, output): output = output[0] output = yaml.load(output) self.assertThat(output, ContainsDict({ "network_commands": MatchesDict({ "builtin": Equals(["curtin", "net-meta", "custom"]), }), "network": MatchesDict({ "version": Equals(1), "config": IsInstance(list), }), })) expected_network = yaml.load(expected) output_network = output["network"]["config"] expected_equals = map(Equals, expected_network) self.assertThat(output_network, MatchesListwise(expected_equals)) def collect_interface_config(self, node, filter="physical"): interfaces = node.interface_set.filter(enabled=True).order_by('id') if filter: interfaces = interfaces.filter(type=filter) gateways = node.get_default_gateways() ipv4_gateway_set, ipv6_gateway_set = False, False def set_gateway_ip(iface, subnet, ret, ipv4_set, ipv6_set): ip_family = subnet.get_ipnetwork().version if ip_family == IPADDRESS_FAMILY.IPv4 and ipv4_set: return (ret, ipv4_set, ipv6_set) elif ip_family == IPADDRESS_FAMILY.IPv6 and ipv6_set: return (ret, ipv4_set, ipv6_set) for gateway in gateways: if gateway is not None: iface_id, subnet_id, gateway_ip = gateway if (iface_id == iface.id and subnet_id == subnet.id and gateway_ip == subnet.gateway_ip): ret += " gateway: %s\n" % gateway_ip if ip_family == IPADDRESS_FAMILY.IPv4: ipv4_set = True elif ip_family == IPADDRESS_FAMILY.IPv6: ipv6_set = True return (ret, ipv4_set, ipv6_set) def get_param_value(value): if isinstance(value, (bytes, unicode)): return value elif isinstance(value, bool): return 1 if value else 0 else: return value def set_interface_params(iface, ret): if iface.params: for key, value in iface.params.items(): if not key.startswith("bond_") and key != 'mtu': ret += " %s: %s\n" % (key, get_param_value(value)) ret += " mtu: %s\n" % iface.get_effective_mtu() return ret def is_link_up(addresses): if len(addresses) == 0: return True elif len(addresses) == 1: address = addresses[0] if (address.alloc_type == IPADDRESS_TYPE.STICKY and not address.ip): return True return False ret = "" for iface in interfaces: self.assertIn(iface.type, ["physical", "bond", "vlan"]) fmt_dict = {"name": iface.name, "mac": unicode(iface.mac_address)} if iface.type == "physical": ret += self.IFACE_CONFIG % fmt_dict elif iface.type == "bridge": ret += self.BRIDGE_CONFIG % fmt_dict for parent in iface.parents.order_by('id'): ret += " - %s" % 
parent.name elif iface.type == "bond": ret += self.BOND_CONFIG % fmt_dict for parent in iface.parents.order_by('id'): ret += " - %s\n" % parent.name ret += " params:\n" if iface.params: for key, value in iface.params.items(): if key.startswith("bond_"): key = key.replace("bond_", "bond-") ret += " %s: %s\n" % ( key, get_param_value(value)) elif iface.type == "vlan": fmt_dict['parent'] = iface.parents.first().get_name() fmt_dict['vlan_id'] = iface.vlan.vid ret += self.VLAN_CONFIG % fmt_dict ret = set_interface_params(iface, ret) addresses = iface.ip_addresses.exclude( alloc_type__in=[ IPADDRESS_TYPE.DISCOVERED, IPADDRESS_TYPE.DHCP, ]).order_by('id') ret += " subnets:\n" if is_link_up(addresses): ret += " - type: manual\n" else: for address in addresses: subnet = address.subnet if subnet is not None: subnet_len = subnet.cidr.split('/')[1] ret += " - address: %s/%s\n" % ( unicode(address.ip), subnet_len) ret += " type: static\n" ret, ipv4_gateway_set, ipv6_gateway_set = ( set_gateway_ip( iface, subnet, ret, ipv4_gateway_set, ipv6_gateway_set)) if subnet.dns_servers is not None: ret += " dns_nameservers:\n" for dns_server in subnet.dns_servers: ret += " - %s\n" % dns_server dhcp_types = set() for dhcp_ip in iface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.DHCP): if dhcp_ip.subnet is None: dhcp_types.add(4) dhcp_types.add(6) else: dhcp_types.add( dhcp_ip.subnet.get_ipnetwork().version) if dhcp_types == set([4, 6]): ret += " - type: dhcp\n" elif dhcp_types == set([4]): ret += " - type: dhcp4\n" elif dhcp_types == set([6]): ret += " - type: dhcp6\n" return ret def collectDNSConfig(self, node): config = "- type: nameserver\n address: %s\n search:\n" % ( get_dns_server_address(nodegroup=node.nodegroup)) domain_name = node.fqdn.split('.', 1)[1] dns_searches = [domain_name] + [ name for name in sorted(get_dns_search_paths()) if name != domain_name] for dns_name in dns_searches: config += " - %s\n" % dns_name return config class TestSimpleNetworkLayout(MAASServerTestCase, AssertNetworkConfigMixin): def test__renders_expected_output(self): factory.make_NodeGroup( name=factory.make_name('aaa')) nodegroup = factory.make_NodeGroup( name=factory.make_name('bbb')) factory.make_NodeGroup( name=factory.make_name('ccc')) node = factory.make_Node_with_Interface_on_Subnet( interface_count=2, nodegroup=nodegroup) for iface in node.interface_set.filter(enabled=True): factory.make_StaticIPAddress( interface=iface, subnet=iface.vlan.subnet_set.first()) iface.params = { "mtu": random.randint(600, 1400), "accept_ra": factory.pick_bool(), "autoconf": factory.pick_bool(), } iface.save() extra_interface = node.interface_set.all()[1] sip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=None, interface=extra_interface) sip.subnet = None sip.save() factory.make_Interface(node=node) net_config = self.collect_interface_config(node) net_config += self.collectDNSConfig(node) config = compose_curtin_network_config(node) self.assertNetworkConfig(net_config, config) class TestBondNetworkLayout(MAASServerTestCase, AssertNetworkConfigMixin): def test__renders_expected_output(self): node = factory.make_Node_with_Interface_on_Subnet( interface_count=2) interfaces = node.interface_set.all() vlan = node.interface_set.first().vlan bond_iface = factory.make_Interface( iftype=INTERFACE_TYPE.BOND, node=node, vlan=vlan, parents=interfaces) bond_iface.params = { "bond_mode": "balance-rr", } bond_iface.save() factory.make_StaticIPAddress( interface=bond_iface, alloc_type=IPADDRESS_TYPE.STICKY, 
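        # The collector above should render this bond roughly as (sketch;
        # the interface names are hypothetical):
        #   - id: bond0
        #     type: bond
        #     bond_interfaces: [eth0, eth1]
        #     params: {bond-mode: balance-rr}
        # Note that bond_* parameter keys are rewritten to bond-* on the
        # way out.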
subnet=bond_iface.vlan.subnet_set.first()) net_config = self.collect_interface_config(node, filter="physical") net_config += self.collect_interface_config(node, filter="bond") net_config += self.collectDNSConfig(node) config = compose_curtin_network_config(node) self.assertNetworkConfig(net_config, config) class TestVLANNetworkLayout(MAASServerTestCase, AssertNetworkConfigMixin): def test__renders_expected_output(self): node = factory.make_Node_with_Interface_on_Subnet( interface_count=1) interfaces = node.interface_set.all() vlan_iface = factory.make_Interface( iftype=INTERFACE_TYPE.VLAN, node=node, parents=interfaces) subnet = factory.make_Subnet(vlan=vlan_iface.vlan) factory.make_StaticIPAddress(interface=vlan_iface, subnet=subnet) net_config = self.collect_interface_config(node, filter="physical") net_config += self.collect_interface_config(node, filter="vlan") net_config += self.collectDNSConfig(node) config = compose_curtin_network_config(node) self.assertNetworkConfig(net_config, config) class TestVLANOnBondNetworkLayout(MAASServerTestCase, AssertNetworkConfigMixin): def test__renders_expected_output(self): node = factory.make_Node_with_Interface_on_Subnet( interface_count=2) phys_ifaces = node.interface_set.all() phys_vlan = node.interface_set.first().vlan bond_iface = factory.make_Interface(iftype=INTERFACE_TYPE.BOND, node=node, vlan=phys_vlan, parents=phys_ifaces) bond_iface.params = { "bond_mode": "balance-rr", } bond_iface.save() vlan_iface = factory.make_Interface( iftype=INTERFACE_TYPE.VLAN, node=node, parents=[bond_iface]) subnet = factory.make_Subnet(vlan=vlan_iface.vlan) factory.make_StaticIPAddress(interface=vlan_iface, subnet=subnet) net_config = self.collect_interface_config(node, filter="physical") net_config += self.collect_interface_config(node, filter="bond") net_config += self.collect_interface_config(node, filter="vlan") net_config += self.collectDNSConfig(node) config = compose_curtin_network_config(node) self.assertNetworkConfig(net_config, config) class TestDHCPNetworkLayout(MAASServerTestCase, AssertNetworkConfigMixin): def test__dhcp_configurations_rendered(self): node = factory.make_Node_with_Interface_on_Subnet() iface = node.interface_set.first() subnet = iface.vlan.subnet_set.first() factory.make_StaticIPAddress( ip=None, alloc_type=IPADDRESS_TYPE.DHCP, interface=iface, subnet=subnet) config = compose_curtin_network_config(node) config_yaml = yaml.load(config[0]) self.assertThat( config_yaml['network']['config'][0]['subnets'][0]['type'], Equals('dhcp' + unicode(IPNetwork(subnet.cidr).version)) ) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_preseed_storage.py0000644000000000000000000010262413056115004023545 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test `maasserver.preseed_storage`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from textwrap import dedent from maasserver.enum import ( CACHE_MODE_TYPE, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, NODE_STATUS, PARTITION_TABLE_TYPE, ) from maasserver.models.filesystemgroup import ( Bcache, RAID, VolumeGroup, ) from maasserver.models.partitiontable import ( PARTITION_TABLE_EXTRA_SPACE, PREP_PARTITION_SIZE, ) from maasserver.preseed_storage import compose_curtin_storage_config from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from testtools.matchers import ( ContainsDict, Equals, IsInstance, MatchesDict, MatchesListwise, ) import yaml class AssertStorageConfigMixin: def assertStorageConfig(self, expected, output): output = output[0] output = yaml.load(output) self.assertThat(output, ContainsDict({ "partitioning_commands": MatchesDict({ "builtin": Equals(["curtin", "block-meta", "custom"]), }), "storage": MatchesDict({ "version": Equals(1), "config": IsInstance(list), }), })) expected = yaml.load(expected) output_storage = output["storage"]["config"] expected_storage = expected["config"] expected_equals = map(Equals, expected_storage) self.assertThat(output_storage, MatchesListwise(expected_equals)) class TestSimpleGPTLayout(MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00001 grub_device: true - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: 6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398 size: 536870912B device: sda wipe: superblock offset: 4194304B flag: boot - id: sda-part2 name: sda-part2 type: partition number: 2 uuid: 0c1c1c3a-1e9d-4047-8ef6-328a03d513e5 size: 1073741824B device: sda wipe: superblock flag: boot - id: sda-part3 name: sda-part3 type: partition number: 3 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf size: 6970933248B wipe: superblock device: sda - id: sda-part1_format type: format fstype: fat32 label: efi uuid: bf34f38c-02b7-4b4b-bb7c-e73521f9ead7 volume: sda-part1 - id: sda-part2_format type: format fstype: ext4 label: boot uuid: f98e5b7b-cbb1-437e-b4e5-1769f81f969f volume: sda-part2 - id: sda-part3_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part3 - id: sda-part3_mount type: mount path: / device: sda-part3_format - id: sda-part2_mount type: mount path: /boot device: sda-part2_format - id: sda-part1_mount type: mount path: /boot/efi device: sda-part1_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, bios_boot_method="uefi", with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=boot_disk) efi_partition = factory.make_Partition( partition_table=partition_table, uuid="6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398", size=512 * 1024 ** 2, bootable=True) boot_partition = factory.make_Partition( partition_table=partition_table, uuid="0c1c1c3a-1e9d-4047-8ef6-328a03d513e5", size=1 * 1024 ** 3, bootable=True) root_partition = factory.make_Partition( partition_table=partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=(6.5 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) 
factory.make_Filesystem( partition=efi_partition, fstype=FILESYSTEM_TYPE.FAT32, uuid="bf34f38c-02b7-4b4b-bb7c-e73521f9ead7", label="efi", mount_point="/boot/efi") factory.make_Filesystem( partition=boot_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="f98e5b7b-cbb1-437e-b4e5-1769f81f969f", label="boot", mount_point="/boot") factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestSimpleMBRLayout(MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: msdos model: QEMU HARDDISK serial: QM00001 grub_device: true - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: 6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398 size: 536870912B device: sda wipe: superblock offset: 4194304B flag: boot - id: sda-part2 name: sda-part2 type: partition number: 2 uuid: 0c1c1c3a-1e9d-4047-8ef6-328a03d513e5 size: 1073741824B wipe: superblock device: sda flag: boot - id: sda-part3 name: sda-part3 type: partition number: 3 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf size: 2684354560B wipe: superblock device: sda - id: sda-part4 type: partition number: 4 device: sda flag: extended size: 4287627264B - id: sda-part5 name: sda-part5 type: partition number: 5 uuid: 1b59e74f-6189-41a1-ba8e-fbf38df19820 size: 2146435072B device: sda wipe: superblock flag: logical - id: sda-part6 name: sda-part6 type: partition number: 6 uuid: 8c365c80-900b-40a1-a8c7-1e445878d19a size: 2138046464B device: sda wipe: superblock flag: logical - id: sda-part1_format type: format fstype: fat32 label: efi uuid: bf34f38c-02b7-4b4b-bb7c-e73521f9ead7 volume: sda-part1 - id: sda-part2_format type: format fstype: ext4 label: boot uuid: f98e5b7b-cbb1-437e-b4e5-1769f81f969f volume: sda-part2 - id: sda-part3_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part3 - id: sda-part5_format type: format fstype: ext4 label: srv uuid: 9c1764f0-2b48-4127-b719-ec61ac7d5f4c volume: sda-part5 - id: sda-part6_format type: format fstype: ext4 label: srv-data uuid: bcac8449-3a45-4586-bdfb-c21e6ba47902 volume: sda-part6 - id: sda-part3_mount type: mount path: / device: sda-part3_format - id: sda-part5_mount type: mount path: /srv device: sda-part5_format - id: sda-part2_mount type: mount path: /boot device: sda-part2_format - id: sda-part1_mount type: mount path: /boot/efi device: sda-part1_format - id: sda-part6_mount type: mount path: /srv/data device: sda-part6_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.MBR, block_device=boot_disk) efi_partition = factory.make_Partition( partition_table=partition_table, uuid="6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398", size=512 * 1024 ** 2, bootable=True) boot_partition = factory.make_Partition( partition_table=partition_table, uuid="0c1c1c3a-1e9d-4047-8ef6-328a03d513e5", size=1 * 1024 ** 3, bootable=True) root_partition = factory.make_Partition( partition_table=partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=2.5 * 1024 ** 3, bootable=False) 
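        # Editor's note: MBR supports at most four primary partitions,
        # which is why STORAGE_CONFIG above carries sda-part4 as a bare
        # "extended" container (no UUID, no filesystem) and numbers the
        # "logical" partitions inside it from 5 -- hence the jump from
        # root_partition straight to partition_five below.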
partition_five = factory.make_Partition( partition_table=partition_table, uuid="1b59e74f-6189-41a1-ba8e-fbf38df19820", size=2 * 1024 ** 3, bootable=False) partition_six = factory.make_Partition( partition_table=partition_table, uuid="8c365c80-900b-40a1-a8c7-1e445878d19a", size=(2 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=efi_partition, fstype=FILESYSTEM_TYPE.FAT32, uuid="bf34f38c-02b7-4b4b-bb7c-e73521f9ead7", label="efi", mount_point="/boot/efi") factory.make_Filesystem( partition=boot_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="f98e5b7b-cbb1-437e-b4e5-1769f81f969f", label="boot", mount_point="/boot") factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") factory.make_Filesystem( partition=partition_five, fstype=FILESYSTEM_TYPE.EXT4, uuid="9c1764f0-2b48-4127-b719-ec61ac7d5f4c", label="srv", mount_point="/srv") factory.make_Filesystem( partition=partition_six, fstype=FILESYSTEM_TYPE.EXT4, uuid="bcac8449-3a45-4586-bdfb-c21e6ba47902", label="srv-data", mount_point="/srv/data") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestSimpleWithEmptyDiskLayout( MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: msdos model: QEMU HARDDISK serial: QM00001 grub_device: true - id: sdb name: sdb type: disk wipe: superblock path: /dev/disk/by-id/wwn-0x55cd2e400009bf84 - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: 6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398 size: 8581545984B device: sda wipe: superblock offset: 4194304B - id: sda-part1_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part1 - id: sda-part1_mount type: mount path: / device: sda-part1_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sdb", id_path="/dev/disk/by-id/wwn-0x55cd2e400009bf84") # Free disk partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.MBR, block_device=boot_disk) root_partition = factory.make_Partition( partition_table=partition_table, uuid="6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398", size=(8 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestMBRWithBootDiskWithoutPartitionsLayout( MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: msdos model: QEMU HARDDISK serial: QM00001 - id: sdb name: sdb type: disk wipe: superblock ptable: msdos path: /dev/disk/by-id/wwn-0x55cd2e400009bf84 grub_device: true - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: 6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398 size: 8581545984B device: sda wipe: superblock offset: 4194304B - id: sda-part1_format type: format fstype: ext4 label: root uuid: 
90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part1 - id: sda-part1_mount type: mount path: / device: sda-part1_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, with_boot_disk=False) first_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sdb", id_path="/dev/disk/by-id/wwn-0x55cd2e400009bf84") node.boot_disk = boot_disk node.save() partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.MBR, block_device=first_disk) root_partition = factory.make_Partition( partition_table=partition_table, uuid="6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398", size=(8 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestComplexDiskLayout( MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00001 grub_device: true - id: sdb name: sdb type: disk wipe: superblock ptable: gpt model: QEMU SSD serial: QM00002 - id: sdc name: sdc type: disk wipe: superblock model: QEMU HARDDISK serial: QM00003 - id: sdd name: sdd type: disk wipe: superblock model: QEMU HARDDISK serial: QM00004 - id: sde name: sde type: disk wipe: superblock model: QEMU HARDDISK serial: QM00005 - id: sdf name: sdf type: disk wipe: superblock model: QEMU HARDDISK serial: QM00006 - id: sdg name: sdg type: disk wipe: superblock model: QEMU HARDDISK serial: QM00007 - id: md0 name: md0 type: raid raidlevel: 5 devices: - sdc - sdd - sde spare_devices: - sdf - sdg ptable: gpt - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: 6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398 size: 536870912B device: sda wipe: superblock offset: 4194304B flag: boot - id: sda-part2 name: sda-part2 type: partition number: 2 uuid: 0c1c1c3a-1e9d-4047-8ef6-328a03d513e5 size: 1073741824B device: sda wipe: superblock flag: boot - id: sda-part3 name: sda-part3 type: partition number: 3 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf size: 6970933248B device: sda wipe: superblock - id: sdb-part1 name: sdb-part1 type: partition number: 1 offset: 4194304B uuid: f3281144-a0b6-46f1-90af-8541f97f7b1f size: 2139095040B wipe: superblock device: sdb - id: bcache0 name: bcache0 type: bcache backing_device: sda-part3 cache_device: sdb-part1 cache_mode: writethrough - id: sdb-part2 name: sdb-part2 type: partition number: 2 uuid: ea7f96d0-b508-40d9-8495-b2163df35c9b size: 6442450944B wipe: superblock device: sdb - id: vgroot name: vgroot type: lvm_volgroup uuid: 1793be1b-890a-44cb-9322-057b0d53b53c devices: - sdb-part2 - id: vgroot-lvextra name: lvextra type: lvm_partition volgroup: vgroot size: 2147483648B - id: vgroot-lvroot name: lvroot type: lvm_partition volgroup: vgroot size: 2147483648B - id: md0-part1 name: md0-part1 type: partition number: 1 offset: 4194304B uuid: 18a6e885-3e6d-4505-8a0d-cf34df11a8b0 size: 2199014866944B wipe: superblock device: md0 - id: sda-part1_format type: format fstype: fat32 label: efi uuid: bf34f38c-02b7-4b4b-bb7c-e73521f9ead7 volume: sda-part1 - id: sda-part2_format type: format fstype: ext4 label: boot 
uuid: f98e5b7b-cbb1-437e-b4e5-1769f81f969f volume: sda-part2 - id: vgroot-lvroot_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: vgroot-lvroot - id: md0-part1_format type: format fstype: ext4 label: data uuid: a8ad29a3-6083-45af-af8b-06ead59f108b volume: md0-part1 - id: vgroot-lvroot_mount type: mount path: / device: vgroot-lvroot_format - id: sda-part2_mount type: mount path: /boot device: sda-part2_format - id: sda-part1_mount type: mount path: /boot/efi device: sda-part1_format - id: md0-part1_mount type: mount path: /srv/data device: md0-part1_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, bios_boot_method="uefi", with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB ssd_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sdb", model="QEMU SSD", serial="QM00002") # 8 GiB raid_5_disk_1 = factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 ** 4, name="sdc", model="QEMU HARDDISK", serial="QM00003") # 1 TiB raid_5_disk_2 = factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 ** 4, name="sdd", model="QEMU HARDDISK", serial="QM00004") # 1 TiB raid_5_disk_3 = factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 ** 4, name="sde", model="QEMU HARDDISK", serial="QM00005") # 1 TiB raid_5_disk_4 = factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 ** 4, name="sdf", model="QEMU HARDDISK", serial="QM00006") # 1 TiB raid_5_disk_5 = factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 ** 4, name="sdg", model="QEMU HARDDISK", serial="QM00007") # 1 TiB boot_partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=boot_disk) efi_partition = factory.make_Partition( partition_table=boot_partition_table, uuid="6efc2c3d-bc9d-4ee5-a7ed-c6e1574d5398", size=512 * 1024 ** 2, bootable=True) boot_partition = factory.make_Partition( partition_table=boot_partition_table, uuid="0c1c1c3a-1e9d-4047-8ef6-328a03d513e5", size=1 * 1024 ** 3, bootable=True) root_partition = factory.make_Partition( partition_table=boot_partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=(6.5 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=efi_partition, fstype=FILESYSTEM_TYPE.FAT32, uuid="bf34f38c-02b7-4b4b-bb7c-e73521f9ead7", label="efi", mount_point="/boot/efi") factory.make_Filesystem( partition=boot_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="f98e5b7b-cbb1-437e-b4e5-1769f81f969f", label="boot", mount_point="/boot") ssd_partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=ssd_disk) cache_partition = factory.make_Partition( partition_table=ssd_partition_table, uuid="f3281144-a0b6-46f1-90af-8541f97f7b1f", size=(2 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) cache_set = factory.make_CacheSet(partition=cache_partition) Bcache.objects.create_bcache( name="bcache0", uuid="9e7bdc2d-1567-4e1c-a89a-4e20df099458", backing_partition=root_partition, cache_set=cache_set, cache_mode=CACHE_MODE_TYPE.WRITETHROUGH) lvm_partition = factory.make_Partition( partition_table=ssd_partition_table, uuid="ea7f96d0-b508-40d9-8495-b2163df35c9b", size=(6 * 1024 ** 3), bootable=False) vgroot = VolumeGroup.objects.create_volume_group( name="vgroot", uuid="1793be1b-890a-44cb-9322-057b0d53b53c", block_devices=[], partitions=[lvm_partition]) 
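        # Editor's note on the md0-part1 size in STORAGE_CONFIG above: a
        # RAID 5 array keeps one member's worth of parity, so the three
        # active 1 TiB disks (the two spares don't count) yield 2 TiB,
        # and the GPT on md0 then costs PARTITION_TABLE_EXTRA_SPACE
        # (8 MiB per the fixture numbers):
        #     2 * 1024 ** 4 - 8 * 1024 ** 2 == 2199014866944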
lvroot = vgroot.create_logical_volume( name="lvroot", uuid="98fac182-45a4-4afc-ba57-a1ace0396679", size=2 * 1024 ** 3) vgroot.create_logical_volume( name="lvextra", uuid="0d960ec6-e6d0-466f-8f83-ee9c11e5b9ba", size=2 * 1024 ** 3) factory.make_Filesystem( block_device=lvroot, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") raid_5 = RAID.objects.create_raid( level=FILESYSTEM_GROUP_TYPE.RAID_5, name="md0", uuid="ec7816a7-129e-471e-9735-4e27c36fa10b", block_devices=[raid_5_disk_1, raid_5_disk_2, raid_5_disk_3], spare_devices=[raid_5_disk_4, raid_5_disk_5]) raid_5_partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=raid_5.virtual_device) raid_5_partition = factory.make_Partition( partition_table=raid_5_partition_table, uuid="18a6e885-3e6d-4505-8a0d-cf34df11a8b0", size=(2 * 1024 ** 4) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=raid_5_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="a8ad29a3-6083-45af-af8b-06ead59f108b", label="data", mount_point="/srv/data") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestSimplePower8Layout(MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00001 - id: sda-part1 name: sda-part1 type: partition number: 1 offset: 4194304B size: 8388608B device: sda wipe: zero flag: prep grub_device: True - id: sda-part2 name: sda-part2 type: partition number: 2 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf size: 8573157376B wipe: superblock device: sda - id: sda-part2_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part2 - id: sda-part2_mount type: mount path: / device: sda-part2_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, architecture="ppc64el/generic", bios_boot_method="uefi", with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=boot_disk) root_partition = factory.make_Partition( partition_table=partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=( (8 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE - PREP_PARTITION_SIZE), bootable=False) factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestPower8ExtraSpaceLayout( MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00001 - id: sda-part1 name: sda-part1 type: partition number: 1 offset: 4194304B size: 8388608B device: sda wipe: zero flag: prep grub_device: True - id: sda-part2 name: sda-part2 type: partition number: 2 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf size: 7507804160B wipe: superblock device: sda - id: sda-part2_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part2 - id: sda-part2_mount type: mount path: / device: sda-part2_format 
""") def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, architecture="ppc64el/generic", bios_boot_method="uefi", with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=boot_disk) root_partition = factory.make_Partition( partition_table=partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=(7 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE, bootable=False) factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) class TestPower8NoPartitionTableLayout( MAASServerTestCase, AssertStorageConfigMixin): STORAGE_CONFIG = dedent("""\ config: - id: sda name: sda type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00001 - id: sdb name: sdb type: disk wipe: superblock ptable: gpt model: QEMU HARDDISK serial: QM00002 - id: sdb-part1 name: sdb-part1 type: partition number: 1 offset: 4194304B size: 8388608B device: sdb wipe: zero flag: prep grub_device: True - id: sda-part1 name: sda-part1 type: partition number: 1 uuid: f74ff260-2a5b-4a36-b1b8-37f746b946bf offset: 4194304B size: 8573157376B wipe: superblock device: sda - id: sda-part1_format type: format fstype: ext4 label: root uuid: 90a69b22-e281-4c5b-8df9-b09514f27ba1 volume: sda-part1 - id: sda-part1_mount type: mount path: / device: sda-part1_format """) def test__renders_expected_output(self): node = factory.make_Node( status=NODE_STATUS.ALLOCATED, architecture="ppc64el/generic", bios_boot_method="uefi", with_boot_disk=False) root_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sda", model="QEMU HARDDISK", serial="QM00001") # 8 GiB partition_table = factory.make_PartitionTable( table_type=PARTITION_TABLE_TYPE.GPT, block_device=root_disk) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=8 * 1024 ** 3, name="sdb", model="QEMU HARDDISK", serial="QM00002") # 8 GiB node.boot_disk = boot_disk node.save() root_partition = factory.make_Partition( partition_table=partition_table, uuid="f74ff260-2a5b-4a36-b1b8-37f746b946bf", size=( (8 * 1024 ** 3) - PARTITION_TABLE_EXTRA_SPACE - PREP_PARTITION_SIZE), bootable=False) factory.make_Filesystem( partition=root_partition, fstype=FILESYSTEM_TYPE.EXT4, uuid="90a69b22-e281-4c5b-8df9-b09514f27ba1", label="root", mount_point="/") node._create_acquired_filesystems() config = compose_curtin_storage_config(node) self.assertStorageConfig(self.STORAGE_CONFIG, config) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_security.py0000644000000000000000000001424113056115004022236 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for MAAS's security module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from binascii import b2a_hex from datetime import datetime from os import unlink from fixtures import EnvironmentVariableFixture from maasserver import security from maasserver.models.config import Config from maasserver.testing.testcase import MAASServerTestCase from maastesting.djangotestcase import DjangoTransactionTestCase from maastesting.testcase import MAASTestCase from provisioningserver.utils.fs import write_text_file from pytz import UTC from testtools.matchers import ( AfterPreprocessing, Equals, FileContains, GreaterThan, IsInstance, MatchesAll, MatchesAny, ) from twisted.internet import ssl class TestGetSerial(MAASTestCase): def test_that_it_works_eh(self): nowish = datetime(2014, 03, 24, 16, 07, tzinfo=UTC) security_datetime = self.patch(security, "datetime") # Make security.datetime() work like regular datetime. security_datetime.side_effect = datetime # Make security.datetime.now() return a fixed value. security_datetime.now.return_value = nowish self.assertEqual(69005220, security.get_serial()) is_valid_region_certificate = MatchesAll( IsInstance(ssl.PrivateCertificate), AfterPreprocessing( lambda cert: cert.getSubject(), Equals({"commonName": "MAAS Region"})), AfterPreprocessing( lambda cert: cert.getPublicKey().original.bits(), Equals(2048)), AfterPreprocessing( lambda cert: cert.privateKey.original.bits(), Equals(2048)), ) class TestCertificateFunctions(MAASServerTestCase): def patch_serial(self): serial = self.getUniqueInteger() self.patch(security, "get_serial").return_value = serial return serial def test_generate_region_certificate(self): serial = self.patch_serial() cert = security.generate_region_certificate() self.assertThat(cert, is_valid_region_certificate) self.assertEqual(serial, cert.serialNumber()) def test_save_region_certificate(self): cert = security.generate_region_certificate() security.save_region_certificate(cert) self.assertEqual( cert.dumpPEM().decode("ascii"), Config.objects.get_config("rpc_region_certificate")) def test_load_region_certificate(self): cert = security.generate_region_certificate() Config.objects.set_config( "rpc_region_certificate", cert.dumpPEM().decode("ascii")) self.assertEqual(cert, security.load_region_certificate()) def test_load_region_certificate_when_none_exists(self): self.assertIsNone(security.load_region_certificate()) def test_get_region_certificate(self): cert = security.generate_region_certificate() security.save_region_certificate(cert) self.assertEqual(cert, security.get_region_certificate()) def test_get_region_certificate_when_none_exists(self): cert = security.get_region_certificate() self.assertThat(cert, is_valid_region_certificate) self.assertEqual(cert, security.load_region_certificate()) is_valid_secret = MatchesAll( IsInstance(bytes), AfterPreprocessing( len, MatchesAny(Equals(16), GreaterThan(16)))) class TestGetSharedSecret(DjangoTransactionTestCase): def setUp(self): super(TestGetSharedSecret, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def test__generates_new_secret_when_none_exists(self): secret = security.get_shared_secret() self.assertThat(secret, is_valid_secret) def test__same_secret_is_returned_on_subsequent_calls(self): self.assertEqual( security.get_shared_secret(), security.get_shared_secret()) def test__uses_database_secret_when_none_on_fs(self): secret_before = 
security.get_shared_secret() unlink(security.get_shared_secret_filesystem_path()) secret_after = security.get_shared_secret() self.assertEqual(secret_before, secret_after) # The secret found in the database is written to the filesystem. self.assertThat( security.get_shared_secret_filesystem_path(), FileContains(b2a_hex(secret_after))) def test__uses_filesystem_secret_when_none_in_database(self): secret_before = security.get_shared_secret() Config.objects.set_config("rpc_shared_secret", None) secret_after = security.get_shared_secret() self.assertEqual(secret_before, secret_after) # The secret found on the filesystem is saved in the database. self.assertEqual( b2a_hex(secret_after), Config.objects.get_config("rpc_shared_secret")) def test__errors_when_database_value_cannot_be_decoded(self): security.get_shared_secret() # Ensure that the directory exists. Config.objects.set_config("rpc_shared_secret", "_") self.assertRaises(TypeError, security.get_shared_secret) def test__errors_when_database_and_filesystem_values_differ(self): security.get_shared_secret() # Ensure that the directory exists. Config.objects.set_config("rpc_shared_secret", "666f6f") write_text_file( security.get_shared_secret_filesystem_path(), "626172") self.assertRaises(AssertionError, security.get_shared_secret) def test__deals_fine_with_whitespace_in_database_value(self): Config.objects.set_config("rpc_shared_secret", " 666f6f\n") # Ordinarily we would need to commit now, because get_shared_secret() # runs in a separate thread. However, Django thinks that transaction # management means AUTOCOMMIT, which spares us this diabolical chore. # This is not unique to this test method; it comes from using Django's # DjangoTransactionTestCase, which also has a misleading name. self.assertEqual(b"foo", security.get_shared_secret()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_sequence.py0000644000000000000000000000500113056115004022171 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
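# --- Editor's aside (not MAAS code): the literals in the shared-secret
# tests above are hex-encoded ASCII -- "666f6f" is "foo" and "626172" is
# "bar" -- which is why differing database and filesystem values trip the
# AssertionError test, while the whitespace test still decodes to b"foo".
from binascii import a2b_hex
assert a2b_hex("666f6f") == b"foo"
assert a2b_hex("626172") == b"bar"
# --- end of editor's aside.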
"""Test :class:`Sequence`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from django.db import connection from django.db.utils import DatabaseError from maasserver.sequence import Sequence from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestSequence(MAASServerTestCase): def query_seq(self, name): cursor = connection.cursor() cursor.execute( "SELECT nextval(%s)", [name]) return cursor.fetchone()[0] def test_create_sequence(self): name = factory.make_name('seq', sep='') seq = Sequence(name) seq.create() val = self.query_seq(seq.name) self.assertEqual(1, val) def test_sequence_respects_minvalue(self): name = factory.make_name('seq', sep='') minvalue = random.randint(1, 50) seq = Sequence(name, minvalue=minvalue) seq.create() val = self.query_seq(seq.name) self.assertEqual(minvalue, val) def test_sequence_respects_incr(self): name = factory.make_name('seq', sep='') incr = random.randint(1, 50) seq = Sequence(name, incr=incr) seq.create() val = self.query_seq(seq.name) val = self.query_seq(seq.name) self.assertEqual(1 + incr, val) def test_sequence_respects_maxvalue_and_cycles(self): name = factory.make_name('seq', sep='') maxvalue = random.randint(10, 50) seq = Sequence(name, maxvalue=maxvalue) seq.create() cursor = connection.cursor() query = "ALTER SEQUENCE %s" % seq.name cursor.execute(query + " RESTART WITH %s", [maxvalue]) val = self.query_seq(seq.name) val = self.query_seq(seq.name) self.assertEqual(1, val) def test_drop_sequence(self): name = factory.make_name('seq', sep='') seq = Sequence(name) seq.create() seq.drop() self.assertRaisesRegexp( DatabaseError, "does not exist", self.query_seq, seq.name) def test_nextval_returns_sequential_values(self): name = factory.make_name('seq', sep='') seq = Sequence(name) seq.create() self.assertSequenceEqual( range(1, 11), [seq.nextval() for _ in range(10)]) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_server_address.py0000644000000000000000000002041613056115004023403 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the server_address module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import defaultdict from random import randint from maasserver import server_address from maasserver.exceptions import UnresolvableHost from maasserver.server_address import get_maas_facing_server_address from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from netaddr import IPAddress def make_hostname(): return '%s.example.com' % factory.make_hostname() class TestGetMAASFacingServerHost(MAASServerTestCase): def set_maas_url(self, hostname, with_port=False): """Set configured maas URL to be a (partly) random URL.""" url = factory.make_simple_http_url(netloc=hostname, port=with_port) self.useFixture(RegionConfigurationFixture(maas_url=url)) def test_get_maas_facing_server_host_returns_host_name(self): hostname = make_hostname() self.set_maas_url(hostname) self.assertEqual( hostname, server_address.get_maas_facing_server_host()) def test_get_maas_facing_server_host_returns_ip_if_ip_configured(self): ip = factory.make_ipv4_address() self.set_maas_url(ip) self.assertEqual(ip, server_address.get_maas_facing_server_host()) def test_get_maas_facing_server_host_returns_nodegroup_maas_url(self): hostname = factory.make_hostname() maas_url = 'http://%s' % hostname nodegroup = factory.make_NodeGroup(maas_url=maas_url) self.assertEqual( hostname, server_address.get_maas_facing_server_host(nodegroup)) def test_get_maas_facing_server_host_strips_out_port(self): hostname = make_hostname() self.set_maas_url(hostname, with_port=True) self.assertEqual( hostname, server_address.get_maas_facing_server_host()) def test_get_maas_facing_server_host_parses_IPv6_address_in_URL(self): ip = factory.make_ipv6_address() self.set_maas_url('[%s]' % ip) self.assertEqual( unicode(ip), server_address.get_maas_facing_server_host()) class FakeResolveHostname: """Fake implementation for `resolve_hostname`. Makes `resolve_hostname` return the given IP addresses (always as `IPAddress`, even though you may pass them as text). It will return just the IPv4 ones, or just the IPv6 ones, depending on which kind of address the caller requests. :ivar results_by_ip_version: Return values, as a dict mapping IP version to the set of results for that IP version. :ivar hostname: Host name that was passed by the last invocation. 
""" def __init__(self, *addresses): self.hostname = None self.results_by_ip_version = defaultdict(set) for addr in addresses: addr = IPAddress(addr) self.results_by_ip_version[addr.version].add(addr) def __call__(self, hostname, ip_version): assert ip_version in (4, 6) self.hostname = hostname return self.results_by_ip_version[ip_version] class TestGetMAASFacingServerAddress(MAASServerTestCase): def make_addresses(self): """Return a set of IP addresses, mixing IPv4 and IPv6.""" return { factory.make_ipv4_address(), factory.make_ipv6_address(), } def patch_get_maas_facing_server_host(self, host=None): if host is None: host = make_hostname() patch = self.patch(server_address, 'get_maas_facing_server_host') patch.return_value = unicode(host) return patch def patch_resolve_hostname(self, addresses=None): if addresses is None: addresses = self.make_addresses() fake = FakeResolveHostname(*addresses) return self.patch(server_address, 'resolve_hostname', fake) def test__integrates_with_get_maas_facing_server_host(self): ip = factory.make_ipv4_address() maas_url = 'http://%s' % ip nodegroup = factory.make_NodeGroup(maas_url=maas_url) self.assertEqual( unicode(ip), server_address.get_maas_facing_server_host(nodegroup)) def test__uses_IPv4_hostname_directly_if_ipv4_set(self): ip = factory.make_ipv4_address() self.patch_get_maas_facing_server_host(ip) fake_resolve = self.patch_resolve_hostname() result = get_maas_facing_server_address(ipv4=True) self.assertEqual(ip, result) self.assertIsNone(fake_resolve.hostname) def test__rejects_IPv4_hostname_if_ipv4_not_set(self): self.patch_get_maas_facing_server_host(factory.make_ipv4_address()) fake_resolve = self.patch_resolve_hostname() self.assertRaises( UnresolvableHost, get_maas_facing_server_address, ipv4=False) self.assertIsNone(fake_resolve.hostname) def test__uses_IPv6_hostname_directly_if_ipv6_set(self): ip = factory.make_ipv6_address() self.patch_get_maas_facing_server_host(ip) fake_resolve = self.patch_resolve_hostname() result = get_maas_facing_server_address(ipv6=True) self.assertEqual(ip, result) self.assertIsNone(fake_resolve.hostname) def test__rejects_IPv6_hostname_if_ipv6_not_set(self): self.patch_get_maas_facing_server_host(factory.make_ipv6_address()) fake_resolve = self.patch_resolve_hostname() self.assertRaises( UnresolvableHost, get_maas_facing_server_address, ipv6=False) self.assertIsNone(fake_resolve.hostname) def test__resolves_hostname(self): hostname = make_hostname() self.patch_get_maas_facing_server_host(hostname) ip = factory.make_ipv4_address() fake_resolve = self.patch_resolve_hostname([ip]) result = get_maas_facing_server_address() self.assertEqual(unicode(ip), result) self.assertEqual(hostname, fake_resolve.hostname) def test__prefers_IPv4_if_ipv4_set(self): # If a server has mixed v4 and v6 addresses, # get_maas_facing_server_address() will return a v4 address # rather than a v6 one. 
v4_ip = factory.make_ipv4_address() v6_ip = factory.make_ipv6_address() self.patch_resolve_hostname([v4_ip, v6_ip]) self.patch_get_maas_facing_server_host() self.assertEqual( unicode(v4_ip), get_maas_facing_server_address(ipv4=True, ipv6=True)) def test__ignores_IPv4_if_ipv4_not_set(self): v4_ip = factory.make_ipv4_address() v6_ip = factory.make_ipv6_address() self.patch_resolve_hostname([v4_ip, v6_ip]) self.patch_get_maas_facing_server_host() self.assertEqual( unicode(v6_ip), get_maas_facing_server_address(ipv4=False, ipv6=True)) def test__falls_back_on_IPv6_if_ipv4_set_but_no_IPv4_address_found(self): v6_ip = factory.make_ipv6_address() self.patch_resolve_hostname([v6_ip]) self.patch_get_maas_facing_server_host() self.assertEqual( unicode(v6_ip), get_maas_facing_server_address(ipv4=True, ipv6=True)) def test__prefers_global_IPv6_over_link_local_IPv6(self): global_ipv6 = factory.make_ipv6_address() local_ipv6 = [ 'fe80::%d:9876:5432:10' % randint(0, 9999) for _ in range(5) ] self.patch_resolve_hostname([global_ipv6] + local_ipv6) self.patch_get_maas_facing_server_host() self.assertEqual( unicode(global_ipv6), get_maas_facing_server_address()) def test__fails_if_neither_ipv4_nor_ipv6_set(self): self.patch_resolve_hostname() self.patch_get_maas_facing_server_host() self.assertRaises( UnresolvableHost, get_maas_facing_server_address, ipv4=False, ipv6=False) def test__raises_error_if_hostname_does_not_resolve(self): self.patch_resolve_hostname([]) self.patch_get_maas_facing_server_host() self.assertRaises( UnresolvableHost, get_maas_facing_server_address) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_start_up.py0000644000000000000000000001525613056115004022237 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test the start up utility.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver import ( eventloop, locks, start_up, ) from maasserver.bootresources import ensure_boot_source_definition from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.models import ( BootSource, BootSourceSelection, NodeGroup, ) from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.testing.eventloop import RegionEventLoopFixture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from mock import ( ANY, call, ) from testtools.matchers import ( Equals, HasLength, ) from twisted.internet import reactor class LockChecker: """Callable. Records calls, and whether the startup lock was held.""" def __init__(self, lock_file=None): self.call_count = 0 self.lock_was_held = None def __call__(self, *args, **kwargs): self.call_count += 1 self.lock_was_held = locks.startup.is_locked() class TestStartUp(MAASServerTestCase): """Tests for the `start_up` function. The actual work happens in `inner_start_up` and `test_start_up`; the tests you see here are for the locking wrapper only. """ def setUp(self): super(TestStartUp, self).setUp() self.useFixture(RegionEventLoopFixture()) self.patch(start_up, 'create_gnupg_home') self.patch(start_up, 'post_commit_do') def tearDown(self): super(TestStartUp, self).tearDown() # start_up starts the Twisted event loop, so we need to stop it. 
eventloop.reset().wait(5) def test_inner_start_up_runs_in_exclusion(self): self.useFixture(UpdateBootSourceCacheDisconnected()) lock_checker = LockChecker() self.patch(start_up, 'dns_update_all_zones', lock_checker) start_up.inner_start_up() self.assertEqual(1, lock_checker.call_count) self.assertEqual(True, lock_checker.lock_was_held) def test_start_up_retries_with_wait_on_exception(self): inner_start_up = self.patch(start_up, 'inner_start_up') inner_start_up.side_effect = [ factory.make_exception("Boom!"), None, # Success. ] # We don't want to really sleep. self.patch(start_up, "pause") # start_up() returns without error. start_up.start_up() # However, it did call inner_start_up() twice; the first call resulted # in the "Boom!" exception so it tried again. self.expectThat(inner_start_up, MockCallsMatch(call(), call())) # It also slept once, for 3 seconds, between those attempts. self.expectThat(start_up.pause, MockCalledOnceWith(3.0)) class TestStartImportOnUpgrade(MAASServerTestCase): """Tests for the `start_import_on_upgrade` function.""" def setUp(self): super(TestStartImportOnUpgrade, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) self.patch_autospec(start_up, "get_all_available_boot_images") self.patch_autospec(start_up, 'import_resources') ensure_boot_source_definition() def test__does_nothing_if_boot_resources_exist(self): factory.make_BootResource() start_up.start_import_on_upgrade() self.assertThat(start_up.import_resources, MockNotCalled()) def test__does_nothing_if_no_cluster_has_any_images(self): start_up.get_all_available_boot_images.return_value = [] start_up.start_import_on_upgrade() self.assertThat(start_up.import_resources, MockNotCalled()) def test__calls_import_resources_when_any_cluster_has_an_image(self): boot_images = [make_rpc_boot_image()] start_up.get_all_available_boot_images.return_value = boot_images start_up.start_import_on_upgrade() self.assertThat(start_up.import_resources, MockCalledOnceWith()) def test__sets_source_selections_based_on_boot_images(self): boot_images = [make_rpc_boot_image() for _ in range(3)] start_up.get_all_available_boot_images.return_value = boot_images start_up.start_import_on_upgrade() boot_source = BootSource.objects.first() for image in boot_images: selection = BootSourceSelection.objects.get( boot_source=boot_source, os=image["osystem"], release=image["release"]) self.assertIsNotNone(selection) self.expectThat(selection.arches, Equals([image["architecture"]])) self.expectThat(selection.subarches, Equals(["*"])) self.expectThat(selection.labels, Equals([image["label"]])) class TestInnerStartUp(MAASServerTestCase): """Tests for the actual work done in `inner_start_up`.""" def setUp(self): super(TestInnerStartUp, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) self.patch_autospec(start_up, 'create_gnupg_home') self.patch_autospec(start_up, 'post_commit_do') def test__calls_write_full_dns_config(self): self.patch_autospec(start_up, 'dns_update_all_zones') start_up.inner_start_up() self.assertThat( start_up.dns_update_all_zones, MockCalledOnceWith(reload_retry=True)) def test__creates_master_nodegroup(self): start_up.inner_start_up() clusters = NodeGroup.objects.all() self.assertThat(clusters, HasLength(1)) self.assertItemsEqual([NodeGroup.objects.ensure_master()], clusters) def test__calls_create_gnupg_home(self): start_up.inner_start_up() self.assertThat(start_up.create_gnupg_home, MockCalledOnceWith()) def test__calls_register_all_triggers(self): self.patch(start_up, 'register_all_triggers') 
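        # (Editor's note: the retry behaviour pinned down earlier in this
        # file by test_start_up_retries_with_wait_on_exception is, in
        # sketch form --
        #     while True:
        #         try: inner_start_up(); break
        #         except Exception: pause(3.0)
        # -- i.e. keep attempting startup, sleeping three seconds
        # between failed attempts.)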
start_up.inner_start_up() self.assertThat(start_up.register_all_triggers, MockCalledOnceWith()) def test__initialises_boot_source_config(self): self.assertItemsEqual([], BootSource.objects.all()) start_up.inner_start_up() self.assertThat(BootSource.objects.all(), HasLength(1)) def test__calls_start_import_on_upgrade(self): start_up.inner_start_up() self.assertThat( start_up.post_commit_do, MockCalledOnceWith( reactor.callLater, ANY, reactor.threadpoolForDatabase.callInThread, start_up.start_import_on_upgrade)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_storage_layouts.py0000644000000000000000000017425013056115004023622 0ustar 00000000000000# Copyright 2015-2016 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test the storage layouts.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from math import ceil import random from maasserver.enum import ( CACHE_MODE_TYPE, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, PARTITION_TABLE_TYPE, ) from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.filesystemgroup import VolumeGroup from maasserver.models.partition import ( MAX_PARTITION_SIZE_FOR_MBR, PARTITION_ALIGNMENT_SIZE, ) from maasserver.models.partitiontable import ( PARTITION_TABLE_EXTRA_SPACE, PREP_PARTITION_SIZE, ) from maasserver.storage_layouts import ( BcacheStorageLayout, BcacheStorageLayoutBase, calculate_size_from_precentage, EFI_PARTITION_SIZE, FlatStorageLayout, get_storage_layout_choices, get_storage_layout_for_node, is_precentage, LVMStorageLayout, MIN_BOOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE, StorageLayoutBase, StorageLayoutFieldsError, StorageLayoutForm, StorageLayoutMissingBootDiskError, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.converters import round_size_to_nearest_block from maastesting.matchers import MockCalledOnceWith from testtools.matchers import MatchesStructure LARGE_BLOCK_DEVICE = 10 * 1024 * 1024 * 1024 # 10 GiB def make_Node_with_uefi_boot_method(*args, **kwargs): kwargs['bios_boot_method'] = "uefi" kwargs['with_boot_disk'] = False return factory.make_Node(*args, **kwargs) def make_ppc64el_Node_with_powernv_boot_method(*args, **kwargs): kwargs['bios_boot_method'] = "powernv" kwargs['with_boot_disk'] = False kwargs['architecture'] = "ppc64el/generic" return factory.make_Node(*args, **kwargs) def make_ppc64el_Node_with_uefi_boot_method(*args, **kwargs): kwargs['bios_boot_method'] = "powerkvm" kwargs['with_boot_disk'] = False kwargs['architecture'] = "ppc64el/generic" return factory.make_Node(*args, **kwargs) def make_arm64_Node_without_uefi_boot_method(*args, **kwargs): kwargs['bios_boot_method'] = "pxe" kwargs['with_boot_disk'] = False kwargs['architecture'] = "arm64/generic" return factory.make_Node(*args, **kwargs) class TestFormHelpers(MAASServerTestCase): def test_get_storage_layout_choices(self): self.assertItemsEqual([ ("flat", "Flat layout"), ("lvm", "LVM layout"), ("bcache", "Bcache layout"), ], get_storage_layout_choices()) def test_get_storage_layout_for_node(self): node = make_Node_with_uefi_boot_method() layout = get_storage_layout_for_node("flat", node) self.assertIsInstance(layout, FlatStorageLayout) self.assertEquals(node, layout.node) class TestStorageLayoutForm(MAASServerTestCase): def test__field_is_not_required(self): form = StorageLayoutForm(required=False, data={}) 
self.assertTrue(form.is_valid(), form.errors) def test__field_is_required(self): form = StorageLayoutForm(required=True, data={}) self.assertFalse(form.is_valid(), form.errors) self.assertEquals({ 'storage_layout': ['This field is required.'], }, form.errors) class TestIsPrecentageHelper(MAASServerTestCase): """Tests for `is_precentage`.""" scenarios = [ ('100%', { 'value': '100%', 'is_precentage': True, }), ('10%', { 'value': '10%', 'is_precentage': True, }), ('1.5%', { 'value': '1.5%', 'is_precentage': True, }), ('1000.42%', { 'value': '1000.42%', 'is_precentage': True, }), ('0.816112383915%', { 'value': '0.816112383915%', 'is_precentage': True, }), ('1000', { 'value': '1000', 'is_precentage': False, }), ('10', { 'value': '10', 'is_precentage': False, }), ('0', { 'value': '0', 'is_precentage': False, }), ('int(0)', { 'value': 0, 'is_precentage': False, }), ] def test__returns_correct_result(self): self.assertEquals( self.is_precentage, is_precentage(self.value), "%s gave incorrect result." % self.value) class TestCalculateSizeFromPrecentHelper(MAASServerTestCase): """Tests for `calculate_size_from_precentage`.""" scenarios = [ ('100%', { 'input': 10000, 'precent': '100%', 'output': 10000, }), ('10%', { 'input': 10000, 'precent': '10%', 'output': 1000, }), ('1%', { 'input': 10000, 'precent': '1%', 'output': 100, }), ('5%', { 'input': 4096, 'precent': '5%', 'output': int(ceil(4096 * .05)), }), ('0.816112383915%', { 'input': 4096, 'precent': '0.816112383915%', 'output': int(ceil(4096 * 0.00816112383915)), }), ] def test__returns_correct_result(self): self.assertEquals( self.output, calculate_size_from_precentage(self.input, self.precent), "%s gave incorrect result." % self.precent) class TestStorageLayoutBase(MAASServerTestCase): """Tests for `StorageLayoutBase`.""" def test__init__sets_node(self): node = make_Node_with_uefi_boot_method() layout = StorageLayoutBase(node) self.assertEquals(node, layout.node) def test__init__loads_the_physical_block_devices(self): node = make_Node_with_uefi_boot_method() block_devices = [ factory.make_PhysicalBlockDevice(node=node) for _ in range(3) ] layout = StorageLayoutBase(node) self.assertEquals(block_devices, layout.block_devices) def test_raises_error_when_no_block_devices(self): node = make_Node_with_uefi_boot_method() layout = StorageLayoutBase(node) error = self.assertRaises( StorageLayoutMissingBootDiskError, layout.configure) self.assertEquals( "Node doesn't have any storage devices to configure.", error.message) def test_raises_error_when_precentage_to_low_for_boot_disk(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'boot_size': "0%", }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "boot_size": [ "Size is too small. Minimum size is %s." % ( MIN_BOOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_value_to_low_for_boot_disk(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'boot_size': MIN_BOOT_PARTITION_SIZE - 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "boot_size": [ "Size is too small. Minimum size is %s." 
% ( MIN_BOOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_precentage_to_high_for_boot_disk(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) max_size = ( boot_disk.size - EFI_PARTITION_SIZE - MIN_ROOT_PARTITION_SIZE) to_high_precent = max_size / float(boot_disk.size) to_high_precent = "%s%%" % ((to_high_precent + 1) * 100) layout = StorageLayoutBase(node, { 'boot_size': to_high_precent, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "boot_size": [ "Size is too large. Maximum size is %s." % max_size], }, error.error_dict) def test_raises_error_when_value_to_high_for_boot_disk(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) max_size = ( boot_disk.size - EFI_PARTITION_SIZE - MIN_ROOT_PARTITION_SIZE) layout = StorageLayoutBase(node, { 'boot_size': max_size + 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "boot_size": [ "Size is too large. Maximum size is %s." % max_size], }, error.error_dict) def test_raises_error_when_precentage_to_low_for_root_disk(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'root_size': "0%", }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "root_size": [ "Size is too small. Minimum size is %s." % ( MIN_ROOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_value_to_low_for_root_disk(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'root_size': MIN_ROOT_PARTITION_SIZE - 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "root_size": [ "Size is too small. Minimum size is %s." % ( MIN_ROOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_precentage_to_high_for_root_disk(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) max_size = ( boot_disk.size - EFI_PARTITION_SIZE - MIN_BOOT_PARTITION_SIZE) to_high_precent = max_size / float(boot_disk.size) to_high_precent = "%s%%" % ((to_high_precent + 1) * 100) layout = StorageLayoutBase(node, { 'root_size': to_high_precent, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "root_size": [ "Size is too large. Maximum size is %s." % max_size], }, error.error_dict) def test_raises_error_when_value_to_high_for_root_disk(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) max_size = ( boot_disk.size - EFI_PARTITION_SIZE - MIN_BOOT_PARTITION_SIZE) layout = StorageLayoutBase(node, { 'root_size': max_size + 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "root_size": [ "Size is too large. Maximum size is %s." 
% max_size], }, error.error_dict) def test_raises_error_when_boot_and_root_to_big(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'boot_size': "50%", 'root_size': "60%", }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "__all__": [ "Size of the boot partition and root partition are larger " "than the available space on the boot disk."], }, error.error_dict) def test_doesnt_error_if_boot_and_root_valid(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'boot_size': "50%", 'root_size': "50%", }) self.patch(StorageLayoutBase, "configure_storage") # This should not raise an exception. layout.configure() def test_get_boot_size_returns_0_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'root_size': "50%", }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(0, layout.get_boot_size()) def test_get_boot_size_returns_boot_size_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) boot_size = random.randint( MIN_BOOT_PARTITION_SIZE, MIN_BOOT_PARTITION_SIZE * 2) layout = StorageLayoutBase(node, { 'boot_size': boot_size, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(boot_size, layout.get_boot_size()) def test_get_root_device_returns_None_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { }) self.assertTrue(layout.is_valid(), layout.errors) self.assertIsNone(layout.get_root_device()) def test_get_root_device_returns_root_device_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_device = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { 'root_device': root_device.id, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(root_device, layout.get_root_device()) def test_get_root_size_returns_None_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node, { }) self.assertTrue(layout.is_valid(), layout.errors) self.assertIsNone(layout.get_root_size()) def test_get_root_size_returns_root_size_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = StorageLayoutBase(node, { 'root_size': root_size, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(root_size, layout.get_root_size()) def test_configure_calls_configure_storage(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = StorageLayoutBase(node) mock_configure_storage = self.patch( StorageLayoutBase, "configure_storage") layout.configure() self.assertThat(mock_configure_storage, MockCalledOnceWith(True)) class LayoutHelpersMixin: def assertEFIPartition(self, partition, boot_disk): self.assertIsNotNone(partition) self.assertEquals( round_size_to_nearest_block( 
EFI_PARTITION_SIZE, boot_disk.block_size), partition.size) self.assertThat( partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.FAT32, label="efi", mount_point="/boot/efi", )) class TestFlatStorageLayout(MAASServerTestCase, LayoutHelpersMixin): def test__init_sets_up_all_fields(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) self.assertItemsEqual([ 'root_device', 'root_size', 'boot_size', ], layout.fields.keys()) def test__creates_layout_with_mbr_defaults(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.MBR, partition_table.table_type) # Validate root partition. partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[0] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( boot_disk.size - PARTITION_TABLE_EXTRA_SPACE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_maximum_mbr_partition_size(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=3 * (1024 ** 4)) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.MBR, partition_table.table_type) # Validate root partition. partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[0] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( MAX_PARTITION_SIZE_FOR_MBR, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_for_powernv(self): node = make_ppc64el_Node_with_powernv_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEqual(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate root partition. partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[0] self.assertIsNotNone(root_partition) self.assertEqual( round_size_to_nearest_block( boot_disk.size - PARTITION_TABLE_EXTRA_SPACE - PREP_PARTITION_SIZE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_for_powerkvm(self): node = make_ppc64el_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEqual(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate root partition. 
partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[0] self.assertIsNotNone(root_partition) self.assertEqual( round_size_to_nearest_block( boot_disk.size - PARTITION_TABLE_EXTRA_SPACE - PREP_PARTITION_SIZE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_uefi_defaults(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate efi partition. partitions = partition_table.partitions.order_by('id').all() efi_partition = partitions[0] self.assertEFIPartition(efi_partition, boot_disk) # Validate root partition. root_partition = partitions[1] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( boot_disk.size - EFI_PARTITION_SIZE - PARTITION_TABLE_EXTRA_SPACE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_for_arm64(self): node = make_arm64_Node_without_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = FlatStorageLayout(node) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEqual(PARTITION_TABLE_TYPE.MBR, partition_table.table_type) # Validate boot partition. partitions = partition_table.partitions.order_by('id').all() boot_partition = partitions[0] self.assertIsNotNone(boot_partition) self.assertEqual( round_size_to_nearest_block( MIN_BOOT_PARTITION_SIZE, PARTITION_ALIGNMENT_SIZE, False), boot_partition.size) self.assertThat( boot_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="boot", mount_point="/boot", )) # Validate root partition. root_partition = partitions[1] self.assertIsNotNone(root_partition) self.assertEqual( round_size_to_nearest_block( boot_disk.size - EFI_PARTITION_SIZE - PARTITION_TABLE_EXTRA_SPACE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_boot_size(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) boot_size = random.randint( MIN_BOOT_PARTITION_SIZE, MIN_BOOT_PARTITION_SIZE * 2) layout = FlatStorageLayout(node, { 'boot_size': boot_size, }) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate efi partition. partitions = partition_table.partitions.order_by('id').all() efi_partition = partitions[0] self.assertEFIPartition(efi_partition, boot_disk) # Validate boot partition. 
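# Note for the assertions below: the requested boot_size is not used
# verbatim. The layout is expected to round it to PARTITION_ALIGNMENT_SIZE
# first, so the boot partition is compared against
# round_size_to_nearest_block(boot_size, PARTITION_ALIGNMENT_SIZE, False),
# and the root partition then takes whatever remains of the disk after the
# EFI partition and the partition-table overhead.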
boot_partition = partitions[1] self.assertIsNotNone(boot_partition) self.assertEquals( round_size_to_nearest_block( boot_size, PARTITION_ALIGNMENT_SIZE, False), boot_partition.size) self.assertThat( boot_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="boot", mount_point="/boot", )) # Validate root partition. root_partition = partitions[2] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( boot_disk.size - boot_partition.size - EFI_PARTITION_SIZE - PARTITION_TABLE_EXTRA_SPACE, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_root_size(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = FlatStorageLayout(node, { 'root_size': root_size, }) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate efi partition. partitions = partition_table.partitions.order_by('id').all() efi_partition = partitions[0] self.assertEFIPartition(efi_partition, boot_disk) # Validate root partition. root_partition = partitions[1] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( root_size, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_boot_size_and_root_size(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) boot_size = random.randint( MIN_BOOT_PARTITION_SIZE, MIN_BOOT_PARTITION_SIZE * 2) root_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = FlatStorageLayout(node, { 'boot_size': boot_size, 'root_size': root_size, }) layout.configure() # Validate partition table. partition_table = boot_disk.get_partitiontable() self.assertEquals(PARTITION_TABLE_TYPE.GPT, partition_table.table_type) # Validate efi partition. partitions = partition_table.partitions.order_by('id').all() efi_partition = partitions[0] self.assertEFIPartition(efi_partition, boot_disk) # Validate boot partition. boot_partition = partitions[1] self.assertIsNotNone(boot_partition) self.assertEquals( round_size_to_nearest_block( boot_size, PARTITION_ALIGNMENT_SIZE, False), boot_partition.size) self.assertThat( boot_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="boot", mount_point="/boot", )) # Validate root partition. 
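# Note the asymmetry with the defaults-based tests above: when root_size
# is given explicitly, the expected root partition size is root_size
# rounded to the partition alignment, not the remainder of the disk.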
root_partition = partitions[2] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( root_size, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_root_device_and_root_size(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_device = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = FlatStorageLayout(node, { 'root_device': root_device.id, 'root_size': root_size, }) layout.configure() # Validate boot partition table. boot_partition_table = boot_disk.get_partitiontable() self.assertEquals( PARTITION_TABLE_TYPE.GPT, boot_partition_table.table_type) # Validate efi partition. boot_partitions = boot_partition_table.partitions.order_by('id').all() efi_partition = boot_partitions[0] self.assertEFIPartition(efi_partition, boot_disk) # Validate the root device partition table and partition. root_partition_table = root_device.get_partitiontable() self.assertEquals( PARTITION_TABLE_TYPE.GPT, root_partition_table.table_type) root_partition = root_partition_table.partitions.order_by( 'id').all()[0] self.assertIsNotNone(root_partition) self.assertEquals( round_size_to_nearest_block( root_size, PARTITION_ALIGNMENT_SIZE, False), root_partition.size) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) class TestLVMStorageLayout(MAASServerTestCase, LayoutHelpersMixin): def test__init_sets_up_all_fields(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node) self.assertItemsEqual([ 'root_device', 'root_size', 'boot_size', 'vg_name', 'lv_name', 'lv_size', ], layout.fields.keys()) def test_get_vg_name_returns_default_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(layout.DEFAULT_VG_NAME, layout.get_vg_name()) def test_get_vg_name_returns_vg_name_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) vg_name = factory.make_name("vg") layout = LVMStorageLayout(node, { 'vg_name': vg_name, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(vg_name, layout.get_vg_name()) def test_get_lv_name_returns_default_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(layout.DEFAULT_LV_NAME, layout.get_lv_name()) def test_get_lv_name_returns_lv_name_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) lv_name = factory.make_name("lv") layout = LVMStorageLayout(node, { 'lv_name': lv_name, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(lv_name, layout.get_lv_name()) def test_get_lv_size_returns_None_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice(
node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { }) self.assertTrue(layout.is_valid(), layout.errors) self.assertIsNone(layout.get_lv_size()) def test_get_lv_size_returns_lv_size_if_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) lv_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = LVMStorageLayout(node, { 'lv_size': lv_size, }) self.assertTrue(layout.is_valid(), layout.errors) self.assertEquals(lv_size, layout.get_lv_size()) def test_get_calculated_lv_size_returns_set_lv_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) lv_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = LVMStorageLayout(node, { 'lv_size': lv_size, }) self.assertTrue(layout.is_valid(), layout.errors) volume_group = factory.make_VolumeGroup(node=node) self.assertEquals(lv_size, layout.get_calculated_lv_size(volume_group)) def test_get_calculated_lv_size_returns_size_of_volume_group(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { }) self.assertTrue(layout.is_valid(), layout.errors) volume_group = factory.make_VolumeGroup(node=node) self.assertEquals( volume_group.get_size(), layout.get_calculated_lv_size(volume_group)) def test_raises_error_when_percentage_too_low_for_logical_volume(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { 'lv_size': "0%", }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "lv_size": [ "Size is too small. Minimum size is %s." % ( MIN_ROOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_value_too_low_for_logical_volume(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node, { 'lv_size': MIN_ROOT_PARTITION_SIZE - 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "lv_size": [ "Size is too small. Minimum size is %s." % ( MIN_ROOT_PARTITION_SIZE)], }, error.error_dict) def test_raises_error_when_percentage_too_high_for_logical_volume(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) root_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = LVMStorageLayout(node, { 'root_size': root_size, 'lv_size': "101%", }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "lv_size": [ "Size is too large. Maximum size is %s." % root_size], }, error.error_dict) def test_raises_error_when_value_too_high_for_logical_volume(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) max_size = ( boot_disk.size - EFI_PARTITION_SIZE) layout = LVMStorageLayout(node, { 'lv_size': max_size + 1, }) error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "lv_size": [ "Size is too large. Maximum size is %s."
% max_size], }, error.error_dict) def test__creates_layout_with_defaults(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = LVMStorageLayout(node) layout.configure() # Validate the volume group on root partition. partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[1] volume_group = VolumeGroup.objects.get( filesystems__partition=root_partition) self.assertIsNotNone(volume_group) self.assertEquals(layout.DEFAULT_VG_NAME, volume_group.name) # Validate one logical volume on volume group. self.assertEquals( 1, volume_group.virtual_devices.count(), "Should have only 1 logical volume.") logical_volume = volume_group.virtual_devices.first() self.assertEquals(volume_group.get_size(), logical_volume.size) self.assertEquals(layout.DEFAULT_LV_NAME, logical_volume.name) self.assertThat( logical_volume.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_vg_name_and_lv_name(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) vg_name = factory.make_name("vg") lv_name = factory.make_name("lv") layout = LVMStorageLayout(node, { "vg_name": vg_name, "lv_name": lv_name, }) layout.configure() # Validate the volume group on root partition. partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[1] volume_group = VolumeGroup.objects.get( filesystems__partition=root_partition) self.assertIsNotNone(volume_group) self.assertEquals(vg_name, volume_group.name) # Validate one logical volume on volume group. self.assertEquals( 1, volume_group.virtual_devices.count(), "Should have only 1 logical volume.") logical_volume = volume_group.virtual_devices.first() self.assertEquals(volume_group.get_size(), logical_volume.size) self.assertEquals(lv_name, logical_volume.name) self.assertThat( logical_volume.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_lv_size(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) lv_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout = LVMStorageLayout(node, { "lv_size": lv_size, }) layout.configure() # Validate the volume group on root partition. partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[1] volume_group = VolumeGroup.objects.get( filesystems__partition=root_partition) self.assertIsNotNone(volume_group) self.assertEquals(layout.DEFAULT_VG_NAME, volume_group.name) # Validate one logical volume on volume group. 
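# As with partition sizes elsewhere in this module, the requested lv_size
# is expected to be rounded to PARTITION_ALIGNMENT_SIZE before it is
# applied, which is why the assertion below goes through
# round_size_to_nearest_block rather than comparing lv_size directly.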
self.assertEquals( 1, volume_group.virtual_devices.count(), "Should have only 1 logical volume.") logical_volume = volume_group.virtual_devices.first() expected_size = round_size_to_nearest_block( lv_size, PARTITION_ALIGNMENT_SIZE, False) self.assertEquals(expected_size, logical_volume.size) self.assertEquals(layout.DEFAULT_LV_NAME, logical_volume.name) self.assertThat( logical_volume.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test__creates_layout_with_multiple_mbr_partitions(self): node = factory.make_Node(with_boot_disk=False) boot_disk = factory.make_PhysicalBlockDevice( node=node, size=7 * (1024 ** 4)) layout = LVMStorageLayout(node) layout.configure() # Validate the volume group on root partition. partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[0] volume_group = VolumeGroup.objects.get( filesystems__partition=root_partition) self.assertIsNotNone(volume_group) self.assertEquals( 4, partition_table.partitions.count(), "Should have 4 partitions.") expected_size = round_size_to_nearest_block( MAX_PARTITION_SIZE_FOR_MBR, PARTITION_ALIGNMENT_SIZE, False) self.assertEquals(expected_size, root_partition.size) class TestBcacheStorageLayoutBase(MAASServerTestCase): def test_setup_cache_device_field_does_nothing_if_no_boot_device(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) layout.setup_cache_device_field() self.assertNotIn('cache_device', layout.fields.keys()) def test_setup_cache_device_field_doesnt_include_boot_device(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) other_disks = [ factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) for _ in range(3) ] valid_choices = [ (disk.id, disk.id) for disk in other_disks ] layout = BcacheStorageLayoutBase(node) layout.setup_cache_device_field() self.assertItemsEqual( valid_choices, layout.fields['cache_device'].choices) def test__find_best_cache_device_returns_None_if_not_boot_disk(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) self.assertIsNone(layout._find_best_cache_device()) def test__find_best_cache_device_returns_smallest_ssd_first(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) # Small SSD factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['ssd']) # Smallest SSD smallest_ssd = factory.make_PhysicalBlockDevice( node=node, size=2 * 1024 * 1024 * 1024, tags=['ssd']) # Very small not SSD factory.make_PhysicalBlockDevice( node=node, size=1 * 1024 * 1024 * 1024, tags=['rotary']) layout = BcacheStorageLayoutBase(node) self.assertEquals(smallest_ssd, layout._find_best_cache_device()) def test__find_best_cache_device_returns_None_if_no_ssd(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) # Small Rotary factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['rotary']) # Smallest Rotary factory.make_PhysicalBlockDevice( node=node, size=2 * 1024 * 1024 * 1024, tags=['rotary']) layout = BcacheStorageLayoutBase(node) self.assertIsNone(layout._find_best_cache_device()) def test_get_cache_device_returns_set_cache_device_over_find(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, 
size=LARGE_BLOCK_DEVICE) # Small SSD small_ssd = factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['ssd']) # Smallest SSD factory.make_PhysicalBlockDevice( node=node, size=2 * 1024 * 1024 * 1024, tags=['ssd']) layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_device': small_ssd.id, } self.assertEquals(small_ssd, layout.get_cache_device()) def test_get_cache_device_returns_the_best_cache_device_if_not_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) # Small SSD factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['ssd']) # Smallest SSD smallest_ssd = factory.make_PhysicalBlockDevice( node=node, size=2 * 1024 * 1024 * 1024, tags=['ssd']) layout = BcacheStorageLayoutBase(node) layout.cleaned_data = {} self.assertEquals(smallest_ssd, layout.get_cache_device()) def test_get_cache_mode_returns_set_cache_mode(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) layout.cleaned_data = { 'cache_mode': cache_mode, } self.assertEquals(cache_mode, layout.get_cache_mode()) def test_get_cache_mode_returns_default_if_blank(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_mode': '', } self.assertEquals(layout.DEFAULT_CACHE_MODE, layout.get_cache_mode()) def test_get_cache_size_returns_set_cache_size(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) cache_size = random.randint( MIN_ROOT_PARTITION_SIZE, MIN_ROOT_PARTITION_SIZE * 2) layout.cleaned_data = { 'cache_size': cache_size, } self.assertEquals(cache_size, layout.get_cache_size()) def test_get_cache_size_returns_None_if_blank(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_size': '', } self.assertIsNone(layout.get_cache_size()) def test_get_cache_no_part_returns_boolean(self): node = make_Node_with_uefi_boot_method() layout = BcacheStorageLayoutBase(node) cache_no_part = factory.pick_bool() layout.cleaned_data = { 'cache_no_part': cache_no_part, } self.assertEquals(cache_no_part, layout.get_cache_no_part()) def test_create_cache_set_setups_up_cache_device_with_partition(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['ssd']) layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_no_part': False, } cache_set = layout.create_cache_set() cache_device = cache_set.get_device() partition_table = ssd.get_partitiontable() self.assertIsNotNone(partition_table) partition = partition_table.partitions.order_by('id').all()[0] self.assertEquals(partition, cache_device) def test_create_cache_set_setups_up_cache_device_without_part(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, tags=['ssd']) layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_no_part': True, } cache_set = layout.create_cache_set() cache_device = cache_set.get_device() self.assertEquals(ssd, cache_device) def test_create_cache_set_setups_up_cache_device_with_cache_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, 
size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=5 * 1024 * 1024 * 1024, block_size=4096, tags=['ssd']) cache_size = round_size_to_nearest_block( random.randint( 3 * 1024 * 1024 * 1024, 5 * 1024 * 1024 * 1024), 4096) layout = BcacheStorageLayoutBase(node) layout.cleaned_data = { 'cache_size': cache_size, 'cache_no_part': False, } cache_set = layout.create_cache_set() cache_device = cache_set.get_device() partition_table = ssd.get_partitiontable() self.assertIsNotNone(partition_table) partition = partition_table.partitions.order_by('id').all()[0] self.assertEquals(partition, cache_device) self.assertEquals( round_size_to_nearest_block( cache_size, PARTITION_ALIGNMENT_SIZE, False), partition.size) def test_raises_error_when_invalid_cache_device(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayoutBase(node, { "cache_device": boot_disk.id, }) layout.setup_cache_device_field() self.assertFalse(layout.is_valid(), layout.errors) self.assertEquals({ "cache_device": [ "'%s' is not a valid cache_device. It should be one " "of: '%s'." % (boot_disk.id, ssd.id)], }, layout.errors) def test_raises_error_when_cache_size_and_cache_no_part_set(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = BcacheStorageLayoutBase(node, { "cache_size": MIN_ROOT_PARTITION_SIZE, "cache_no_part": True, }) layout.setup_cache_device_field() self.assertFalse(layout.is_valid(), layout.errors) self.assertEquals({ "cache_size": [ "Cannot use cache_size and cache_no_part at the same time."], "cache_no_part": [ "Cannot use cache_size and cache_no_part at the same time."], }, layout.errors) def test_raises_error_when_percentage_too_low_for_cache_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayoutBase(node, { 'cache_size': "0%", }) layout.setup_cache_device_field() error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "cache_size": [ "Size is too small. Minimum size is %s." % ( MIN_BLOCK_DEVICE_SIZE)], }, error.error_dict) def test_raises_error_when_value_too_low_for_cache_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayoutBase(node, { 'cache_size': MIN_BLOCK_DEVICE_SIZE - 1, }) layout.setup_cache_device_field() error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "cache_size": [ "Size is too small. Minimum size is %s." % ( MIN_BLOCK_DEVICE_SIZE)], }, error.error_dict) def test_raises_error_when_percentage_too_high_for_cache_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayoutBase(node, { 'cache_size': "101%", }) layout.setup_cache_device_field() error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "cache_size": [ "Size is too large. Maximum size is %s."
% ssd.size], }, error.error_dict) def test_raises_error_when_value_too_high_for_cache_size(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayoutBase(node, { 'cache_size': ssd.size + 1, }) layout.setup_cache_device_field() error = self.assertRaises(StorageLayoutFieldsError, layout.configure) self.assertEquals({ "cache_size": [ "Size is too large. Maximum size is %s." % ssd.size], }, error.error_dict) class TestBcacheStorageLayout(MAASServerTestCase): def test__init_sets_up_cache_device_field(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = BcacheStorageLayout(node) self.assertIn('cache_device', layout.fields.keys()) def test__init_sets_up_all_fields(self): node = make_Node_with_uefi_boot_method() factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = BcacheStorageLayout(node) self.assertItemsEqual([ 'root_device', 'root_size', 'boot_size', 'cache_device', 'cache_mode', 'cache_size', 'cache_no_part', ], layout.fields.keys()) def test_configure_storage_creates_flat_layout_if_no_cache_device(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) layout = BcacheStorageLayout(node) layout.configure() partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[1] self.assertIsNotNone(root_partition) self.assertThat( root_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test_configure_creates_boot_partition(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayout(node) layout.configure() partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() boot_partition = partitions[1] self.assertEquals(1 * 1024 ** 3, boot_partition.size) self.assertThat( boot_partition.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="boot", mount_point="/boot")) def test_configure_storage_creates_bcache_layout_with_ssd(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayout(node) layout.configure() partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[2] cache_partition_table = ssd.get_partitiontable() cache_partition = cache_partition_table.partitions.order_by( 'id').all()[0] self.assertEquals( FILESYSTEM_TYPE.BCACHE_BACKING, root_partition.get_effective_filesystem().fstype) self.assertEquals( FILESYSTEM_TYPE.BCACHE_CACHE, cache_partition.get_effective_filesystem().fstype) root_filesystem = root_partition.get_effective_filesystem() self.assertEquals( FILESYSTEM_GROUP_TYPE.BCACHE, root_filesystem.filesystem_group.group_type) cache_filesystem = cache_partition.get_effective_filesystem() self.assertEquals( root_filesystem.filesystem_group,
cache_filesystem.cache_set.filesystemgroup_set.first()) bcache = root_partition.get_effective_filesystem().filesystem_group self.assertIsNotNone(bcache) self.assertThat( bcache.virtual_device.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test_configure_storage_creates_bcache_layout_without_partition(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) layout = BcacheStorageLayout(node, { "cache_no_part": True, }) layout.configure() partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[2] self.assertEquals( FILESYSTEM_TYPE.BCACHE_BACKING, root_partition.get_effective_filesystem().fstype) self.assertEquals( FILESYSTEM_TYPE.BCACHE_CACHE, ssd.get_effective_filesystem().fstype) root_filesystem = root_partition.get_effective_filesystem() self.assertEquals( FILESYSTEM_GROUP_TYPE.BCACHE, root_filesystem.filesystem_group.group_type) ssd_filesystem = ssd.get_effective_filesystem() self.assertEquals( root_partition.get_effective_filesystem().filesystem_group, ssd_filesystem.cache_set.filesystemgroup_set.first()) bcache = root_partition.get_effective_filesystem().filesystem_group self.assertThat( bcache.virtual_device.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) def test_configure_storage_creates_bcache_layout_with_cache_mode(self): node = make_Node_with_uefi_boot_method() boot_disk = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE) ssd = factory.make_PhysicalBlockDevice( node=node, size=LARGE_BLOCK_DEVICE, tags=['ssd']) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) layout = BcacheStorageLayout(node, { "cache_no_part": True, "cache_mode": cache_mode, }) layout.configure() partition_table = boot_disk.get_partitiontable() partitions = partition_table.partitions.order_by('id').all() root_partition = partitions[2] self.assertEquals( FILESYSTEM_TYPE.BCACHE_BACKING, root_partition.get_effective_filesystem().fstype) self.assertEquals( FILESYSTEM_TYPE.BCACHE_CACHE, ssd.get_effective_filesystem().fstype) root_filesystem = root_partition.get_effective_filesystem() self.assertEquals( FILESYSTEM_GROUP_TYPE.BCACHE, root_filesystem.filesystem_group.group_type) ssd_filesystem = ssd.get_effective_filesystem() self.assertEquals( root_partition.get_effective_filesystem().filesystem_group, ssd_filesystem.cache_set.filesystemgroup_set.first()) bcache = root_partition.get_effective_filesystem().filesystem_group self.assertEquals(cache_mode, bcache.cache_mode) self.assertThat( bcache.virtual_device.get_effective_filesystem(), MatchesStructure.byEquality( fstype=FILESYSTEM_TYPE.EXT4, label="root", mount_point="/", )) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_third_party_drivers.py0000644000000000000000000000727113056115004024463 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
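# Orientation for the tests in this module: a "driver" is a dict loaded
# from etc/maas/drivers.yaml, and matching is first-match-wins over
# shell-style modalias patterns ('foo*' matches 'foobar'). A minimal
# sketch of that semantics, assuming fnmatch-style globbing; the real
# implementation lives in maasserver/third_party_drivers.py and this is
# illustrative only:
#
#     from fnmatch import fnmatch
#
#     def match_aliases_to_driver_sketch(aliases, drivers):
#         for driver in drivers:
#             if any(fnmatch(alias, pattern)
#                    for pattern in driver['modaliases']
#                    for alias in aliases):
#                 return driver
#         return None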
"""Tests for `maasserver.third_party_drivers`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from maasserver import third_party_drivers from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.third_party_drivers import ( DriversConfig, get_third_party_driver, match_aliases_to_driver, node_modaliases, populate_kernel_opts, ) from maastesting import root from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import ( commissioningscript, NodeResult, ) class TestNodeModaliases(MAASServerTestCase): def test_uses_commissioning_modaliases(self): test_data = b'hulla\nbaloo' node = factory.make_Node() NodeResult.objects.store_data( node, commissioningscript.LIST_MODALIASES_OUTPUT_NAME, 0, RESULT_TYPE.COMMISSIONING, Bin(test_data)) aliases = node_modaliases(node) self.assertEqual(['hulla', 'baloo'], aliases) def test_survives_no_commissioning_data(self): node = factory.make_Node() aliases = node_modaliases(node) self.assertEqual([], aliases) class TestMatchAliasesToDriver(MAASServerTestCase): def test_finds_first_match(self): drivers = [ {'modaliases': ['foo*'], 'comment': 'first'}, {'modaliases': ['foo*'], 'comment': 'notfirst'}, ] aliases = ['foobar'] driver = match_aliases_to_driver(aliases, drivers) self.assertEqual(drivers[0], driver) def test_finds_no_match(self): drivers = [{'modaliases': ['foo*']}] aliases = ['bar'] driver = match_aliases_to_driver(aliases, drivers) self.assertIsNone(driver) class TestPopulateKernelOpts(MAASServerTestCase): def test_blacklist_provided(self): driver = {'blacklist': 'bad'} driver = populate_kernel_opts(driver) self.assertEqual('modprobe.blacklist=bad', driver['kernel_opts']) def test_no_blacklist_provided(self): driver = {} driver = populate_kernel_opts(driver) self.assertNotIn('kernel_opts', driver) class TestGetThirdPartyCode(MAASServerTestCase): def test_finds_match(self): node = factory.make_Node() mock = self.patch(third_party_drivers, 'match_aliases_to_driver') base_driver = dict(comment='hooray') mock.return_value = base_driver driver = get_third_party_driver(node) self.assertEqual(base_driver, driver) # ensure driver is a copy, not the original base_driver['comment'] = 'boo' self.assertEqual('hooray', driver['comment']) def test_finds_no_match(self): node = factory.make_Node() mock = self.patch(third_party_drivers, 'match_aliases_to_driver') mock.return_value = None driver = get_third_party_driver(node) self.assertEqual({}, driver) class TestDriversConfig(MAASServerTestCase): def test_get_defaults_returns_empty_drivers_list(self): observed = DriversConfig.get_defaults() self.assertEqual({'drivers': []}, observed) def test_load_from_yaml(self): filename = os.path.join(root, "etc", "maas", "drivers.yaml") for entry in DriversConfig.load(filename)['drivers']: self.assertItemsEqual( ['blacklist', 'comment', 'key_binary', 'modaliases', 'module', 'repository', 'package'], entry) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_triggers.py0000644000000000000000000001514713056115004022223 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
from __future__ import ( absolute_import, print_function, unicode_literals, ) """Tests for `maasserver.triggers`.""" str = None __metaclass__ = type __all__ = [] from contextlib import closing from django.db import connection from maasserver.testing.testcase import MAASServerTestCase from maasserver.triggers import ( register_all_triggers, register_procedure, register_trigger, render_notification_procedure, ) from maasserver.utils.orm import psql_array class TestTriggers(MAASServerTestCase): def test_register_trigger_doesnt_create_trigger_if_already_exists(self): NODE_CREATE_PROCEDURE = render_notification_procedure( 'node_create_notify', 'node_create', 'NEW.system_id') register_procedure(NODE_CREATE_PROCEDURE) with closing(connection.cursor()) as cursor: cursor.execute( "DROP TRIGGER IF EXISTS maasserver_node_node_create_notify ON " "maasserver_node;" "CREATE TRIGGER maasserver_node_node_create_notify " "AFTER INSERT ON maasserver_node " "FOR EACH ROW EXECUTE PROCEDURE node_create_notify();") # Will raise an OperationError if trigger already exists. register_trigger("maasserver_node", "node_create_notify", "insert") def test_register_trigger_creates_missing_trigger(self): NODE_CREATE_PROCEDURE = render_notification_procedure( 'node_create_notify', 'node_create', 'NEW.system_id') register_procedure(NODE_CREATE_PROCEDURE) register_trigger("maasserver_node", "node_create_notify", "insert") with closing(connection.cursor()) as cursor: cursor.execute( "SELECT * FROM pg_trigger WHERE " "tgname = 'maasserver_node_node_create_notify'") triggers = cursor.fetchall() self.assertEquals(1, len(triggers), "Trigger was not created.") def test_register_all_triggers(self): register_all_triggers() triggers = [ "maasserver_node_node_create_notify", "maasserver_node_node_update_notify", "maasserver_node_node_delete_notify", "maasserver_node_device_create_notify", "maasserver_node_device_update_notify", "maasserver_node_device_delete_notify", "maasserver_nodegroup_nodegroup_create_notify", "maasserver_nodegroup_nodegroup_update_notify", "maasserver_nodegroup_nodegroup_delete_notify", "maasserver_zone_zone_create_notify", "maasserver_zone_zone_update_notify", "maasserver_zone_zone_delete_notify", "maasserver_tag_tag_create_notify", "maasserver_tag_tag_update_notify", "maasserver_tag_tag_delete_notify", "maasserver_node_tags_node_device_tag_link_notify", "maasserver_node_tags_node_device_tag_unlink_notify", "maasserver_tag_tag_update_node_device_notify", "auth_user_user_create_notify", "auth_user_user_update_notify", "auth_user_user_delete_notify", "maasserver_event_event_create_notify", "maasserver_event_event_update_notify", "maasserver_event_event_delete_notify", "maasserver_event_event_create_node_device_notify", "maasserver_nodegroupinterface_nodegroupinterface_create_notify", "maasserver_nodegroupinterface_nodegroupinterface_update_notify", "maasserver_nodegroupinterface_nodegroupinterface_delete_notify", "maasserver_subnet_subnet_update_nodegroup_notify", "maasserver_interface_ip_addresses_nd_sipaddress_link_notify", "maasserver_interface_ip_addresses_nd_sipaddress_unlink_notify", "metadataserver_noderesult_nd_noderesult_link_notify", "metadataserver_noderesult_nd_noderesult_unlink_notify", "maasserver_interface_nd_interface_link_notify", "maasserver_interface_nd_interface_unlink_notify", "maasserver_interface_nd_interface_update_notify", "maasserver_blockdevice_nd_blockdevice_link_notify", "maasserver_blockdevice_nd_blockdevice_unlink_notify", 
"maasserver_physicalblockdevice_nd_physblockdevice_update_notify", "maasserver_virtualblockdevice_nd_virtblockdevice_update_notify", "maasserver_sshkey_user_sshkey_link_notify", "maasserver_sshkey_user_sshkey_unlink_notify", "maasserver_sslkey_user_sslkey_link_notify", "maasserver_sslkey_user_sslkey_unlink_notify", "maasserver_fabric_fabric_create_notify", "maasserver_fabric_fabric_update_notify", "maasserver_fabric_fabric_delete_notify", "maasserver_vlan_vlan_create_notify", "maasserver_vlan_vlan_update_notify", "maasserver_vlan_vlan_delete_notify", "maasserver_subnet_subnet_create_notify", "maasserver_subnet_subnet_update_notify", "maasserver_subnet_subnet_delete_notify", "maasserver_space_space_create_notify", "maasserver_space_space_update_notify", "maasserver_space_space_delete_notify", "maasserver_subnet_subnet_node_update_notify", "maasserver_fabric_fabric_node_update_notify", "maasserver_space_space_node_update_notify", "maasserver_vlan_vlan_node_update_notify", "maasserver_staticipaddress_ipaddress_node_update_notify", "maasserver_staticipaddress_ipaddress_subnet_update_notify", ] sql, args = psql_array(triggers, sql_type="text") with closing(connection.cursor()) as cursor: cursor.execute( "SELECT tgname::text FROM pg_trigger WHERE " "tgname::text = ANY(%s) " "OR tgname::text SIMILAR TO 'maasserver.*'" % sql, args) db_triggers = cursor.fetchall() # Note: if this test fails, a trigger may have been added, but not # added to the list of expected triggers. triggers_found = [trigger[0] for trigger in db_triggers] self.assertEquals( len(triggers), len(db_triggers), "Missing %s triggers in the database. Triggers found: %s" % ( len(triggers) - len(db_triggers), triggers_found)) self.assertItemsEqual( triggers, triggers_found, "Missing triggers in the database. Triggers found: %s" % ( triggers_found)) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_webapp.py0000644000000000000000000002131513056115004021645 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). # TODO: Description here. 
"""...""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from textwrap import dedent from django.core.handlers.wsgi import WSGIHandler from lxml import html from maasserver import ( eventloop, start_up, webapp, ) from maasserver.websockets.protocol import WebSocketFactory from maastesting.factory import factory from maastesting.matchers import ( DocTestMatches, MockCalledOnceWith, ) from maastesting.testcase import MAASTestCase from maastesting.twisted import TwistedLoggerFixture from mock import sentinel from testtools.matchers import ( ContainsDict, Equals, Is, IsInstance, MatchesStructure, ) from twisted.internet import ( defer, reactor, ) from twisted.internet.endpoints import TCP4ServerEndpoint from twisted.web.resource import Resource from twisted.web.server import Site from twisted.web.test.requesthelper import DummyRequest class TestCleanPathRequest(MAASTestCase): def test_requestReceived_converts_extra_slashes_to_single(self): mock_super_requestReceived = self.patch( webapp.Request, "requestReceived") request = webapp.CleanPathRequest(sentinel.channel, sentinel.queued) path_pieces = [ factory.make_name("path") for _ in range(3) ] double_path = ("/" * random.randint(2, 8)).join(path_pieces) single_path = "/".join(path_pieces) request.requestReceived( sentinel.command, double_path, sentinel.version) self.assertThat( mock_super_requestReceived, MockCalledOnceWith( sentinel.command, single_path, sentinel.version)) def test_requestReceived_converts_extra_slashes_ignores_args(self): mock_super_requestReceived = self.patch( webapp.Request, "requestReceived") request = webapp.CleanPathRequest(sentinel.channel, sentinel.queued) path_pieces = [ factory.make_name("path") for _ in range(3) ] args = "?op=extra//data" double_path = ("/" * random.randint(2, 8)).join(path_pieces) + args single_path = "/".join(path_pieces) + args request.requestReceived( sentinel.command, double_path, sentinel.version) self.assertThat( mock_super_requestReceived, MockCalledOnceWith( sentinel.command, single_path, sentinel.version)) class TestResourceOverlay(MAASTestCase): def make_resourceoverlay(self): return webapp.ResourceOverlay(Resource()) def test__init__(self): resource = self.make_resourceoverlay() self.assertThat(resource, IsInstance(Resource)) def test_getChild(self): resource = self.make_resourceoverlay() self.assertThat(resource, IsInstance(webapp.ResourceOverlay)) self.assertThat(resource.basis, IsInstance(Resource)) class TestWebApplicationService(MAASTestCase): def make_endpoint(self): return TCP4ServerEndpoint(reactor, 0, interface="localhost") def make_webapp(self): service_endpoint = self.make_endpoint() service = webapp.WebApplicationService(service_endpoint) # Patch the getServiceNamed so the WebSocketFactory does not # error trying to register for events from the RPC service. In this # test the RPC service is not started. 
self.patch(eventloop.services, "getServiceNamed") return service def test__init_creates_site(self): service = self.make_webapp() self.assertThat(service.site, IsInstance(Site)) self.assertThat( service.site.requestFactory, Is(webapp.CleanPathRequest)) self.assertThat(service.websocket, IsInstance(WebSocketFactory)) def test__default_site_renders_starting_page(self): service = self.make_webapp() request = DummyRequest(b"any/where".split("/")) resource = service.site.getResourceFor(request) content = resource.render(request) page = html.fromstring(content) self.expectThat( page.find(".//title").text_content(), Equals("503 - MAAS is starting")) self.expectThat( page.find(".//h1").text_content(), Equals("MAAS is starting")) self.expectThat( page.find(".//p").text_content(), Equals("Please try again in a few seconds.")) self.expectThat( request.outgoingHeaders, ContainsDict({"retry-after": Equals("5")})) def test__startService_starts_websocket_and_application(self): service = self.make_webapp() self.addCleanup(service.stopService) # start_up() isn't safe to call right now, but we only really care # that it is called. self.patch_autospec(start_up, "start_up") start_up.start_up.return_value = defer.succeed(None) service.startService() self.assertTrue(service.running) self.assertThat(start_up.start_up, MockCalledOnceWith()) self.assertTrue(service.websocket.listener.connected()) def test__error_when_starting_is_logged(self): service = self.make_webapp() self.addCleanup(service.stopService) start_up_error = factory.make_exception() self.patch_autospec(start_up, "start_up") start_up.start_up.return_value = defer.fail(start_up_error) # The failure is logged. with TwistedLoggerFixture() as logger: service.startService() self.assertDocTestMatches( dedent("""\ Site starting on ... --- MAAS web application failed to start Traceback (most recent call last): ... maastesting.factory.TestException#... """), logger.output) def test__error_when_starting_changes_page_to_error(self): service = self.make_webapp() self.addCleanup(service.stopService) # start_up() isn't safe to call right now, but we only really care # that it is called. start_up_error = factory.make_exception() self.patch_autospec(start_up, "start_up") start_up.start_up.return_value = defer.fail(start_up_error) # No error is returned. service.startService() # The site's page (for any path) shows the error. request = DummyRequest(b"any/where".split("/")) resource = service.site.getResourceFor(request) content = resource.render(request) page = html.fromstring(content) self.expectThat( page.find(".//title").text_content(), Equals("503 - MAAS failed to start")) self.expectThat( page.find(".//h1").text_content(), Equals("MAAS failed to start")) self.assertDocTestMatches( dedent("""\ Traceback (most recent call last): ... maastesting.factory.TestException#... 
"""), page.find(".//pre").text_content()) def test__successful_start_installs_wsgi_resource(self): service = self.make_webapp() self.addCleanup(service.stopService) self.patch_autospec(start_up, "start_up") start_up.start_up.return_value = defer.succeed(None) service.startService() resource = service.site.resource self.assertThat(resource, IsInstance(Resource)) overlay_resource = resource.getChildWithDefault("MAAS", request=None) self.assertThat(overlay_resource, IsInstance(webapp.ResourceOverlay)) self.assertThat(overlay_resource.basis, MatchesStructure( _reactor=Is(reactor), _threadpool=Is(service.threadpool), _application=IsInstance(WSGIHandler))) def test__stopService_stops_the_service_and_the_websocket(self): service = self.make_webapp() self.patch_autospec(start_up, "start_up") start_up.start_up.return_value = defer.succeed(None) with TwistedLoggerFixture() as logger: service.startService() self.expectThat( logger.output, DocTestMatches("""\ Site starting on ... --- Listening for database notifications. """)) with TwistedLoggerFixture() as logger: service.stopService() self.expectThat( logger.output, DocTestMatches("""\ (TCP Port ... Closed) --- Connection closed. """)) self.assertFalse(service.running) self.assertFalse(service.websocket.listener.connected()) maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_worker_user.py0000644000000000000000000000251513056115004022737 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test for the system user that represents node-group workers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib.auth.models import User from maasserver.models import UserProfile from maasserver.models.user import SYSTEM_USERS from maasserver.testing.testcase import MAASServerTestCase from maasserver.worker_user import ( get_worker_user, user_name, ) from testtools import ExpectedException class TestNodeGroupUser(MAASServerTestCase): """Test the special "user" that workers use to access the API.""" def test_get_worker_user_always_returns_same_user(self): self.assertEqual(get_worker_user().id, get_worker_user().id) def test_get_worker_user_holds_the_worker_user(self): worker_user = get_worker_user() self.assertIsInstance(worker_user, User) self.assertEqual(user_name, worker_user.username) def test_worker_user_is_system_user(self): worker_user = get_worker_user() self.assertIn(worker_user.username, SYSTEM_USERS) with ExpectedException(UserProfile.DoesNotExist): worker_user.userprofile maas-1.9.5+bzr4599.orig/src/maasserver/tests/test_x509.py0000644000000000000000000001650213056115004021076 0ustar 00000000000000# Copyright 2014-2015 Cloudbase Solutions SRL. # Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.x509`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from maasserver import x509 from maasserver.x509 import ( WinRMX509, WinRMX509Error, ) from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase import OpenSSL from testtools.matchers import ( FileContains, FileExists, ) class TestWinRMX509(MAASTestCase): def configure_WinRMX509(self): cert_name = factory.make_name('cert_name') upn_name = factory.make_name('upn_name') cert_dir = self.make_dir() winrmx509 = WinRMX509( cert_name=cert_name, upn_name=upn_name, cert_dir=cert_dir) return winrmx509 def make_certificate(self): winrmx509 = self.configure_WinRMX509() _, cert = winrmx509.get_key_and_cert() winrmx509.write_cert(cert) return cert, winrmx509 def dump_certificate(self, cert): return OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, cert) def make_privatekey(self): winrmx509 = self.configure_WinRMX509() key, _ = winrmx509.get_key_and_cert() winrmx509.write_privatekey(key) return key, winrmx509 def dump_privatekey(self, key): return OpenSSL.crypto.dump_privatekey( OpenSSL.crypto.FILETYPE_PEM, key) def make_cert_and_privatekey(self): winrmx509 = self.configure_WinRMX509() key, cert = winrmx509.get_key_and_cert() winrmx509.write_cert(cert) winrmx509.write_privatekey(key) return key, cert, winrmx509 def test_create_cert_raises_error_on_file_already_exists(self): cert, winrmx509 = self.make_certificate() self.assertRaises(WinRMX509Error, winrmx509.create_cert) def test_create_cert_writes_cert(self): winrmx509 = self.configure_WinRMX509() winrmx509.create_cert() self.assertThat(winrmx509.pem_file, FileExists()) def test_create_cert_writes_privatekey(self): winrmx509 = self.configure_WinRMX509() winrmx509.create_cert() self.assertThat(winrmx509.key_file, FileExists()) def test_create_cert_exports_p12(self): winrmx509 = self.configure_WinRMX509() winrmx509.create_cert() self.assertThat(winrmx509.pfx_file, FileExists()) def test_create_cert_raises_error_on_export_p12_error(self): winrmx509 = self.configure_WinRMX509() self.patch(winrmx509, 'export_p12').side_effect = OpenSSL.crypto.Error self.assertRaises(WinRMX509Error, winrmx509.create_cert) def test_create_cert_calls_print_cert_details(self): winrmx509 = self.configure_WinRMX509() mock_print = self.patch(winrmx509, 'print_cert_details') winrmx509.create_cert(print_cert=True) self.assertThat(mock_print, MockCalledOnceWith(winrmx509.pem_file)) def test_get_key_and_cert_returns_rsa_key(self): winrmx509 = self.configure_WinRMX509() key, _ = winrmx509.get_key_and_cert() self.assertEqual(OpenSSL.crypto.TYPE_RSA, key.type()) def test_get_key_and_cert_returns_key_of_correct_size(self): winrmx509 = self.configure_WinRMX509() key, _ = winrmx509.get_key_and_cert() self.assertEqual(winrmx509.KEY_SIZE, key.bits()) def test_get_key_and_cert_returns_cert_with_upn_name(self): winrmx509 = self.configure_WinRMX509() _, cert = winrmx509.get_key_and_cert() self.assertEqual(winrmx509.upn_name, cert.get_subject().CN) def test_get_key_and_cert_returns_cert_with_valid_serial_number(self): winrmx509 = self.configure_WinRMX509() _, cert = winrmx509.get_key_and_cert() self.assertEqual(1000, cert.get_serial_number()) def test_get_key_and_cert_returns_cert_with_extensions(self): winrmx509 = self.configure_WinRMX509() _, cert = winrmx509.get_key_and_cert() self.assertEqual(2, cert.get_extension_count()) 
self.assertEqual( 'subjectAltName', cert.get_extension(0).get_short_name()) self.assertEqual( 'extendedKeyUsage', cert.get_extension(1).get_short_name()) def test_get_key_and_cert_returns_cert_with_issuer_set_from_subject(self): winrmx509 = self.configure_WinRMX509() _, cert = winrmx509.get_key_and_cert() self.assertEqual(cert.get_subject(), cert.get_issuer()) def test_get_cert_details(self): cert, winrmx509 = self.make_certificate() self.assertItemsEqual({ 'subject': cert.get_subject().CN, 'thumbprint': cert.digest(b'SHA1'), 'contents': self.dump_certificate(cert), }, winrmx509.get_cert_details(winrmx509.pem_file)) def test_write_privatekey(self): key, winrmx509 = self.make_privatekey() self.assertThat( winrmx509.key_file, FileContains(self.dump_privatekey(key))) def test_write_cert(self): cert, winrmx509 = self.make_certificate() self.assertThat( winrmx509.pem_file, FileContains(self.dump_certificate(cert))) def test_load_pem_file_returns_cert_and_contents(self): cert, winrmx509 = self.make_certificate() loaded_cert, contents = winrmx509.load_pem_file(winrmx509.pem_file) self.assertEqual(self.dump_certificate(cert), contents) self.assertEqual( self.dump_certificate(cert), self.dump_certificate(loaded_cert)) def test_load_pem_file_raises_error_on_invalid_cert(self): winrmx509 = self.configure_WinRMX509() self.patch( x509, 'read_text_file').return_value = factory.make_string() self.assertRaises(WinRMX509Error, winrmx509.load_pem_file, 'file') def test_export_p12(self): key, cert, winrmx509 = self.make_cert_and_privatekey() passphrase = factory.make_name('password') winrmx509.export_p12(key, cert, passphrase) with open(winrmx509.pfx_file, 'rb') as stream: p12_contents = stream.read() p12 = OpenSSL.crypto.load_pkcs12( p12_contents, bytes(passphrase.encode("utf-8"))) self.assertEqual( self.dump_certificate(cert), self.dump_certificate(p12.get_certificate())) self.assertEqual( self.dump_privatekey(key), self.dump_privatekey(p12.get_privatekey())) def test_get_ssl_dir_calls_ensure_dir(self): winrmx509 = self.configure_WinRMX509() mock_ensure_dir = self.patch(x509, 'ensure_dir') fake_dir = factory.make_name('dir') winrmx509.get_ssl_dir(fake_dir) self.assertThat(mock_ensure_dir, MockCalledOnceWith(fake_dir)) def test_get_ssl_dir_returns_home_ssl_dir(self): winrmx509 = self.configure_WinRMX509() self.patch(x509, 'ensure_dir') self.assertEqual( os.path.join(os.path.expanduser("~"), '.ssl'), winrmx509.get_ssl_dir()) def test_generate_passphrase(self): winrmx509 = self.configure_WinRMX509() self.assertEqual( winrmx509.PASSPHRASE_LENGTH, len(winrmx509.generate_passphrase())) maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_dsa0000644000000000000000000000124013056115004021413 0ustar 00000000000000-----BEGIN DSA PRIVATE KEY----- MIIBvAIBAAKBgQC5fDwjGkmt6QghiWia+JB9ED5a4Mhty5Gvv8uMcdXTriYabC3t lffgil+0s9cqYQrNuiqjBXv3m2BgFA4U3VZROP04KQD+Bok43TtRRl02HYEMiaX6 edG01FtMyiD/3cB7OXQi5+F2AMV80rLzkRlUBmzOj+r4d5Ynk5HlRExmQwIVAIBo EpSY+9GojftVGPU8T0HaueR9AoGBAKVmDBdCFPgWFLRbsEUVBPcBBsQxI07bqFmA ljC+2Z/nBTryvgtMfGCLUN8SIiN2v9AwqAzyGT7yJlwzK7NM1Lp3oJ3UvYOydIAh xWPXXfq4GEoVB8AiPocvw/Q6dbpDZxg9G298ebxKEiusIayVgTOorO01uEeX78/8 Q4a7SHMYAoGBAJbZsmuuWN2kb7lD27IzKcOgd07esoHPWZnv4qg7xhS1GdVr485v 73OW1rfpWU6PdohckXLg9ZaoWtVTwNKTfHxS3iug9/pseBWTHdpmxCM5ClsZJii6 T4frR5NTOCGKLxOamTs///OXopZr5u3vT20NFlzFE95J86tGtxYPPivxAhQByRHQ RXk6Jpjwa5kX+bYX1J3FIg== -----END DSA PRIVATE KEY----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_dsa.pub0000644000000000000000000000114013056115004022177 0ustar 00000000000000ssh-dss 
AAAAB3NzaC1kc3MAAACBALl8PCMaSa3pCCGJaJr4kH0QPlrgyG3Lka+/y4xx1dOuJhpsLe2V9+CKX7Sz1yphCs26KqMFe/ebYGAUDhTdVlE4/TgpAP4GiTjdO1FGXTYdgQyJpfp50bTUW0zKIP/dwHs5dCLn4XYAxXzSsvORGVQGbM6P6vh3lieTkeVETGZDAAAAFQCAaBKUmPvRqI37VRj1PE9B2rnkfQAAAIEApWYMF0IU+BYUtFuwRRUE9wEGxDEjTtuoWYCWML7Zn+cFOvK+C0x8YItQ3xIiI3a/0DCoDPIZPvImXDMrs0zUunegndS9g7J0gCHFY9dd+rgYShUHwCI+hy/D9Dp1ukNnGD0bb3x5vEoSK6whrJWBM6is7TW4R5fvz/xDhrtIcxgAAACBAJbZsmuuWN2kb7lD27IzKcOgd07esoHPWZnv4qg7xhS1GdVr485v73OW1rfpWU6PdohckXLg9ZaoWtVTwNKTfHxS3iug9/pseBWTHdpmxCM5ClsZJii6T4frR5NTOCGKLxOamTs///OXopZr5u3vT20NFlzFE95J86tGtxYPPivx ubuntu@server-7476 maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa0000644000000000000000000000321713056115004021437 0ustar 00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA3a88w2TcMjFQbwU0+pAZ63z2b/wFG5MY+xuQmjE3cOsZCrYV dthJ7Q95cWkMUSs0E3wnNnfwgtSuLrIQnEbNUukkNsydow6tVJjlJAzILzYeCVYS WEinOprvIt7O6BkpgWnaOcL7ZaUszK7HDqX/EHv8Jk+Xx9digfYqSaqBTUDFW1XF WqxP7+sKZ9xmQutmEW+Yms/fzPrfdkIbudFtrvin6LqrSgdpcCAwymTMMN/BMbXT KLodhWacWqIeFHsuW0ad7TNyYJ1nF+n8lDqdoMSUTyJQ/a62YV+SJBJGTPDEG3T0 0C4CH/fKBaYeURusNbdNgbyPZySjOypChHwR7QIDAQABAoIBAE5/DH8LqcTEHX0S VO4cNHFkMEb68DwRXBkea5eNsdn0BUv7qaIJeDPO9OupjMj5CVmU7rWkxq8s6/hw 6NzNXUrsbvxQe8kPG2UHNqwLMp81BHG93oUQRNbFocOxLYaV0lKWzsUBO8+EK1bW 1HllYenOXTybll0W8TSfm9212E8n5YFYoJDFH9nEDiUqDcYLOKJc5njceZUr0KQ2 eKTpaNEtS78crNAwvdu4UzfC/I4ezfoWreXao0epr3anfTXXYf2TrdthgG7FclNP aOrCsQFXMrqVIc/FVXmbPKfhZOnOGglKVEUr0OmG3mfKan60636bGoiJJ2D6snKw YCAk8mkCgYEA8TcYZAG7gsi/1yTDrIfcrm5zGGBQ371vNOsTOVXGD28mepwUUl3B SucD7dAXlAyN/ikHly/wX/tVw3tF7N8nJx1wJ+KwGYgTVCgRBXexjiwz6NdyMbQX F7kb6LQjL6P40ygYTjVuFgee2KXd11NuT5pW1wleF3cY3tKWx0TWgEcCgYEA60Wv qz5yMg9+9A85Qis3VAw2DgJ3j12RYfSLhL0zkgZ6M1caZqMBuP7Q3HLlrEVJ+PJY aIiidIz9VLARa9wetAfJUVDsi/aijVXknLdm18Rujymsf97Wa4xj+006VmjdEeXV RoZGJ3l+j7yo2yfHxO1hHLLiXRXRQwOmUIGgSisCgYEAzYoI+o6PXS36ajUll0pd vTTYVhkcUMp2jD0TMHPqRRSNUUTV/Clvn4eiTW5X6QuZos0LbsSmquLbfar5NpIg JrBq9VGwhNDyx28sseAAKAl6YhnTcI7oboqJQYzdvqaWTDeKHnpgx9zOegU8N1Mc WDBHdwzAZHZTduszF7GMpdkCgYAQ7SuNS2nV1i2RC4NYElnhrxs4eM73PokWHgzn mOEb8WFbTjn1Bmc6UwLdyVpiwX1n7q+TnbjqX7ZeIGiwdN60nxbJxeOu0iixuGtB JyS8A0LdA+eIL5UHmcsbqlu3GcZF4l4su75SWrhTSQRw9/S0Y0uoT+pfPhGXG60c f6bzjwKBgQDe+l7pa+qkE0a8B11CgqKzmHOa/PRool7+0//WA5/H6jx0iAH5Ms2f 8bpVbWugyXOFXkFN+DOfzJgar5EQmAtpipx6OKx1DXQqabmUlm75HOkr1dxOshou w39I3JOc1yHjlPJHjVyJqJcVOHYAFbEu5w+Sk0YELajvgU7FkfkonA== -----END RSA PRIVATE KEY----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa0.pub0000644000000000000000000000062213056115004022301 0ustar 00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDdrzzDZNwyMVBvBTT6kBnrfPZv/AUbkxj7G5CaMTdw6xkKthV22EntD3lxaQxRKzQTfCc2d/CC1K4ushCcRs1S6SQ2zJ2jDq1UmOUkDMgvNh4JVhJYSKc6mu8i3s7oGSmBado5wvtlpSzMrscOpf8Qe/wmT5fH12KB9ipJqoFNQMVbVcVarE/v6wpn3GZC62YRb5iaz9/M+t92Qhu50W2u+KfouqtKB2lwIDDKZMww38ExtdMouh2FZpxaoh4Uey5bRp3tM3JgnWcX6fyUOp2gxJRPIlD9rrZhX5IkEkZM8MQbdPTQLgIf98oFph5RG6w1t02BvI9nJKM7KkKEfBHt ubuntu@test_rsa0.pub maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa1.pub0000644000000000000000000000062213056115004022302 0ustar 00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6Gkj1y8/0T7q/FqBSr9xRBO9GzT+JeoWNXaqhUBg179Zd53XM4qblVwz/rsMa70te8CYNIFU+GbcNY1tNCo78NlHjQA8H98COnbVWKxvABECHrJ8nbYB4lWH9wI8/uvR0um6yUb/tZYbiSqnQxhoGAF/uQQfhqzc+tc7uTjnsa6krrNqQCdpFbAVVy+vZzvcJl6CX8nu5uJ8jedWfXOZJFcQPH+VwkUT0oV+1zVeLpE4LFkRO52JrC9Dy1xgrYM0EhcrShBdD1GQx9IXdW4Z9PIaVcq/y4Qv574yHMvi+6hwG6xpCtRXmy0lG0LiG60c1yOredkO6U0MJIVbeZ/+r ubuntu@test_rsa1.pub 
maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa2.pub0000644000000000000000000000062213056115004022303 0ustar 00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKVdMk4Q+13uUvXjb6iU+oB2Auk0HpaILZ8Pw/V63PTJ+QXtEp0vTe6DEvr9uF2vl6tF+AosiG4krEwqBNGx/h8MmFO7BgNTxn9eU2VwfHzmQ2nqkXHsXgp66cNT0Yd0nfvVV/fsMpKN9fUaYrXjAlFxvC9iQ33Rp6vj/X+oqDvYf3xZjbuZy+BxdJnmiTAJcFouTyrdy1Em1EZITq5M4EXw93/O2vAPYSFPAeELBE+mIMJxOCY1Fm101oAqO0qof3Rb2hZxc2WINjmqZIxoi+sviU0ny/dIFknhYEg1Xh2hObPn0nN5+4VHjBTdRmpRXqggotc53sYC5udVmFsW8B ubuntu@test_rsa2.pub maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa3.pub0000644000000000000000000000062213056115004022304 0ustar 00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDai2ir5yxckoYTHUbFL6pe01Kx+Dy6nw9p7LhFaBixUOh8G7eIgFBguYcir2ZKBfM/lbTnW+MSiGF2VMlXX0+X9Ux2iwPSJa2wIA7Cc5prCz/RnMRKQ+2S1JJuORoi8tDI0p1R0sGWMXCwaj30oRN0THWz884+d3YlDD/O39h74gnLNEx/TQig/r/Aev3VfeKO6dlbbX81vSad2JVncislyMq1TgJdhn2/JI8t+LW0xVc6ZgQr94YB2M2DNjFSisP2vDrV5LWM+IqiF8T/YHkcSsANr8WWvZWa79uHyRBU3xr2qZZqMjMVL0B/NOJYXyGBIJ7HQnlVLmqFenKl8ZtL ubuntu@test_rsa3.pub maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_rsa4.pub0000644000000000000000000000062213056115004022305 0ustar 00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNDA4vXVTxHuKikIXeA6/K/X7hKpJcOJV0HcXUHlSNa9phNW0f8vbci+BxcLAqIz/U+BPiQ9lCxz7so+qCTFrM4poOdkTyup8VUxUqntiaxgiCJZ1of+eMe39+S9XQk6RogiCpExanhD9xPLkK/mLr5phnQwDjEDJwD4OOF0rYsbYoqje/0Pd+Tm0PIepq/qwsu5PAKPJU8dfnp8BWLCuIJ+DA2lfRUjmxWwLczfM/4hu1bZlYp1mzJJgMIOY92/pUToYxvBiIiKs3qWh6HC5Vxo5Vz4w5WLnTnIPDvpYBvWj8LGXJwHuhqlzed2icwPk8krip2BzwsHotru3UXtKf ubuntu@test_rsa4.pub maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_x509_0.pem0000644000000000000000000000206613056115004022357 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIIC7zCCAdegAwIBAgIJAIe2LoljdfboMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzIzNloXDTI0MDQyMjE4MzIzNlowEDEOMAwG A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9B9r6 4tFkbzu71iq9NYAr0u7BkybolU7V86Zpvg59MPzjW7zAf17N8hKRGfMYWJbgp3Wf NNbq1Bys+4BBDQn+nRkr1H8bAuzz3+HjhX/InjGyBm51MGaqtO4nJk/PX9s4qurX BTWhg/X/S0u7P+oqoUY3idLxAOKuZGrMIzUhikxew35QcDY3aiLjtrKN6R1SM+8S PQR0aOoGdMpXkAOA/zPEU6qYCaXfI56/TrxEBFohffTuTVZxOLv3OoN/2NkwxYCN sSLV/gAeX3Xi49K9++cDcWPK+I1t3uQ1psfWIjQMhwyUkOO7NHMOEtNCWbTvr/4w q8tTMAFQVCjJ9nxNAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw DQYJKoZIhvcNAQELBQADggEBABMLAl8asMyyMKFKMov9+mNkNvXB2KroJbjGNTxd b/ZFrwunYx+TAiYLy0/mnrJTHAl49+Xbr+kYsHSh4Bkj4TgXLBPulcbc7oM/KYuE YP8SK556f3IWqylYXaUmZBK4VOUKMYO/r2ZHbaaIm5DQGCVt9RGE5Nz22xGepJ4M FpK9fFx2PO17zEwCv32EAyUeTmnLt62g+V1ch4tQdoYT/O5KnYhh3dcFGCLPiOHZ JVZ/qG6DCvOYfYeU7oGtia9Uu+ClrS+nCTofYQYYFE0WKqGoS5tIxgRWjOUdvHrU 5dZFN3ODiH4KVk+B+VJfRngiMiJLKrD5/FAojThTGInIi9g= -----END CERTIFICATE----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_x509_1.pem0000644000000000000000000000206613056115004022360 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIIC7zCCAdegAwIBAgIJAKcZaoHYOg4cMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQyM1oXDTI0MDQyMjE4MzQyM1owEDEOMAwG A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCq0wOT PL2ZhtcU/1wzO7bVDRW3Me3SU/SqtDSgPT57CcrS2QM2LdjOO2DBedxNQI6X1nFb orOGgYu1xxX0Hs7KAnwyHVwsks9d+zNw5y0A8qN1Ml+s4+h6lobVGpL60rEfP2dG RFfjU5drbYLHLkf71Et46TdSskb9qoygs7XoPothx/t38C5HtwzUXupAge4BJR47 yvBvbrkC2HZrmsKhlqnJuuXWVaUXfAAmBkLVncBPYBZci/lpvhWACcK3ZP1Fq28c 3VS+OnHFe8MNiiI34jHRtct/NHwDMBvsfo0Jx2fBa8RsW+WtmU0AvgU/7Nz+uHe2 
KlKf2odJ3Wh2suqHAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw DQYJKoZIhvcNAQELBQADggEBAITOKn0wNRQ921gpRfLMjXnV8lbVjGqSlCDXjwxk LahYH0q9zs4GTjKomz5QOPLrZ1bTum9DcbHxiWs+gcwCAqZ3uxFAyKWi5jJSGZmU ZY/cUxqVNkoCTRBVjjAVw+zrgJE1EI5BIe2tHMmU/+9oIOKq6BCcEQgT75FxyPqf cT0YR9k2qeTDN7TrdZ1uBZLfQFKRP4ILfzOrW3e9aHdJ5M1fvQ4pVfu0/FI3+wrz HE66X0Ct66iemU7Ey5HoqlRsLJpfUO2ZRN569fbK3v7NG5Fuz1ihD7ZSn5P5wgKU nSh9IfPKLuSuJNVK9ivLt2LNPQ172LkHlwscoDq0JhWMwj0= -----END CERTIFICATE----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_x509_2.pem0000644000000000000000000000206613056115004022361 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIIC7zCCAdegAwIBAgIJAMX+Jd4nv6LJMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQzMFoXDTI0MDQyMjE4MzQzMFowEDEOMAwG A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7Bc72 dtZN/qfX9SOhJqmK1GLOopM+oh12Wqnl4mqKHfeJkbqzLj7zeCLzuFGbQJAQKTOP C8PUaJege4kg9Z3txuIaR195aULc33Q2cVXXNgTCv9DohRcNn5D+rhaUTw39OJS9 rbUPIdSHgH0yXvwbPcp22Qpky6WM34HEW3t3naod110aLIGCDE8QkTRztB364UWX IPIaKHMciTN0A1EOx/BaId6p4uHIcR7KanVbiT7F12RJ355RmcIM4t4GNSESRpei MiBbBBD50/IxgYqSuyIPQjFnGCZKuR2whu1AmrtpeECQEqaJvtg8vRI9sJUSw1o1 93aowHY7hZzylp4jAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw DQYJKoZIhvcNAQELBQADggEBAAOsz6/joMygbZrtqpTdtl31Bf9/s3/xjhRpQqjK 9obPR/URJjhmZdGjY6bnNDDn1buRXmQqUHr20xPg+iOj2p+XOivJwFt6fV1j0Yyx oeVpzNtKVU35GF2QtLsbOSsYa/IEXP9v3BNGWgB1YhjRca7WzN57+qDQ/FiZ2ozM 5bJe5FxIkrJVRpR9XrD/wDXiIUv0GS54fNfFdcFEo8drfmbr2OO5y0dofREf10sX WDtyKeoyB+T/0rQ/3GpWmDoCApb1kMbwtNX75hoNoQyUbYsAROpb3k400buw2I+S apkLbtE0tPklh9vP0buo0IohzGlPlE9W1D1dIMqfnX432GM= -----END CERTIFICATE----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_x509_3.pem0000644000000000000000000000206613056115004022362 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIIC7zCCAdegAwIBAgIJAKmOFdKgRIj7MA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQzN1oXDTI0MDQyMjE4MzQzN1owEDEOMAwG A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhqRd3 RiNMevSD5onqBcHncE274DYm4xMJdFKVe3kCrzfl987g6CWsz+kZxqJLzC2DSa5Y 6iznEAL97MCBaBtffHtZi3SJa9mLSZO8+XvHqNpmQK+hNXidX3DJIrpfXQNqSgI3 s8Rmd17qSlpFLy6wA83Df3+qtc6RZp2CTxKXPvbuXpPvQJKfK9yGpLlKE+IlzFap Ed3GWB9pSDlpxeMM8bQrhnJBx6rLCOTFyKhHUcgzgHCbqwV7Pqc6y/ctuL4schvO bJj74SKI78ORdfT15g2yqgXNZPAsHl911PfidRVBWk68MPNoRVi2poRCo9sCFkmo TDhs0tcZD759XBOXAgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw DQYJKoZIhvcNAQELBQADggEBAJDwBbfSHiZLoALB+IOcLlEGyLQe8AUXtaWicSs3 8gONQNdJqAhNJ9rcdQq6qdgDS10ZN94ZkoU9XXL6IS8HSagO6wBGCkyjOak9zePO 1iOPQm7qbqzbXXXamVSq1AIFz3VTILEfi+q/PYx62ztRYINNHJSWz+hR6f7Rz4D8 koXMES4ElG5iGcMcnaEHdWBolr9HlCnDX8XuQ6cAOGbUbSmj6V6wuKvOmuHSgjJx rxsehEkgZi/EVJNkwvkLVJ00R3aL6lINHJa/Qdza17EGR5QZdpCeUV7gbjmu8nut Ctlxs+zXXhO6wwfZwzQHNN5U/RPPfxbgsN7PKncE0upuCmI= -----END CERTIFICATE----- maas-1.9.5+bzr4599.orig/src/maasserver/tests/data/test_x509_4.pem0000644000000000000000000000206613056115004022363 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIIC7zCCAdegAwIBAgIJAMMCNW5S4V3wMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV BAMTBWJsYWtlMB4XDTE0MDQyNTE4MzQ0MloXDTI0MDQyMjE4MzQ0MlowEDEOMAwG A1UEAxMFYmxha2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAWBwK zS/ZdPBvGrGLUPn51EenzrVuB/gzoMDllZyRl9nnPPNTOdlMbiUQXkUp01Tcum5X GWvhU0+xnlhyeDqAseOMCJcnm6Q6wjSj73QUST4Jmc5u+LzhPr3gzpSXl4vVJ9dD xyEufCll/lzUIEgDSUzpvuXO+jXN/nlnHWLTJ84qFtoO13J8sh9Zh46mo9r3yKqe 
0Z+a1O8Y2/ru6zR/2Yipi6PiQVne8ZSbwU9zgJRI3JAWHvFfdmuoy2FMvp4s54M7
sUBom+xhl6/zHC0kgAl7V9j/8JGoiNU3J5OBZ1EoqiYz/7u+7sQ/YACrF0YGqRI2
etYCLrqJpCC/Zkm3AgMBAAGjTDBKMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDMGA1Ud
EQQsMCqgKAYKKwYBBAGCNxQCA6AaDBhibGFrZUBibGFrZS11Ym50LWRlc2t0b3Aw
DQYJKoZIhvcNAQELBQADggEBAA2MolplJeMBUv/7ydQ8O5BqbNqTyyk0MUmYFvQG
EZlXtWgLk0RCX6vyQVGbw50NibOYXLyZDdEb4RCfswpPnY4aJEr/PXbn7zDAsG+I
vtl3zhOsNZhaDF9WNKZjvpd4NmXOLj8nk+EW+MKwxgUab1jMKcQiodBdtGQ8HzEk
6OKrj2nxZweDe6jC4hH9E19FF7wOsc4zOFyVNtdhPkdgEv/ksTVkKf+fXH62t18P
OWB0i33U0tONAL+cd+ssED/LBrRMotcQ+W/lN+a3O2Z1+YoKz3YpJkytYOdkQzHn
JMRGocMDS4Rcv8QdhB9uKHBxEKIOSugauWuDR4epOHItqp8=
-----END CERTIFICATE-----
maas-1.9.5+bzr4599.orig/src/maasserver/utils/__init__.py0000644000000000000000000002034013056115004021062 0ustar 00000000000000
# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Utilities."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = [
    'absolute_reverse',
    'absolute_url_reverse',
    'build_absolute_uri',
    'find_nodegroup',
    'get_local_cluster_UUID',
    'ignore_unused',
    'make_validation_error_message',
    'strip_domain',
    'synchronised',
]

from functools import wraps
from urllib import urlencode
from urlparse import (
    urljoin,
    urlparse,
)

from django.core.urlresolvers import reverse
from maasserver.config import RegionConfiguration
from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT
from maasserver.exceptions import NodeGroupMisconfiguration
from provisioningserver.config import (
    ClusterConfiguration,
    UUID_NOT_SET,
)
from provisioningserver.utils.text import make_bullet_list


def ignore_unused(*args):
    """Suppress warnings about unused variables.

    This function does nothing. Use it whenever you have deliberately
    unused symbols: pass them to this function and lint checkers will no
    longer consider them unused.
    """


def absolute_reverse(view_name, query=None, base_url=None, *args, **kwargs):
    """Return the absolute URL (i.e. including the URL scheme specifier and
    the network location of the MAAS server). Internally this method simply
    calls Django's 'reverse' method and prefixes the result of that call
    with the configured MAAS URL.

    Consult the 'maas-region-admin local_config_set --default-url' command
    for details on how to set the MAAS URL.

    :param view_name: Django's view function name/reference or URL pattern
        name for which to compute the absolute URL.
    :param query: Optional query argument which will be passed down to
        urllib.urlencode. The result of that call will be appended to the
        resulting url.
    :param base_url: Optional url used as base. If None is provided, then
        configured MAAS URL will be used.
    :param args: Positional arguments for Django's 'reverse' method.
    :param kwargs: Named arguments for Django's 'reverse' method.
    """
    if not base_url:
        with RegionConfiguration.open() as config:
            base_url = config.maas_url
    url = urljoin(base_url, reverse(view_name, *args, **kwargs))
    if query is not None:
        url += '?%s' % urlencode(query, doseq=True)
    return url


def absolute_url_reverse(view_name, query=None, *args, **kwargs):
    """Returns the absolute path (i.e. starting with '/') for the given view.

    This utility is meant to be used by methods that need to compute URLs
    but run outside of Django and thus don't have the 'script prefix'
    transparently added to the URL.

    :param view_name: Django's view function name/reference or URL pattern
        name for which to compute the absolute URL.
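# --- Example sketch: the urljoin + urlencode composition used by
# absolute_reverse() above. Django's reverse() is replaced here by a
# hard-coded path, and the base URL and query values are made up.
from urllib import urlencode
from urlparse import urljoin

base_url = 'http://maas.example.com:5240/MAAS/'
path = 'api/1.0/nodes/'  # stand-in for reverse(view_name)
url = urljoin(base_url, path)
url += '?%s' % urlencode({'op': 'list'}, doseq=True)
# url == 'http://maas.example.com:5240/MAAS/api/1.0/nodes/?op=list'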
:param query: Optional query argument which will be passed down to urllib.urlencode. The result of that call will be appended to the resulting url. :param args: Positional arguments for Django's 'reverse' method. :param kwargs: Named arguments for Django's 'reverse' method. """ with RegionConfiguration.open() as config: abs_path = urlparse(config.maas_url).path if not abs_path.endswith('/'): # Add trailing '/' to get urljoin to behave. abs_path = abs_path + '/' # Force prefix to be '' so that Django doesn't use the 'script prefix' ( # which might be there or not depending on whether or not the thread local # variable has been initialized). reverse_link = reverse(view_name, prefix='', *args, **kwargs) if reverse_link.startswith('/'): # Drop the leading '/'. reverse_link = reverse_link[1:] url = urljoin(abs_path, reverse_link) if query is not None: url += '?%s' % urlencode(query, doseq=True) return url def build_absolute_uri(request, path): """Return absolute URI corresponding to given absolute path. :param request: An http request to the API. This is needed in order to figure out how the client is used to addressing the API on the network. :param path: The absolute http path to a given resource. :return: Full, absolute URI to the resource, taking its networking portion from `request` but the rest from `path`. """ scheme = "https" if request.is_secure() else "http" return "%s://%s%s" % (scheme, request.get_host(), path) def strip_domain(hostname): """Return `hostname` with the domain part removed.""" return hostname.split('.', 1)[0] def get_local_cluster_UUID(): """Return the UUID of the local cluster (or None if it cannot be found).""" with ClusterConfiguration.open() as config: if config.cluster_uuid == UUID_NOT_SET: return None else: return config.cluster_uuid def find_nodegroup(request): """Find the nodegroup whose subnet contains the requester's address. There may be multiple matching nodegroups, but this endeavours to choose the most appropriate. :raises `maasserver.exceptions.NodeGroupMisconfiguration`: When more than one nodegroup claims to manage the requester's network. """ # Circular imports. from maasserver.models import NodeGroup ip_address = request.META['REMOTE_ADDR'] if ip_address is None: return None # Fetch nodegroups with interfaces in the requester's network, # preferring those with managed networks first. The `NodeGroup` # objects returned are annotated with the `management` field of the # matching `NodeGroupInterface`. See https://docs.djangoproject.com # /en/dev/topics/db/sql/#adding-annotations for this curious feature # of Django's ORM. query = NodeGroup.objects.raw(""" SELECT ng.*, ngi.management FROM maasserver_nodegroup AS ng JOIN maasserver_nodegroupinterface AS ngi ON ng.id = ngi.nodegroup_id JOIN maasserver_subnet AS subnet ON subnet.id = ngi.subnet_id WHERE inet %s BETWEEN (ngi.ip & netmask(subnet.cidr)) AND (ngi.ip | ~netmask(subnet.cidr)) ORDER BY ngi.management DESC, ng.id ASC """, [ip_address]) nodegroups = list(query) if len(nodegroups) == 0: return None if len(nodegroups) == 1: return nodegroups[0] # There are multiple matching nodegroups. Only zero or one may # have a managed interface, otherwise it is a misconfiguration. 
    unmanaged = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
    nodegroups_with_managed_interfaces = {
        nodegroup.id for nodegroup in nodegroups
        if nodegroup.management != unmanaged
    }
    if len(nodegroups_with_managed_interfaces) > 1:
        raise NodeGroupMisconfiguration(
            "Multiple clusters on the same network; only "
            "one cluster may manage the network of which "
            "%s is a member." % ip_address)
    return nodegroups[0]


def synchronised(lock):
    """Decorator to synchronise a call against a given lock.

    Note: if the function being wrapped is a generator, the lock will
    *not* be held for the lifetime of the generator; to this decorator,
    it looks like the wrapped function has returned.
    """
    def synchronise(func):
        @wraps(func)
        def call_with_lock(*args, **kwargs):
            with lock:
                return func(*args, **kwargs)
        return call_with_lock
    return synchronise


def gen_validation_error_messages(error):
    """Return massaged messages from a :py:class:`ValidationError`."""
    message_dict = error.message_dict
    for field in sorted(message_dict):
        field_messages = message_dict[field]
        if field == "__all__":
            for field_message in field_messages:
                yield field_message
        else:
            for field_message in field_messages:
                yield "%s: %s" % (field, field_message)


def make_validation_error_message(error):
    """Return a massaged message from a :py:class:`ValidationError`.

    The message takes the form of a textual bullet-list.
    """
    return make_bullet_list(gen_validation_error_messages(error))
maas-1.9.5+bzr4599.orig/src/maasserver/utils/async.py0000644000000000000000000001614513056115004020450 0ustar 00000000000000
# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Utilities for working with asynchronous operations."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = [
    'DeferredHooks',
    "gather",
]

from collections import deque
from contextlib import contextmanager
from itertools import count
from Queue import Queue
import threading

from crochet import wait_for_reactor
from maasserver.exceptions import IteratorReusedError
from provisioningserver.utils.twisted import (
    asynchronous,
    synchronous,
)
from twisted.internet import reactor
from twisted.internet.defer import (
    CancelledError,
    Deferred,
    maybeDeferred,
)
from twisted.python import log


class UseOnceIterator:
    """An iterator that is usable only once."""

    def __init__(self, *args):
        """Create a new :class:`UseOnceIterator`.

        Takes the same arguments as iter().
        """
        self.iterable = iter(*args)
        self.has_run_once = False

    def __iter__(self):
        return self

    def next(self):
        if self.has_run_once:
            raise IteratorReusedError(
                "It is not possible to reuse a UseOnceIterator.")
        try:
            return self.iterable.next()
        except StopIteration:
            self.has_run_once = True
            raise


@wait_for_reactor
def gather(calls, timeout=10.0):
    """gather(calls, timeout=10.0)

    Issue calls into the reactor, passing results back to another thread.

    Note that `gather` does not explicitly report to the caller that it
    has timed-out; calls are silently cancelled and the generator simply
    reaches its end. If this information is important to your code, put
    in place some mechanism to check that all expected responses have
    been received, or create a modified version of this function with
    the required behaviour.

    :param calls: An iterable of no-argument callables to be called in
        the reactor thread. Each will be called via
        :py:func:`~twisted.internet.defer.maybeDeferred`.
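# --- Example sketch: applying the synchronised() decorator defined
# above. The lock and the decorated function are hypothetical; anything
# usable as a context manager (threading.Lock, a DatabaseLock, ...) can
# serve as the lock. Note the docstring's caveat about generators.
import threading

_counter_lock = threading.Lock()
_counters = {}

@synchronised(_counter_lock)
def increment(name):
    # The lock is held only for the duration of this call.
    _counters[name] = _counters.get(name, 0) + 1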
    :param timeout: The number of seconds before further results are
        ignored. Outstanding results will be cancelled.

    :return: A :class:`UseOnceIterator` of results. A result might be a
        failure, i.e. an instance of
        :py:class:`twisted.python.failure.Failure`, or a valid result;
        it's up to the caller to check.
    """
    # Prepare a list of Deferreds that we're going to wait for.
    deferreds = [maybeDeferred(call) for call in calls]

    # We'll use this queue (thread-safe) to pass results back.
    queue = Queue()

    # A sentinel to mark the end of the results.
    done = object()

    # This function will get called if not all results are in before
    # `timeout` seconds have passed. It puts `done` into the queue to
    # indicate the end of results, and cancels all outstanding deferred
    # calls.
    def cancel():
        queue.put(done)
        for deferred in deferreds:
            try:
                deferred.cancel()
            except:
                log.err()

    if timeout is None:
        canceller = None
    else:
        canceller = reactor.callLater(timeout, cancel)

    countdown = count(len(deferreds), -1)

    # Callback to report the result back to the queue. If it's the last
    # result to be reported, `done` is put into the queue, and the
    # delayed call to `cancel` is itself cancelled.
    def report(result):
        queue.put(result)
        if next(countdown) == 1:
            queue.put(done)
            if canceller is not None:
                if canceller.active():
                    canceller.cancel()

    for deferred in deferreds:
        deferred.addBoth(report)

    # If there are no calls then there will be no results, so we put
    # `done` into the queue, and cancel the nascent delayed call to
    # `cancel`, if it exists.
    if len(deferreds) == 0:
        queue.put(done)
        if canceller is not None:
            canceller.cancel()

    # Return an iterator to the invoking thread that will stop at the
    # first sign of the `done` sentinel.
    return UseOnceIterator(queue.get, done)


def suppress(failure, *exceptions):
    """Used as an errback, suppress the given exceptions."""
    failure.trap(*exceptions)


class DeferredHooks(threading.local):
    """A utility class for managing hooks that are specified as Deferreds.

    This is meant to be used by non-Twisted code to register hooks that
    need to be run at some later time *in Twisted*. This is a common
    pattern in MAAS, where the web-application needs to arrange post-commit
    actions that mutate remote state, via RPC for example.
    """

    def __init__(self):
        super(DeferredHooks, self).__init__()
        self.hooks = deque()

    @synchronous
    def add(self, d):
        assert isinstance(d, Deferred)
        self.hooks.append(d)

    @contextmanager
    def savepoint(self):
        """Context manager that saves the current hooks on the way in.

        If the context exits with an exception the newly added hooks are
        cancelled, and the saved hooks are restored. If the context exits
        cleanly, the saved hooks are restored, and the newly added hooks
        are added to the end of the hook queue.
        """
        saved = self.hooks
        self.hooks = deque()
        try:
            yield
        except:
            self.reset()
            raise
        else:
            saved.extend(self.hooks)
        finally:
            self.hooks = saved

    @synchronous
    def fire(self):
        """Fire all hooks in sequence, in the reactor.

        If a hook fails, the subsequent hooks will be cancelled (by
        calling ``.cancel()``), and the exception will propagate out of
        this method.
        """
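# --- Example sketch: consuming gather() from a worker thread. This
# assumes crochet has been initialised so that a Twisted reactor is
# running; the callables are trivial stand-ins (maybeDeferred wraps
# plain return values).
from twisted.python.failure import Failure

def fetch_a():
    return 'a'

def fetch_b():
    return 'b'

results, failures = [], []
for result in gather([fetch_a, fetch_b], timeout=5.0):
    if isinstance(result, Failure):
        failures.append(result)  # it's up to the caller to check
    else:
        results.append(result)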
""" try: while len(self.hooks) > 0: hook = self.hooks.popleft() self._cancel_in_reactor(hook).wait() finally: # Belt-n-braces. self.hooks.clear() @staticmethod @asynchronous def _fire_in_reactor(hook): hook.callback(None) return hook @staticmethod @asynchronous def _cancel_in_reactor(hook): hook.addErrback(suppress, CancelledError) hook.addErrback(log.err) try: hook.cancel() except: # The canceller has failed. We take a hint from DeferredList here, # by logging the exception and moving on. log.err(_why="Failure when cancelling hook.") else: return hook maas-1.9.5+bzr4599.orig/src/maasserver/utils/converters.py0000644000000000000000000000707213056115004021524 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Conversion utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'XMLToYAML', ] from lxml import etree class XMLToYAML: """Convert XML to YAML.""" def __init__(self, text): self.text = text self.new_text = '' self.level = 0 self.indent_spaces = 2 def spaces(self): return self.level * self.indent_spaces * ' ' def addText(self, element): if '{' in element.tag: new_tag = element.tag.strip('{').replace('}', ':') self.new_text += "%s- %s:\n" % (self.spaces(), new_tag) else: self.new_text += "%s- %s:\n" % (self.spaces(), element.tag) self.level += 1 for key in element.keys(): self.new_text += "%s%s: %s\n" % ( self.spaces(), key, element.attrib[key]) def recurseElement(self, element): for child in element.iterchildren(): self.addText(child) if child.text is not None and not child.text.isspace(): self.new_text += "%s%s\n" % (self.spaces(), child.text.strip()) self.recurseElement(child) self.level -= 1 def convert(self): root = etree.fromstring(self.text) self.addText(root) self.recurseElement(root) return self.new_text def human_readable_bytes(num_bytes, include_suffix=True): """Return the human readable text for bytes. (SI units) :param num_bytes: Bytes to be converted. Can't be None :param include_suffix: Whether to include the computed suffix in the output. """ for unit in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']: if abs(num_bytes) < 1000.0 or unit == 'YB': if include_suffix: return "%3.1f %s" % (num_bytes, unit) else: return "%3.1f" % num_bytes num_bytes /= 1000.0 def machine_readable_bytes(humanized): """Return the integer for a number of bytes in text form. (SI units) Accepts 'K', 'M', 'G', 'T', 'P' and 'E' NOT AN EXACT COUNTERPART TO human_readable_bytes! :param humanized: string be converted. """ if humanized == '' or humanized is None: return None elif humanized.endswith('K') or humanized.endswith('k'): return int(humanized[:-1]) * 1000 elif humanized.endswith('M') or humanized.endswith('m'): return int(humanized[:-1]) * 1000000 elif humanized.endswith('G') or humanized.endswith('g'): return int(humanized[:-1]) * 1000000000 elif humanized.endswith('T') or humanized.endswith('t'): return int(humanized[:-1]) * 1000000000000 elif humanized.endswith('P') or humanized.endswith('p'): return int(humanized[:-1]) * 1000000000000000 elif humanized.endswith('E') or humanized.endswith('e'): return int(humanized[:-1]) * 1000000000000000000 else: return int(humanized) def round_size_to_nearest_block(size, block_size, round_up=True): """Round the size to the nearest block returning the new size. :param size: The requested size to round. :param block_size: The block size to round to. 
:param round_up: If True, will round up to fill current block, else down. """ number_of_blocks = size / block_size if round_up and size % block_size > 0: number_of_blocks += 1 return block_size * number_of_blocks maas-1.9.5+bzr4599.orig/src/maasserver/utils/curtin.py0000644000000000000000000000073313056115004020633 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Curtin-related utility functions.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type import curtin def curtin_supports_webhook_events(): curtin_features = getattr(curtin, 'FEATURES', []) return 'REPORTING_EVENTS_WEBHOOK' in curtin_features maas-1.9.5+bzr4599.orig/src/maasserver/utils/dblocks.py0000644000000000000000000002071213056115004020747 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Region-wide advisory locking.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DatabaseLock", "DatabaseXactLock", "DatabaseLockAttemptOutsideTransaction", "DatabaseLockAttemptWithoutConnection", "DatabaseLockNotHeld", ] from contextlib import closing from operator import itemgetter from django.db import connection # The fixed classid used for all MAAS locks. See `DatabaseLock` for the # rationale, and an explanation of this number's origin. classid = 20120116 # PostgreSQL advisory lock functions. LOCK = "pg_advisory_lock" LOCK_TRY = "pg_try_advisory_lock" LOCK_SHARED = "pg_advisory_lock_shared" LOCK_SHARED_TRY = "pg_try_advisory_lock_shared" LOCK_XACT = "pg_advisory_xact_lock" LOCK_XACT_TRY = "pg_try_advisory_xact_lock" LOCK_XACT_SHARED = "pg_advisory_xact_lock_shared" LOCK_XACT_SHARED_TRY = "pg_try_advisory_xact_lock_shared" UNLOCK = "pg_advisory_unlock" UNLOCK_SHARED = "pg_advisory_unlock_shared" UNUSED = None # Mapping from a lock function to its equivalent try-only lock function. to_try = { LOCK: LOCK_TRY, LOCK_TRY: LOCK_TRY, LOCK_SHARED: LOCK_SHARED_TRY, LOCK_SHARED_TRY: LOCK_SHARED_TRY, LOCK_XACT: LOCK_XACT_TRY, LOCK_XACT_TRY: LOCK_XACT_TRY, LOCK_XACT_SHARED: LOCK_XACT_SHARED_TRY, LOCK_XACT_SHARED_TRY: LOCK_XACT_SHARED_TRY, UNLOCK: UNLOCK, UNLOCK_SHARED: UNLOCK_SHARED, UNUSED: UNUSED, } # Mapping from a lock function to its equivalent shared lock function. to_shared = { LOCK: LOCK_SHARED, LOCK_TRY: LOCK_SHARED_TRY, LOCK_SHARED: LOCK_SHARED, LOCK_SHARED_TRY: LOCK_SHARED_TRY, LOCK_XACT: LOCK_XACT_SHARED, LOCK_XACT_TRY: LOCK_XACT_SHARED_TRY, LOCK_XACT_SHARED: LOCK_XACT_SHARED, LOCK_XACT_SHARED_TRY: LOCK_XACT_SHARED_TRY, UNLOCK: UNLOCK_SHARED, UNLOCK_SHARED: UNLOCK_SHARED, UNUSED: UNUSED, } class DatabaseLockAttemptWithoutConnection(Exception): """A locking attempt was made without a preexisting connection. :class:`DatabaseLock` should only be used with a preexisting connection. While this restriction is not absolutely necessary, it's here to ensure that users of :class:`DatabaseLock` take care with the lifecycle of their database connection: a connection that is inadvertently closed (by Django, by MAAS, by anything) will release all locks too. """ class DatabaseLockAttemptOutsideTransaction(Exception): """A locking attempt was made outside of a transaction. :class:`DatabaseXactLock` should only be used within a transaction. 
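# --- Example sketch: the converters above, traced by hand against the
# code; values are illustrative. XMLToYAML assumes lxml is installed.
XMLToYAML('<list><item id="1">first</item></list>').convert()
# -> '- list:\n  - item:\n    id: 1\n    first\n'

human_readable_bytes(2000000)            # -> '2.0 MB' (SI, powers of 1000)
machine_readable_bytes('2M')             # -> 2000000
round_size_to_nearest_block(4097, 4096)  # -> 8192 (rounds up by default)
round_size_to_nearest_block(4097, 4096, round_up=False)  # -> 4096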
""" class DatabaseLockNotHeld(Exception): """A particular lock was not held.""" class DatabaseLockBase(tuple): """An advisory lock held in the database. Implemented using PostgreSQL's advisory locking functions. PostgreSQL's advisory lock functions are all available with a choice of two call signatures: (64-bit integer) or (32-bit integer, 32-bit integer). Here we use the two-argument form. The first argument is fixed at 20120116. This makes it easy to identify locks belonging to the MAAS application in PostgreSQL's ``pg_locks`` table. For example:: SELECT objid FROM pg_locks WHERE classid = 20120116; returns the second part of the lock key for all locks associated with the MAAS application. Fwiw, 20120116 is the date on which source history for MAAS began. It has no special significance to PostgreSQL, as far as I am aware. """ # Class attributes. MODE_DEFAULT = None MODE_CHOICES = () # Instance properties. classid = property(itemgetter(0)) objid = property(itemgetter(1)) def __new__(cls, objid, mode=None): return super(DatabaseLockBase, cls).__new__(cls, (classid, objid)) def __init__(self, objid, mode=None): super(DatabaseLockBase, self).__init__() if mode is None: self.lock, self.unlock = self.MODE_DEFAULT elif mode in self.MODE_CHOICES: self.lock, self.unlock = mode else: raise AssertionError( "Unsupported mode: %r is not in %r" % ( mode, self.MODE_CHOICES)) def __enter__(self): raise NotImplementedError() def __exit__(self, *exc_info): raise NotImplementedError() def __repr__(self): return "<%s classid=%d objid=%d lock=%s unlock=%s>" % ( self.__class__.__name__, self.classid, self.objid, self.lock, self.unlock) def is_locked(self): stmt = ( "SELECT 1 FROM pg_locks, pg_database" " WHERE pg_locks.locktype = 'advisory'" " AND pg_locks.classid = %s" " AND pg_locks.objid = %s" # objsubid is 2 when using the 2-argument version of the # pg_advisory_* locking functions. " AND pg_locks.objsubid = 2" " AND pg_locks.granted" # Advisory locks are local to each database so we join to # pg_databases to discover the OID of the currrent database. " AND pg_locks.database = pg_database.oid" " AND pg_database.datname = current_database()" ) with closing(connection.cursor()) as cursor: cursor.execute(stmt, self) return len(cursor.fetchall()) >= 1 @property def TRY(self): """Return an equivalent lock that uses `try` locking functions.""" return self.__class__(self.objid, ( to_try[self.lock], to_try[self.unlock])) @property def SHARED(self): """Return an equivalent lock that uses `shared` locking functions.""" return self.__class__(self.objid, ( to_shared[self.lock], to_shared[self.unlock])) def in_transaction(): """Are we in a transaction?""" return ( connection.in_atomic_block or len(connection.transaction_state) > 0) class DatabaseLock(DatabaseLockBase): """An advisory lock obtained with ``pg_advisory_lock``. Use this to obtain an exclusive lock on an external, shared, resource. Avoid using this to obtain a lock for a database modification because this lock must be released before the transaction is committed. In most cases you should prefer :py:class:`DatabaseXactLock` instead. See :py:class:`DatabaseLockBase`. 
""" MODE_DEFAULT = LOCK, UNLOCK MODE_CHOICES = ( (LOCK, UNLOCK), (LOCK_TRY, UNLOCK), (LOCK_SHARED, UNLOCK_SHARED), (LOCK_SHARED_TRY, UNLOCK_SHARED), ) def __enter__(self): if connection.connection is None: raise DatabaseLockAttemptWithoutConnection(self) with closing(connection.cursor()) as cursor: query = "SELECT %s(%%s, %%s)" % self.lock cursor.execute(query, self) if cursor.fetchone() == (False,): raise DatabaseLockNotHeld(self) def __exit__(self, *exc_info): with closing(connection.cursor()) as cursor: query = "SELECT %s(%%s, %%s)" % self.unlock cursor.execute(query, self) if cursor.fetchone() != (True,): raise DatabaseLockNotHeld(self) class DatabaseXactLock(DatabaseLockBase): """An advisory lock obtained with ``pg_advisory_xact_lock``. Use this to obtain an exclusive lock for a modification to the database. It can be used to synchronise access to an external resource too, but the point of release is less explicit because it's outside of the control of this class: the lock is only released when the transaction in which it was obtained is committed or aborted. See :py:class:`DatabaseLockBase`. """ MODE_DEFAULT = LOCK_XACT, UNUSED MODE_CHOICES = ( (LOCK_XACT, UNUSED), (LOCK_XACT_TRY, UNUSED), (LOCK_XACT_SHARED, UNUSED), (LOCK_XACT_SHARED_TRY, UNUSED), ) def __enter__(self): """Obtain lock using pg_advisory_xact_lock().""" if not in_transaction(): raise DatabaseLockAttemptOutsideTransaction(self) with closing(connection.cursor()) as cursor: query = "SELECT %s(%%s, %%s)" % self.lock cursor.execute(query, self) if cursor.fetchone() == (False,): raise DatabaseLockNotHeld(self) def __exit__(self, *exc_info): """Do nothing: this lock can only be released by the transaction.""" maas-1.9.5+bzr4599.orig/src/maasserver/utils/dbtasks.py0000644000000000000000000001344013056115004020761 0ustar 00000000000000# encoding: utf-8 # Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Database Tasks Service. A service that runs deferred database operations, and then ensures they're finished before stopping. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DatabaseTaskAlreadyRunning", "DatabaseTasksService", ] from maasserver.utils.threads import deferToDatabase from provisioningserver.utils.twisted import ( asynchronous, FOREVER, ) from twisted.application.service import Service from twisted.internet.defer import ( Deferred, DeferredQueue, ) from twisted.internet.task import cooperate from twisted.python import log class DatabaseTaskAlreadyRunning(Exception): """The database task is running and can no longer be cancelled.""" class DatabaseTasksService(Service, object): """Run deferred database operations one at a time. Once the service is started, `deferTask` and `addTask` can be used to queue up execution of a database task. The former — `deferTask` — will return a `Deferred` that fires with the result of the database task. Errors arising from this task become the responsibility of the caller. The latter — `addTask` — returns nothing, and will log errors arising from the database task. Before this service has been started, and as soon as shutdown has commenced, database tasks will be rejected by `deferTask` and `addTask`. """ sentinel = object() def __init__(self, limit=100): """Initialise a new `DatabaseTasksService`. :param limit: The maximum number of database tasks to defer before rejecting additional tasks. 
""" super(DatabaseTasksService, self).__init__() # Start with a queue that rejects puts. self.queue = DeferredQueue(size=0, backlog=1) self.limit = limit @asynchronous def deferTask(self, func, *args, **kwargs): """Schedules `func` to run later. :raise QueueOverflow: If the queue of tasks is full. :return: :class:`Deferred`, which fires with the result of the running the task in a database thread. This can be cancelled while the database task is still enqueued, but will refuse to cancel once the task is running, instead raising `DatabaseTaskAlreadyRunning`. """ def cancel(done): if task in self.queue.pending: self.queue.pending.remove(task) else: raise DatabaseTaskAlreadyRunning() done = Deferred(cancel) def task(): d = deferToDatabase(func, *args, **kwargs) d.chainDeferred(done) return d self.queue.put(task) return done @asynchronous(timeout=FOREVER) def addTask(self, func, *args, **kwargs): """Schedules `func` to run later. Failures arising from the running the task in a database thread will be logged. :raise QueueOverflow: If the queue of tasks is full. :return: `None` """ done = self.deferTask(func, *args, **kwargs) done.addErrback(log.err, "Unhandled failure in database task.") return None @asynchronous def syncTask(self): """Schedules a "synchronise" task with the queue. Tasks are processed in order, so this is a convenient way to ensure that all previously added/deferred tasks have been processed. :raise QueueOverflow: If the queue of tasks is full. :return: :class:`Deferred` that will fire when this task is pulled out of the queue. Processing of the queue will continue without pause. """ def cancel(done): if task in self.queue.pending: self.queue.pending.remove(task) done = Deferred(cancel) def task(): done.callback(self) self.queue.put(task) return done @asynchronous(timeout=FOREVER) def startService(self): """Open the queue and start processing database tasks. :return: `None` """ super(DatabaseTasksService, self).startService() self.queue.size = self.limit # Open queue to puts. self.coop = cooperate(self._generateTasks()) @asynchronous(timeout=FOREVER) def stopService(self): """Close the queue and finish processing outstanding database tasks. :return: :class:`Deferred` which fires once all tasks have been run. """ super(DatabaseTasksService, self).stopService() # Feed the cooperative task so that it can shutdown. self.queue.size += 1 # Prevent QueueOverflow. self.queue.put(self.sentinel) # See _generateTasks. self.queue.size = 0 # Now close queue to puts. # This service has stopped when the coop task is done. return self.coop.whenDone() def _generateTasks(self): """Feed the cooperator. This pulls tasks from the queue while this service is running and executes them. If no tasks are pending it will wait for more. Once shutdown of the service commences this will continue pulling and executing tasks while there are tasks actually pending; it will not wait for additional tasks to be enqueued. """ queue = self.queue sentinel = self.sentinel def execute(task): if task is not sentinel: return task() # Execute tasks as long as we're running. while self.running: yield queue.get().addCallback(execute) # Execute all remaining tasks. while len(queue.pending) != 0: yield queue.get().addCallback(execute) maas-1.9.5+bzr4599.orig/src/maasserver/utils/dns.py0000644000000000000000000000655213056115004020120 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""DNS-related utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) import re from django.core.exceptions import ValidationError from netaddr import ( AddrConversionError, IPAddress, ) str = None __metaclass__ = type __all__ = [ 'validate_hostname', ] def validate_domain_name(name): """Validator for domain names. :param name: Input value for a domain name. Must not include hostname. :raise ValidationError: If the domain name is not valid according to RFCs 952 and 1123. """ # Valid characters within a hostname label: ASCII letters, ASCII digits, # hyphens, and underscores. Not all are always valid. # Technically we could write all of this as a single regex, but it's not # very good for code maintenance. label_chars = re.compile('[a-zA-Z0-9_-]*$') if len(name) > 255: raise ValidationError( "Hostname is too long. Maximum allowed is 255 characters.") # A hostname consists of "labels" separated by dots. labels = name.split('.') for label in labels: if len(label) == 0: raise ValidationError("DNS name contains an empty label.") if len(label) > 63: raise ValidationError( "Label is too long: %r. Maximum allowed is 63 characters." % label) if label.startswith('-') or label.endswith('-'): raise ValidationError( "Label cannot start or end with hyphen: %r." % label) if not label_chars.match(label): raise ValidationError( "Label contains disallowed characters: %r." % label) def validate_hostname(hostname): """Validator for hostnames. :param hostname: Input value for a hostname. May include domain. :raise ValidationError: If the hostname is not valid according to RFCs 952 and 1123. """ # Valid characters within a hostname label: ASCII letters, ASCII digits, # hyphens, and underscores. Not all are always valid. # Technically we could write all of this as a single regex, but it's not # very good for code maintenance. if len(hostname) > 255: raise ValidationError( "Hostname is too long. Maximum allowed is 255 characters.") # A hostname consists of "labels" separated by dots. host_part = hostname.split('.')[0] if '_' in host_part: # The host label cannot contain underscores; the rest of the name can. raise ValidationError( "Host label cannot contain underscore: %r." % host_part) validate_domain_name(hostname) def get_ip_based_hostname(ip): """Given the specified IP address (which must be suitable to convert to a netaddr.IPAddress), creates an automatically generated hostname by converting the '.' or ':' characters in it to '-' characters. For IPv6 address which represent an IPv4-compatible or IPv4-mapped address, the IPv4 representation will be used. :param ip: The IPv4 or IPv6 address (can be an integer or string) """ try: hostname = unicode(IPAddress(ip).ipv4()).replace('.', '-') except AddrConversionError: hostname = unicode(IPAddress(ip).ipv6()).replace(':', '-') return hostname maas-1.9.5+bzr4599.orig/src/maasserver/utils/forms.py0000644000000000000000000000357613056115004020465 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Form utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'compose_invalid_choice_text', 'get_QueryDict', 'set_form_error', ] from django.http import QueryDict def compose_invalid_choice_text(choice_of_what, valid_choices): """Compose an "invalid choice" string for form error messages. 
This returns a template string that is intended to be used as the argument to the 'error_messages' parameter in a Django form. :param choice_of_what: The name for what the selected item is supposed to be, to be inserted into the error string. :type choice_of_what: unicode :param valid_choices: Valid choices, in Django choices format: (name, value). :type valid_choices: sequence """ return "'%s' is not a valid %s. It should be one of: %s." % ( "%(value)s", choice_of_what, ", ".join("'%s'" % name for name, value in valid_choices), ) def get_QueryDict(params): """Convert `params` to a `QueryDict`.""" query_dict = QueryDict('', mutable=True) for k, v in params.items(): if isinstance(v, list): query_dict.setlist(k, v) else: query_dict[k] = v return query_dict def set_form_error(form, field_name, error_value): """Set an error on a form's field. This utility method encapsulates Django's arguably awkward way of settings errors inside a form's clean()/is_valid() method. This method will override any previously-registered error for 'field_name'. """ # Hey Django devs, this is a crap API to set errors. form.errors.setdefault(field_name, []).extend( form.error_class([error_value])) maas-1.9.5+bzr4599.orig/src/maasserver/utils/interfaces.py0000644000000000000000000000357013056115004021454 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Utilities related to network and cluster interfaces.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_name_and_vlan_from_cluster_interface', 'make_name_from_interface', ] from random import randint import re def make_name_from_interface(interface, alias=None): """Generate a cluster interface name based on a network interface name. The name is used as an identifier in API URLs, so awkward characters are not allowed: whitespace, colons, etc. If the interface name had any such characters in it, they are replaced with a double dash (`--`). If `interface` is `None`, or empty, a name will be made up. """ if alias: interface = "%s:%s" % (interface, alias) if interface is None or interface == u'': base_name = u'unnamed-%d' % randint(1000000, 9999999) else: base_name = interface return re.sub(u'[^\w:.-]', '--', base_name) def get_name_and_vlan_from_cluster_interface(cluster_name, interface): """Return a name suitable for a `Network` managed by a cluster interface. :param interface: Network interface name, e.g. `eth0:1`. :param cluster_name: Name of the cluster. :return: a tuple of the new name and the interface's VLAN tag. The VLAN tag may be None. """ name = interface vlan_tag = None if '.' in name: _, vlan_tag = name.split('.', 1) if ':' in vlan_tag: # Nasty: there's an alias after the VLAN tag. vlan_tag, _ = vlan_tag.split(':', 1) name = name.replace('.', '-') name = name.replace(':', '-') network_name = "-".join((cluster_name, name)) return network_name, vlan_tag maas-1.9.5+bzr4599.orig/src/maasserver/utils/jsenums.py0000644000000000000000000000732013056115004021012 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Generate JavaScript enum definitions based on Python definitions. MAAS defines its enums as simple classes, with the enum items as attributes. 
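# --- Example sketch: the interface-naming helpers in interfaces.py
# above; results traced by hand from the code.
make_name_from_interface('eth 0')            # -> 'eth--0' (whitespace replaced)
make_name_from_interface('eth0', alias='1')  # -> 'eth0:1'
get_name_and_vlan_from_cluster_interface('cluster1', 'eth0.5:2')
# -> ('cluster1-eth0-5-2', '5')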
Running this script produces a source text containing the JavaScript equivalents of the same enums, so that JavaScript code can make use of them. The script takes the filename of the enum modules. Each will be compiled and executed in an empty namespace, though they will have access to other MAAS libraries, including their dependencies. The resulting JavaScript module is printed to standard output. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from argparse import ArgumentParser from datetime import datetime from itertools import chain import json from operator import attrgetter import os.path import sys from textwrap import dedent # Header. Will be written on top of the output. header = dedent("""\ /* Generated file. DO NOT EDIT. This file was generated by %(script)s, on %(timestamp)s. */ YUI.add('maas.enums', function(Y) { Y.log('loading maas.enums'); var module = Y.namespace('maas.enums'); """ % { 'script': os.path.basename(sys.argv[0]), 'timestamp': datetime.now(), }) # Footer. Will be written at the bottom. footer = "}, '0.1');" def is_enum(item): """Does the given python item look like an enum? :param item: An item imported from a MAAS enum module. :return: Bool. """ return isinstance(item, type) and item.__name__.isupper() def get_enum_classes(namespace): """Collect all enum classes exported from `namespace`.""" return filter(is_enum, namespace.values()) def get_enums(filename): namespace = {} with open(filename, "rbU") as fd: source = fd.read() code = compile(source, filename, "exec") exec(code, namespace) return get_enum_classes(namespace) # This method is duplicated from provisioningserver/utils/enum.py # because jsenums is used by the packaging to build the JS file and # we don't want to force the packaging to require all the dependencies # that using provisioningserver/utils/enum.py would imply. def map_enum(enum_class): """Map out an enumeration class as a "NAME: value" dict.""" # Filter out anything that starts with '_', which covers private and # special methods. We can make this smarter later if we start using # a smarter enumeration base class etc. Or if we switch to a proper # enum mechanism, this function will act as a marker for pieces of # code that should be updated. return { key: value for key, value in vars(enum_class).items() if not key.startswith('_') } def serialize_enum(enum): """Represent a MAAS enum class in JavaScript.""" definitions = json.dumps(map_enum(enum), indent=4, sort_keys=True) definitions = '\n'.join( line.rstrip() for line in definitions.splitlines() ) return "module.%s = %s;\n" % (enum.__name__, definitions) def parse_args(): """Parse options & arguments.""" parser = ArgumentParser(description=__doc__) parser.add_argument( 'sources', metavar="FILENAME", nargs='+', help="File to search for enums.") return parser.parse_args() def dump(source_filenames): enums = chain.from_iterable( get_enums(filename) for filename in source_filenames) enums = sorted(enums, key=attrgetter("__name__")) dumps = [serialize_enum(enum) for enum in enums] return "\n".join([header] + dumps + [footer]) if __name__ == "__main__": args = parse_args() print(dump(args.sources)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/mac.py0000644000000000000000000000113413056115004020063 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
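# --- Example sketch: what jsenums.py above emits for a single enum.
# NODE_FLAVOUR is a made-up enum class in the style MAAS uses.
class NODE_FLAVOUR:
    SMALL = 'small'
    LARGE = 'large'

serialize_enum(NODE_FLAVOUR)
# -> 'module.NODE_FLAVOUR = {\n    "LARGE": "large",\n    "SMALL": "small"\n};\n'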
"""MAC-related utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_vendor_for_mac', ] from netaddr import ( EUI, NotRegisteredError, ) def get_vendor_for_mac(mac): """Return vendor for MAC.""" data = EUI(mac) try: return data.oui.registration().org except NotRegisteredError: return 'Unknown Vendor' maas-1.9.5+bzr4599.orig/src/maasserver/utils/orm.py0000644000000000000000000011774013056115004020133 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """ORM-related utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'disable_all_database_connections', 'enable_all_database_connections', 'ExclusivelyConnected', 'FullyConnected', 'gen_retry_intervals', 'get_exception_class', 'get_first', 'get_one', 'in_transaction', 'is_serialization_failure', 'macs_contain', 'macs_do_not_contain', 'make_serialization_failure', 'post_commit', 'post_commit_do', 'psql_array', 'request_transaction_retry', 'retry_on_serialization_failure', 'savepoint', 'TotallyDisconnected', 'transactional', 'validate_in_transaction', 'with_connection', ] from contextlib import contextmanager from functools import wraps from itertools import ( chain, islice, repeat, takewhile, ) import re import threading from time import sleep import types from django.core.exceptions import ( MultipleObjectsReturned, ValidationError, ) from django.db import ( connection, connections, transaction, ) from django.db.models import Q from django.db.transaction import TransactionManagementError from django.db.utils import OperationalError from django.http import Http404 from maasserver.exceptions import ( MAASAPIBadRequest, MAASAPIForbidden, ) from maasserver.utils.async import DeferredHooks from provisioningserver.utils import flatten from provisioningserver.utils.backoff import ( exponential_growth, full_jitter, ) from provisioningserver.utils.network import parse_integer from provisioningserver.utils.twisted import callOut import psycopg2 from psycopg2.errorcodes import SERIALIZATION_FAILURE from twisted.internet.defer import Deferred def get_exception_class(items): """Return exception class to raise. If `items` looks like a Django ORM result set, returns the `MultipleObjectsReturned` class as defined in that model. Otherwise, returns the generic class. """ model = getattr(items, 'model', None) return getattr(model, 'MultipleObjectsReturned', MultipleObjectsReturned) def get_one(items, exception_class=None): """Assume there's at most one item in `items`, and return it (or None). If `items` contains more than one item, raise an error. If `items` looks like a Django ORM result set, the error will be of the same model-specific Django `MultipleObjectsReturned` type that `items.get()` would raise. Otherwise, a plain Django :class:`MultipleObjectsReturned` error. :param items: Any sequence. :param exception_class: The exception class to raise if there is an error. If not specified, will use MultipleObjectsReturned (from the appropriate model class, if it can be determined). :return: The one item in that sequence, or None if it was empty. """ # The only numbers we care about are zero, one, and "many." Fetch # just enough items to distinguish between these. Use islice so as # to support both sequences and iterators. 
    retrieved_items = tuple(islice(items, 0, 2))
    length = len(retrieved_items)
    if length == 0:
        return None
    elif length == 1:
        return retrieved_items[0]
    else:
        if exception_class is None:
            exception_class = get_exception_class(items)
        object_name = get_model_object_name(items)
        if object_name is None:
            object_name = "item"
        raise exception_class("Got more than one %s." % object_name.lower())


def get_first(items):
    """Get the first of `items`, or None."""
    first_item = tuple(islice(items, 0, 1))
    if len(first_item) == 0:
        return None
    else:
        return first_item[0]


def psql_array(items, sql_type=None):
    """Return PostgreSQL array string and parameters."""
    sql = (
        "ARRAY[" +
        ",".join(["%s"] * len(items)) +
        "]")
    if sql_type is not None:
        sql += "::%s[]" % sql_type
    return sql, items


def macs_contain(key, macs):
    """Get the Django ORM predicate: 'key' contains all the given macs.

    This method returns a tuple of the where clause (as a string) and
    the parameters (as a list of strings) used to format the where
    clause. This is typically used with Django's QuerySet's extra()
    method::

      >>> from maasserver.models.node import Node
      >>> where, params = macs_contain('router', ["list", "of", "macs"])
      >>> all_nodes = Node.objects.all()
      >>> filtered_nodes = all_nodes.extra(where=[where], params=params)

    """
    where_clause = (
        "%s @> ARRAY[" % key +
        ', '.join(["%s"] * len(macs)) +
        "]::macaddr[]")
    return where_clause, macs


def macs_do_not_contain(key, macs):
    """Get the Django ORM predicate: 'key' doesn't contain any macs.

    This method returns a tuple of the where clause (as a string) and
    the parameters (as a list of strings) used to format the where
    clause. This is typically used with Django's QuerySet's extra()
    method::

      >>> from maasserver.models.node import Node
      >>> where, params = macs_do_not_contain(
      ...     'routers', ["list", "of", "macs"])
      >>> all_nodes = Node.objects.all()
      >>> filtered_nodes = all_nodes.extra(where=[where], params=params)

    """
    contains_any = " OR ".join([
        "%s " % key + "@> ARRAY[%s]::macaddr[]"] * len(macs))
    where_clause = "((%s IS NULL) OR NOT (%s))" % (key, contains_any)
    return where_clause, macs


def get_psycopg2_exception(exception):
    """Find the root PostgreSQL error from a database exception.

    We may be dealing with a raw exception or with a wrapper provided by
    Django, put there by ``DatabaseErrorWrapper``. As a belt-n-braces
    measure this searches for instances of `psycopg2.Error`, then, if not
    found, in the exception's cause (``__cause__``), recursively.

    :return: The underlying `psycopg2.Error`, or `None` if there isn't
        one.
    """
    try:
        exception = exception.__cause__
    except AttributeError:
        return exception if isinstance(exception, psycopg2.Error) else None
    else:
        return get_psycopg2_exception(exception)


def get_psycopg2_serialization_exception(exception):
    """Return the root-cause if `exception` is a serialization failure.

    PostgreSQL sets a specific error code, "40001", when a transaction
    breaks because of a serialization failure.

    :return: The underlying `psycopg2.Error` if it's a serialization
        failure, or `None` if there isn't one.
    """
    exception = get_psycopg2_exception(exception)
    if exception is None:
        return None
    elif exception.pgcode == SERIALIZATION_FAILURE:
        return exception
    else:
        return None


def is_serialization_failure(exception):
    """Does `exception` represent a serialization failure?

    PostgreSQL sets a specific error code, "40001", when a transaction
    breaks because of a serialization failure. This is normally about
    the right time to try again.
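# --- Example sketch: the ORM helpers above, traced by hand from the
# code; values are illustrative.
get_one([])            # -> None
get_one([1])           # -> 1
get_one([1, 2])        # raises MultipleObjectsReturned
get_first(iter('ab'))  # -> 'a'

psql_array(['aa:bb', 'cc:dd'], sql_type='macaddr')
# -> ('ARRAY[%s,%s]::macaddr[]', ['aa:bb', 'cc:dd'])

macs_contain('routers', ['aa:bb:cc:dd:ee:ff'])
# -> ('routers @> ARRAY[%s]::macaddr[]', ['aa:bb:cc:dd:ee:ff'])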
    :see: http://www.postgresql.org/docs/9.3/static/transaction-iso.html
    """
    return get_psycopg2_serialization_exception(exception) is not None


class SerializationFailure(psycopg2.OperationalError):
    """Explicit serialization failure.

    A real serialization failure, arising out of psycopg2 (and thus
    signalled from the database) would *NOT* be an instance of this class.
    However, it is not obvious how to create a `psycopg2.OperationalError`
    with ``pgcode`` set to `SERIALIZATION_FAILURE` without subclassing. I
    suspect only the C interface can do that.
    """
    pgcode = SERIALIZATION_FAILURE


def make_serialization_failure():
    """Make a serialization exception.

    Artificially construct an exception that resembles what Django's ORM
    would raise when PostgreSQL fails a transaction because of a
    serialization failure.

    :returns: an instance of :py:class:`OperationalError` that will pass
        the `is_serialization_failure` predicate.
    """
    exception = OperationalError()
    exception.__cause__ = SerializationFailure()
    assert is_serialization_failure(exception)
    return exception


def request_transaction_retry():
    """Raise a serialization exception.

    This depends on the retry machinery being higher up in the stack,
    catching this, and then retrying the transaction, though it may choose
    to re-raise the error if too many retries have already been attempted.

    :raises OperationalError:
    """
    raise make_serialization_failure()


def gen_retry_intervals(base=0.01, rate=2.5, maximum=10.0):
    """Generate retry intervals based on an exponential series.

    Once any interval exceeds `maximum` the interval generated will
    forever be `maximum`; this effectively disconnects from the
    exponential series. All intervals will be subject to "jitter" as a
    final step.

    The defaults seem like reasonable coefficients for a capped,
    full-jitter, exponential back-off series, and were derived by
    experimentation at the command-line. Real-world experience may teach
    us better values.
    """
    # An exponentially growing series...
    intervals = exponential_growth(base, rate)
    # from which we stop pulling once we've hit a maximum...
    intervals = takewhile((lambda i: i < maximum), intervals)
    # and thereafter return the maximum value indefinitely...
    intervals = chain(intervals, repeat(maximum))
    # and to which we add some randomness.
    return full_jitter(intervals)


def noop():
    """Do nothing."""


def retry_on_serialization_failure(func, reset=noop):
    """Retry the wrapped function when it raises a serialization failure.

    It will call `func` a maximum of ten times, and will only retry if a
    serialization failure is detected.

    BE CAREFUL WHERE YOU USE THIS.

    In general it only makes sense to use this to wrap the *outermost*
    transactional block, e.g. outside of an `atomic` decorator. This is
    because we want a new transaction to be started on the way in, and
    rolled back on the way out before this function attempts to retry.

    :param reset: An optional callable that will be called between
        attempts. It is *not* called before the first attempt. If the last
        attempt fails with a serialization failure it will *not* be
        called. If an attempt fails with a non-serialization failure, it
        will *not* be called.
    """
    @wraps(func)
    def retrier(*args, **kwargs):
        intervals = gen_retry_intervals()
        for _ in xrange(9):
            try:
                return func(*args, **kwargs)
            except OperationalError as error:
                if is_serialization_failure(error):
                    reset()  # Which may do nothing.
                    sleep(next(intervals))
                else:
                    raise
        else:
            return func(*args, **kwargs)
    return retrier


def gen_description_of_hooks(hooks):
    """Generate lines describing the given hooks.
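
    A sketch of the intended use, as in `PostCommitHooks.__enter__` below:
    join the generated lines into one diagnostic message::

        description = "\n".join(gen_description_of_hooks(hooks))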
:param hooks: An iterable of :class:`Deferred` instances. """ for index, hook in enumerate(hooks): yield "== Hook %d: %r ==" % (index + 1, hook) for cb, eb in hook.callbacks: yield " +- callback: %r" % (cb[0],) yield " | args: %r" % (cb[1],) yield " | kwargs: %r" % (cb[2],) yield " | errback: %r" % (eb[0],) yield " | args: %r" % (eb[1],) yield " +--- kwargs: %r" % (eb[2],) class PostCommitHooks(DeferredHooks): """A specialised set of `DeferredHooks` for post-commit tasks. Can be used as a context manager, to check for orphaned post-commit hooks on the way in, and to run newly added hooks on the way out. """ def __enter__(self): if len(self.hooks) > 0: # Capture a textual description of the hooks to help us understand # why this is about to blow oodles of egg custard in our faces. description = "\n".join(gen_description_of_hooks(self.hooks)) # Crash when there are orphaned post-commit hooks. These might # only turn up in testing, where transactions are managed by the # test framework instead of this decorator. We need to fail hard # -- not just warn about it -- to ensure it gets fixed. self.reset() raise TransactionManagementError( "Orphaned post-commit hooks found:\n" + description) def __exit__(self, exc_type, exc_value, exc_tb): if exc_value is None: self.fire() else: self.reset() post_commit_hooks = PostCommitHooks() def post_commit(hook=None): """Add a post-commit hook, specific to this thread. :param hook: Optional, but if provided it must be either a `Deferred` instance or a callable. In the former case, see `DeferredHooks` for behaviour. In the latter case, the callable will be passed exactly one argument when fired, a `Failure`, or `None`. If the `hook` argument is not provided (or is None), a new `Deferred` will be created. :return: The `Deferred` that has been registered as a hook. """ if hook is None: hook = Deferred() elif isinstance(hook, Deferred): pass # This is fine as it is. elif callable(hook): hook = Deferred().addBoth(hook) else: raise AssertionError( "Not a Deferred or callable: %r" % (hook,)) post_commit_hooks.add(hook) return hook def post_commit_do(func, *args, **kwargs): """Call a function after a successful commit. This will arrange for the given `func` to be called with the given arguments after a successful commit. If there's an error committing the transaction, `func` will *not* be called. If there's an error in an earlier post-commit task, `func` will *not* be called. If `func` returns a `Deferred` it will be waited for. :return: The `Deferred` that has been registered as a hook. """ if callable(func): return post_commit().addCallback(callOut, func, *args, **kwargs) else: raise AssertionError("Not callable: %r" % (func,)) @contextmanager def connected(): """Context manager that ensures we're connected to the database. If there is not yet a connection to the database, this will connect on entry and disconnect on exit. Preexisting connections will be left alone. """ if connection.connection is None: connection.ensure_connection() try: yield finally: connection.close() else: yield def with_connection(func): """Ensure that we're connected to the database before calling `func`. If there is not yet a connection to the database, this will connect before calling the decorated function, and then it will disconnect when done. Preexisting connections will be left alone. This can be important when using non-transactional advisory locks. 
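
    A minimal usage sketch (the decorated function name is illustrative)::

        @with_connection
        def read_config():
            pass  # May rely on a usable database connection here.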
""" @wraps(func) def call_with_connection(*args, **kwargs): with connected(): return func(*args, **kwargs) # For convenience, when introspecting for example, expose the original # function on the function we're returning. call_with_connection.func = func return call_with_connection def transactional(func): """Decorator that wraps calls to `func` in a Django-managed transaction. It ensures that connections are closed if necessary. This keeps Django happy, especially in the test suite. In addition, if `func` is being invoked from outside of a transaction, this will retry if it fails with a serialization failure. """ func_within_txn = transaction.atomic(func) # For savepoints. func_outside_txn = retry_on_serialization_failure( func_within_txn, reset=post_commit_hooks.reset) @wraps(func) def call_within_transaction(*args, **kwargs): if connection.in_atomic_block: # Don't use the retry-capable function if we're already in a # transaction; retrying is pointless when the txn is broken. with post_commit_hooks.savepoint(): return func_within_txn(*args, **kwargs) else: # Use the retry-capable function, firing post-transaction hooks. # # If there is not yet a connection to the database, connect before # calling the decorated function, then disconnect when done. This # can be important when using non-transactional advisory locks # that may be held before, during, and/or after this transactional # block. # # Previously, close_old_connections() was used here, which would # close connections without realising that they were still in use # for non-transactional advisory locking. This had the effect of # releasing all locks prematurely: not good. # with connected(), post_commit_hooks: return func_outside_txn(*args, **kwargs) # For convenience, when introspecting for example, expose the original # function on the function we're returning. call_within_transaction.func = func return call_within_transaction @contextmanager def savepoint(): """Context manager to wrap the code within a savepoint. This also enters a savepoint context for post-commit hooks, and so should always be used in preference to `transaction.atomic()` when only a savepoint is needed. If either a transaction or a savepoint within a transaction is what you want, use the `transactional` decorator. If you want a _decorator_ specifically, use the `transactional` decorator. If you want a _savepoint decorator_ specifically, write one, or adapt this to do it. """ if connection.in_atomic_block: with post_commit_hooks.savepoint(): with transaction.atomic(): yield else: raise TransactionManagementError( "Savepoints cannot be created outside of a transaction.") def in_transaction(connection=connection): """Is `connection` in the midst of a transaction? This only enquires as to Django's perspective on the situation. It does not actually check that the database agrees with Django. :return: bool """ return ( # Django's new transaction management stuff is active. connection.in_atomic_block or ( # Django's "legacy" transaction management system is active. len(connection.transaction_state) > 0 and # Django is managing the transaction state. connection.transaction_state[-1] ) ) def validate_in_transaction(connection): """Ensure that `connection` is within a transaction. This only enquires as to Django's perspective on the situation. It does not actually check that the database agrees with Django. :raise TransactionManagementError: If no transaction is in progress. 
""" if not in_transaction(connection): raise TransactionManagementError( # XXX: GavinPanella 2015-08-07 bug=1482563: This error message is # specific to lobjects, but this lives in a general utils module. "PostgreSQL's large object support demands that all interactions " "are done in a transaction. Further, lobject() has been known to " "segfault when used outside of a transaction. This assertion has " "prevented the use of lobject() outside of a transaction. Please " "investigate.") class DisabledDatabaseConnection: """Instances of this class raise exceptions when used. Referencing an attribute elicits a :py:class:`RuntimeError`. Specifically, this is useful to help prevent Django's py:class:`~django.db.utils.ConnectionHandler` from handing out usable database connections to code running in the event-loop's thread (a.k.a. the reactor thread). """ def __getattr__(self, name): raise RuntimeError( "Database connections in this thread (%s) are " "disabled." % threading.currentThread().name) def __setattr__(self, name, value): raise RuntimeError( "Database connections in this thread (%s) are " "disabled." % threading.currentThread().name) def __delattr__(self, name): raise RuntimeError( "Database connections in this thread (%s) are " "disabled." % threading.currentThread().name) def disable_all_database_connections(): """Replace all connections in this thread with unusable stubs. Specifically, instances of :py:class:`~DisabledDatabaseConnection`. This should help prevent accidental use of the database from the reactor thread. Why? Database access means blocking IO, at least with the connections that Django hands out. While blocking IO isn't forbidden in the reactor thread, it ought to be avoided, because the reactor can't do anything else while it's happening, like handling other IO, or running delayed calls. Django's transaction and connection management code also assumes threads: it associates connections and transactions with the current thread, using threading.local. Using the database from the reactor thread is a recipe for intermingled transactions. """ for alias in connections: connection = connections[alias] if type(connection) is not DisabledDatabaseConnection: connections[alias] = DisabledDatabaseConnection() connection.close() def enable_all_database_connections(): """Re-enable database connections in this thread after having... ... been previously disabled with `disable_all_database_connections`. See `disable_all_database_connections` for the rationale. """ for alias in connections: # isinstance() fails because it references __bases__. if type(connections[alias]) is DisabledDatabaseConnection: del connections[alias] class TotallyDisconnected: """Context to disallow all database connections within a block.""" def __enter__(self): """Disable all database connections, closing those that are open.""" disable_all_database_connections() def __exit__(self, *exc_info): """Enable all database connections, but don't actually connect.""" enable_all_database_connections() class ExclusivelyConnected: """Context to only permit database connections within a block. This blows up with `AssertionError` if a database connection is open when the context is entered. On exit, all database connections open in the current thread will be closed without niceties, and no effort is made to suppress database failures at this point. 
""" def __enter__(self): """Assert that no connections are yet open.""" for alias in connections: if connections[alias].connection is not None: raise AssertionError("Connection %s is open." % (alias,)) def __exit__(self, *exc_info): """Close database connections in the current thread.""" for alias in connections: connections[alias].close() class FullyConnected: """Context to ensure that all databases are connected. On entry, connections will be establed to all defined databases. On exit, they'll all be closed again. Simple. """ def __enter__(self): """Assert that no connections are yet open.""" for alias in connections: connections[alias].ensure_connection() def __exit__(self, *exc_info): """Close database connections in the current thread.""" for alias in connections: connections[alias].close() def parse_item_operation(specifier): """ Returns a tuple indicating the specifier string, and its related operation (if one was found). If the first character in the specifier is '|', the operator will be OR. If the first character in the specifier is '&', the operator will be AND. If the first character in the specifier is '!', or the specifier starts with "not_", the operator will be AND(existing_query, ~(new_query)). If unspecified, the default operator is OR. :param specifier: a string containing the specifier. :return: tuple """ specifier = specifier.strip() from operator import ( and_ as AND, inv as INV, or_ as OR, ) AND_NOT = lambda current, next: AND(current, INV(next)) if specifier.startswith('|'): op = OR specifier = specifier[1:] elif specifier.startswith('&'): op = AND specifier = specifier[1:] elif specifier.startswith('not_'): op = AND_NOT specifier = specifier[4:] elif specifier.startswith('!'): op = AND_NOT specifier = specifier[1:] else: # Default to OR. op = OR return specifier, op def parse_item_specifier_type(specifier, spec_types={}, separator=':'): """ Returns a tuple that splits the string int a specifier, and its specifier type. Retruns a tuple of (specifier, specifier_type). If no specifier type could be found in the set, returns None in place of the specifier_type. :param specifier: The specifier string, such as "ip:10.0.0.1". :param spec_types: A dict whose keys are strings that will be recognized as specifier types. :param separator: Optional specifier. Defaults to ':'. :return: tuple """ if separator in specifier: tokens = specifier.split(separator, 1) if tokens[0] in spec_types: specifier_type = tokens[0] specifier = tokens[1].strip() else: specifier_type = None else: specifier_type = None return specifier, specifier_type def get_model_object_name(queryset): """Returns the model object name for the given `QuerySet`, or None if it cannot be determined. """ if hasattr(queryset, 'model'): if hasattr(queryset.model, '_meta'): metadata = getattr(queryset.model, '_meta') if hasattr(metadata, 'object_name'): return metadata.object_name return None class MAASQueriesMixin(object): """Contains utility functions that any mixin for model object manager queries may need to make use of.""" def get_id_list(self, raw_query): """Returns a list of IDs for each row in the specified raw query. This can be used to create additional filters to chain from a raw query, which would not otherwise be possible. Note that using this method risks a race condition, since a row could be inserted after the raw query runs. 
""" ids = [row.id for row in raw_query] return self.filter(id__in=ids) def get_id_filter(self, raw_query): """Returns a `QuerySet` for the specified raw query, by executing it and adding an 'in' filter with the ID of each object in the raw query. """ ids = self.get_id_list(raw_query) return self.filter(id__in=ids) def format_specifiers(self, specifiers): """Formats the given specifiers into a list. If the list of specifiers is given as a comma-separated list, it is inferred that the user would like a set of queries joined with logical AND operators. If the list of specifiers is given as a dict, it is inferred that each key is a specifier type, and each value is a list of specifier values. The specifier values inside each list will be joined with logical OR operators. The lists for each key will be joined with logical AND operators. For example, 'name:eth0,hostname:tasty-buscuits' might match interface eth0 on node 'tasty-biscuits'; that is, both constraints are required. """ if isinstance(specifiers, types.IntType): return [unicode(specifiers)] elif isinstance(specifiers, unicode): return [ '&' + specifier.strip() for specifier in specifiers.split(',') ] elif isinstance(specifiers, dict): return specifiers else: return list(flatten(specifiers)) def get_filter_function( self, specifier_type, spec_types, item, separator=':'): """Returns a function that must return a Q() based on some pervious Q(), an operation function (which will manipulate it), and the data that will be used as an argument to the filter operation function. :param:specifier_type: a string which will be used as a key to get the specifier from the spec_types dictionary. :param:spec_types: the dictionary of valid specifier types. :param:item: the string that will be used to filter by :param:separator: a string that must separate specifiers from their values. (for example, the default of ':' would be used if you wanted specifiers to look like "id:42".) :return: types.FunctionType or types.MethodType """ query = spec_types.get(specifier_type, None) while True: if isinstance(query, (types.FunctionType, types.MethodType)): # Found a function or method that will appending the filter # string for us. Parameters must be in the format: # (, , ), where # the operation_function must be a function that takes action # on the current_Q() to append a new query object (Q()). return query elif isinstance(query, types.TupleType): # Specifies a query to a subordinate specifier function. # This will be a tuple in the format: # (manager_object, filter_from_object) # That is, filter_from_object defines how to relate the object # we're querying back to the object that we care about, and # manager_object is a Django Manager instance. (manager_object, filter_from_object) = query sub_ids = manager_object.filter_by_specifiers( item).values_list(filter_from_object + '__id', flat=True) # Return a function to filter the current object based on # its IDs (as gathered from the query above to the related # object). kwargs = {"id__in": sub_ids} return lambda cq, op, i: op(cq, Q(**kwargs)) elif isinstance(query, unicode): if separator in query: # We got a string like "subnet:space". This means we want # to actually use the query specifier at the 'subnet' key, # but we want to convert the item from (for example) # "space1" to "space:space1". When we loop back around, # "subnet" will resolve to a tuple, and we'll query the # specifier-based filter for Subnet. 
                    query, subordinate = query.split(separator, 1)
                    item = subordinate + separator + item
                elif '__' in query:
                    # If the value for this query specifier contains the
                    # string '__', assume it's a Django filter expression,
                    # and return the appropriate query. Disambiguate what
                    # could be an 'alias expression' by allowing the __ to
                    # appear before the filter. (That is, prefix the filter
                    # string with __ to query the current object.)
                    if query.startswith('__'):
                        query = query[2:]
                    kwargs = {query: item}
                    return lambda cq, op, i: op(cq, Q(**kwargs))
                else:
                    query = spec_types.get(query, None)
            elif query is None:
                # The None key is for the default query for this specifier.
                query = spec_types[None]
            else:
                break
        return None

    def get_specifiers_q(
            self, specifiers, specifier_types=None, separator=':',
            **kwargs):
        """Returns a Q object for objects matching the given specifiers.

        See documentation for `filter_by_specifiers()`.

        :return: django.db.models.Q
        """
        if specifier_types is None:
            raise NotImplementedError("Subclass must specify specifier_types.")
        current_q = Q()
        if isinstance(specifiers, dict):
            # If we got a dictionary, treat it as one of the entries in a
            # LabeledConstraintMap. That is, each key is a specifier, and
            # each value is a list of values (which must be OR'd together).
            for key in specifiers.iterkeys():
                assert isinstance(specifiers[key], list)
                constraints = [
                    key + separator + value
                    for value in specifiers[key]
                ]
                # Leave off specifier_types here because this recursion
                # will go back to the subclass to get the types filled in.
                current_q &= self.get_specifiers_q(
                    constraints, separator=separator)
        else:
            for item in specifiers:
                item, op = parse_item_operation(item)
                item, specifier_type = parse_item_specifier_type(
                    item, spec_types=specifier_types, separator=separator)
                query = self.get_filter_function(
                    specifier_type, specifier_types, item,
                    separator=separator)
                current_q = query(current_q, op, item)
        if len(kwargs) > 0:
            current_q &= Q(**kwargs)
        return current_q

    def filter_by_specifiers(self, specifiers, separator=':', **kwargs):
        """Filters this object by the given specifiers.

        If additional keyword arguments are supplied, they will also be
        queried for, and treated as an AND.

        :return: QuerySet
        """
        specifiers = self.format_specifiers(specifiers)
        query = self.get_specifiers_q(
            specifiers, separator=separator, **kwargs)
        return self.filter(query)

    def exclude_by_specifiers(self, specifiers, **kwargs):
        """Excludes objects by the given list of specifiers (or single
        specifier).

        See documentation for `filter_by_specifiers()`.

        If additional keyword arguments are supplied, they will also be
        queried for, and treated as an AND.

        :return: QuerySet
        """
        specifiers = self.format_specifiers(specifiers)
        query = self.get_specifiers_q(specifiers, **kwargs)
        return self.exclude(query)

    def _add_vlan_vid_query(self, current_q, op, item):
        """Query for a related VLAN with a specified VID (vlan__vid).

        Even though this is a rather specific query, it was placed in
        orm.py since it is shared by multiple subclasses. (It will not be
        used unless referred to by the specifier_types dictionary passed
        into get_specifiers_q() by the subclass.)
        """
        if item.lower() == 'untagged':
            vid = 0
        else:
            vid = parse_integer(item)
        if vid < 0 or vid >= 0xfff:
            raise ValidationError(
                "VLAN tag (VID) out of range (0-4094; 0 for untagged).")
        current_q = op(current_q, Q(vlan__vid=vid))
        return current_q

    def get_matching_object_map(self, specifiers, query):
        """This method is intended to be called with a query for foreign
        object IDs.
        For example, if called from the Interface object (with a list of
        interface specifiers), it might be called with a query string like
        'node__id' (a "foreign" object ID).

        In general, you will get a dictionary from this method in the
        form::

            {
                <foreign_object_id>: [<object_id>, <object_id>, ...],
                ...
            }

        In other words, call this method when you want a map from related
        object IDs (specified by 'query') to a list of objects (of the
        current type) which match a query.
        """
        filter = self.filter_by_specifiers(specifiers)
        # We'll be looping through the list assuming a particular order
        # later in this function, so make sure the interfaces are grouped
        # by their attached nodes.
        matches = filter.order_by(query)
        matches = matches.values_list('id', query)
        foreign_object_map = {}
        object_ids = set()
        object_id = None
        foreign_object_matches = None
        for foreign_id, current_id in matches:
            # Note: despite the names, `foreign_id` here is the current
            # object's own ID, and `current_id` is the value of the
            # foreign key named by `query`.
            if current_id is None:
                # Skip objects that do not have a corresponding foreign
                # key.
                continue
            if current_id != object_id:
                # Encountered a new foreign ID in the list, so create an
                # empty list and add it to the map (and add the foreign ID
                # to the set of matched IDs).
                foreign_object_matches = []
                foreign_object_map[current_id] = foreign_object_matches
                object_ids.add(current_id)
                object_id = current_id
            foreign_object_matches.append(foreign_id)
        return object_ids, foreign_object_map

    def get_object_by_specifiers_or_raise(self, specifiers, **kwargs):
        """Gets an object using the given specifier(s).

        If the specifier is empty, raises Http400. If multiple objects are
        returned, raises Http403. If the object cannot be found, raises
        Http404.

        :param specifiers: unicode
        """
        object_name = get_model_object_name(self)
        if isinstance(specifiers, unicode):
            specifiers = specifiers.strip()
        if specifiers is None or specifiers == "":
            raise MAASAPIBadRequest(
                "%s specifier required." % object_name)
        try:
            object = get_one(self.filter_by_specifiers(specifiers, **kwargs))
            if object is None:
                raise Http404(
                    'No %s matches the given query.' % object_name)
        except self.model.MultipleObjectsReturned:
            raise MAASAPIForbidden(
                "Too many %s objects match the given query." % object_name)
        return object

    def get_object_id(self, name, prefix=None):
        """
        Given the specified name and prefix, attempts to derive an object
        ID. By default (if a prefix is not supplied), uses the lowercase
        version of the current model object name as a prefix.

        For example, if the current model object name is "Fabric", and a
        string such as 'fabric-10' is supplied, int(10) will be returned.
        If an incorrect prefix is supplied, None will be returned.

        If an integer is supplied, the integer will be returned. If a
        string is supplied, that string will be parsed as an integer and
        returned (before trying to match against '<prefix>-<id>').

        :param name: str
        :param prefix: str
        :return: int
        """
        if name is None:
            return None
        if isinstance(name, types.IntType):
            return name
        try:
            object_id = parse_integer(name)
            return object_id
        except ValueError:
            # Move on to check if this is a "name" like "object-10".
            pass
        if prefix is None:
            prefix = get_model_object_name(self).lower()
        name = name.strip()
        match = re.match(r'%s-(\d+)$' % prefix, name)
        if match is not None:
            (object_id,) = match.groups()
            object_id = int(object_id)
            return object_id
        else:
            return None

    def _add_default_query(self, current_q, op, item):
        """If the item we're matching is an integer, first try to locate
        the object by its ID. Otherwise, search by name.
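
        A sketch of the behaviour (`manager` and `operator.or_` are
        illustrative stand-ins)::

            manager._add_default_query(Q(), operator.or_, "10")
            # ... is equivalent to Q() | Q(id=10)
            manager._add_default_query(Q(), operator.or_, "eth0")
            # ... is equivalent to Q() | Q(name="eth0")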
""" object_id = self.get_object_id(item) if object_id is not None: return op(current_q, Q(id=object_id)) else: return op(current_q, Q(name=item)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/osystems.py0000644000000000000000000002735113056115004021222 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Utilities for working with operating systems.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_distro_series_initial', 'get_release_requires_key', 'list_all_releases_requiring_keys', 'list_all_usable_osystems', 'list_all_usable_releases', 'list_all_usable_hwe_kernels', 'list_hwe_kernel_choices', 'list_osystem_choices', 'list_release_choices', 'list_commissioning_choices', 'make_hwe_kernel_ui_text', 'validate_hwe_kernel', ] from operator import itemgetter from distro_info import UbuntuDistroInfo from django.core.exceptions import ValidationError from maasserver.clusterrpc.osystems import gen_all_known_operating_systems from maasserver.models import ( BootResource, BootSourceCache, Config, ) def list_all_usable_osystems(): """Return all operating systems that can be used for nodes.""" osystems = [ osystem for osystem in gen_all_known_operating_systems() if len(osystem['releases']) > 0 ] return sorted(osystems, key=itemgetter('title')) def list_osystem_choices(osystems, include_default=True): """Return Django "choices" list for `osystem`. :param include_default: When true includes the 'Default OS' in choice selection. """ if include_default: choices = [('', 'Default OS')] else: choices = [] choices += [ (osystem['name'], osystem['title']) for osystem in osystems ] return choices def list_all_usable_releases(osystems): """Return dictionary of usable `releases` for each operating system.""" distro_series = {} for osystem in osystems: distro_series[osystem['name']] = sorted( [release for release in osystem['releases']], key=itemgetter('title')) return distro_series def list_all_usable_hwe_kernels(releases): """Return dictionary of usable `kernels` for each os/release.""" kernels = {} for osystem, osystems in releases.iteritems(): if osystem not in kernels: kernels[osystem] = {} for release in osystems: os_release = osystem + '/' + release['name'] kernels[osystem][release['name']] = list_hwe_kernel_choices( sorted([ i for i in BootResource.objects.get_usable_hwe_kernels( os_release) if release_a_newer_than_b(i, release['name'])])) return kernels def make_hwe_kernel_ui_text(hwe_kernel): if not hwe_kernel: return hwe_kernel release_letter = hwe_kernel.replace('hwe-', '') boot_sources = BootSourceCache.objects.filter( release__startswith=release_letter, subarch=hwe_kernel) if len(boot_sources) > 0: return "%s (%s)" % (boot_sources[0].release, hwe_kernel) else: ubuntu = UbuntuDistroInfo() for release in ubuntu.all: if release.startswith(release_letter): return "%s (%s)" % (release, hwe_kernel) return hwe_kernel def list_hwe_kernel_choices(hwe_kernels): return [(hwe_kernel, make_hwe_kernel_ui_text(hwe_kernel)) for hwe_kernel in hwe_kernels ] def list_all_releases_requiring_keys(osystems): """Return dictionary of OS name mapping to `releases` that require license keys.""" distro_series = {} for osystem in osystems: releases = [ release for release in osystem['releases'] if release['requires_license_key'] ] if len(releases) > 0: distro_series[osystem['name']] = sorted( releases, key=itemgetter('title')) return 
distro_series


def get_release_requires_key(release):
    """Return asterisk for any release that requires a license key.

    This is used by the JS, to display the license_key field.
    """
    if release['requires_license_key']:
        return '*'
    return ''


def list_release_choices(releases, include_default=True,
                         with_key_required=True):
    """Return Django "choices" list for `releases`.

    :param include_default: When true includes the 'Default OS Release' in
        choice selection.
    :param with_key_required: When true includes the release_requires_key
        in the choice.
    """
    if include_default:
        choices = [('', 'Default OS Release')]
    else:
        choices = []
    for os_name, os_releases in releases.items():
        for release in os_releases:
            if with_key_required:
                requires_key = get_release_requires_key(release)
            else:
                requires_key = ''
            choices.append((
                '%s/%s%s' % (os_name, release['name'], requires_key),
                release['title']
            ))
    return choices


def get_osystem_from_osystems(osystems, name):
    """Return osystem from osystems with the given name."""
    for osystem in osystems:
        if osystem['name'] == name:
            return osystem
    return None


def get_release_from_osystem(osystem, name):
    """Return release from osystem with the given release name."""
    for release in osystem['releases']:
        if release['name'] == name:
            return release
    return None


def get_distro_series_initial(osystems, instance, with_key_required=True):
    """Returns the distro_series initial value for the instance.

    :param with_key_required: When true includes the release_requires_key
        in the choice.
    """
    osystem_name = instance.osystem
    series = instance.distro_series
    osystem = get_osystem_from_osystems(osystems, osystem_name)
    if not with_key_required:
        key_required = ''
    elif osystem is not None:
        release = get_release_from_osystem(osystem, series)
        if release is not None:
            key_required = get_release_requires_key(release)
        else:
            key_required = ''
    else:
        # OS of the instance isn't part of the given OSes list so we can't
        # figure out if the key is required or not; default to not
        # requiring it.
        key_required = ''
    if osystem_name is not None and osystem_name != '':
        if series is None:
            series = ''
        return '%s/%s%s' % (osystem_name, series, key_required)
    return None


def list_commissioning_choices(osystems):
    """Return Django "choices" list for releases that can be used for
    commissioning."""
    ubuntu = get_osystem_from_osystems(osystems, 'ubuntu')
    if ubuntu is None:
        return []
    else:
        releases = sorted(ubuntu['releases'], key=itemgetter('title'))
        return [
            (release['name'], release['title'])
            for release in releases
            if release['can_commission']
        ]


def validate_osystem_and_distro_series(osystem, distro_series):
    """Validate `osystem` and `distro_series` are valid choices."""
    if '/' in distro_series:
        series_os, release = distro_series.split('/', 1)
        if series_os != osystem:
            raise ValidationError(
                "%s in distro_series does not match with "
                "operating system %s." % (distro_series, osystem))
    else:
        release = distro_series
    release = release.replace('*', '')
    usable_osystems = list_all_usable_osystems()
    found_osystem = get_osystem_from_osystems(usable_osystems, osystem)
    if found_osystem is None:
        raise ValidationError(
            "%s is not a supported operating system." % osystem)
    found_release = get_release_from_osystem(found_osystem, release)
    if found_release is None:
        raise ValidationError(
            "%s/%s is not a supported operating system and release "
            "combination." % (osystem, release))
    return osystem, release


def release_a_newer_than_b(a, b):
    """Compare two Ubuntu releases and return True if a >= b.
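
    A couple of illustrative comparisons (doctest-style sketch; the
    wrapping rule is explained below)::

        >>> release_a_newer_than_b('hwe-t', 'trusty')
        True
        >>> release_a_newer_than_b('precise', 'trusty')
        False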
    The release names can be the full release name (e.g. Precise, Trusty),
    or a hardware enablement kernel (e.g. hwe-p, hwe-t). The function
    wraps around the letter 'p', as Precise was the first version of
    Ubuntu that MAAS supported.
    """
    def get_release_num(release):
        release = release.lower()
        if 'hwe-' in release:
            release = release.replace('hwe-', '')
        return ord(release[0])

    # Compare release versions based off of the first letter of their
    # release name or the letter in hwe-<letter>. Wrap around the letter
    # 'p' as that is the first version of Ubuntu MAAS supported.
    num_a = get_release_num(a)
    num_b = get_release_num(b)
    num_wrap = ord('p')

    if((num_a >= num_wrap and num_b >= num_wrap and num_a >= num_b) or
       (num_a < num_wrap and num_b >= num_wrap and num_a < num_b) or
       (num_a < num_wrap and num_b < num_wrap and num_a >= num_b)):
        return True
    else:
        return False


def validate_hwe_kernel(
        hwe_kernel, min_hwe_kernel, architecture, osystem, distro_series):
    """Validates that hwe_kernel works on the selected os/release/arch.

    Checks that the current hwe_kernel is available for the selected
    os/release/architecture combination, and that the selected hwe_kernel
    is >= min_hwe_kernel. If no hwe_kernel is selected one will be chosen.
    """
    if (not osystem or
            (not architecture or architecture == '') or
            (not distro_series or distro_series == '')):
        return hwe_kernel

    # If we're not deploying Ubuntu we are just setting the kernel to be
    # used during deployment.
    if osystem != "ubuntu":
        osystem = Config.objects.get_config('commissioning_osystem')
        distro_series = Config.objects.get_config(
            'commissioning_distro_series')

    arch, subarch = architecture.split('/')

    if (subarch != 'generic' and
            ((hwe_kernel and hwe_kernel.startswith('hwe-')) or
             (min_hwe_kernel and min_hwe_kernel.startswith('hwe-')))):
        raise ValidationError(
            'Subarchitecture (%s) must be generic when setting hwe_kernel.'
            % subarch)

    os_release = osystem + '/' + distro_series
    usable_kernels = BootResource.objects.get_usable_hwe_kernels(
        os_release, arch)

    if hwe_kernel and hwe_kernel.startswith('hwe-'):
        if hwe_kernel not in usable_kernels:
            raise ValidationError(
                '%s is not available for %s on %s.'
                % (hwe_kernel, os_release, architecture))
        if not release_a_newer_than_b(hwe_kernel, distro_series):
            raise ValidationError(
                '%s is too old to use on %s.' % (hwe_kernel, os_release))
        if((min_hwe_kernel and min_hwe_kernel.startswith('hwe-')) and
           (not release_a_newer_than_b(hwe_kernel, min_hwe_kernel))):
            raise ValidationError(
                'hwe_kernel(%s) is older than min_hwe_kernel(%s).'
                % (hwe_kernel, min_hwe_kernel))
        return hwe_kernel
    elif(min_hwe_kernel and min_hwe_kernel.startswith('hwe-')):
        for i in usable_kernels:
            if(release_a_newer_than_b(i, min_hwe_kernel) and
               release_a_newer_than_b(i, distro_series)):
                return i
        raise ValidationError(
            '%s has no kernels available which meet min_hwe_kernel(%s).'
            % (distro_series, min_hwe_kernel))
    return 'hwe-' + distro_series[0]


def validate_min_hwe_kernel(min_hwe_kernel):
    """Check that the min_hwe_kernel is available."""
    if not min_hwe_kernel or min_hwe_kernel == "":
        return ""
    usable_kernels = BootResource.objects.get_usable_hwe_kernels()
    if min_hwe_kernel not in usable_kernels:
        raise ValidationError('%s is not a usable kernel.' % min_hwe_kernel)
    return min_hwe_kernel
maas-1.9.5+bzr4599.orig/src/maasserver/utils/signals.py0000644000000000000000000001100513056115004020761 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Signal utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'connect_to_field_change', ] from django.db.models.signals import ( post_delete, post_init, post_save, pre_delete, pre_save, ) def connect_to_field_change(callback, model, fields, delete=False): """Call `callback` when any of `fields` on `model` are modified. The triggering event is when a model object of the given type is either saved to the database with any of the given fields having a different value than it had originally; or, optionally, a model object of the given type is deleted. In either case, no matter how many of the fields may have been changed, the callback is invoked exactly once. The signature of the callback method should be the following: >>> def callback(instance, old_values, deleted): ... pass Where `instance` is the object which has just being saved to the database, `old_values` is a tuple of the original values for `fields` (in the same order as `fields`), and `deleted` indicates whether it was a deletion that triggered the callback. :param callback: The callback function. :type callback: callable :param model: Specifies a particular sender to receive signals from. :type model: class :param fields: Names of the fields to monitor. :type fields: iterable of unicode :param delete: Should the deletion of an object be considered a change in the field? :type delete: bool :return: A ``(connect, disconnect)`` tuple, where ``connect`` and ``disconnect`` are no-argument functions that connect and disconnect fields changes respectively. ``connect`` has already been called when this function returns. """ # Capture the fields in case an iterator was passed. fields = tuple(fields) combined_fields_name = '__'.join(fields) last_seen_flag = '_fields_last_seen_values__%s' % combined_fields_name delta_flag = '_fields_delta__%s' % combined_fields_name def snapshot_values(instance): """Obtain the tuple of `fields` values for `instance`.""" return tuple( getattr(instance, field_name) for field_name in fields ) # Set 'last_seen_flag' to hold the field' current values. def record_last_seen_flag(sender, instance, **kwargs): original_values = snapshot_values(instance) setattr(instance, last_seen_flag, original_values) # Set 'delta_flag' to hold the fields' old and new values. def record_delta_flag(sender, instance, **kwargs): original_values = getattr(instance, last_seen_flag) new_values = snapshot_values(instance) setattr(instance, delta_flag, (new_values, original_values)) # Call the `callback` if any field has changed. def post_save_callback(sender, instance, created, **kwargs): (new_values, original_values) = getattr(instance, delta_flag) # Call the callback method is the field has changed. if original_values != new_values: callback(instance, original_values, deleted=False) setattr(instance, last_seen_flag, new_values) # Assemble the relevant signals and their handlers. signals = ( (post_init, record_last_seen_flag), (pre_save, record_delta_flag), (post_save, post_save_callback), ) if delete: # Call the `callback` if the instance is being deleted. 
        def post_delete_callback(sender, instance, **kwargs):
            (new_values, original_values) = getattr(instance, delta_flag)
            callback(instance, original_values, deleted=True)

        signals += (
            (pre_delete, record_delta_flag),
            (post_delete, post_delete_callback),
        )

    def connect():
        for signal, handler in signals:
            signal.connect(handler, sender=model, weak=False)

    def disconnect():
        for signal, handler in signals:
            signal.disconnect(handler, sender=model, weak=False)

    connect.__doc__ = "Connect to %s for changes in %s." % (
        model.__name__, " or ".join(fields))
    disconnect.__doc__ = "Disconnect from %s for changes in (%s)." % (
        model.__name__, " or ".join(fields))

    # The caller expects to be connected initially.
    connect()

    return connect, disconnect
maas-1.9.5+bzr4599.orig/src/maasserver/utils/storage.py0000644000000000000000000000751013056115004020773 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Utilities for working with the storage model."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "get_effective_filesystem",
    "used_for",
    ]

from maasserver.enum import FILESYSTEM_TYPE


def get_effective_filesystem(model):
    """Return the effective `Filesystem` for the `model`.

    A `BlockDevice` or `Partition` can have up to two `Filesystem`
    objects: one with `acquired` set to `False` and another with it set to
    `True`. When the `Node` for `model` is in an allocated state, the
    acquired `Filesystem` will be used over the non-acquired `Filesystem`.

    :param model: Model to get active `Filesystem` from.
    :type model: Either `BlockDevice` or `Partition`.
    :returns: Active `Filesystem` for `model`.
    :rtype: `Filesystem`
    """
    from maasserver.models import BlockDevice, Partition
    assert isinstance(model, (BlockDevice, Partition))
    node = model.get_node()
    filesystems = list(model.filesystem_set.all())
    if node.is_in_allocated_state():
        # Return the acquired filesystem.
        for filesystem in filesystems:
            if filesystem.acquired:
                return filesystem
        # No acquired filesystem, could be a filesystem that is not
        # mountable so we return that filesystem.
        for filesystem in filesystems:
            if not filesystem.is_mountable():
                return filesystem
        return None
    else:
        # Not in allocated state so return the filesystem that is not an
        # acquired filesystem.
        for filesystem in filesystems:
            if not filesystem.acquired:
                return filesystem
        return None


def used_for(model):
    """Return what the block device or partition is being used for.

    :param model: Model to get active `Filesystem` or `PartitionTable`
        from.
    :type model: Either `BlockDevice` or `Partition`.
    :returns: What the block device or partition is being used for.
    :rtype: `str`
    """
    # Avoid circular imports
    from maasserver.models import BlockDevice
    filesystem = get_effective_filesystem(model)
    if filesystem is not None:
        if filesystem.mount_point:
            return (
                "%s formatted filesystem mounted at %s" % (
                    filesystem.fstype, filesystem.mount_point))
        elif filesystem.fstype == FILESYSTEM_TYPE.LVM_PV:
            return "LVM volume for %s" % filesystem.filesystem_group.name
        elif filesystem.fstype == FILESYSTEM_TYPE.RAID:
            return (
                "Active %s device for %s" % (
                    filesystem.filesystem_group.group_type,
                    filesystem.filesystem_group.name))
        elif filesystem.fstype == FILESYSTEM_TYPE.RAID_SPARE:
            return (
                "Spare %s device for %s" % (
                    filesystem.filesystem_group.group_type,
                    filesystem.filesystem_group.name))
        elif filesystem.fstype == FILESYSTEM_TYPE.BCACHE_CACHE:
            return "Cache device for %s" % filesystem.cache_set.get_name()
        elif filesystem.fstype == FILESYSTEM_TYPE.BCACHE_BACKING:
            return "Backing device for %s" % filesystem.filesystem_group.name
        else:
            return "Unmounted %s formatted filesystem" % filesystem.fstype
    elif isinstance(model, BlockDevice):
        partition_table = model.get_partitiontable()
        if partition_table is not None:
            partitions = partition_table.partitions.count()
            if partitions > 1:
                message = "%s partitioned with %d partitions"
            else:
                message = "%s partitioned with %d partition"
            return message % (partition_table.table_type, partitions)
    return "Unused"
maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/0000755000000000000000000000000013056115004020114 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/utils/threads.py0000644000000000000000000001337513056115004020767 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Stuff relating to threads in the MAAS Region Controller.

Threads in Python aren't great, but they're okay for what we need. However,
Django's ORM closely weds database connections to threads, so we use specific
pools to limit the number of connections each `regiond` process will consume.
"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "callOutToDatabase",
    "deferToDatabase",
    "install_database_pool",
    "install_database_unpool",
    "install_default_pool",
    "make_database_pool",
    "make_default_pool",
    ]

from maasserver.utils.orm import (
    ExclusivelyConnected,
    FullyConnected,
    TotallyDisconnected,
    )
from provisioningserver.utils.twisted import (
    asynchronous,
    FOREVER,
    ThreadPool,
    ThreadUnpool,
    )
from twisted.internet import reactor
from twisted.internet.defer import DeferredSemaphore
from twisted.internet.threads import deferToThreadPool

max_threads_for_default_pool = 50
max_threads_for_database_pool = 20


def make_default_pool(maxthreads=max_threads_for_default_pool):
    """Create a general thread-pool for non-database activity.

    Its sole consumer is the old-school web application, i.e. the plain
    HTTP service. All threads are disconnected from the database.
    """
    return ThreadPool(0, maxthreads, "default", TotallyDisconnected)


def make_database_pool(maxthreads=max_threads_for_database_pool):
    """Create a general thread-pool for database activity.

    Its consumers are the old-school web application, i.e. the plain HTTP
    and HTTP API services, and the WebSocket service, for the responsive
    web UI. All threads are fully connected to the database.
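
    A sketch of manual construction (the `install_database_pool` helper
    below is the normal route)::

        pool = make_database_pool(maxthreads=4)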
""" return ThreadPool(0, maxthreads, "database", FullyConnected) def make_database_unpool(maxthreads=max_threads_for_database_pool): """Create a general non-thread-pool for database activity. Its consumer are the old-school web application, i.e. the plain HTTP and HTTP API services, and the WebSocket service, for the responsive web UI. Each thread is fully connected to the database. However, this is a :class:`ThreadUnpool`, which means that threads are not actually pooled: a new thread is created for each task. This is ideal for testing, to improve isolation between tests. """ return ThreadUnpool(DeferredSemaphore(maxthreads), ExclusivelyConnected) @asynchronous(timeout=FOREVER) def install_default_pool(maxthreads=max_threads_for_default_pool): """Install a custom pool as Twisted's global/reactor thread-pool. Disallow all database activity in the reactor thread-pool. Why such a strict policy? We've been following Django's model, where threads and database connections are wedded together. In MAAS this limits concurrency, contributes to crashes and deadlocks, and has spawned workarounds like post-commit hooks. From here on, using a database connection requires the use of a specific, separate, carefully-sized, thread-pool. """ if reactor.threadpool is None: # Start with ZERO threads to avoid pulling in all of Django's # configuration straight away; it may not be ready yet. reactor.threadpool = make_default_pool(maxthreads) reactor.callWhenRunning(reactor.threadpool.start) reactor.addSystemEventTrigger( "during", "shutdown", reactor.threadpool.stop) else: raise AssertionError( "Too late; global/reactor thread-pool has " "already been configured and installed.") @asynchronous(timeout=FOREVER) def install_database_pool(maxthreads=max_threads_for_database_pool): """Install a pool for database activity.""" if getattr(reactor, "threadpoolForDatabase", None) is None: # Start with ZERO threads to avoid pulling in all of Django's # configuration straight away; it may not be ready yet. reactor.threadpoolForDatabase = make_database_pool(maxthreads) reactor.callInDatabase = reactor.threadpoolForDatabase.callInThread reactor.callWhenRunning(reactor.threadpoolForDatabase.start) reactor.addSystemEventTrigger( "during", "shutdown", reactor.threadpoolForDatabase.stop) else: raise AssertionError( "Too late; database thread-pool has already " "been configured and installed.") @asynchronous(timeout=FOREVER) def install_database_unpool(maxthreads=max_threads_for_database_pool): """Install a pool for database activity particularly suited to testing. See `make_database_unpool` for details. """ try: reactor.threadpoolForDatabase except AttributeError: reactor.threadpoolForDatabase = make_database_unpool(maxthreads) reactor.callInDatabase = reactor.threadpoolForDatabase.callInThread reactor.callWhenRunning(reactor.threadpoolForDatabase.start) reactor.addSystemEventTrigger( "during", "shutdown", reactor.threadpoolForDatabase.stop) else: raise AssertionError( "Too late; database thread-pool has already " "been configured and installed.") def deferToDatabase(func, *args, **kwargs): """Call `func` in a thread where database activity is permitted.""" return deferToThreadPool( reactor, reactor.threadpoolForDatabase, func, *args, **kwargs) def callOutToDatabase(thing, func, *args, **kwargs): """Call out to the given `func` in a database thread, but return `thing`. This is identical to `callOutToThread` except that it uses the database thread-pool. 
""" return deferToDatabase(func, *args, **kwargs).addCallback(lambda _: thing) maas-1.9.5+bzr4599.orig/src/maasserver/utils/version.py0000644000000000000000000000730013056115004021011 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Version utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "get_maas_doc_version", "get_maas_version_subversion", "get_maas_version_ui", ] import apt_pkg try: from bzrlib.branch import Branch from bzrlib.errors import NotBranchError except ImportError: Branch = None from maasserver.api.logger import maaslog # Initialize apt_pkg. apt_pkg.init() # Name of maas package to get version from. REGION_PACKAGE_NAME = "maas-region-controller-min" def get_version_from_apt(package): """Return the version output from `apt_pkg.Cache` for the given package or an error message if the package data is not valid.""" try: cache = apt_pkg.Cache(None) except SystemError: maaslog.error( 'Installed version could not be determined. Ensure ' '/var/lib/dpkg/status is valid.') return "" version = None if package in cache: apt_package = cache[package] version = apt_package.current_ver return version.ver_str if version is not None else "" def extract_version_subversion(version): """Return a tuple (version, subversion) from the given apt version.""" if "~" in version: main_version, extra = version.split("~", 1) return main_version, extra.split("-", 1)[0] elif "+" in version: main_version, extra = version.split("+", 1) return main_version, "+" + extra.split("-", 1)[0] else: return version.split("-", 1)[0], '' def get_maas_branch(): """Return the `bzrlib.branch.Branch` for this running MAAS.""" if Branch is None: return None try: return Branch.open(".") except NotBranchError: return None _cache = {} # A very simply memoize function: when we switch to Django 1.7 we should use # Django's lru_cache method. def simple_cache(fun): def wrapped(*args, **kwargs): key = hash(repr(fun) + repr(args) + repr(kwargs)) if key not in _cache: _cache[key] = fun(*args, **kwargs) return _cache[key] wrapped.__doc__ = "%s %s" % (fun.__doc__, "(cached)") return wrapped @simple_cache def get_maas_package_version(): """Return the apt version for the main MAAS package.""" return get_version_from_apt(REGION_PACKAGE_NAME) @simple_cache def get_maas_version_subversion(): """Return a tuple with the MAAS version and the MAAS subversion.""" apt_version = get_maas_package_version() if apt_version: return extract_version_subversion(apt_version) else: # Get the branch information branch = get_maas_branch() if branch is None: # Not installed not in branch, then no way to identify. This should # not happen, but just in case. return "unknown", '' else: return "from source (+bzr%s)" % branch.revno(), '' @simple_cache def get_maas_version_ui(): """Return the version string for the running MAAS region. The returned string is suitable to display in the UI. 
""" version, subversion = get_maas_version_subversion() return "%s (%s)" % (version, subversion) if subversion else version @simple_cache def get_maas_doc_version(): """Return the doc version for the running MAAS region.""" doc_prefix = 'docs' apt_version = get_maas_package_version() if apt_version: version, _ = extract_version_subversion(apt_version) return doc_prefix + '.'.join(version.split('.')[:2]) else: return doc_prefix maas-1.9.5+bzr4599.orig/src/maasserver/utils/views.py0000644000000000000000000002363613056115004020473 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """View utilities configuration.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'WebApplicationHandler', ] import httplib from itertools import count import logging import sys from time import sleep from weakref import WeakSet from django.core import signals from django.core.handlers.wsgi import WSGIHandler from django.core.urlresolvers import get_resolver from django.db import transaction from django.template.response import SimpleTemplateResponse from maasserver.utils.orm import ( gen_retry_intervals, is_serialization_failure, post_commit_hooks, ) from oauth.oauth import OAuthError from piston.authentication import initialize_server_request from piston.models import Nonce from provisioningserver.utils.twisted import retries from twisted.internet import reactor as clock from twisted.python import log from twisted.python.failure import Failure from twisted.web import wsgi logger = logging.getLogger(__name__) def log_failed_attempt(request, attempt, elapsed, remaining, pause): """Log about a failed attempt to answer the given request.""" logger.debug( "Attempt #%d for %s failed; will retry in %.0fms (%.1fs now elapsed, " "%.1fs remaining)", attempt, request.path, pause * 1000.0, elapsed, remaining) def log_final_failed_attempt(request, attempt, elapsed): """Log about the final failed attempt to answer the given request.""" logger.error( "Attempt #%d for %s failed; giving up (%.1fs elapsed in total)", attempt, request.path, elapsed) def delete_oauth_nonce(request): """Delete the OAuth nonce for the given request from the database. This is to allow the exact same request to be retried. """ _, oauth_request = initialize_server_request(request) if oauth_request is not None: try: consumer_key = oauth_request.get_parameter('oauth_consumer_key') token_key = oauth_request.get_parameter('oauth_token') nonce = oauth_request.get_parameter('oauth_nonce') except OAuthError: # Missing OAuth parameter: skip Nonce deletion. pass else: Nonce.objects.filter( consumer_key=consumer_key, token_key=token_key, key=nonce).delete() def reset_request(request): """Return a pristine new request object. Use this after a transaction failure, before retrying. This is needed so that we don't carry over messages, for example. TODO: this assumes we're using the cookies as a container for messages; we need to clear the session as well. This also resets the input stream. """ wsgi_input = request.environ.get("wsgi.input") if isinstance(wsgi_input, wsgi._InputStream): # This is what we are going to see within Twisted. The wrapped # file supports seeking so this is safe. 
wsgi_input._wrapped.seek(0) else: # Neither PEP 0333 nor PEP 3333 requires that the input stream # supports seeking, but we need it, and it is better to crash here # than to continue if it's not available. wsgi_input.seek(0) return request.__class__(request.environ) class MAASDjangoTemplateResponse(SimpleTemplateResponse): def __init__(self, response=None): super(MAASDjangoTemplateResponse, self).__init__( '%d.html' % self.status_code) # If we are passed an original response object 'response', # transfer over the content from the original response # for HTTP 200 responses, if such content exists. # Subsequently calling render() on the new object will # not replace the transferred content, while calling render() # on the new object when the original was content-less # will render the template for the new status code. if response is not None and hasattr(response, 'status_code'): if response.status_code == httplib.OK and hasattr( response, 'content'): self.content = response.content class HttpResponseConflict(MAASDjangoTemplateResponse): status_code = httplib.CONFLICT class WebApplicationHandler(WSGIHandler): """Request handler that retries when there are serialisation failures. :ivar __retry_attempts: The number of times to attempt each request. :ivar __retry_timeout: The number of seconds after which this request will no longer be considered for a retry. :ivar __retry: A weak set containing responses that have been generated as a result of a serialization failure. """ def __init__(self, attempts=10, timeout=90.0): super(WebApplicationHandler, self).__init__() assert attempts >= 1, "The minimum number of attempts is 1, not %d" % attempts self.__retry_attempts = attempts self.__retry_timeout = timeout self.__retry = WeakSet() def handle_uncaught_exception(self, request, resolver, exc_info): """Override `BaseHandler.handle_uncaught_exception`. If a serialization failure is detected, a retry is requested. It's up to ``get_response`` to actually do the retry. """ upcall = super(WebApplicationHandler, self).handle_uncaught_exception response = upcall(request, resolver, exc_info) # Add it to the retry set if this response was caused by a # serialization failure. exc_type, exc_value, exc_traceback = exc_info if is_serialization_failure(exc_value): self.__retry.add(response) else: # Log the error to the regiond.log. failure = Failure( exc_value=exc_value, exc_type=exc_type, exc_tb=exc_traceback) log.err(failure, _why="500 Error - %s" % request.path) # Return the response regardless. This means that we'll get Django's # error page when there's a persistent serialization failure. return response def make_view_atomic(self, view): """Make `view` atomic and with a post-commit hook savepoint. This view will be executed within a transaction as it is -- that's a core purpose of this class -- so wrapping the view in an extra atomic layer means that it will run within a *savepoint*. This prevents middleware exception handlers that suppress exceptions from inadvertently allowing failed requests to be committed. In addition, this also holds a post-commit hook savepoint around the view. If the view crashes, those post-commit hooks that were created with this savepoint will be discarded.
""" view_atomic = super(WebApplicationHandler, self).make_view_atomic(view) def view_atomic_with_post_commit_savepoint(*args, **kwargs): with post_commit_hooks.savepoint(): return view_atomic(*args, **kwargs) return view_atomic_with_post_commit_savepoint def get_response(self, request): """Override `BaseHandler.get_response`. Wrap Django's default get_response(). Middleware and templates will thus also run within the same transaction, but streaming responses will *not* run within the same transaction, or any transaction at all by default. """ django_get_response = super(WebApplicationHandler, self).get_response def get_response(request): # Up-call to Django's get_response() in a transaction. This # transaction may fail because of a serialization conflict, so # pass errors to handle_uncaught_exception(). try: with post_commit_hooks: with transaction.atomic(): return django_get_response(request) except SystemExit: # Allow sys.exit() to actually exit, reproducing behaviour # found in Django's BaseHandler. raise except: # Catch *everything* else, also reproducing behaviour found in # Django's BaseHandler. In practice, we should only really see # transaction failures here from the outermost atomic block as # all other exceptions are handled by django_get_response. The # setting DEBUG_PROPAGATE_EXCEPTIONS upsets this, so be on # your guard when tempted to use it. signals.got_request_exception.send( sender=self.__class__, request=request) return self.handle_uncaught_exception( request, get_resolver(None), sys.exc_info()) # Attempt to start new transactions for up to `__retry_timeout` # seconds, at intervals defined by `gen_retry_intervals`, but don't # try more than `__retry_attempts` times. retry_intervals = gen_retry_intervals() retry_details = retries(self.__retry_timeout, retry_intervals, clock) retry_attempts = self.__retry_attempts retry_set = self.__retry for attempt in count(1): response = get_response(request) if response in retry_set: elapsed, remaining, wait = next(retry_details) if attempt == retry_attempts or wait == 0: # Time's up: this was the final attempt. log_final_failed_attempt(request, attempt, elapsed) conflict_response = HttpResponseConflict(response) conflict_response.render() return conflict_response # We'll retry after a brief interlude. log_failed_attempt(request, attempt, elapsed, remaining, wait) delete_oauth_nonce(request) request = reset_request(request) sleep(wait) else: return response maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/__init__.py0000644000000000000000000000000013056115004022213 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_async.py0000644000000000000000000002460013056115004022644 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for asynchronous utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from functools import partial from textwrap import dedent import threading from time import time from crochet import wait_for_reactor from maasserver.exceptions import IteratorReusedError from maasserver.testing.orm import PostCommitHooksTestMixin from maasserver.utils import async from maasserver.utils.async import DeferredHooks from maastesting.factory import factory from maastesting.matchers import ( IsFiredDeferred, IsUnfiredDeferred, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from maastesting.twisted import TwistedLoggerFixture from mock import ( call, Mock, sentinel, ) from testtools.deferredruntest import extract_result from testtools.matchers import ( Contains, Equals, HasLength, Is, IsInstance, LessThan, ) from testtools.testcase import ExpectedException from twisted.internet import reactor from twisted.internet.defer import Deferred from twisted.internet.task import deferLater from twisted.python.failure import Failure from twisted.python.threadable import isInIOThread class TestGather(MAASTestCase): def test_gather_nothing(self): time_before = time() results = list(async.gather([], timeout=10)) time_after = time() self.assertThat(results, Equals([])) # gather() should return well within 9 seconds; this shows # that the call is not timing out. self.assertThat(time_after - time_before, LessThan(9)) class TestGatherScenarios(MAASTestCase): scenarios = ( ("synchronous", { # Return the call as-is. "wrap": lambda call: call, }), ("asynchronous", { # Defer the call to a later reactor iteration. "wrap": lambda call: partial(deferLater, reactor, 0, call), }), ) def test_gather_from_calls_without_errors(self): values = [ self.getUniqueInteger(), self.getUniqueString(), ] calls = [ self.wrap(lambda v=value: v) for value in values ] results = list(async.gather(calls)) self.assertItemsEqual(values, results) def test_returns_use_once_iterator(self): calls = [] results = async.gather(calls) self.assertIsInstance(results, async.UseOnceIterator) def test_gather_from_calls_with_errors(self): calls = [ (lambda: sentinel.okay), (lambda: 1 / 0), # ZeroDivisionError ] calls = [self.wrap(call) for call in calls] results = list(async.gather(calls)) self.assertThat(results, Contains(sentinel.okay)) results.remove(sentinel.okay) self.assertThat(results, HasLength(1)) failure = results[0] self.assertThat(failure, IsInstance(Failure)) self.assertThat(failure.type, Is(ZeroDivisionError)) class TestUseOnceIterator(MAASTestCase): def test_returns_correct_items_for_list(self): expected_values = list(range(10)) iterator = async.UseOnceIterator(expected_values) actual_values = [val for val in iterator] self.assertEqual(expected_values, actual_values) def test_raises_stop_iteration(self): iterator = async.UseOnceIterator([]) self.assertRaises(StopIteration, iterator.next) def test_raises_iterator_reused(self): iterator = async.UseOnceIterator([]) # Loop over the iterator to get to the point where we might try # and reuse it. 
list(iterator) self.assertRaises(IteratorReusedError, iterator.next) class TestDeferredHooks(MAASTestCase, PostCommitHooksTestMixin): def test__is_thread_local(self): dhooks = DeferredHooks() queues = [] for _ in xrange(3): thread = threading.Thread( target=lambda: queues.append(dhooks.hooks)) thread.start() thread.join() self.assertThat(queues, HasLength(3)) # Each queue is distinct (deque is unhashable; use the id() of each). self.assertThat(set(id(q) for q in queues), HasLength(3)) def test__add_appends_Deferred_to_queue(self): dhooks = DeferredHooks() self.assertThat(dhooks.hooks, HasLength(0)) dhooks.add(Deferred()) self.assertThat(dhooks.hooks, HasLength(1)) def test__add_cannot_be_called_in_the_reactor(self): dhooks = DeferredHooks() add_in_reactor = wait_for_reactor(dhooks.add) self.assertRaises(AssertionError, add_in_reactor, Deferred()) def test__fire_calls_hooks(self): dhooks = DeferredHooks() ds = Deferred(), Deferred() for d in ds: dhooks.add(d) dhooks.fire() for d in ds: self.assertIsNone(extract_result(d)) def test__fire_calls_hooks_in_reactor(self): def validate_in_reactor(_): self.assertTrue(isInIOThread()) dhooks = DeferredHooks() d = Deferred() d.addCallback(validate_in_reactor) dhooks.add(d) dhooks.fire() self.assertThat(d, IsFiredDeferred()) def test__fire_propagates_error_from_hook(self): error = factory.make_exception() dhooks = DeferredHooks() d = Deferred() d.addCallback(lambda _: Failure(error)) dhooks.add(d) self.assertRaises(type(error), dhooks.fire) def test__fire_always_consumes_all_hooks(self): dhooks = DeferredHooks() d1, d2 = Deferred(), Deferred() d1.addCallback(lambda _: 0 / 0) # d1 will fail. dhooks.add(d1) dhooks.add(d2) self.assertRaises(ZeroDivisionError, dhooks.fire) self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(d1, IsFiredDeferred()) self.assertThat(d2, IsFiredDeferred()) def test__reset_cancels_all_hooks(self): canceller = Mock() dhooks = DeferredHooks() d1, d2 = Deferred(canceller), Deferred(canceller) dhooks.add(d1) dhooks.add(d2) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(canceller, MockCallsMatch(call(d1), call(d2))) def test__reset_cancels_in_reactor(self): def validate_in_reactor(_): self.assertTrue(isInIOThread()) dhooks = DeferredHooks() d = Deferred() d.addBoth(validate_in_reactor) dhooks.add(d) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(d, IsFiredDeferred()) def test__reset_suppresses_CancelledError(self): logger = self.useFixture(TwistedLoggerFixture()) dhooks = DeferredHooks() d = Deferred() dhooks.add(d) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(extract_result(d), Is(None)) self.assertEqual("", logger.output) def test__logs_failures_from_cancellers(self): logger = self.useFixture(TwistedLoggerFixture()) canceller = Mock() canceller.side_effect = factory.make_exception() dhooks = DeferredHooks() d = Deferred(canceller) dhooks.add(d) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) # The hook has not been fired, but because the user-supplied canceller # has failed we're not in a position to know what to do. This reflects # a programming error and not a run-time error that we ought to be # prepared for, so it is left as-is. self.assertThat(d, IsUnfiredDeferred()) self.assertDocTestMatches( dedent("""\ Failure when cancelling hook. Traceback (most recent call last): ... maastesting.factory.TestException#... 
"""), logger.output) def test__logs_failures_from_cancellers_when_hook_already_fired(self): logger = self.useFixture(TwistedLoggerFixture()) def canceller(d): d.callback(None) raise factory.make_exception() dhooks = DeferredHooks() d = Deferred(canceller) dhooks.add(d) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(d, IsFiredDeferred()) self.assertDocTestMatches( dedent("""\ Failure when cancelling hook. Traceback (most recent call last): ... maastesting.factory.TestException#... """), logger.output) def test__logs_failures_from_cancelled_hooks(self): logger = self.useFixture(TwistedLoggerFixture()) error = factory.make_exception() dhooks = DeferredHooks() d = Deferred() d.addBoth(lambda _: Failure(error)) dhooks.add(d) dhooks.reset() self.assertThat(dhooks.hooks, HasLength(0)) self.assertThat(d, IsFiredDeferred()) self.assertDocTestMatches( dedent("""\ Unhandled Error Traceback (most recent call last): ... maastesting.factory.TestException#... """), logger.output) def test__savepoint_saves_and_restores_hooks(self): d = Deferred() dhooks = DeferredHooks() dhooks.add(d) with dhooks.savepoint(): self.expectThat(list(dhooks.hooks), Equals([])) self.expectThat(list(dhooks.hooks), Equals([d])) def test__savepoint_restores_hooks_with_new_hooks_on_clean_exit(self): d1 = Deferred() d2 = Deferred() dhooks = DeferredHooks() dhooks.add(d1) with dhooks.savepoint(): dhooks.add(d2) self.expectThat(list(dhooks.hooks), Equals([d2])) self.expectThat(list(dhooks.hooks), Equals([d1, d2])) def test__savepoint_restores_hooks_only_on_dirty_exit(self): d1 = Deferred() d2 = Deferred() dhooks = DeferredHooks() dhooks.add(d1) exception_type = factory.make_exception_type() with ExpectedException(exception_type): with dhooks.savepoint(): dhooks.add(d2) raise exception_type() self.expectThat(list(dhooks.hooks), Equals([d1])) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_converters.py0000644000000000000000000001123113056115004023715 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for converters utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from textwrap import dedent from maasserver.utils.converters import ( human_readable_bytes, machine_readable_bytes, round_size_to_nearest_block, XMLToYAML, ) from maastesting.testcase import MAASTestCase class TestXMLToYAML(MAASTestCase): def test_xml_to_yaml_converts_xml(self): # This test is similar to the test above but this one # checks that tags with colons works as expected. 
xml = """ Some Content """ expected_result = dedent("""\ - list: - lldp:lldp: label: LLDP neighbors - lshw:list: Some Content """) yml = XMLToYAML(xml) self.assertEqual( yml.convert(), expected_result) class TestHumanReadableBytes(MAASTestCase): scenarios = [ ("bytes", dict( size=987, output="987.0", suffix="bytes")), ("KB", dict( size=1000 * 35 + 500, output="35.5", suffix="KB")), ("MB", dict( size=(1000 ** 2) * 28, output="28.0", suffix="MB")), ("GB", dict( size=(1000 ** 3) * 72, output="72.0", suffix="GB")), ("TB", dict( size=(1000 ** 4) * 150, output="150.0", suffix="TB")), ("PB", dict( size=(1000 ** 5), output="1.0", suffix="PB")), ("EB", dict( size=(1000 ** 6), output="1.0", suffix="EB")), ("ZB", dict( size=(1000 ** 7), output="1.0", suffix="ZB")), ("YB", dict( size=(1000 ** 8), output="1.0", suffix="YB")), ] def test__returns_size_with_suffix(self): self.assertEqual( '%s %s' % (self.output, self.suffix), human_readable_bytes(self.size)) def test__returns_size_without_suffix(self): self.assertEqual( self.output, human_readable_bytes(self.size, include_suffix=False)) class TestMachineReadableBytes(MAASTestCase): """Testing the human->machine byte count converter""" def test_suffixes(self): self.assertEqual(machine_readable_bytes('987'), 987) self.assertEqual(machine_readable_bytes('987K'), 987000) self.assertEqual(machine_readable_bytes('987M'), 987000000) self.assertEqual(machine_readable_bytes('987G'), 987000000000) self.assertEqual(machine_readable_bytes('987T'), 987000000000000) self.assertEqual(machine_readable_bytes('987P'), 987000000000000000) self.assertEqual(machine_readable_bytes('987E'), 987000000000000000000) self.assertEqual(machine_readable_bytes('987k'), 987000) self.assertEqual(machine_readable_bytes('987m'), 987000000) self.assertEqual(machine_readable_bytes('987g'), 987000000000) self.assertEqual(machine_readable_bytes('987t'), 987000000000000) self.assertEqual(machine_readable_bytes('987p'), 987000000000000000) self.assertEqual(machine_readable_bytes('987e'), 987000000000000000000) self.assertRaises(ValueError, machine_readable_bytes, '987Z') class TestRoundSizeToNearestBlock(MAASTestCase): def test__round_up_adds_extra_block(self): block_size = 4096 size = block_size + 1 self.assertEquals( 2 * block_size, round_size_to_nearest_block(size, block_size, True), "Should add an extra block to the size.") def test__round_up_doesnt_add_extra_block(self): block_size = 4096 size = block_size self.assertEquals( size, round_size_to_nearest_block(size, block_size, True), "Shouldn't add an extra block to the size.") def test__round_down_removes_block(self): block_size = 4096 size = block_size + 1 self.assertEquals( 1 * block_size, round_size_to_nearest_block(size, block_size, False), "Should remove block from the size.") def test__round_down_doesnt_remove_block(self): block_size = 4096 size = block_size * 2 self.assertEquals( size, round_size_to_nearest_block(size, block_size, False), "Shouldn't remove a block from the size.") maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_dblocks.py0000644000000000000000000003030413056115004023146 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.utils.dblocks`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from contextlib import ( closing, contextmanager, ) from random import randint import sys from django.db import ( connection, transaction, ) from maasserver.testing.dblocks import lock_held_in_other_thread from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils import dblocks from testtools.matchers import Equals def get_locks(): """Return the set of locks held.""" stmt = "SELECT objid FROM pg_locks WHERE classid = %s" with closing(connection.cursor()) as cursor: cursor.execute(stmt, [dblocks.classid]) return {result[0] for result in cursor.fetchall()} def random_objid(): """Return a 'high' objid that's won't coincide with predeclared locks.""" return randint(2 << 10, 2 << 16) @transaction.atomic def divide_by_zero(): """Do something stupid in a transaction.""" 0 / 0 @contextmanager def use_debug_cursor(): """Set `use_debug_cursor` on Django's default connection.""" use_debug_cursor = connection.use_debug_cursor connection.use_debug_cursor = True try: yield finally: connection.use_debug_cursor = use_debug_cursor def capture_queries_while_holding_lock(lock): """Capture SQL being issued to the database. Return as a single string with each statement separated by new-line "--" new-line. """ with use_debug_cursor(): del connection.queries[:] with lock: pass # Just being here is enough. return "\n--\n".join( query["sql"] for query in connection.queries) class TestDatabaseLock(MAASTransactionServerTestCase): scenarios = tuple( ("%s/%s" % mode, {"mode": mode}) for mode in dblocks.DatabaseLock.MODE_CHOICES ) def make_lock(self, objid): return dblocks.DatabaseLock(objid, mode=self.mode) def tearDown(self): super(TestDatabaseLock, self).tearDown() with closing(connection.cursor()) as cursor: cursor.execute("SELECT pg_advisory_unlock_all()") def test_create_lock(self): objid = random_objid() lock = self.make_lock(objid) self.assertEqual(lock, (dblocks.classid, objid)) def test_properties(self): lock = self.make_lock(random_objid()) self.assertEqual(lock, (lock.classid, lock.objid)) @transaction.atomic def test_lock_actually_locked(self): objid = random_objid() lock = self.make_lock(objid) locks_held_before = get_locks() with lock: locks_held = get_locks() locks_held_after = get_locks() locks_obtained = locks_held - locks_held_before self.assertEqual({objid}, locks_obtained) locks_released = locks_held - locks_held_after self.assertEqual({objid}, locks_released) @transaction.atomic def test_is_locked(self): objid = random_objid() lock = self.make_lock(objid) self.assertFalse(lock.is_locked()) with lock: self.assertTrue(lock.is_locked()) self.assertFalse(lock.is_locked()) def test_lock_remains_held_when_committing_transaction(self): objid = random_objid() lock = self.make_lock(objid) txn = transaction.atomic() self.assertFalse(lock.is_locked()) txn.__enter__() self.assertFalse(lock.is_locked()) lock.__enter__() self.assertTrue(lock.is_locked()) txn.__exit__(None, None, None) self.assertTrue(lock.is_locked()) lock.__exit__(None, None, None) self.assertFalse(lock.is_locked()) def test_lock_remains_held_when_aborting_transaction(self): objid = random_objid() lock = self.make_lock(objid) txn = transaction.atomic() self.assertFalse(lock.is_locked()) txn.__enter__() self.assertFalse(lock.is_locked()) lock.__enter__() self.assertTrue(lock.is_locked()) 
self.assertRaises(ZeroDivisionError, divide_by_zero) exc_info = sys.exc_info() txn.__exit__(*exc_info) self.assertTrue(lock.is_locked()) lock.__exit__(None, None, None) self.assertFalse(lock.is_locked()) def test_lock_is_held_around_transaction(self): objid = random_objid() lock = self.make_lock(objid) self.assertFalse(lock.is_locked()) with lock: self.assertTrue(lock.is_locked()) with transaction.atomic(): self.assertTrue(lock.is_locked()) self.assertTrue(lock.is_locked()) self.assertFalse(lock.is_locked()) def test_lock_is_held_around_breaking_transaction(self): objid = random_objid() lock = self.make_lock(objid) self.assertFalse(lock.is_locked()) with lock: self.assertTrue(lock.is_locked()) self.assertRaises(ZeroDivisionError, divide_by_zero) self.assertTrue(lock.is_locked()) self.assertFalse(lock.is_locked()) def test_lock_requires_preexisting_connection(self): connection.close() objid = random_objid() lock = self.make_lock(objid) self.assertRaises( dblocks.DatabaseLockAttemptWithoutConnection, lock.__enter__) def test_releasing_lock_fails_when_lock_not_held(self): objid = random_objid() lock = self.make_lock(objid) self.assertRaises(dblocks.DatabaseLockNotHeld, lock.__exit__) def test_repr(self): lock = self.make_lock(random_objid()) self.assertEqual( "" % ( lock[0], lock[1], self.mode[0], self.mode[1]), repr(lock)) class TestDatabaseLockVariations(MAASServerTestCase): def test_plain_variation(self): lock = dblocks.DatabaseLock(random_objid()) self.assertDocTestMatches( """\ SELECT pg_advisory_lock(...) -- SELECT pg_advisory_unlock(...) """, capture_queries_while_holding_lock(lock)) def test_try_variation(self): lock = dblocks.DatabaseLock(random_objid()) self.assertThat(lock.TRY, Equals(lock)) self.assertDocTestMatches( """\ SELECT pg_try_advisory_lock(...) -- SELECT pg_advisory_unlock(...) """, capture_queries_while_holding_lock(lock.TRY)) def test_shared_variation(self): lock = dblocks.DatabaseLock(random_objid()) self.assertThat(lock.SHARED, Equals(lock)) self.assertDocTestMatches( """\ SELECT pg_advisory_lock_shared(...) -- SELECT pg_advisory_unlock_shared(...) """, capture_queries_while_holding_lock(lock.SHARED)) def test_try_shared_variation(self): lock = dblocks.DatabaseLock(random_objid()) self.assertThat(lock.TRY.SHARED, Equals(lock)) self.assertDocTestMatches( """\ SELECT pg_try_advisory_lock_shared(...) -- SELECT pg_advisory_unlock_shared(...) 
""", capture_queries_while_holding_lock(lock.TRY.SHARED)) class TestDatabaseXactLock(MAASTransactionServerTestCase): scenarios = tuple( ("%s/%s" % mode, {"mode": mode}) for mode in dblocks.DatabaseXactLock.MODE_CHOICES ) def make_lock(self, objid): return dblocks.DatabaseXactLock(objid, mode=self.mode) def test_create_lock(self): objid = random_objid() lock = self.make_lock(objid) self.assertEqual(lock, (dblocks.classid, objid)) def test_properties(self): lock = self.make_lock(random_objid()) self.assertEqual(lock, (lock.classid, lock.objid)) def test_lock_actually_locked(self): objid = random_objid() lock = self.make_lock(objid) with transaction.atomic(): locks_held_before = get_locks() with lock: locks_held = get_locks() locks_held_after = get_locks() locks_held_after_txn = get_locks() locks_obtained = locks_held - locks_held_before self.assertEqual({objid}, locks_obtained) locks_released = locks_held - locks_held_after self.assertEqual(set(), locks_released) locks_released_with_txn = locks_held - locks_held_after_txn self.assertEqual({objid}, locks_released_with_txn) def test_is_locked(self): objid = random_objid() lock = self.make_lock(objid) with transaction.atomic(): self.assertFalse(lock.is_locked()) with lock: self.assertTrue(lock.is_locked()) self.assertTrue(lock.is_locked()) # The lock is released with the end of the transaction. self.assertFalse(lock.is_locked()) def test_obtaining_lock_fails_when_outside_of_transaction(self): objid = random_objid() lock = self.make_lock(objid) self.assertRaises( dblocks.DatabaseLockAttemptOutsideTransaction, lock.__enter__) def test_releasing_lock_does_nothing(self): objid = random_objid() lock = self.make_lock(objid) self.assertIsNone(lock.__exit__()) def test_repr(self): lock = self.make_lock(random_objid()) self.assertEqual( "" % ( lock[0], lock[1], self.mode[0], self.mode[1]), repr(lock)) class TestDatabaseXactLockVariations(MAASServerTestCase): def test_plain_variation(self): lock = dblocks.DatabaseXactLock(random_objid()) self.assertDocTestMatches( "SELECT pg_advisory_xact_lock(...)", capture_queries_while_holding_lock(lock)) def test_try_variation(self): lock = dblocks.DatabaseXactLock(random_objid()) self.assertThat(lock.TRY, Equals(lock)) self.assertDocTestMatches( "SELECT pg_try_advisory_xact_lock(...)", capture_queries_while_holding_lock(lock.TRY)) def test_shared_variation(self): lock = dblocks.DatabaseXactLock(random_objid()) self.assertThat(lock.SHARED, Equals(lock)) self.assertDocTestMatches( "SELECT pg_advisory_xact_lock_shared(...)", capture_queries_while_holding_lock(lock.SHARED)) def test_try_shared_variation(self): lock = dblocks.DatabaseXactLock(random_objid()) self.assertThat(lock.TRY.SHARED, Equals(lock)) self.assertDocTestMatches( "SELECT pg_try_advisory_xact_lock_shared(...)", capture_queries_while_holding_lock(lock.TRY.SHARED)) class TestTryingToAcquireLockedLock(MAASServerTestCase): """Test what happens when trying to acquire a lock that's already taken.""" scenarios = ( ("DatabaseLock", dict(make_lock=dblocks.DatabaseLock)), ("DatabaseXactLock", dict(make_lock=dblocks.DatabaseXactLock)), ) def test_try_variation_when_already_exclusively_locked(self): lock = self.make_lock(random_objid()) with lock_held_in_other_thread(lock): self.assertRaises( dblocks.DatabaseLockNotHeld, lock.TRY.__enter__) def test_try_variation_when_already_share_locked(self): lock = self.make_lock(random_objid()) with lock_held_in_other_thread(lock.SHARED): self.assertRaises( dblocks.DatabaseLockNotHeld, lock.TRY.__enter__) def 
test_try_shared_variation_when_already_exclusively_locked(self): lock = self.make_lock(random_objid()) with lock_held_in_other_thread(lock): self.assertRaises( dblocks.DatabaseLockNotHeld, lock.TRY.SHARED.__enter__) def test_try_shared_variation_when_already_share_locked(self): lock = self.make_lock(random_objid()) with lock_held_in_other_thread(lock.SHARED): with lock.SHARED: pass # No exception. maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_dbtasks.py0000644000000000000000000002442313056115004023165 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.utils.dbtasks`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random import threading from crochet import wait_for_reactor from maasserver.testing.testcase import MAASTransactionServerTestCase from maasserver.utils.dbtasks import ( DatabaseTaskAlreadyRunning, DatabaseTasksService, ) from maasserver.utils.orm import transactional from maastesting.factory import factory from maastesting.testcase import MAASTestCase from maastesting.twisted import TwistedLoggerFixture from mock import sentinel from testtools.matchers import ( Equals, HasLength, Is, IsInstance, MatchesAll, MatchesAny, MatchesStructure, Not, ) from twisted.internet import reactor from twisted.internet.defer import ( Deferred, DeferredQueue, inlineCallbacks, QueueOverflow, ) noop = lambda: None class TestDatabaseTaskService(MAASTestCase): """Tests for `DatabaseTasksService`.""" def test__init(self): limit = random.randint(1, 1000) service = DatabaseTasksService(limit) self.assertThat(service, MatchesStructure( # Our requested limit is saved. limit=Equals(limit), # The queue does not permit anything to go in it. 
queue=MatchesAll( IsInstance(DeferredQueue), MatchesStructure.byEquality(size=0, backlog=1), first_only=True, ), )) def test__init_default_limit(self): service = DatabaseTasksService() self.assertThat(service.limit, Equals(100)) def test__cannot_add_task_to_unstarted_service(self): service = DatabaseTasksService() self.assertRaises(QueueOverflow, service.addTask, noop) def test__cannot_add_task_to_stopped_service(self): service = DatabaseTasksService() service.startService() service.stopService() self.assertRaises(QueueOverflow, service.addTask, noop) def test__cannot_add_task_when_queue_is_full(self): service = DatabaseTasksService(0) service.startService() try: event = threading.Event() service.addTask(event.wait) try: self.assertRaises( QueueOverflow, service.addTask, noop) finally: event.set() finally: service.stopService() def test__cannot_defer_task_when_queue_is_full(self): service = DatabaseTasksService(0) service.startService() try: event = threading.Event() service.addTask(event.wait) try: self.assertRaises( QueueOverflow, lambda: service.deferTask(noop).wait()) finally: event.set() finally: service.stopService() def test__cannot_sync_task_when_queue_is_full(self): service = DatabaseTasksService(0) service.startService() try: event = threading.Event() service.addTask(event.wait) try: self.assertRaises( QueueOverflow, lambda: service.syncTask().wait()) finally: event.set() finally: service.stopService() def test__startup_creates_queue_with_previously_defined_limit(self): limit = random.randint(1, 1000) service = DatabaseTasksService(limit) service.startService() try: self.assertThat(service, MatchesStructure( # The queue has been set to the `limit` size, and only one # thing is allowed to wait on the queue. queue=MatchesAll( IsInstance(DeferredQueue), MatchesStructure.byEquality(size=limit, backlog=1), first_only=True, ), )) finally: service.stopService() def test__task_is_executed_in_other_thread(self): get_thread_ident = lambda: threading.currentThread().ident service = DatabaseTasksService() service.startService() try: ident_from_task = service.deferTask(get_thread_ident).wait() ident_from_here = get_thread_ident() self.expectThat(ident_from_task, IsInstance(long, int)) self.expectThat(ident_from_task, Not(Equals(ident_from_here))) finally: service.stopService() def test__arguments_are_passed_through_to_task(self): def return_args(*args, **kwargs): return sentinel.here, args, kwargs service = DatabaseTasksService() service.startService() try: result = service.deferTask( return_args, sentinel.arg, kw=sentinel.kw).wait() self.assertThat(result, Equals( (sentinel.here, (sentinel.arg,), {"kw": sentinel.kw}))) finally: service.stopService() def test__tasks_are_all_run_before_shutdown_completes(self): service = DatabaseTasksService() service.startService() try: queue = service.queue event = threading.Event() count = random.randint(20, 40) for _ in xrange(count): service.addTask(event.wait) # The queue has `count` tasks (or `count - 1` tasks; the first may # have already been pulled off the queue) still pending. self.assertThat(queue.pending, MatchesAny( HasLength(count), HasLength(count - 1))) finally: event.set() service.stopService() # The queue is empty and nothing is waiting. self.assertThat( queue, MatchesStructure.byEquality( waiting=[], pending=[])) @wait_for_reactor @inlineCallbacks def test__deferred_task_can_be_cancelled_when_enqueued(self): things = [] # This will NOT be populated by tasks. 
service = DatabaseTasksService() yield service.startService() try: event = threading.Event() service.deferTask(event.wait) service.deferTask(things.append, 1).cancel() finally: event.set() yield service.stopService() self.assertThat(things, Equals([])) @wait_for_reactor @inlineCallbacks def test__deferred_task_cannot_be_cancelled_when_running(self): # DatabaseTaskAlreadyRunning is raised when attempting to cancel a # database task that's already running. service = DatabaseTasksService() yield service.startService() try: ready = Deferred() d = service.deferTask(reactor.callFromThread, ready.callback, None) # Wait for the task to begin running. yield ready # We have the reactor thread. Even if the task completes its # status will not be updated until the reactor's next iteration. self.assertRaises(DatabaseTaskAlreadyRunning, d.cancel) finally: yield service.stopService() @wait_for_reactor @inlineCallbacks def test__sync_task_can_be_cancelled_when_enqueued(self): things = [] # This will NOT be populated by tasks. service = DatabaseTasksService() yield service.startService() try: event = threading.Event() service.deferTask(event.wait) service.syncTask().cancel() finally: event.set() yield service.stopService() self.assertThat(things, Equals([])) def test__sync_task_fires_with_service(self): service = DatabaseTasksService() service.startService() try: self.assertThat(service.syncTask().wait(), Is(service)) finally: service.stopService() def test__failure_in_deferred_task_does_not_crash_service(self): things = [] # This will be populated by tasks. exception_type = factory.make_exception_type() def be_bad(): raise exception_type("I'm being very naughty.") service = DatabaseTasksService() service.startService() try: service.deferTask(things.append, 1).wait() self.assertRaises(exception_type, service.deferTask(be_bad).wait) service.deferTask(things.append, 2).wait() finally: service.stopService() self.assertThat(things, Equals([1, 2])) def test__failure_in_added_task_does_not_crash_service(self): things = [] # This will be populated by tasks. exception_type = factory.make_exception_type() def be_bad(): raise exception_type("I'm bad, so bad.") service = DatabaseTasksService() service.startService() try: service.addTask(things.append, 1) service.addTask(be_bad) service.addTask(things.append, 2) finally: service.stopService() self.assertThat(things, Equals([1, 2])) def test__failure_in_task_is_logged(self): logger = self.useFixture(TwistedLoggerFixture()) service = DatabaseTasksService() service.startService() try: service.addTask(lambda: 0 / 0) finally: service.stopService() self.assertDocTestMatches( """\ Unhandled failure in database task. Traceback (most recent call last): ... exceptions.ZeroDivisionError: ... """, logger.output) class TestDatabaseTaskServiceWithActualDatabase(MAASTransactionServerTestCase): """Tests for `DatabaseTasksService` with the databse.""" def test__task_can_access_database_from_other_thread(self): @transactional def database_task(): # Merely being here means we've accessed the database. return sentinel.beenhere service = DatabaseTasksService() service.startService() try: result = service.deferTask(database_task).wait() self.assertThat(result, Is(sentinel.beenhere)) finally: service.stopService() maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_dns.py0000644000000000000000000001357513056115004022324 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
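The ``test_dbtasks.py`` tests above document the service's whole public surface: ``addTask`` queues fire-and-forget work, ``deferTask`` returns a result that can be waited on from outside the reactor, ``syncTask`` acts as a barrier, and ``stopService`` drains the queue. A hypothetical usage sketch, using only the calls exercised by the tests (``record_metric`` is made up for illustration)::

    from maasserver.utils.dbtasks import DatabaseTasksService


    def record_metric():
        pass  # Would touch the database, e.g. via a @transactional function.

    service = DatabaseTasksService(100)  # Queue at most 100 pending tasks.
    service.startService()
    try:
        service.addTask(record_metric)           # Fire and forget.
        service.deferTask(record_metric).wait()  # Block until it has run.
        service.syncTask().wait()                # Barrier: prior tasks done.
    finally:
        service.stopService()                    # Runs remaining tasks first.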
"""Test DNS utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) from math import pow from django.core.exceptions import ValidationError from maasserver.utils.dns import ( get_ip_based_hostname, validate_domain_name, validate_hostname, ) from testtools.matchers import ( Equals, HasLength, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase class TestHostnameValidator(MAASTestCase): """Tests for the validation of hostnames. Specifications based on: http://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names This does not support Internationalized Domain Names. To do so, we'd have to accept and store unicode, but use the Punycode-encoded version. The validator would have to validate both versions: the unicode input for invalid characters, and the encoded version for length. """ def make_maximum_hostname(self): """Create a hostname of the maximum permitted length. The maximum permitted length is 255 characters. The last label in the hostname will not be of the maximum length, so tests can still append a character to it without creating an invalid label. The hostname is not randomised, so do not count on it being unique. """ # A hostname may contain any number of labels, separated by dots. # Each of the labels has a maximum length of 63 characters, so this has # to be built up from multiple labels. ten_chars = ('a' * 9) + '.' hostname = ten_chars * 25 + ('b' * 5) self.assertThat(hostname, HasLength(255)) return hostname def assertAccepts(self, hostname): """Assertion: the validator accepts `hostname`.""" try: validate_hostname(hostname) except ValidationError as e: raise AssertionError(unicode(e)) def assertRejects(self, hostname): """Assertion: the validator rejects `hostname`.""" self.assertRaises(ValidationError, validate_hostname, hostname) def assertDomainValidatorAccepts(self, domain_name): """Assertion: the validator rejects `domain_name`.""" try: validate_domain_name(domain_name) except ValidationError as e: raise AssertionError(unicode(e)) def assertDomainValidatorRejects(self, hostname): """Assertion: the validator rejects `hostname`.""" self.assertRaises(ValidationError, validate_domain_name, hostname) def test_accepts_ascii_letters(self): self.assertAccepts('abcde') def test_accepts_dots(self): self.assertAccepts('abc.def') def test_accepts_subdomain(self): self.assertAccepts('abc.def.ubuntu.com') def test_rejects_adjacent_dots(self): self.assertRejects('abc..def') def test_rejects_leading_dot(self): self.assertRejects('.abc') def test_rejects_trailing_dot(self): self.assertRejects('abc.') def test_accepts_ascii_digits(self): self.assertAccepts('abc123') def test_accepts_leading_digits(self): # Leading digits used to be forbidden, but are now allowed. 
self.assertAccepts('123abc') def test_rejects_whitespace(self): self.assertRejects('a b') self.assertRejects('a\nb') self.assertRejects('a\tb') def test_rejects_other_ascii_characters(self): self.assertRejects('a?b') self.assertRejects('a!b') self.assertRejects('a,b') self.assertRejects('a:b') self.assertRejects('a;b') self.assertRejects('a+b') self.assertRejects('a=b') def test_accepts_underscore_in_domain(self): self.assertAccepts('host.local_domain') def test_rejects_underscore_in_host(self): self.assertRejects('host_name.local') def test_accepts_hyphen(self): self.assertAccepts('a-b') def test_rejects_hyphen_at_start_of_label(self): self.assertRejects('-ab') def test_rejects_hyphen_at_end_of_label(self): self.assertRejects('ab-') def test_accepts_maximum_valid_length(self): self.assertAccepts(self.make_maximum_hostname()) def test_rejects_oversized_hostname(self): self.assertRejects(self.make_maximum_hostname() + 'x') def test_accepts_maximum_label_length(self): self.assertAccepts('a' * 63) def test_rejects_oversized_label(self): self.assertRejects('b' * 64) def test_rejects_nonascii_letter(self): # The \u03be is the Greek letter xi. Perfectly good letter, just not # ASCII. self.assertRejects('\u03be') def test_accepts_domain_underscores(self): self.assertDomainValidatorAccepts('_foo') self.assertDomainValidatorAccepts('_foo._bar') self.assertDomainValidatorAccepts('_.o_O._') class TestIpBasedHostnameGenerator(MAASTestCase): def test_ipv4_numeric(self): self.expectThat( get_ip_based_hostname(2130706433), Equals("127-0-0-1")) self.expectThat( get_ip_based_hostname(int(pow(2, 32) - 1)), Equals("255-255-255-255")) def test_ipv4_text(self): ipv4 = factory.make_ipv4_address() self.expectThat( get_ip_based_hostname(ipv4), Equals(ipv4.replace('.', '-'))) self.expectThat( get_ip_based_hostname("172.16.0.1"), Equals("172-16-0-1")) def test_ipv6_text(self): ipv4 = factory.make_ipv6_address() self.expectThat( get_ip_based_hostname(ipv4), Equals(ipv4.replace(':', '-'))) self.expectThat( get_ip_based_hostname("2001:67c:1562::15"), Equals("2001-67c-1562--15")) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_forms.py0000644000000000000000000000162613056115004022660 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for forms helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maasserver.utils.forms import compose_invalid_choice_text from maastesting.testcase import MAASTestCase from testtools.matchers import ContainsAll class TestComposeInvalidChoiceText(MAASTestCase): def test_map_enum_includes_all_enum_values(self): choices = [ (factory.make_name('key'), factory.make_name('value')) for _ in range(2)] msg = compose_invalid_choice_text(factory.make_name(), choices) self.assertThat( msg, ContainsAll(["'%s'" % key for key, val in choices])) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_interfaces.py0000644000000000000000000000764613056115004023665 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
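The accept/reject cases in ``test_dns.py`` above amount to the familiar hostname rules: labels of 1 to 63 characters drawn from ASCII letters, digits and interior hyphens, joined by dots, with at most 255 characters overall. A simplified sketch of such a check (it deliberately ignores the underscore allowance for non-host domain labels that the domain-name validator tests cover)::

    import re

    LABEL = re.compile(r'^(?!-)[a-zA-Z0-9-]{1,63}(?<!-)$')


    def looks_like_valid_hostname(hostname):
        """Rough check against the label and length rules tested above."""
        if len(hostname) > 255:
            return False
        return all(LABEL.match(label) for label in hostname.split('.'))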
"""Tests for network/cluster interface helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from maasserver.utils.interfaces import ( get_name_and_vlan_from_cluster_interface, make_name_from_interface, ) from maastesting.factory import factory from maastesting.testcase import MAASTestCase class TestMakeNameFromInterface(MAASTestCase): """Tests for `make_name_from_interface`.""" def test__passes_name_unchanged(self): name = factory.make_name('itf9:2') self.assertEqual(name, make_name_from_interface(name)) def test__escapes_weird_characters(self): self.assertEqual('x--y', make_name_from_interface('x?y')) self.assertEqual('x--y', make_name_from_interface('x y')) def test__makes_up_name_if_no_interface_given(self): self.assertNotIn(make_name_from_interface(None), (None, '')) self.assertNotIn(make_name_from_interface(''), (None, '')) def test__makes_up_unique_name_if_no_interface_given(self): self.assertNotEqual( make_name_from_interface(''), make_name_from_interface('')) class TestGetNameAndVlanFromClusterInterface(MAASTestCase): """Tests for `get_name_and_vlan_from_cluster_interface`.""" def make_interface(self): """Return a simple network interface name.""" return 'eth%d' % randint(0, 99) def test_returns_simple_name_unaltered(self): cluster = factory.make_name('cluster') interface = factory.make_name('iface') expected_name = '%s-%s' % (cluster, interface) self.assertEqual( (expected_name, None), get_name_and_vlan_from_cluster_interface(cluster, interface)) def test_substitutes_colon(self): cluster = factory.make_name('cluster') base_interface = self.make_interface() alias = randint(0, 99) interface = '%s:%d' % (base_interface, alias) expected_name = '%s-%s-%d' % (cluster, base_interface, alias) self.assertEqual( (expected_name, None), get_name_and_vlan_from_cluster_interface(cluster, interface)) def test_returns_with_vlan_tag(self): cluster = factory.make_name('cluster') base_interface = self.make_interface() vlan_tag = factory.make_vlan_tag() interface = '%s.%d' % (base_interface, vlan_tag) expected_name = '%s-%s-%d' % (cluster, base_interface, vlan_tag) self.assertEqual( (expected_name, '%d' % vlan_tag), get_name_and_vlan_from_cluster_interface(cluster, interface)) def test_returns_name_with_alias_and_vlan_tag(self): cluster = factory.make_name('cluster') base_interface = self.make_interface() vlan_tag = factory.make_vlan_tag() alias = randint(0, 99) interface = '%s:%d.%d' % (base_interface, alias, vlan_tag) expected_name = '%s-%s-%d-%d' % ( cluster, base_interface, alias, vlan_tag, ) self.assertEqual( (expected_name, '%d' % vlan_tag), get_name_and_vlan_from_cluster_interface(cluster, interface)) def test_returns_name_with_vlan_tag_and_alias(self): cluster = factory.make_name('cluster') base_interface = self.make_interface() vlan_tag = factory.make_vlan_tag() alias = randint(0, 99) interface = '%s.%d:%d' % (base_interface, vlan_tag, alias) expected_name = '%s-%s-%d-%d' % ( cluster, base_interface, vlan_tag, alias, ) self.assertEqual( (expected_name, '%d' % vlan_tag), get_name_and_vlan_from_cluster_interface(cluster, interface)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_jsenums.py0000644000000000000000000000333513056115004023215 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.utils.jsenums`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from inspect import getsourcefile from maasserver.utils.jsenums import ( dump, footer, get_enums, header, serialize_enum, ) from maastesting.testcase import MAASTestCase from provisioningserver.utils.enum import map_enum class ENUM: ALICE = 1 BOB = 2 class TestFunctions(MAASTestCase): def test_serialize_enum(self): # The name is used correctly, the keys are sorted, and everything is # indented correctly. self.assertEqual( 'module.ENUM = {\n' ' "ALICE": 1,\n' ' "BOB": 2\n' '};\n', serialize_enum(ENUM)) def test_get_enums(self): # This file contains a single enum, named "ENUM". enums = get_enums(getsourcefile(TestFunctions)) self.assertEqual(["ENUM"], [enum.__name__ for enum in enums]) [enum] = enums # Because the module has been executed in a different namespace, the # enum we've found is not the same object as the one in the current # global namespace. self.assertIsNot(ENUM, enum) # It does, however, have the same values. self.assertEqual(map_enum(ENUM), map_enum(enum)) def test_dump(self): self.assertEqual(header + "\n" + footer, dump([])) self.assertEqual( header + "\n" + serialize_enum(ENUM) + "\n" + footer, dump([getsourcefile(TestFunctions)])) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_mac.py0000644000000000000000000000147513056115004022274 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test MAC utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.utils.mac import get_vendor_for_mac from maastesting.testcase import MAASTestCase class TestGetVendorForMac(MAASTestCase): def test_get_vendor_for_mac_returns_vendor(self): self.assertEqual( "ELITEGROUP COMPUTER SYSTEMS CO., LTD.", get_vendor_for_mac('ec:a8:6b:fd:ae:3f')) def test_get_vendor_for_mac_returns_error_message_if_unknown_mac(self): self.assertEqual( "Unknown Vendor", get_vendor_for_mac('aa:bb:cc:dd:ee:ff')) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_orm.py0000644000000000000000000010414413056115004022326 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test ORM utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import ( islice, repeat, ) from random import randint import time from django.core.exceptions import MultipleObjectsReturned from django.db import ( connection, connections, transaction, ) from django.db.backends import BaseDatabaseWrapper from django.db.transaction import TransactionManagementError from django.db.utils import OperationalError from maasserver.fields import MAC from maasserver.models import Node from maasserver.testing.testcase import ( MAASServerTestCase, SerializationFailureTestCase, ) from maasserver.utils import orm from maasserver.utils.orm import ( disable_all_database_connections, DisabledDatabaseConnection, enable_all_database_connections, ExclusivelyConnected, FullyConnected, get_first, get_model_object_name, get_one, get_psycopg2_exception, get_psycopg2_serialization_exception, in_transaction, is_serialization_failure, macs_contain, macs_do_not_contain, make_serialization_failure, post_commit, post_commit_do, post_commit_hooks, psql_array, request_transaction_retry, retry_on_serialization_failure, savepoint, TotallyDisconnected, validate_in_transaction, ) from maastesting.djangotestcase import DjangoTransactionTestCase from maastesting.doubles import StubContext from maastesting.factory import factory from maastesting.matchers import ( HasLength, IsFiredDeferred, LessThanOrEqual, MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, call, Mock, sentinel, ) from provisioningserver.utils.twisted import ( callOut, DeferredValue, ) import psycopg2 from psycopg2.errorcodes import SERIALIZATION_FAILURE from testtools import ExpectedException from testtools.deferredruntest import extract_result from testtools.matchers import ( AllMatch, Equals, Is, IsInstance, MatchesPredicate, Not, ) from twisted.internet.defer import ( CancelledError, Deferred, passthru, ) from twisted.python.failure import Failure def setUp(): # Prevent real sleeps. orm.sleep = lambda _: None def tearDown(): # Re-enable real sleeps. orm.sleep = time.sleep class FakeModel: class MultipleObjectsReturned(MultipleObjectsReturned): pass def __init__(self, name): self.name == name def __repr__(self): return self.name class FakeQueryResult: """Something that looks, to `get_one`, close enough to a Django model.""" def __init__(self, model, items): self.model = model self.items = items def __iter__(self): return self.items.__iter__() def __repr__(self): return "" % self.items class TestGetOne(MAASTestCase): def test_get_one_returns_None_for_empty_list(self): self.assertIsNone(get_one([])) def test_get_one_returns_single_list_item(self): item = factory.make_string() self.assertEqual(item, get_one([item])) def test_get_one_returns_None_from_any_empty_sequence(self): self.assertIsNone(get_one("no item" for counter in range(0))) def test_get_one_returns_item_from_any_sequence_of_length_one(self): item = factory.make_string() self.assertEqual(item, get_one(item for counter in range(1))) def test_get_one_does_not_trigger_database_counting(self): # Avoid typical performance pitfall of querying objects *and* # the number of objects. 
item = factory.make_string() sequence = FakeQueryResult(type(item), [item]) sequence.__len__ = Mock(side_effect=Exception("len() was called")) self.assertEqual(item, get_one(sequence)) def test_get_one_does_not_iterate_long_sequence_indefinitely(self): # Avoid typical performance pitfall of retrieving all objects. # In rare failure cases, there may be large numbers. Fail fast. class InfinityException(Exception): """Iteration went on indefinitely.""" def infinite_sequence(): """Generator: count to infinity (more or less), then fail.""" for counter in range(3): yield counter raise InfinityException() # Raises MultipleObjectsReturned as spec'ed. It does not # iterate to infinity first! self.assertRaises( MultipleObjectsReturned, get_one, infinite_sequence()) def test_get_one_raises_model_error_if_query_result_is_too_big(self): self.assertRaises( FakeModel.MultipleObjectsReturned, get_one, FakeQueryResult(FakeModel, range(2))) def test_get_one_raises_generic_error_if_other_sequence_is_too_big(self): self.assertRaises(MultipleObjectsReturned, get_one, range(2)) class TestGetFirst(MAASTestCase): def test_get_first_returns_None_for_empty_list(self): self.assertIsNone(get_first([])) def test_get_first_returns_first_item(self): items = [factory.make_string() for counter in range(10)] self.assertEqual(items[0], get_first(items)) def test_get_first_accepts_any_sequence(self): item = factory.make_string() self.assertEqual(item, get_first(repeat(item))) def test_get_first_does_not_retrieve_beyond_first_item(self): class SecondItemRetrieved(Exception): """Second item as retrieved. It shouldn't be.""" def multiple_items(): yield "Item 1" raise SecondItemRetrieved() self.assertEqual("Item 1", get_first(multiple_items())) class TestGetPredicateUtilities(MAASTestCase): def test_macs_contain_returns_predicate(self): macs = ['11:22:33:44:55:66', 'aa:bb:cc:dd:ee:ff'] where, params = macs_contain('key', macs) self.assertEqual( (where, params), ('key @> ARRAY[%s, %s]::macaddr[]', macs)) def test_macs_contain_returns_predicate_using_MACs(self): macs = [MAC('11:22:33:44:55:66')] where, params = macs_contain('key', macs) self.assertEqual( (where, params), ('key @> ARRAY[%s]::macaddr[]', macs)) def test_macs_do_not_contain_returns_predicate(self): macs = ['11:22:33:44:55:66', 'aa:bb:cc:dd:ee:ff'] where, params = macs_do_not_contain('key', macs) self.assertEqual( (where, params), ( ( '((key IS NULL) OR NOT ' '(key @> ARRAY[%s]::macaddr[] OR ' 'key @> ARRAY[%s]::macaddr[]))' ), macs, )) class TestSerializationFailure(SerializationFailureTestCase): """Detecting SERIALIZABLE isolation failures.""" def test_serialization_failure_detectable_via_error_cause(self): error = self.assertRaises( OperationalError, self.cause_serialization_failure) self.assertEqual( SERIALIZATION_FAILURE, error.__cause__.pgcode) class TestGetPsycopg2Exception(MAASTestCase): """Tests for `get_psycopg2_exception`.""" def test__returns_psycopg2_error(self): exception = psycopg2.Error() self.assertIs(exception, get_psycopg2_exception(exception)) def test__returns_None_for_other_error(self): exception = factory.make_exception() self.assertIsNone(get_psycopg2_serialization_exception(exception)) def test__returns_psycopg2_error_root_cause(self): exception = Exception() exception.__cause__ = orm.SerializationFailure() self.assertIs(exception.__cause__, get_psycopg2_exception(exception)) class TestGetPsycopg2SerializationException(MAASTestCase): """Tests for `get_psycopg2_serialization_exception`.""" def test__returns_None_for_plain_psycopg2_error(self): 
exception = psycopg2.Error() self.assertIsNone(get_psycopg2_serialization_exception(exception)) def test__returns_None_for_other_error(self): exception = factory.make_exception() self.assertIsNone(get_psycopg2_serialization_exception(exception)) def test__returns_psycopg2_error_root_cause(self): exception = Exception() exception.__cause__ = orm.SerializationFailure() self.assertIs( exception.__cause__, get_psycopg2_serialization_exception(exception)) class TestIsSerializationFailure(SerializationFailureTestCase): """Tests relating to MAAS's use of SERIALIZABLE isolation.""" def test_detects_operational_error_with_matching_cause(self): error = self.assertRaises( OperationalError, self.cause_serialization_failure) self.assertTrue(is_serialization_failure(error)) def test_rejects_operational_error_without_matching_cause(self): error = OperationalError() cause = self.patch(error, "__cause__") cause.pgcode = factory.make_name("pgcode") self.assertFalse(is_serialization_failure(error)) def test_rejects_operational_error_with_unrelated_cause(self): error = OperationalError() error.__cause__ = object() self.assertFalse(is_serialization_failure(error)) def test_rejects_operational_error_without_cause(self): error = OperationalError() self.assertFalse(is_serialization_failure(error)) def test_rejects_non_operational_error_with_matching_cause(self): error = factory.make_exception() cause = self.patch(error, "__cause__") cause.pgcode = SERIALIZATION_FAILURE self.assertFalse(is_serialization_failure(error)) class TestRetryOnSerializationFailure(SerializationFailureTestCase): def make_mock_function(self): function_name = factory.make_name("function") function = Mock(__name__=function_name.encode("ascii")) return function def test_retries_on_serialization_failure(self): function = self.make_mock_function() function.side_effect = self.cause_serialization_failure function_wrapped = retry_on_serialization_failure(function) self.assertRaises(OperationalError, function_wrapped) expected_calls = [call()] * 10 self.assertThat(function, MockCallsMatch(*expected_calls)) def test_retries_on_serialization_failure_until_successful(self): serialization_error = self.assertRaises( OperationalError, self.cause_serialization_failure) function = self.make_mock_function() function.side_effect = [serialization_error, sentinel.result] function_wrapped = retry_on_serialization_failure(function) self.assertEqual(sentinel.result, function_wrapped()) self.assertThat(function, MockCallsMatch(call(), call())) def test_passes_args_to_wrapped_function(self): function = lambda a, b: (a, b) function_wrapped = retry_on_serialization_failure(function) self.assertEqual( (sentinel.a, sentinel.b), function_wrapped(sentinel.a, b=sentinel.b)) def test_calls_reset_between_retries(self): reset = Mock() function = self.make_mock_function() function.side_effect = self.cause_serialization_failure function_wrapped = retry_on_serialization_failure(function, reset) self.assertRaises(OperationalError, function_wrapped) expected_function_calls = [call()] * 10 self.expectThat(function, MockCallsMatch(*expected_function_calls)) # There's one fewer reset than calls to the function. 
expected_reset_calls = expected_function_calls[:-1] self.expectThat(reset, MockCallsMatch(*expected_reset_calls)) def test_does_not_call_reset_before_first_attempt(self): reset = Mock() function = self.make_mock_function() function.return_value = sentinel.all_is_okay function_wrapped = retry_on_serialization_failure(function, reset) function_wrapped() self.assertThat(reset, MockNotCalled()) class TestMakeSerializationFailure(MAASTestCase): """Tests for `make_serialization_failure`.""" def test__makes_a_serialization_failure(self): exception = make_serialization_failure() self.assertThat(exception, MatchesPredicate( is_serialization_failure, "%r is not a serialization failure.")) class TestRequestTransactionRetry(MAASTestCase): """Tests for `request_transaction_retry`.""" def test__raises_a_serialization_failure(self): exception = self.assertRaises( OperationalError, request_transaction_retry) self.assertThat(exception, MatchesPredicate( is_serialization_failure, "%r is not a serialization failure.")) class TestGenRetryIntervals(MAASTestCase): """Tests for `orm.gen_retry_intervals`.""" def remove_jitter(self): # Remove the effect of randomness. full_jitter = self.patch(orm, "full_jitter") full_jitter.side_effect = lambda thing: thing def test__unjittered_series_begins(self): self.remove_jitter() # Get the first 10 intervals, without jitter. intervals = islice(orm.gen_retry_intervals(), 10) # Convert from seconds to milliseconds, and round. intervals = [int(interval * 1000) for interval in intervals] # They start off small, but grow rapidly to the maximum. self.assertThat(intervals, Equals( [25, 62, 156, 390, 976, 2441, 6103, 10000, 10000, 10000])) def test__pulls_from_exponential_series_until_maximum_is_reached(self): self.remove_jitter() # repeat() is the tail-end of the interval series. repeat = self.patch(orm, "repeat") repeat.return_value = [sentinel.end] maximum = randint(10, 100) intervals = list(orm.gen_retry_intervals(maximum=maximum)) self.assertThat(intervals[-1], Is(sentinel.end)) self.assertThat(intervals[:-1], AllMatch(LessThanOrEqual(maximum))) class TestPostCommitHooks(MAASTestCase): """Tests for the `post_commit_hooks` singleton.""" def test__crashes_on_enter_if_hooks_exist(self): hook = Deferred() post_commit_hooks.add(hook) with ExpectedException(TransactionManagementError): with post_commit_hooks: pass # The hook has been cancelled, but CancelledError is suppressed in # hooks, so we don't see it here. self.assertThat(hook, IsFiredDeferred()) # The hook list is cleared so that the exception is raised only once. self.assertThat(post_commit_hooks.hooks, HasLength(0)) def test__fires_hooks_on_exit_if_no_exception(self): self.addCleanup(post_commit_hooks.reset) hooks_fire = self.patch_autospec(post_commit_hooks, "fire") with post_commit_hooks: post_commit_hooks.add(Deferred()) # Hooks are fired. self.assertThat(hooks_fire, MockCalledOnceWith()) def test__resets_hooks_on_exit_if_exception(self): self.addCleanup(post_commit_hooks.reset) hooks_fire = self.patch_autospec(post_commit_hooks, "fire") hooks_reset = self.patch_autospec(post_commit_hooks, "reset") exception_type = factory.make_exception_type() with ExpectedException(exception_type): with post_commit_hooks: post_commit_hooks.add(Deferred()) raise exception_type() # No hooks were fired; they were reset immediately. 
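        # (In normal operation reset() cancels any pending Deferred hooks --
        # see TestPostCommit below -- so work registered inside the failed
        # block never runs as if the transaction had committed.)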
self.assertThat(hooks_fire, MockNotCalled()) self.assertThat(hooks_reset, MockCalledOnceWith()) class TestPostCommit(MAASTestCase): """Tests for the `post_commit` function.""" def setUp(self): super(TestPostCommit, self).setUp() self.addCleanup(post_commit_hooks.reset) def test__adds_Deferred_as_hook(self): hook = Deferred() hook_added = post_commit(hook) self.assertEqual([hook], list(post_commit_hooks.hooks)) self.assertThat(hook_added, Is(hook)) def test__adds_new_Deferred_as_hook_when_called_without_args(self): hook_added = post_commit() self.assertEqual([hook_added], list(post_commit_hooks.hooks)) self.assertThat(hook_added, IsInstance(Deferred)) def test__adds_callable_as_hook(self): hook = lambda arg: None hook_added = post_commit(hook) self.assertThat(post_commit_hooks.hooks, HasLength(1)) self.assertThat(hook_added, IsInstance(Deferred)) def test__fire_calls_back_with_None_to_Deferred_hook(self): hook = Deferred() spy = DeferredValue() spy.observe(hook) post_commit(hook) post_commit_hooks.fire() self.assertIsNone(extract_result(spy.get())) def test__fire_calls_back_with_None_to_new_Deferred_hook(self): hook_added = post_commit() spy = DeferredValue() spy.observe(hook_added) post_commit_hooks.fire() self.assertIsNone(extract_result(spy.get())) def test__reset_cancels_Deferred_hook(self): hook = Deferred() spy = DeferredValue() spy.observe(hook) post_commit(hook) post_commit_hooks.reset() self.assertRaises(CancelledError, extract_result, spy.get()) def test__reset_cancels_new_Deferred_hook(self): hook_added = post_commit() spy = DeferredValue() spy.observe(hook_added) post_commit_hooks.reset() self.assertRaises(CancelledError, extract_result, spy.get()) def test__fire_passes_None_to_callable_hook(self): hook = Mock() post_commit(hook) post_commit_hooks.fire() self.assertThat(hook, MockCalledOnceWith(None)) def test__reset_passes_Failure_to_callable_hook(self): hook = Mock() post_commit(hook) post_commit_hooks.reset() self.assertThat(hook, MockCalledOnceWith(ANY)) arg = hook.call_args[0][0] self.assertThat(arg, IsInstance(Failure)) self.assertThat(arg.value, IsInstance(CancelledError)) def test__rejects_other_hook_types(self): self.assertRaises(AssertionError, post_commit, sentinel.hook) class TestPostCommitDo(MAASTestCase): """Tests for the `post_commit_do` function.""" def setUp(self): super(TestPostCommitDo, self).setUp() self.addCleanup(post_commit_hooks.reset) def test__adds_callable_as_hook(self): hook = lambda arg: None post_commit_do(hook) self.assertThat(post_commit_hooks.hooks, HasLength(1)) def test__returns_actual_hook(self): hook = Mock() hook_added = post_commit_do(hook, sentinel.foo, bar=sentinel.bar) self.assertThat(hook_added, IsInstance(Deferred)) callback, errback = hook_added.callbacks.pop(0) # Errors are passed through; they're not passed to our hook. self.expectThat(errback, Equals((passthru, None, None))) # Our hook is set to be called via callOut. 
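        # Twisted records each link in a Deferred's chain as a (function,
        # args, kwargs) triple, so this hook was presumably assembled with
        # something like:
        #   d.addCallbacks(callOut, passthru,
        #                  callbackArgs=(hook, sentinel.foo),
        #                  callbackKeywords={"bar": sentinel.bar})
        # meaning that firing it runs
        # callOut(result, hook, sentinel.foo, bar=sentinel.bar).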
self.expectThat(callback, Equals( (callOut, (hook, sentinel.foo), {"bar": sentinel.bar}))) def test__fire_passes_only_args_to_hook(self): hook = Mock() post_commit_do(hook, sentinel.arg, foo=sentinel.bar) post_commit_hooks.fire() self.assertThat( hook, MockCalledOnceWith(sentinel.arg, foo=sentinel.bar)) def test__reset_does_not_call_hook(self): hook = Mock() post_commit_do(hook) post_commit_hooks.reset() self.assertThat(hook, MockNotCalled()) def test__rejects_other_hook_types(self): self.assertRaises(AssertionError, post_commit_do, sentinel.hook) class TestConnected(DjangoTransactionTestCase): """Tests for the `orm.connected` context manager.""" def test__ensures_connection(self): with orm.connected(): self.assertThat(connection.connection, Not(Is(None))) def test__opens_and_closes_connection_when_no_preexisting_connection(self): connection.close() self.assertThat(connection.connection, Is(None)) with orm.connected(): self.assertThat(connection.connection, Not(Is(None))) self.assertThat(connection.connection, Is(None)) def test__leaves_preexisting_connections_alone(self): connection.ensure_connection() preexisting_connection = connection.connection self.assertThat(connection.connection, Not(Is(None))) with orm.connected(): self.assertThat(connection.connection, Is(preexisting_connection)) self.assertThat(connection.connection, Is(preexisting_connection)) class TestWithConnection(DjangoTransactionTestCase): """Tests for the `orm.with_connection` decorator.""" def test__exposes_original_function(self): function = Mock(__name__=self.getUniqueString()) self.assertThat(orm.with_connection(function).func, Is(function)) def test__ensures_function_is_called_within_connected_context(self): context = self.patch(orm, "connected").return_value = StubContext() @orm.with_connection def function(arg, kwarg): self.assertThat(arg, Is(sentinel.arg)) self.assertThat(kwarg, Is(sentinel.kwarg)) self.assertTrue(context.active) return sentinel.result self.assertTrue(context.unused) self.assertThat( function(sentinel.arg, kwarg=sentinel.kwarg), Is(sentinel.result)) self.assertTrue(context.used) class TestTransactional(DjangoTransactionTestCase): def test__exposes_original_function(self): function = Mock(__name__=self.getUniqueString()) self.assertThat(orm.transactional(function).func, Is(function)) def test__calls_function_within_transaction_then_closes_connections(self): # Close the database connection to begin with. connection.close() # No transaction has been entered (what Django calls an atomic block), # and the connection has not yet been established. self.assertFalse(connection.in_atomic_block) self.expectThat(connection.connection, Is(None)) def check_inner(*args, **kwargs): # In here, the transaction (`atomic`) has been started but is not # over, and the connection to the database is open. self.assertTrue(connection.in_atomic_block) self.expectThat(connection.connection, Not(Is(None))) function = Mock() function.__name__ = self.getUniqueString() function.side_effect = check_inner # Call `function` via the `transactional` decorator. decorated_function = orm.transactional(function) decorated_function(sentinel.arg, kwarg=sentinel.kwarg) # `function` was called -- and therefore `check_inner` too -- # and the arguments passed correctly. self.assertThat(function, MockCalledOnceWith( sentinel.arg, kwarg=sentinel.kwarg)) # After the decorated function has returned the transaction has # been exited, and the connection has been closed. 
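        # In application code the decorator reads like this (an illustrative
        # sketch only -- `create_thing` and `Thing` are made-up stand-ins,
        # not part of MAAS):
        #
        #   @orm.transactional
        #   def create_thing(name):
        #       return Thing.objects.create(name=name)
        #
        # Each call runs inside its own atomic block, and a connection the
        # decorator opened itself is closed again on the way out.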
self.assertFalse(connection.in_atomic_block) self.expectThat(connection.connection, Is(None)) def test__leaves_preexisting_connections_open(self): # Ensure there's a database connection to begin with. connection.ensure_connection() # No transaction has been entered (what Django calls an atomic block), # but the connection has been established. self.assertFalse(connection.in_atomic_block) self.expectThat(connection.connection, Not(Is(None))) # Call a function via the `transactional` decorator. decorated_function = orm.transactional(lambda: None) decorated_function() # After the decorated function has returned the transaction has # been exited, but the preexisting connection remains open. self.assertFalse(connection.in_atomic_block) self.expectThat(connection.connection, Not(Is(None))) def test__closes_connections_only_when_leaving_atomic_block(self): # Close the database connection to begin with. connection.close() self.expectThat(connection.connection, Is(None)) @orm.transactional def inner(): # We're inside a `transactional` context here. self.expectThat(connection.connection, Not(Is(None))) return "inner" @orm.transactional def outer(): # We're inside a `transactional` context here too. self.expectThat(connection.connection, Not(Is(None))) # Call `inner`, thus nesting `transactional` contexts. return "outer > " + inner() self.assertEqual("outer > inner", outer()) # The connection has been closed. self.expectThat(connection.connection, Is(None)) def test__fires_post_commit_hooks_when_done(self): fire = self.patch(orm.post_commit_hooks, "fire") function = lambda: sentinel.something decorated_function = orm.transactional(function) self.assertIs(sentinel.something, decorated_function()) self.assertThat(fire, MockCalledOnceWith()) def test__crashes_if_hooks_exist_before_entering_transaction(self): post_commit(lambda failure: None) decorated_function = orm.transactional(lambda: None) self.assertRaises(TransactionManagementError, decorated_function) # The hook list is cleared so that the exception is raised only once. self.assertThat(post_commit_hooks.hooks, HasLength(0)) def test__creates_post_commit_hook_savepoint_on_inner_block(self): hooks = post_commit_hooks.hooks @orm.transactional def inner(): # We're inside a savepoint context here. self.assertThat(post_commit_hooks.hooks, Not(Is(hooks))) return "inner" @orm.transactional def outer(): # We're inside a transaction here, but not yet a savepoint. self.assertThat(post_commit_hooks.hooks, Is(hooks)) return "outer > " + inner() self.assertEqual("outer > inner", outer()) class TestTransactionalRetries(SerializationFailureTestCase): def test__retries_upon_serialization_failures(self): function = Mock() function.__name__ = self.getUniqueString() function.side_effect = self.cause_serialization_failure decorated_function = orm.transactional(function) self.assertRaises(OperationalError, decorated_function) expected_calls = [call()] * 10 self.assertThat(function, MockCallsMatch(*expected_calls)) def test__resets_post_commit_hooks_when_retrying(self): reset = self.patch(orm.post_commit_hooks, "reset") function = Mock() function.__name__ = self.getUniqueString() function.side_effect = self.cause_serialization_failure decorated_function = orm.transactional(function) self.assertRaises(OperationalError, decorated_function) # reset() is called 9 times by retry_on_serialization_failure() then # once more by transactional(). 
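        # Hence 9 + 1 == 10 reset() calls are expected in total.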
expected_reset_calls = [call()] * 10 self.assertThat(reset, MockCallsMatch(*expected_reset_calls)) class TestSavepoint(DjangoTransactionTestCase): """Tests for `savepoint`.""" def test__crashes_if_not_already_within_transaction(self): with ExpectedException(TransactionManagementError): with savepoint(): pass def test__creates_savepoint_for_transaction_and_post_commit_hooks(self): hooks = post_commit_hooks.hooks with transaction.atomic(): self.expectThat(connection.savepoint_ids, HasLength(0)) with savepoint(): # We're one savepoint in. self.assertThat(connection.savepoint_ids, HasLength(1)) # Post-commit hooks have been saved. self.assertThat(post_commit_hooks.hooks, Not(Is(hooks))) self.expectThat(connection.savepoint_ids, HasLength(0)) class TestInTransaction(DjangoTransactionTestCase): """Tests for `in_transaction`.""" def test__true_within_atomic_block(self): with transaction.atomic(): self.assertTrue(in_transaction()) def test__true_when_legacy_transaction_is_active(self): transaction.enter_transaction_management() try: self.assertTrue(in_transaction()) finally: transaction.leave_transaction_management() def test__false_when_no_transaction_is_active(self): self.assertFalse(in_transaction()) class TestValidateInTransaction(DjangoTransactionTestCase): """Tests for `validate_in_transaction`.""" def test__does_nothing_within_atomic_block(self): with transaction.atomic(): validate_in_transaction(connection) def test__does_nothing_when_legacy_transaction_is_active(self): transaction.enter_transaction_management() try: validate_in_transaction(connection) finally: transaction.leave_transaction_management() def test__explodes_when_no_transaction_is_active(self): self.assertRaises( TransactionManagementError, validate_in_transaction, connection) class TestPsqlArray(MAASTestCase): def test__returns_empty_array(self): self.assertEqual(("ARRAY[]", []), psql_array([])) def test__returns_params_in_array(self): self.assertEqual( "ARRAY[%s,%s,%s]", psql_array(['a', 'a', 'a'])[0]) def test__returns_params_in_tuple(self): params = [factory.make_name('param') for _ in range(3)] self.assertEqual( params, psql_array(params)[1]) def test__returns_cast_to_type(self): self.assertEqual( ("ARRAY[]::integer[]", []), psql_array([], sql_type="integer")) class TestDisablingDatabaseConnections(DjangoTransactionTestCase): def assertConnectionsEnabled(self): for alias in connections: self.assertThat( connections[alias], IsInstance(BaseDatabaseWrapper)) def assertConnectionsDisabled(self): for alias in connections: self.assertEqual( DisabledDatabaseConnection, type(connections[alias])) def test_disable_and_enable_connections(self): self.addCleanup(enable_all_database_connections) # By default connections are enabled. self.assertConnectionsEnabled() # Disable all connections. disable_all_database_connections() self.assertConnectionsDisabled() # Back to the start again. 
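        # (After re-enabling, every alias is a real BaseDatabaseWrapper
        # again, as the assertion below verifies.)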
enable_all_database_connections() self.assertConnectionsEnabled() def test_disable_can_be_called_multiple_times(self): self.addCleanup(enable_all_database_connections) disable_all_database_connections() self.assertConnectionsDisabled() disable_all_database_connections() self.assertConnectionsDisabled() def test_DisabledDatabaseConnection(self): connection = DisabledDatabaseConnection() self.assertRaises(RuntimeError, getattr, connection, "connect") self.assertRaises(RuntimeError, getattr, connection, "__call__") self.assertRaises(RuntimeError, setattr, connection, "foo", "bar") self.assertRaises(RuntimeError, delattr, connection, "baz") class TestTotallyDisconnected(DjangoTransactionTestCase): """Tests for `TotallyDisconnected`.""" def test__enter_closes_open_connections_and_disables_new_ones(self): self.addCleanup(connection.close) connection.ensure_connection() with TotallyDisconnected(): self.assertRaises(RuntimeError, getattr, connection, "connect") connection.ensure_connection() def test__exit_removes_block_on_database_connections(self): with TotallyDisconnected(): self.assertRaises(RuntimeError, getattr, connection, "connect") connection.ensure_connection() class TestExclusivelyConnected(DjangoTransactionTestCase): """Tests for `ExclusivelyConnected`.""" def test__enter_blows_up_if_there_are_open_connections(self): self.addCleanup(connection.close) connection.ensure_connection() context = ExclusivelyConnected() self.assertRaises(AssertionError, context.__enter__) def test__enter_does_nothing_if_there_are_no_open_connections(self): connection.close() context = ExclusivelyConnected() context.__enter__() def test__exit_closes_open_connections(self): self.addCleanup(connection.close) connection.ensure_connection() self.assertThat(connection.connection, Not(Is(None))) context = ExclusivelyConnected() context.__exit__() self.assertThat(connection.connection, Is(None)) class TestFullyConnected(DjangoTransactionTestCase): """Tests for `FullyConnected`.""" def assertOpen(self, alias): self.assertThat(connections[alias].connection, Not(Is(None))) def assertClosed(self, alias): self.assertThat(connections[alias].connection, Is(None)) def test__opens_and_closes_connections(self): for alias in connections: connections[alias].close() for alias in connections: self.assertClosed(alias) with FullyConnected(): for alias in connections: self.assertOpen(alias) for alias in connections: self.assertClosed(alias) def test__closes_connections_even_if_open_on_entry(self): for alias in connections: connections[alias].ensure_connection() for alias in connections: self.assertOpen(alias) with FullyConnected(): for alias in connections: self.assertOpen(alias) for alias in connections: self.assertClosed(alias) class TestGetModelObjectName(MAASServerTestCase): def test__gets_model_object_name_from_manager(self): self.assertThat(get_model_object_name(Node.objects), Equals("Node")) def test__gets_model_object_name_from_queryset(self): self.assertThat( get_model_object_name(Node.objects.all()), Equals("Node")) def test__gets_model_object_name_returns_none_if_not_found(self): self.assertThat( get_model_object_name("crazytalk"), Is(None)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_osystems.py0000644000000000000000000004515413056115004023424 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.utils.osystems`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from operator import itemgetter import random from distro_info import UbuntuDistroInfo from django.core.exceptions import ValidationError from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.models import ( BootResource, BootSourceCache, Config, ) from maasserver.testing.factory import factory from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import osystems as osystems_module from maasserver.utils.orm import post_commit_hooks from maasserver.utils.osystems import ( get_distro_series_initial, get_release_requires_key, list_all_releases_requiring_keys, list_all_usable_osystems, list_all_usable_releases, list_commissioning_choices, list_osystem_choices, list_release_choices, make_hwe_kernel_ui_text, release_a_newer_than_b, validate_hwe_kernel, validate_osystem_and_distro_series, ) from maastesting.matchers import MockAnyCall class TestOsystems(MAASServerTestCase): def patch_gen_all_known_operating_systems(self, osystems): self.patch( osystems_module, 'gen_all_known_operating_systems').return_value = osystems def test_list_all_usable_osystems(self): osystems = [make_rpc_osystem() for _ in range(3)] self.patch_gen_all_known_operating_systems(osystems) self.assertItemsEqual(osystems, list_all_usable_osystems()) def test_list_all_usable_osystems_sorts_title(self): osystems = [make_rpc_osystem() for _ in range(3)] self.patch_gen_all_known_operating_systems(osystems) self.assertEqual( sorted(osystems, key=itemgetter('title')), list_all_usable_osystems()) def test_list_all_usable_osystems_removes_os_without_releases(self): osystems = [make_rpc_osystem() for _ in range(3)] without_releases = make_rpc_osystem(releases=[]) self.patch_gen_all_known_operating_systems( osystems + [without_releases]) self.assertItemsEqual(osystems, list_all_usable_osystems()) def test_list_osystem_choices_includes_default(self): self.assertEqual( [('', 'Default OS')], list_osystem_choices([], include_default=True)) def test_list_osystem_choices_doesnt_include_default(self): self.assertEqual([], list_osystem_choices([], include_default=False)) def test_list_osystem_choices_uses_name_and_title(self): osystem = make_rpc_osystem() self.assertEqual( [(osystem['name'], osystem['title'])], list_osystem_choices([osystem], include_default=False)) class TestReleases(MAASServerTestCase): def make_release_choice(self, osystem, release, include_asterisk=False): key = '%s/%s' % (osystem['name'], release['name']) if include_asterisk: return ('%s*' % key, release['title']) return (key, release['title']) def test_list_all_usable_releases(self): releases = [make_rpc_release() for _ in range(3)] osystem = make_rpc_osystem(releases=releases) self.assertItemsEqual( releases, list_all_usable_releases([osystem])[osystem['name']]) def test_list_all_usable_releases_sorts(self): releases = [make_rpc_release() for _ in range(3)] osystem = make_rpc_osystem(releases=releases) releases = sorted(releases, key=itemgetter('title')) self.assertEqual( releases, list_all_usable_releases([osystem])[osystem['name']]) def test_list_all_releases_requiring_keys(self): releases = [ make_rpc_release(requires_license_key=True) for _ in range(3)] release_without_license_key = make_rpc_release( requires_license_key=False) osystem = make_rpc_osystem( 
releases=releases + [release_without_license_key]) self.assertItemsEqual( releases, list_all_releases_requiring_keys([osystem])[osystem['name']]) def test_list_all_releases_requiring_keys_sorts(self): releases = [ make_rpc_release(requires_license_key=True) for _ in range(3)] release_without_license_key = make_rpc_release( requires_license_key=False) osystem = make_rpc_osystem( releases=releases + [release_without_license_key]) releases = sorted(releases, key=itemgetter('title')) self.assertEqual( releases, list_all_releases_requiring_keys([osystem])[osystem['name']]) def test_get_release_requires_key_returns_asterisk_when_required(self): release = make_rpc_release(requires_license_key=True) self.assertEqual('*', get_release_requires_key(release)) def test_get_release_requires_key_returns_empty_when_not_required(self): release = make_rpc_release(requires_license_key=False) self.assertEqual('', get_release_requires_key(release)) def test_list_release_choices_includes_default(self): self.assertEqual( [('', 'Default OS Release')], list_release_choices({}, include_default=True)) def test_list_release_choices_doesnt_include_default(self): self.assertEqual([], list_release_choices({}, include_default=False)) def test_list_release_choices(self): releases = [make_rpc_release() for _ in range(3)] osystem = make_rpc_osystem(releases=releases) choices = [ self.make_release_choice(osystem, release) for release in releases ] self.assertItemsEqual( choices, list_release_choices( list_all_usable_releases([osystem]), include_default=False)) def test_list_release_choices_sorts(self): releases = [make_rpc_release() for _ in range(3)] osystem = make_rpc_osystem(releases=releases) choices = [ self.make_release_choice(osystem, release) for release in sorted(releases, key=itemgetter('title')) ] self.assertEqual( choices, list_release_choices( list_all_usable_releases([osystem]), include_default=False)) def test_list_release_choices_includes_requires_key_asterisk(self): releases = [ make_rpc_release(requires_license_key=True) for _ in range(3)] osystem = make_rpc_osystem(releases=releases) choices = [ self.make_release_choice(osystem, release, include_asterisk=True) for release in releases ] self.assertItemsEqual( choices, list_release_choices( list_all_usable_releases([osystem]), include_default=False)) def test_get_distro_series_initial(self): releases = [make_rpc_release() for _ in range(3)] osystem = make_rpc_osystem(releases=releases) release = random.choice(releases) node = factory.make_Node( osystem=osystem['name'], distro_series=release['name']) self.assertEqual( '%s/%s' % (osystem['name'], release['name']), get_distro_series_initial( [osystem], node, with_key_required=False)) def test_get_distro_series_initial_without_key_required(self): releases = [ make_rpc_release(requires_license_key=True) for _ in range(3)] osystem = make_rpc_osystem(releases=releases) release = random.choice(releases) node = factory.make_Node( osystem=osystem['name'], distro_series=release['name']) self.assertEqual( '%s/%s' % (osystem['name'], release['name']), get_distro_series_initial( [osystem], node, with_key_required=False)) def test_get_distro_series_initial_with_key_required(self): releases = [ make_rpc_release(requires_license_key=True) for _ in range(3)] osystem = make_rpc_osystem(releases=releases) release = random.choice(releases) node = factory.make_Node( osystem=osystem['name'], distro_series=release['name']) self.assertEqual( '%s/%s*' % (osystem['name'], release['name']), get_distro_series_initial( [osystem], node, 
                with_key_required=True))

    def test_get_distro_series_initial_works_around_conflicting_os(self):
        # Test for bug 1456892.
        releases = [
            make_rpc_release(requires_license_key=True) for _ in range(3)]
        osystem = make_rpc_osystem(releases=releases)
        release = random.choice(releases)
        node = factory.make_Node(
            osystem=osystem['name'], distro_series=release['name'])
        self.assertEqual(
            '%s/%s' % (osystem['name'], release['name']),
            get_distro_series_initial(
                [], node, with_key_required=True))

    def test_list_commissioning_choices_returns_empty_list_if_not_ubuntu(self):
        osystem = make_rpc_osystem()
        self.assertEqual([], list_commissioning_choices([osystem]))

    def test_list_commissioning_choices_returns_commissioning_releases(self):
        comm_releases = [
            make_rpc_release(can_commission=True) for _ in range(3)]
        no_comm_release = make_rpc_release()
        osystem = make_rpc_osystem(
            'ubuntu', releases=comm_releases + [no_comm_release])
        choices = [
            (release['name'], release['title'])
            for release in comm_releases
        ]
        self.assertItemsEqual(choices, list_commissioning_choices([osystem]))

    def test_list_commissioning_choices_returns_sorted(self):
        comm_releases = [
            make_rpc_release(can_commission=True) for _ in range(3)]
        osystem = make_rpc_osystem(
            'ubuntu', releases=comm_releases)
        comm_releases = sorted(
            comm_releases, key=itemgetter('title'))
        choices = [
            (release['name'], release['title'])
            for release in comm_releases
        ]
        self.assertEqual(choices, list_commissioning_choices([osystem]))

    def test_make_hwe_kernel_ui_text_finds_release_from_bootsourcecache(self):
        release = factory.pick_ubuntu_release()
        kernel = 'hwe-' + release[0]
        # Stub out the post-commit tasks, otherwise the test fails due to
        # unrun post-commit tasks at the end of the test.
        self.patch(BootSourceCache, "post_commit_do")
        # Force-run the post-commit tasks as we make new boot sources.
        with post_commit_hooks:
            factory.make_BootSourceCache(
                os="ubuntu/%s" % release, subarch=kernel, release=release)
        self.assertEqual(
            '%s (%s)' % (release, kernel),
            make_hwe_kernel_ui_text(kernel))

    def test_make_hwe_kernel_ui_finds_release_from_ubuntudistroinfo(self):
        self.assertEqual('trusty (hwe-t)', make_hwe_kernel_ui_text('hwe-t'))

    def test_make_hwe_kernel_ui_returns_kernel_when_none_found(self):
        # This tests that our final fallback returns just the kernel name
        # when the release isn't found in BootSourceCache or
        # UbuntuDistroInfo, so we patch out UbuntuDistroInfo so that
        # nothing is found.
        self.patch(UbuntuDistroInfo, 'all').value = []
        self.assertEqual(
            'hwe-m',
            make_hwe_kernel_ui_text('hwe-m'))


class TestValidateOsystemAndDistroSeries(MAASServerTestCase):

    def test__raises_error_if_osystem_and_distro_series_dont_match(self):
        os = factory.make_name("os")
        release = "%s/%s" % (
            factory.make_name("os"), factory.make_name("release"))
        error = self.assertRaises(
            ValidationError, validate_osystem_and_distro_series, os, release)
        self.assertEquals(
            "%s in distro_series does not match with "
            "operating system %s." % (release, os), error.message)

    def test__raises_error_if_not_supported_osystem(self):
        os = factory.make_name("os")
        release = factory.make_name("release")
        error = self.assertRaises(
            ValidationError, validate_osystem_and_distro_series, os, release)
        self.assertEquals(
            "%s is not a support operating system."
% os, error.message) def test__raises_error_if_not_supported_release(self): osystem = make_usable_osystem(self) release = factory.make_name("release") error = self.assertRaises( ValidationError, validate_osystem_and_distro_series, osystem['name'], release) self.assertEquals( "%s/%s is not a support operating system and release " "combination." % (osystem['name'], release), error.message) def test__returns_osystem_and_release_with_license_key_stripped(self): osystem = make_usable_osystem(self) release = osystem['default_release'] self.assertEquals( (osystem['name'], release), validate_osystem_and_distro_series(osystem['name'], release + '*')) class TestReleaseANewerThanB(MAASServerTestCase): def test_release_a_newer_than_b(self): # Since we wrap around 'p' we want to use 'p' as our starting point alphabet = ([chr(i) for i in xrange(ord('p'), ord('z') + 1)] + [chr(i) for i in xrange(ord('a'), ord('p'))]) previous_true = 0 for i in alphabet: true_count = 0 for j in alphabet: if release_a_newer_than_b('hwe-' + i, j): true_count += 1 previous_true += 1 self.assertEqual(previous_true, true_count) class TestValidateHweKernel(MAASServerTestCase): def test_validate_hwe_kernel_returns_default_kernel(self): self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-u') hwe_kernel = validate_hwe_kernel( None, None, 'amd64/generic', 'ubuntu', 'trusty') self.assertEqual(hwe_kernel, 'hwe-t') def test_validate_hwe_kernel_set_kernel(self): self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-v') hwe_kernel = validate_hwe_kernel( 'hwe-v', None, 'amd64/generic', 'ubuntu', 'trusty') self.assertEqual(hwe_kernel, 'hwe-v') def test_validate_hwe_kernel_fails_with_nongeneric_arch_and_kernel(self): exception_raised = False try: validate_hwe_kernel( 'hwe-v', None, 'armfh/hardbank', 'ubuntu', 'trusty') except ValidationError as e: self.assertEqual( 'Subarchitecture(hardbank) must be generic when setting ' + 'hwe_kernel.', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kernel_fails_with_missing_hwe_kernel(self): exception_raised = False self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-u') try: validate_hwe_kernel( 'hwe-v', None, 'amd64/generic', 'ubuntu', 'trusty') except ValidationError as e: self.assertEqual( 'hwe-v is not available for ubuntu/trusty on amd64/generic.', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kernel_fails_with_old_kernel_and_newer_release(self): exception_raised = False self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-v') try: validate_hwe_kernel( 'hwe-t', None, 'amd64/generic', 'ubuntu', 'vivid') except ValidationError as e: self.assertEqual( 'hwe-t is too old to use on ubuntu/vivid.', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kern_fails_with_old_kern_and_new_min_hwe_kern(self): exception_raised = False self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-v') try: validate_hwe_kernel( 'hwe-t', 'hwe-v', 'amd64/generic', 'ubuntu', 'precise') except ValidationError as e: self.assertEqual( 'hwe_kernel(hwe-t) is older than min_hwe_kernel(hwe-v).', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kernel_fails_with_no_avalible_kernels(self): exception_raised = False self.patch( BootResource.objects, 
'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-v') try: validate_hwe_kernel( 'hwe-t', 'hwe-v', 'amd64/generic', 'ubuntu', 'precise') except ValidationError as e: self.assertEqual( 'hwe_kernel(hwe-t) is older than min_hwe_kernel(hwe-v).', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kern_fails_with_old_release_and_newer_hwe_kern(self): exception_raised = False try: validate_hwe_kernel( None, 'hwe-v', 'amd64/generic', 'ubuntu', 'trusty') except ValidationError as e: self.assertEqual( 'trusty has no kernels availible which meet' + ' min_hwe_kernel(hwe-v).', e.message) exception_raised = True self.assertEqual(True, exception_raised) def test_validate_hwe_kern_always_sets_kern_with_commissionable_os(self): self.patch( BootResource.objects, 'get_usable_hwe_kernels').return_value = ('hwe-t', 'hwe-v') mock_get_config = self.patch(Config.objects, "get_config") mock_get_config.return_value = 'trusty' kernel = validate_hwe_kernel( None, 'hwe-v', '%s/generic' % factory.make_name('arch'), factory.make_name("osystem"), factory.make_name("distro")) self.assertThat( mock_get_config, MockAnyCall('commissioning_osystem')) self.assertThat( mock_get_config, MockAnyCall('commissioning_distro_series')) self.assertEquals('hwe-v', kernel) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_signals.py0000644000000000000000000001514313056115004023171 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for signals helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.tests.models import FieldChangeTestModel from maasserver.utils.signals import connect_to_field_change from maastesting.djangotestcase import TestModelMixin from maastesting.matchers import ( IsCallable, MockCallsMatch, ) from mock import ( call, Mock, ) class ConnectToFieldChangeTest(TestModelMixin, MAASServerTestCase): """Testing for the method `connect_to_field_change`.""" app = 'maasserver.tests' def connect(self, callback, fields, delete=False): connect, disconnect = connect_to_field_change( callback, FieldChangeTestModel, fields, delete=delete) self.addCleanup(disconnect) return connect, disconnect def test_connect_to_field_change_calls_callback(self): callback = Mock() self.connect(callback, ['name1']) old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() obj.name1 = factory.make_string() obj.save() self.assertEqual( [call(obj, (old_name1_value,), deleted=False)], callback.mock_calls) def test_connect_to_field_change_returns_two_functions(self): callback = Mock() connect, disconnect = self.connect(callback, ['name1']) self.assertThat(connect, IsCallable()) self.assertThat(disconnect, IsCallable()) def test_returned_function_connect_and_disconnect(self): callback = Mock() connect, disconnect = self.connect(callback, ['name1']) obj = FieldChangeTestModel() obj.save() obj.name1 = "one" obj.save() expected_one = call(obj, ("",), deleted=False) # The callback has been called once, for name1="one". self.assertThat(callback, MockCallsMatch(expected_one)) # Disconnect and `callback` is not called any more. disconnect() obj.name1 = "two" obj.save() # The callback has still only been called once. 
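        # (The change to "two" was made while disconnected, so it produced
        # neither a snapshot nor a notification.)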
self.assertThat(callback, MockCallsMatch(expected_one)) # Reconnect and `callback` is called again. connect() obj.name1 = "three" obj.save() expected_three = call(obj, ("one",), deleted=False) # The callback has been called twice, once for the change to "one" and # then for the change to "three". The delta is from "one" to "three" # because no snapshots were taken when disconnected. self.assertThat(callback, MockCallsMatch(expected_one, expected_three)) def test_connect_to_field_change_calls_callback_for_each_save(self): callback = Mock() self.connect(callback, ['name1']) old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() obj.name1 = factory.make_string() obj.save() obj.name1 = factory.make_string() obj.save() self.assertEqual(2, callback.call_count) def test_connect_to_field_change_calls_callback_for_each_real_save(self): callback = Mock() self.connect(callback, ['name1']) old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() obj.name1 = factory.make_string() obj.save() obj.save() self.assertEqual(1, callback.call_count) def test_connect_to_field_change_calls_multiple_callbacks(self): callback1 = Mock() self.connect(callback1, ['name1']) callback2 = Mock() self.connect(callback2, ['name1']) old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() obj.name1 = factory.make_string() obj.save() self.assertEqual((1, 1), (callback1.call_count, callback2.call_count)) def test_connect_to_field_change_ignores_changes_to_other_fields(self): callback = Mock() self.connect(callback, ['name1']) obj = FieldChangeTestModel(name2=factory.make_string()) obj.save() obj.name2 = factory.make_string() obj.save() self.assertEqual(0, callback.call_count) def test_connect_to_field_change_ignores_object_creation(self): callback = Mock() self.connect(callback, ['name1']) obj = FieldChangeTestModel(name1=factory.make_string()) obj.save() self.assertEqual(0, callback.call_count) def test_connect_to_field_change_ignores_deletion_by_default(self): obj = FieldChangeTestModel(name2=factory.make_string()) obj.save() callback = Mock() self.connect(callback, ['name1']) obj.delete() self.assertEqual(0, callback.call_count) def test_connect_to_field_change_listens_to_deletion_if_delete_True(self): callback = Mock() self.connect(callback, ['name1'], delete=True) old_name1_value = factory.make_string() obj = FieldChangeTestModel(name1=old_name1_value) obj.save() obj.delete() self.assertEqual( [call(obj, (old_name1_value,), deleted=True)], callback.mock_calls) def test_connect_to_field_change_notices_change_in_any_given_field(self): callback = Mock() self.connect(callback, ['name1', 'name2']) name1 = factory.make_name('name1') old_name2_value = factory.make_name('old') obj = FieldChangeTestModel(name1=name1, name2=old_name2_value) obj.save() obj.name2 = factory.make_name('new') obj.save() self.assertEqual( [call(obj, (name1, old_name2_value), deleted=False)], callback.mock_calls) def test_connect_to_field_change_only_calls_once_per_object_change(self): callback = Mock() self.connect(callback, ['name1', 'name2']) old_name1_value = factory.make_name('old1') old_name2_value = factory.make_name('old2') obj = FieldChangeTestModel( name1=old_name1_value, name2=old_name2_value) obj.save() obj.name1 = factory.make_name('new1') obj.name2 = factory.make_name('new2') obj.save() self.assertEqual( [call(obj, (old_name1_value, old_name2_value), deleted=False)], callback.mock_calls) 
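# A typical (hypothetical) use of connect_to_field_change outside of tests
# might look like the following -- `log_name_change` is a made-up stand-in,
# not part of MAAS:
#
#   def log_name_change(instance, old_values, deleted):
#       print("name1 was %r (deleted=%r)" % (old_values, deleted))
#
#   connect, disconnect = connect_to_field_change(
#       log_name_change, FieldChangeTestModel, ['name1'], delete=True)
#
# The callback receives the saved instance, a tuple of the old values of the
# watched fields, and a `deleted` flag; disconnect() stops notifications and
# connect() resumes them, exactly as the tests above exercise.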
maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_storage.py0000644000000000000000000001333513056115004023176 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for storage utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.enum import ( FILESYSTEM_GROUP_RAID_TYPE_CHOICES, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, NODE_STATUS, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.storage import ( get_effective_filesystem, used_for, ) class TestGetEffectiveFilesystem(MAASServerTestCase): scenarios = ( ("BlockDevice", { "factory": factory.make_BlockDevice, "filesystem_property": "block_device", }), ("Partition", { "factory": factory.make_Partition, "filesystem_property": "partition", }), ) def test__returns_None_when_no_filesystem(self): model = self.factory() self.assertIsNone(get_effective_filesystem(model)) def test__returns_filesystem_if_node_not_in_acquired_state(self): node = factory.make_Node(status=NODE_STATUS.READY) model = self.factory(node=node) filesystem = factory.make_Filesystem(**{ self.filesystem_property: model, }) self.assertEquals(filesystem, get_effective_filesystem(model)) def test__returns_acquired_filesystem(self): node = factory.make_Node(status=NODE_STATUS.ALLOCATED) model = self.factory(node=node) factory.make_Filesystem(**{ self.filesystem_property: model, }) filesystem = factory.make_Filesystem(**{ self.filesystem_property: model, "acquired": True, }) self.assertEquals(filesystem, get_effective_filesystem(model)) def test__returns_non_mountable_filesystem(self): node = factory.make_Node(status=NODE_STATUS.ALLOCATED) model = self.factory(node=node) filesystem = factory.make_Filesystem(**{ self.filesystem_property: model, "fstype": FILESYSTEM_TYPE.BCACHE_BACKING, }) self.assertEquals(filesystem, get_effective_filesystem(model)) def test__returns_none_when_allocated_state(self): node = factory.make_Node(status=NODE_STATUS.ALLOCATED) model = self.factory(node=node) factory.make_Filesystem(**{ self.filesystem_property: model, "fstype": FILESYSTEM_TYPE.EXT4, }) self.assertIsNone(get_effective_filesystem(model)) class TestUsedFor(MAASServerTestCase): def test__unused(self): block_device = factory.make_BlockDevice() self.assertEquals(used_for(block_device), "Unused") def test__fs_formatted(self): block_device = factory.make_BlockDevice() fs = factory.make_Filesystem(block_device=block_device) self.assertEqual( "Unmounted %s formatted filesystem" % fs.fstype, used_for(block_device)) def test__fs_formatted_and_mounted(self): block_device = factory.make_BlockDevice() fs = factory.make_Filesystem( block_device=block_device, mount_point="/mnt") self.assertEqual( ("%s formatted filesystem mounted at %s" % (fs.fstype, fs.mount_point)), used_for(block_device)) def test__partitioned(self): block_device = factory.make_BlockDevice() partition_table = factory.make_PartitionTable( block_device=block_device) partitions = partition_table.partitions.count() if partitions > 1: expected_message = "%s partitioned with %d partitions" else: expected_message = "%s partitioned with %d partition" self.assertEqual( expected_message % (partition_table.table_type, partitions), used_for(block_device)) def test__lvm(self): filesystem_group = factory.make_FilesystemGroup( 
group_type=FILESYSTEM_GROUP_TYPE.LVM_VG) self.assertEqual( ("LVM volume for %s" % filesystem_group.name), used_for(filesystem_group.filesystems.first().block_device)) def test__raid_active(self): filesystem_group = factory.make_FilesystemGroup( group_type=factory.pick_choice(FILESYSTEM_GROUP_RAID_TYPE_CHOICES)) self.assertEqual( ("Active %s device for %s" % (filesystem_group.group_type, filesystem_group.name)), used_for(filesystem_group.filesystems.first().block_device)) def test__raid_spare(self): filesystem_group = factory.make_FilesystemGroup( group_type=factory.pick_choice(FILESYSTEM_GROUP_RAID_TYPE_CHOICES)) slave_block_device = factory.make_PhysicalBlockDevice() factory.make_Filesystem( block_device=slave_block_device, fstype=FILESYSTEM_TYPE.RAID_SPARE, filesystem_group=filesystem_group) self.assertEqual( ("Spare %s device for %s" % (filesystem_group.group_type, filesystem_group.name)), used_for(slave_block_device)) def test__bcache(self): cacheset = factory.make_CacheSet() blockdevice = cacheset.get_device() self.assertEqual( ("Cache device for %s" % cacheset.name), used_for(blockdevice)) def test__bcache_backing(self): filesystem_group = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.BCACHE) self.assertEqual( ("Backing device for %s" % filesystem_group.name), used_for(filesystem_group.filesystems.first().block_device)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_threads.py0000644000000000000000000001145213056115004023162 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.utils.threads`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from crochet import wait_for_reactor from django.db import connection from maasserver.utils import ( orm, threads, ) from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.utils.twisted import ( ThreadPool, ThreadUnpool, ) from testtools.matchers import ( Equals, Is, IsInstance, ) from twisted.internet import reactor from twisted.internet.defer import ( DeferredSemaphore, inlineCallbacks, ) class TestMakeFunctions(MAASTestCase): """Tests for the `make_*` functions.""" def test__make_default_pool_creates_disconnected_pool(self): pool = threads.make_default_pool() self.assertThat(pool, IsInstance(ThreadPool)) self.assertThat( pool.context.contextFactory, Is(orm.TotallyDisconnected)) self.assertThat(pool.max, Equals( threads.max_threads_for_default_pool)) self.assertThat(pool.min, Equals(0)) def test__make_default_pool_accepts_max_threads_setting(self): maxthreads = random.randint(1, 1000) pool = threads.make_default_pool(maxthreads) self.assertThat(pool.max, Equals(maxthreads)) self.assertThat(pool.min, Equals(0)) def test__make_database_pool_creates_connected_pool(self): pool = threads.make_database_pool() self.assertThat(pool, IsInstance(ThreadPool)) self.assertThat(pool.context.contextFactory, Is(orm.FullyConnected)) self.assertThat(pool.max, Equals( threads.max_threads_for_database_pool)) self.assertThat(pool.min, Equals(0)) def test__make_database_pool_accepts_max_threads_setting(self): maxthreads = random.randint(1, 1000) pool = threads.make_database_pool(maxthreads) self.assertThat(pool.max, Equals(maxthreads)) self.assertThat(pool.min, Equals(0)) def test__make_database_unpool_creates_unpool(self): pool = threads.make_database_unpool() self.assertThat(pool, 
IsInstance(ThreadUnpool)) self.assertThat(pool.contextFactory, Is(orm.ExclusivelyConnected)) self.assertThat(pool.lock, IsInstance(DeferredSemaphore)) self.assertThat(pool.lock.limit, Equals( threads.max_threads_for_database_pool)) def test__make_database_unpool_accepts_max_threads_setting(self): maxthreads = random.randint(1, 1000) pool = threads.make_database_unpool(maxthreads) self.assertThat(pool.lock.limit, Equals(maxthreads)) class TestInstallFunctions(MAASTestCase): """Tests for the `install_*` functions.""" def test__install_default_pool_will_not_work_now(self): error = self.assertRaises( AssertionError, threads.install_default_pool) self.assertDocTestMatches("Too late; ...", unicode(error)) def test__default_pool_is_disconnected_pool(self): pool = reactor.threadpool self.assertThat(pool, IsInstance(ThreadPool)) self.assertThat( pool.context.contextFactory, Is(orm.TotallyDisconnected)) self.assertThat(pool.min, Equals(0)) def test__install_database_pool_will_not_work_now(self): error = self.assertRaises( AssertionError, threads.install_database_pool) self.assertDocTestMatches("Too late; ...", unicode(error)) def test__database_pool_is_connected_unpool(self): pool = reactor.threadpoolForDatabase self.assertThat(pool, IsInstance(ThreadUnpool)) self.assertThat(pool.contextFactory, Is(orm.ExclusivelyConnected)) class TestDeferToDatabase(MAASTestCase): @wait_for_reactor @inlineCallbacks def test__defers_to_database_threadpool(self): @orm.transactional def call_in_database_thread(a, b): orm.validate_in_transaction(connection) return sentinel.called, a, b result = yield threads.deferToDatabase( call_in_database_thread, sentinel.a, b=sentinel.b) self.assertThat(result, Equals( (sentinel.called, sentinel.a, sentinel.b))) class TestCallOutToDatabase(MAASTestCase): @wait_for_reactor @inlineCallbacks def test__calls_out_to_database_threadpool(self): @orm.transactional def call_in_database_thread(a, b): orm.validate_in_transaction(connection) return sentinel.bar, a, b result = yield threads.callOutToDatabase( sentinel.foo, call_in_database_thread, sentinel.a, b=sentinel.b) self.assertThat(result, Is(sentinel.foo)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_utils.py0000644000000000000000000003501613056115004022672 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for miscellaneous helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib import threading from urllib import urlencode from urlparse import ( urljoin, urlparse, ) from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.http import HttpRequest from django.test.client import RequestFactory from maasserver.enum import NODEGROUPINTERFACE_MANAGEMENT from maasserver.exceptions import NodeGroupMisconfiguration from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils import ( absolute_reverse, absolute_url_reverse, build_absolute_uri, find_nodegroup, get_local_cluster_UUID, make_validation_error_message, strip_domain, synchronised, ) from maastesting.testcase import MAASTestCase from mock import sentinel from netaddr import IPAddress from provisioningserver.testing.config import ClusterConfigurationFixture class TestAbsoluteReverse(MAASServerTestCase): def expected_from_maas_url_and_reverse(self, maas_url, reversed_url): # We need to remove the leading '/' from the reversed url, or # urljoin won't actually join. return urljoin(maas_url, reversed_url.lstrip("/")) def test_absolute_reverse_uses_maas_url_by_default(self): maas_url = factory.make_simple_http_url(path='') self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) absolute_url = absolute_reverse('settings') expected_url = self.expected_from_maas_url_and_reverse( maas_url, reverse('settings')) self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_given_base_url(self): maas_url = factory.make_simple_http_url() absolute_url = absolute_reverse('settings', base_url=maas_url) expected_url = self.expected_from_maas_url_and_reverse( maas_url, reverse('settings')) self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_query_string(self): maas_url = factory.make_simple_http_url() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) parameters = {factory.make_string(): factory.make_string()} absolute_url = absolute_reverse('settings', query=parameters) reversed_url = '%s?%s' % (reverse('settings'), urlencode(parameters)) expected_url = self.expected_from_maas_url_and_reverse( maas_url, reversed_url) self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_kwargs(self): maas_url = factory.make_simple_http_url() nodegroup = factory.make_NodeGroup() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) absolute_url = absolute_reverse( 'cluster-edit', kwargs={'uuid': nodegroup.uuid}) reversed_url = reverse('cluster-edit', args=[nodegroup.uuid]) expected_url = self.expected_from_maas_url_and_reverse( maas_url, reversed_url) self.assertEqual(expected_url, absolute_url) def test_absolute_reverse_uses_args(self): nodegroup = factory.make_NodeGroup() maas_url = factory.make_simple_http_url() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) observed_url = absolute_reverse('cluster-edit', args=[nodegroup.uuid]) reversed_url = reverse('cluster-edit', args=[nodegroup.uuid]) expected_url = self.expected_from_maas_url_and_reverse( maas_url, reversed_url) self.assertEqual(expected_url, observed_url) class TestAbsoluteUrlReverse(MAASServerTestCase): def setUp(self): super(TestAbsoluteUrlReverse, self).setUp() self.useFixture(RegionConfigurationFixture()) def 
test_absolute_url_reverse_uses_path_from_maas_url(self): maas_url = factory.make_simple_http_url() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) path = urlparse(maas_url).path absolute_url = absolute_url_reverse('settings') expected_url = path + reverse('settings') self.assertEqual(expected_url, absolute_url) def test_absolute_url_reverse_copes_with_trailing_slash(self): maas_url = factory.make_simple_http_url() path = urlparse(maas_url).path + '/' self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) absolute_url = absolute_url_reverse('settings') expected_url = path[:-1] + reverse('settings') self.assertEqual(expected_url, absolute_url) def test_absolute_url_reverse_uses_query_string(self): maas_url = factory.make_simple_http_url() path = urlparse(maas_url).path self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) parameters = {factory.make_string(): factory.make_string()} absolute_url = absolute_url_reverse('settings', query=parameters) expected_url = path + "%s?%s" % ( reverse('settings'), urlencode(parameters)) self.assertEqual(expected_url, absolute_url) class TestBuildAbsoluteURI(MAASTestCase): """Tests for `build_absolute_uri`.""" def setUp(self): super(TestBuildAbsoluteURI, self).setUp() self.useFixture(RegionConfigurationFixture()) def make_request(self, host="example.com", port=80, script_name="", is_secure=False): """Return a :class:`HttpRequest` with the given parameters.""" request = HttpRequest() request.META["SERVER_NAME"] = host request.META["SERVER_PORT"] = port request.META["SCRIPT_NAME"] = script_name request.is_secure = lambda: is_secure return request def test_simple(self): request = self.make_request() self.assertEqual( "http://example.com/fred", build_absolute_uri(request, "/fred")) def test_different_port(self): request = self.make_request(port=1234) self.assertEqual( "http://example.com:1234/fred", build_absolute_uri(request, "/fred")) def test_script_name_is_ignored(self): # The given path already includes the script_name, so the # script_name passed in the request is not included again. request = self.make_request(script_name="/foo/bar") self.assertEqual( "http://example.com/foo/bar/fred", build_absolute_uri(request, "/foo/bar/fred")) def test_secure(self): request = self.make_request(port=443, is_secure=True) self.assertEqual( "https://example.com/fred", build_absolute_uri(request, "/fred")) def test_different_port_and_secure(self): request = self.make_request(port=9443, is_secure=True) self.assertEqual( "https://example.com:9443/fred", build_absolute_uri(request, "/fred")) def test_preserve_two_leading_slashes(self): # Whilst this shouldn't ordinarily happen, two leading slashes in the # path should be preserved, and not treated specially. 
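        # e.g. build_absolute_uri(request, "//foo") yields
        # "http://example.com//foo", not a URL with "foo" as the host.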
request = self.make_request() self.assertEqual( "http://example.com//foo", build_absolute_uri(request, "//foo")) class TestStripDomain(MAASTestCase): def test_strip_domain(self): input_and_results = [ ('name.domain', 'name'), ('name', 'name'), ('name.domain.what', 'name'), ('name..domain', 'name'), ] inputs = [input for input, _ in input_and_results] results = [result for _, result in input_and_results] self.assertEqual(results, map(strip_domain, inputs)) class TestGetLocalClusterUUID(MAASTestCase): def test_get_local_cluster_UUID_returns_None_if_not_set(self): self.useFixture(ClusterConfigurationFixture()) self.assertIsNone(get_local_cluster_UUID()) def test_get_local_cluster_UUID_returns_cluster_UUID(self): uuid = factory.make_UUID() self.useFixture(ClusterConfigurationFixture(cluster_uuid=uuid)) self.assertEqual(uuid, get_local_cluster_UUID()) def make_request(origin_ip): """Return a fake HTTP request with the given remote address.""" return RequestFactory().post('/', REMOTE_ADDR=unicode(origin_ip)) class TestFindNodegroup(MAASServerTestCase): scenarios = [ ('ipv4', {'network_factory': factory.make_ipv4_network}), ('ipv6', {'network_factory': factory.make_ipv6_network}), ] def make_cluster_interface(self, network, management=None): """Create a cluster interface. The interface is managed by default. """ if management is None: management = factory.pick_enum( NODEGROUPINTERFACE_MANAGEMENT, but_not=[NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED]) cluster = factory.make_NodeGroup() return factory.make_NodeGroupInterface( cluster, network=network, management=management) def test_find_nodegroup_looks_up_nodegroup_by_controller_ip(self): nodegroup = factory.make_NodeGroup() interface = factory.make_NodeGroupInterface(nodegroup) self.assertEqual( nodegroup, find_nodegroup(make_request(interface.ip))) def test_find_nodegroup_returns_None_if_not_found(self): requesting_ip = factory.pick_ip_in_network(self.network_factory()) self.assertIsNone(find_nodegroup(make_request(requesting_ip))) # # Finding a node's nodegroup (aka cluster controller) in a nutshell: # # when 1 managed interface on the network = choose this one # when >1 managed interfaces on the network = misconfiguration # when 1 unmanaged interface on a network = choose this one # when >1 unmanaged interfaces on a network = choose any # def test_1_managed_interface(self): network = self.network_factory() interface = self.make_cluster_interface(network) self.assertEqual( interface.nodegroup, find_nodegroup( make_request(factory.pick_ip_in_network(network)))) def test_1_managed_interface_and_1_unmanaged(self): # The managed nodegroup is chosen in preference to the unmanaged # nodegroup. network = self.network_factory() interface = self.make_cluster_interface(network) self.make_cluster_interface( network, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) self.assertEqual( interface.nodegroup, find_nodegroup( make_request(factory.pick_ip_in_network(network)))) def test_more_than_1_managed_interface(self): network = self.network_factory() requesting_ip = factory.pick_ip_in_network(network) self.make_cluster_interface(network=network) self.make_cluster_interface(network=network) exception = self.assertRaises( NodeGroupMisconfiguration, find_nodegroup, make_request(requesting_ip)) self.assertEqual( (httplib.CONFLICT, "Multiple clusters on the same network; only " "one cluster may manage the network of which " "%s is a member." 
% requesting_ip), (exception.api_error, "%s" % exception)) def test_1_unmanaged_interface(self): network = self.network_factory() interface = self.make_cluster_interface(network) self.assertEqual( interface.nodegroup, find_nodegroup( make_request(factory.pick_ip_in_network(network)))) def test_more_than_1_unmanaged_interface(self): network = self.network_factory() interfaces = [ self.make_cluster_interface( network, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) for _ in range(2) ] self.assertEqual( interfaces[0].nodegroup, find_nodegroup( make_request(factory.pick_ip_in_network(network)))) def test_handles_mixed_IPv4_and_IPv6(self): matching_network = self.network_factory() requesting_ip = factory.pick_ip_in_network(matching_network) self.make_cluster_interface(factory.make_ipv4_network()) self.make_cluster_interface(factory.make_ipv6_network()) matching_interface = self.make_cluster_interface(matching_network) self.assertEqual( matching_interface.nodegroup, find_nodegroup(make_request(requesting_ip))) def test_includes_lower_bound(self): network = self.network_factory() interface = self.make_cluster_interface(network) self.assertEqual( interface.nodegroup, find_nodegroup(make_request(IPAddress(network.first)))) def test_includes_upper_bound(self): network = self.network_factory() interface = self.make_cluster_interface(network) self.assertEqual( interface.nodegroup, find_nodegroup(make_request(IPAddress(network.last)))) def test_excludes_lower_bound_predecessor(self): network = self.network_factory() self.make_cluster_interface(network) self.assertIsNone( find_nodegroup(make_request(IPAddress(network.first - 1)))) def test_excludes_upper_bound_successor(self): network = self.network_factory() self.make_cluster_interface(network) self.assertIsNone( find_nodegroup(make_request(IPAddress(network.last + 1)))) class TestSynchronised(MAASTestCase): def test_locks_when_calling(self): lock = threading.Lock() @synchronised(lock) def example_synchronised_function(): self.assertTrue(lock.locked()) return sentinel.called self.assertFalse(lock.locked()) self.assertEqual(sentinel.called, example_synchronised_function()) self.assertFalse(lock.locked()) class TestMakeValidationErrorMessage(MAASTestCase): def test__formats_message_with_all_errors(self): error = ValidationError({ "foo": [ValidationError("bar")], "alice": [ValidationError("bob")], "__all__": ["all is lost"], }) self.assertEqual( "* all is lost\n" "* alice: bob\n" "* foo: bar", make_validation_error_message(error)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_version.py0000644000000000000000000001754513056115004023226 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test version utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from bzrlib.errors import NotBranchError from maasserver.utils import version from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import ( MagicMock, sentinel, ) from testtools.matchers import Is class TestGetVersionFromAPT(MAASTestCase): def test__creates_cache_with_None_progress(self): mock_Cache = self.patch(version.apt_pkg, "Cache") version.get_version_from_apt(version.REGION_PACKAGE_NAME) self.assertThat(mock_Cache, MockCalledOnceWith(None)) def test__returns_empty_string_if_package_not_in_cache(self): self.patch(version.apt_pkg, "Cache") self.assertEquals( "", version.get_version_from_apt(version.REGION_PACKAGE_NAME)) def test__returns_empty_string_if_not_current_ver_from_package(self): package = MagicMock() package.current_ver = None mock_cache = { version.REGION_PACKAGE_NAME: package, } self.patch(version.apt_pkg, "Cache").return_value = mock_cache self.assertEquals( "", version.get_version_from_apt(version.REGION_PACKAGE_NAME)) def test__returns_ver_str_from_package(self): package = MagicMock() package.current_ver.ver_str = sentinel.ver_str mock_cache = { version.REGION_PACKAGE_NAME: package, } self.patch(version.apt_pkg, "Cache").return_value = mock_cache self.assertIs( sentinel.ver_str, version.get_version_from_apt(version.REGION_PACKAGE_NAME)) class TestGetMAASBranch(MAASTestCase): def test__returns_None_if_Branch_is_None(self): self.patch(version, "Branch", None) self.assertIsNone(version.get_maas_branch()) def test__calls_Branch_open_with_current_dir(self): mock_open = self.patch(version.Branch, "open") mock_open.return_value = sentinel.branch self.expectThat(version.get_maas_branch(), Is(sentinel.branch)) self.expectThat(mock_open, MockCalledOnceWith(".")) def test__returns_None_on_NotBranchError(self): mock_open = self.patch(version.Branch, "open") mock_open.side_effect = NotBranchError("") self.assertIsNone(version.get_maas_branch()) class TestExtractVersionSubversion(MAASTestCase): scenarios = [ ("with ~", { "version": "1.8.0~alpha4+bzr356-0ubuntu1", "output": ("1.8.0", "alpha4+bzr356"), }), ("without ~", { "version": "1.8.0+bzr356-0ubuntu1", "output": ("1.8.0", "+bzr356"), }), ("without ~ or +", { "version": "1.8.0-0ubuntu1", "output": ("1.8.0", ""), }), ] def test__returns_version_subversion(self): self.assertEquals( self.output, version.extract_version_subversion(self.version)) class TestVersionTestCase(MAASTestCase): """MAASTestCase that resets the cache used by utility methods.""" def setUp(self): super(TestVersionTestCase, self).setUp() self.patch(version, "_cache", {}) class TestGetMAASPackageVersion(TestVersionTestCase): def test__calls_get_version_from_apt(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = sentinel.version self.expectThat( version.get_maas_package_version(), Is(sentinel.version)) self.expectThat( mock_apt, MockCalledOnceWith(version.REGION_PACKAGE_NAME)) class TestGetMAASVersionSubversion(TestVersionTestCase): def test__returns_package_version(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" self.assertEquals( ("1.8.0", "alpha4+bzr356"), version.get_maas_version_subversion()) def test__returns_unknown_if_version_is_empty_and_not_bzr_branch(self): mock_version = self.patch(version, "get_version_from_apt") 
mock_version.return_value = "" mock_branch = self.patch(version, "get_maas_branch") mock_branch.return_value = None self.assertEquals( ("unknown", ""), version.get_maas_version_subversion()) def test__returns_from_source_and_revno_from_branch(self): mock_version = self.patch(version, "get_version_from_apt") mock_version.return_value = "" revno = random.randint(1, 5000) mock_branch = self.patch(version, "get_maas_branch") mock_branch.return_value.revno.return_value = revno self.assertEquals( ("from source (+bzr%s)" % revno, ""), version.get_maas_version_subversion()) class TestGetMAASVersionUI(TestVersionTestCase): def test__returns_package_version(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" self.assertEquals( "1.8.0 (alpha4+bzr356)", version.get_maas_version_ui()) def test__returns_unknown_if_version_is_empty_and_not_bzr_branch(self): mock_version = self.patch(version, "get_version_from_apt") mock_version.return_value = "" mock_branch = self.patch(version, "get_maas_branch") mock_branch.return_value = None self.assertEquals("unknown", version.get_maas_version_ui()) def test__returns_from_source_and_revno_from_branch(self): mock_version = self.patch(version, "get_version_from_apt") mock_version.return_value = "" revno = random.randint(1, 5000) mock_branch = self.patch(version, "get_maas_branch") mock_branch.return_value.revno.return_value = revno self.assertEquals( "from source (+bzr%s)" % revno, version.get_maas_version_ui()) class TestGetMAASDocVersion(TestVersionTestCase): def test__returns_doc_version_with_greater_than_1_decimals(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" self.assertEquals("docs1.8", version.get_maas_doc_version()) def test__returns_doc_version_with_equal_to_1_decimals(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "1.8~alpha4+bzr356-0ubuntu1" self.assertEquals("docs1.8", version.get_maas_doc_version()) def test__returns_just_doc_if_version_is_empty(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "" self.assertEquals("docs", version.get_maas_doc_version()) class TestVersionMethodsCached(TestVersionTestCase): scenarios = [ ("get_maas_package_version", dict(method="get_maas_package_version")), ("get_maas_version_subversion", dict( method="get_maas_version_subversion")), ("get_maas_version_ui", dict(method="get_maas_version_ui")), ("get_maas_doc_version", dict(method="get_maas_doc_version")), ] def test_method_is_cached(self): mock_apt = self.patch(version, "get_version_from_apt") mock_apt.return_value = "1.8.0~alpha4+bzr356-0ubuntu1" cached_method = getattr(version, self.method) first_return_value = cached_method() second_return_value = cached_method() # The return value is not empty (full unit tests have been performed # earlier). self.assertNotIn(first_return_value, [b'', u'', None]) self.assertEqual(first_return_value, second_return_value) # Apt has only been called once. self.expectThat( mock_apt, MockCalledOnceWith(version.REGION_PACKAGE_NAME)) maas-1.9.5+bzr4599.orig/src/maasserver/utils/tests/test_views.py0000644000000000000000000004350213056115004022666 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for :py:module:`maasserver.utils.views`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib import io import logging from random import ( randint, random, ) from weakref import WeakSet from apiclient.multipart import encode_multipart_data from django.core import signals from django.core.handlers.wsgi import ( WSGIHandler, WSGIRequest, ) from django.core.urlresolvers import get_resolver from django.db import connection from django.http import HttpResponse from django.http.response import REASON_PHRASES from fixtures import FakeLogger from maasserver.testing.factory import factory from maasserver.testing.testcase import ( MAASServerTestCase, SerializationFailureTestCase, ) from maasserver.utils import views from maasserver.utils.orm import ( post_commit_hooks, validate_in_transaction, ) from maasserver.utils.views import HttpResponseConflict from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from maastesting.utils import sample_binary_data from mock import ( ANY, call, sentinel, ) from piston.authentication import initialize_server_request from piston.models import Nonce from testtools.matchers import ( Contains, Equals, HasLength, Is, IsInstance, Not, ) from testtools.testcase import ExpectedException from twisted.internet.task import Clock from twisted.python import log from twisted.web import wsgi def make_request(env=None, oauth_env=None, missing_oauth_param=None): # Return a minimal WSGIRequest. if oauth_env is None: oauth_env = {} base_env = { "REQUEST_METHOD": "GET", "wsgi.input": wsgi._InputStream(io.BytesIO()), "SERVER_NAME": "server", "SERVER_PORT": 80, "HTTP_AUTHORIZATION": factory.make_oauth_header( missing_param=missing_oauth_param, **oauth_env), } if env is not None: base_env.update(env) request = WSGIRequest(base_env) return request class TestLogFunctions(MAASTestCase): """Tests for `log_failed_attempt` and `log_final_failed_attempt`.""" def capture_logs(self): return FakeLogger( views.__name__, level=logging.DEBUG, format="%(levelname)s: %(message)s") def test_log_failed_attempt_logs_warning(self): request = make_request() request.path = factory.make_name("path") attempt = randint(1, 10) elapsed = random() * 10 remaining = random() * 10 pause = random() with self.capture_logs() as logger: views.log_failed_attempt( request, attempt, elapsed, remaining, pause) self.assertEqual( "DEBUG: Attempt #%d for %s failed; will retry in %.0fms (%.1fs " "now elapsed, %.1fs remaining)\n" % ( attempt, request.path, pause * 1000.0, elapsed, remaining), logger.output) def test_log_final_failed_attempt_logs_error(self): request = make_request() request.path = factory.make_name("path") attempt = randint(1, 10) elapsed = random() * 10 with self.capture_logs() as logger: views.log_final_failed_attempt(request, attempt, elapsed) self.assertEqual( "ERROR: Attempt #%d for %s failed; giving up (%.1fs elapsed in " "total)\n" % (attempt, request.path, elapsed), logger.output) class TestResetRequest(MAASTestCase): """Tests for :py:func:`maasserver.utils.views.reset_request`.""" def test__clears_messages_from_cookies(self): request = make_request() request.COOKIES["messages"] = sentinel.messages request = views.reset_request(request) self.assertEqual({}, request.COOKIES) class TestDeleteOAuthNonce(MAASServerTestCase): """Tests for :py:func:`maasserver.utils.views.delete_oauth_nonce`.""" def test__deletes_nonce(self): oauth_consumer_key = 
factory.make_string(18) oauth_token = factory.make_string(18) oauth_nonce = randint(0, 99999) Nonce.objects.create( consumer_key=oauth_consumer_key, token_key=oauth_token, key=oauth_nonce) oauth_env = { 'oauth_consumer_key': oauth_consumer_key, 'oauth_token': oauth_token, 'oauth_nonce': oauth_nonce, } request = make_request(oauth_env=oauth_env) views.delete_oauth_nonce(request) with ExpectedException(Nonce.DoesNotExist): Nonce.objects.get( consumer_key=oauth_consumer_key, token_key=oauth_token, key=oauth_nonce) def test__skips_missing_nonce(self): oauth_consumer_key = factory.make_string(18) oauth_token = factory.make_string(18) oauth_nonce = randint(0, 99999) oauth_env = { 'oauth_consumer_key': oauth_consumer_key, 'oauth_token': oauth_token, 'oauth_nonce': oauth_nonce, } request = make_request(oauth_env=oauth_env) # No exception is raised. self.assertIsNone(views.delete_oauth_nonce(request)) def test__skips_non_oauth_request(self): request = make_request(env={'HTTP_AUTHORIZATION': ''}) # No exception is raised. self.assertIsNone(views.delete_oauth_nonce(request)) def test__skips_oauth_request_with_missing_param(self): missing_params = ('oauth_consumer_key', 'oauth_token', 'oauth_nonce') for missing_param in missing_params: request = make_request(missing_oauth_param=missing_param) # No exception is raised. self.assertIsNone(views.delete_oauth_nonce(request)) class TestWebApplicationHandler(SerializationFailureTestCase): def setUp(self): super(TestWebApplicationHandler, self).setUp() # Wire time.sleep() directly up to clock.advance() to avoid needless # sleeps, and to simulate the march of time without intervention. clock = self.patch(views, "clock", Clock()) self.patch(views, "sleep", clock.advance) def test__init_defaults(self): handler = views.WebApplicationHandler() self.expectThat( handler._WebApplicationHandler__retry_attempts, Equals(10)) self.expectThat( handler._WebApplicationHandler__retry_timeout, Equals(90)) self.expectThat( handler._WebApplicationHandler__retry, IsInstance(WeakSet)) self.expectThat( handler._WebApplicationHandler__retry, HasLength(0)) def test__init_attempts_can_be_set(self): attempts = randint(1, 100) handler = views.WebApplicationHandler(attempts) self.expectThat( handler._WebApplicationHandler__retry_attempts, Equals(attempts)) def test__init_timeout_can_be_set(self): handler = views.WebApplicationHandler(timeout=sentinel.timeout) self.expectThat( handler._WebApplicationHandler__retry_timeout, Is(sentinel.timeout)) def test__handle_uncaught_exception_notes_serialization_failure(self): handler = views.WebApplicationHandler() request = make_request() request.path = factory.make_name("path") failure = self.capture_serialization_failure() response = handler.handle_uncaught_exception( request=request, resolver=get_resolver(None), exc_info=failure) # HTTP 500 is returned... self.expectThat( response.status_code, Equals(httplib.INTERNAL_SERVER_ERROR)) # ... but the response is recorded as needing a retry. self.expectThat( handler._WebApplicationHandler__retry, Contains(response)) def test__handle_uncaught_exception_does_not_note_other_failure(self): handler = views.WebApplicationHandler() request = make_request() request.path = factory.make_name("path") failure_type = factory.make_exception_type() failure = failure_type, failure_type(), None response = handler.handle_uncaught_exception( request=request, resolver=get_resolver(None), exc_info=failure) # HTTP 500 is returned... self.expectThat( response.status_code, Equals(httplib.INTERNAL_SERVER_ERROR)) # ... 
but the response is NOT recorded as needing a retry.
        self.expectThat(
            handler._WebApplicationHandler__retry, Not(Contains(response)))

    def test__handle_uncaught_exception_logs_other_failure(self):
        handler = views.WebApplicationHandler()
        request = make_request()
        request.path = factory.make_name("path")
        exc_type = factory.make_exception_type()
        exc_info = exc_type, exc_type(), None
        mock_err = self.patch(log, "err")
        handler.handle_uncaught_exception(
            request=request, resolver=get_resolver(None), exc_info=exc_info)
        # Cannot use MockCalledOnceWith as the Failure objects will not match
        # even when they are created the same way. Must check the contents
        # of the failure.
        failure = mock_err.call_args[0][0]
        _why = mock_err.call_args_list[0][1]['_why']
        self.expectThat(failure.type, Equals(exc_type))
        self.expectThat(failure.value, Equals(exc_info[1]))
        self.expectThat(_why, Equals("500 Error - %s" % request.path))

    def test__get_response_catches_serialization_failures(self):
        get_response = self.patch(WSGIHandler, "get_response")
        get_response.side_effect = (
            lambda request: self.cause_serialization_failure())
        handler = views.WebApplicationHandler(1)
        request = make_request()
        request.path = factory.make_name("path")
        response = handler.get_response(request)
        self.assertThat(
            get_response, MockCalledOnceWith(request))
        self.assertThat(
            response, IsInstance(HttpResponseConflict))

    def test__get_response_sends_signal_on_serialization_failures(self):
        get_response = self.patch(WSGIHandler, "get_response")
        get_response.side_effect = (
            lambda request: self.cause_serialization_failure())
        send_request_exception = self.patch_autospec(
            signals.got_request_exception, "send")
        handler = views.WebApplicationHandler(1)
        request = make_request()
        request.path = factory.make_name("path")
        handler.get_response(request)
        self.assertThat(
            send_request_exception, MockCalledOnceWith(
                sender=views.WebApplicationHandler, request=request))

    def test__get_response_tries_only_once(self):
        get_response = self.patch(WSGIHandler, "get_response")
        get_response.return_value = sentinel.response
        handler = views.WebApplicationHandler()
        request = make_request()
        request.path = factory.make_name("path")
        response = handler.get_response(request)
        self.assertThat(
            get_response, MockCalledOnceWith(request))
        self.assertThat(
            response, Is(sentinel.response))

    def test__get_response_tries_multiple_times(self):
        handler = views.WebApplicationHandler(3)
        # An iterable of sentinel responses. Each one is registered in the
        # handler's retry set, so get_response() keeps retrying until its
        # attempts are exhausted and then itself returns an
        # HttpResponseConflict (HTTP 409 - Conflict) response.
responses = iter((sentinel.r1, sentinel.r2, sentinel.r3)) def set_retry(request): response = next(responses) handler._WebApplicationHandler__retry.add(response) return response get_response = self.patch(WSGIHandler, "get_response") get_response.side_effect = set_retry reset_request = self.patch_autospec(views, "reset_request") reset_request.side_effect = lambda request: request request = make_request() request.path = factory.make_name("path") response = handler.get_response(request) self.assertThat( get_response, MockCallsMatch( call(request), call(request), call(request))) self.assertThat(response, IsInstance(HttpResponseConflict)) self.expectThat(response.status_code, Equals(httplib.CONFLICT)) self.expectThat( response.reason_phrase, Equals(REASON_PHRASES[httplib.CONFLICT])) def test__get_response_logs_retry_and_resets_request(self): timeout = 1.0 + (random() * 99) handler = views.WebApplicationHandler(2, timeout) def set_retry(request): response = sentinel.response handler._WebApplicationHandler__retry.add(response) return response get_response = self.patch(WSGIHandler, "get_response") get_response.side_effect = set_retry self.patch_autospec(views, "log_failed_attempt") self.patch_autospec(views, "log_final_failed_attempt") reset_request = self.patch_autospec(views, "reset_request") reset_request.side_effect = lambda request: request request = make_request() request.path = factory.make_name("path") handler.get_response(request) self.expectThat( views.log_failed_attempt, MockCalledOnceWith(request, 1, ANY, ANY, ANY)) self.expectThat( views.log_final_failed_attempt, MockCalledOnceWith(request, 2, ANY)) self.expectThat(reset_request, MockCalledOnceWith(request)) def test__get_response_up_calls_in_transaction(self): handler = views.WebApplicationHandler(2) def check_in_transaction(request): validate_in_transaction(connection) get_response = self.patch(WSGIHandler, "get_response") get_response.side_effect = check_in_transaction request = make_request() request.path = factory.make_name("path") handler.get_response(request) self.assertThat(get_response, MockCalledOnceWith(request)) def test__get_response_restores_files_across_requests(self): handler = views.WebApplicationHandler(3) file_content = sample_binary_data file_name = 'content' recorder = [] def get_response_read_content_files(self, request): # Simple get_response method which returns the 'file_name' file # from the request in the response. content = request.FILES[file_name].read() # Record calls. 
recorder.append(content) response = HttpResponse( content=content, status=httplib.OK, mimetype=b"text/plain; charset=utf-8") handler._WebApplicationHandler__retry.add(response) return response self.patch( WSGIHandler, "get_response", get_response_read_content_files) body, headers = encode_multipart_data( [], [[file_name, io.BytesIO(file_content)]]) env = { 'REQUEST_METHOD': 'POST', 'wsgi.input': wsgi._InputStream(io.BytesIO(body)), 'CONTENT_TYPE': headers['Content-Type'], 'CONTENT_LENGTH': headers['Content-Length'], 'HTTP_MIME_VERSION': headers['MIME-Version'], } request = make_request(env) response = handler.get_response(request) self.assertEqual(file_content, response.content) self.assertEqual(recorder, [file_content] * 3) def test__get_response_deleted_nonces_across_requests(self): handler = views.WebApplicationHandler(3) user = factory.make_User() token = user.userprofile.get_authorisation_tokens()[0] recorder = [] def get_response_check_nonce(self, request): _, oauth_req = initialize_server_request(request) # get_or _create the Nonce object like the authentication # mechanism does. nonce_obj, created = Nonce.objects.get_or_create( consumer_key=token.consumer.key, token_key=token.key, key=oauth_req.get_parameter('oauth_nonce')) # Record calls. recorder.append(created) response = HttpResponse( content='', status=httplib.OK, mimetype=b"text/plain; charset=utf-8") handler._WebApplicationHandler__retry.add(response) return response self.patch( WSGIHandler, "get_response", get_response_check_nonce) oauth_env = { 'oauth_consumer_key': token.consumer.key, 'oauth_token': token.key, } request = make_request(oauth_env=oauth_env) handler.get_response(request) self.assertEqual(recorder, [True] * 3, "Nonce hasn't been cleaned up!") class TestWebApplicationHandlerAtomicViews(MAASServerTestCase): def test__make_view_atomic_wraps_view_with_post_commit_savepoint(self): hooks = post_commit_hooks.hooks savepoint_level = len(connection.savepoint_ids) def view(*args, **kwargs): # We're one more savepoint in. self.assertThat( connection.savepoint_ids, HasLength(savepoint_level + 1)) # Post-commit hooks have been saved. self.assertThat(post_commit_hooks.hooks, Not(Is(hooks))) # Return the args we were given. return args, kwargs handler = views.WebApplicationHandler() view_atomic = handler.make_view_atomic(view) self.assertThat(post_commit_hooks.hooks, Is(hooks)) self.assertThat( view_atomic(sentinel.arg, kwarg=sentinel.kwarg), Equals(((sentinel.arg, ), {"kwarg": sentinel.kwarg}))) self.assertThat(post_commit_hooks.hooks, Is(hooks)) maas-1.9.5+bzr4599.orig/src/maasserver/views/__init__.py0000644000000000000000000001705413056115004021067 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "AccountsEdit", "AccountsView", "HelpfulDeleteView", "PaginatedListView", "process_form", "settings", "settings_add_archive", "TextTemplateView", ] from abc import ( ABCMeta, abstractmethod, ) from django.contrib import messages from django.http import ( Http404, HttpResponseRedirect, ) from django.views.generic import ( DeleteView, ListView, TemplateView, ) class TextTemplateView(TemplateView): """A text-based :class:`django.views.generic.TemplateView`.""" def render_to_response(self, context, **response_kwargs): response_kwargs['content_type'] = 'text/plain' return super( TemplateView, self).render_to_response(context, **response_kwargs) class HelpfulDeleteView(DeleteView): """Extension to Django's :class:`django.views.generic.DeleteView`. This modifies `DeleteView` in a few ways: - Deleting a nonexistent object is considered successful. - There's a callback that lets you describe the object to the user. - User feedback is built in. - get_success_url defaults to returning the "next" URL. - Confirmation screen also deals nicely with already-deleted object. :ivar model: The model class this view is meant to delete. """ __metaclass__ = ABCMeta @abstractmethod def get_object(self): """Retrieve the object to be deleted.""" @abstractmethod def get_next_url(self): """URL of page to proceed to after deleting.""" def delete(self, *args, **kwargs): """Delete result of self.get_object(), if any.""" try: self.object = self.get_object() except Http404: feedback = self.compose_feedback_nonexistent() else: self.object.delete() feedback = self.compose_feedback_deleted(self.object) return self.move_on(feedback) def get(self, *args, **kwargs): """Prompt for confirmation of deletion request in the UI. This is where the view acts as a regular template view. If the object has been deleted in the meantime though, don't bother: we'll just redirect to the next URL and show a notice that the object is no longer there. """ try: return super(HelpfulDeleteView, self).get(*args, **kwargs) except Http404: return self.move_on(self.compose_feedback_nonexistent()) def compose_feedback_nonexistent(self): """Compose feedback message: "obj was already deleted".""" return "Not deleting: %s not found." % self.model._meta.verbose_name def compose_feedback_deleted(self, obj): """Compose feedback message: "obj has been deleted".""" return ("%s deleted." % self.name_object(obj)).capitalize() def name_object(self, obj): """Overridable: describe object being deleted to the user. The result text will be included in a user notice along the lines of " deleted." :param obj: Object that's been deleted from the database. :return: Description of the object, along the lines of "User ". """ return obj._meta.verbose_name def show_notice(self, notice): """Wrapper for messages.info.""" messages.info(self.request, notice) def move_on(self, feedback_message): """Redirect to the post-deletion page, showing the given message.""" self.show_notice(feedback_message) return HttpResponseRedirect(self.get_next_url()) class PaginatedListView(ListView): """Paginating extension to :class:`django.views.generic.ListView` Adds to the normal list view pagination support by including context variables for relative links to other pages, correctly preserving the existing query string and path. 
""" paginate_by = 50 def _make_page_link(self, page_number): """Gives relative url reference to `page_number` from current page The return will be one of: - A query string including the page number and other params - The final path segment if there are no params - '.' if there are no params and there is no final path segment See RFCs 1808 and 3986 for relative url resolution rules. The page number is not checked for sanity, pass only valid pages. """ new_query = self.request.GET.copy() if page_number == 1: if "page" in new_query: del new_query["page"] else: new_query["page"] = unicode(page_number) if not new_query: return self.request.path.rsplit("/", 1)[-1] or "." return "?" + new_query.urlencode() def get_context_data(self, **kwargs): """Gives context data also populated with page links If already on the first or last page, the same-document reference will be given for relative links in that direction, which may be safely replaced in the template with a non-anchor element. """ context = super(PaginatedListView, self).get_context_data(**kwargs) page_obj = context["page_obj"] if page_obj.has_previous(): context["first_page_link"] = self._make_page_link(1) context["previous_page_link"] = self._make_page_link( page_obj.previous_page_number()) else: context["first_page_link"] = context["previous_page_link"] = "" if page_obj.has_next(): context["next_page_link"] = self._make_page_link( page_obj.next_page_number()) context["last_page_link"] = self._make_page_link( page_obj.paginator.num_pages) else: context["next_page_link"] = context["last_page_link"] = "" return context def process_form(request, form_class, redirect_url, prefix, success_message=None, form_kwargs=None): """Utility method to process subforms (i.e. forms with a prefix). :param request: The request which contains the data to be validated. :type request: django.http.HttpRequest :param form_class: The form class used to perform the validation. :type form_class: django.forms.Form :param redirect_url: The url where the user should be redirected if the form validates successfully. :type redirect_url: unicode :param prefix: The prefix of the form. :type prefix: unicode :param success_message: An optional message that will be displayed if the form validates successfully. :type success_message: unicode :param form_kwargs: An optional dict that will passed to the form creation method. :type form_kwargs: dict or None :return: A tuple of the validated form and a response (the response will not be None only if the form has been validated correctly). :rtype: tuple """ if form_kwargs is None: form_kwargs = {} if '%s_submit' % prefix in request.POST: form = form_class( data=request.POST, prefix=prefix, **form_kwargs) if form.is_valid(): if success_message is not None: messages.info(request, success_message) form.save() return form, HttpResponseRedirect(redirect_url) else: form = form_class(prefix=prefix, **form_kwargs) return form, None maas-1.9.5+bzr4599.orig/src/maasserver/views/account.py0000644000000000000000000000332513056115004020760 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Account views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "login", "logout", ] from django import forms from django.conf import settings as django_settings from django.contrib import messages from django.contrib.auth.views import ( login as dj_login, logout as dj_logout, ) from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext from maasserver.models import UserProfile def login(request): extra_context = { 'no_users': UserProfile.objects.all_users().count() == 0, 'create_command': django_settings.MAAS_CLI, } if request.user.is_authenticated(): return HttpResponseRedirect(reverse('index')) else: return dj_login(request, extra_context=extra_context) class LogoutForm(forms.Form): """Log-out confirmation form. There is nothing interesting in this form, but it's needed in order to get Django's CSRF protection during logout. """ def logout(request): if request.method == 'POST': form = LogoutForm(request.POST) if form.is_valid(): messages.info(request, "You have been logged out.") return dj_logout(request, next_page=reverse('login')) else: form = LogoutForm() return render_to_response( 'maasserver/logout_confirm.html', {'form': form}, context_instance=RequestContext(request), ) maas-1.9.5+bzr4599.orig/src/maasserver/views/clusters.py0000644000000000000000000001402413056115004021166 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Cluster views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ClusterDelete", "ClusterEdit", "ClusterInterfaceCreate", "ClusterInterfaceDelete", "ClusterInterfaceEdit", "ClusterListView", ] from django.contrib import messages from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.views.generic import ( CreateView, DeleteView, UpdateView, ) from django.views.generic.edit import ( FormMixin, ProcessFormView, ) from maasserver.enum import NODEGROUP_STATUS from maasserver.forms import ( NodeGroupEdit, NodeGroupInterfaceForm, ) from maasserver.models import ( BootResource, NodeGroup, NodeGroupInterface, ) from maasserver.views import PaginatedListView class ClusterListView(PaginatedListView, FormMixin, ProcessFormView): template_name = 'maasserver/cluster_listing.html' context_object_name = "cluster_list" status = None def get_queryset(self): return NodeGroup.objects.all().order_by('cluster_name') def get_context_data(self, **kwargs): context = super(ClusterListView, self).get_context_data(**kwargs) cluster_count = NodeGroup.objects.count() context['current_count'] = cluster_count # Display warnings (no images, cluster not connected) for clusters, # but only for the display of ENABLED clusters. context['display_warnings'] = self.status == NODEGROUP_STATUS.ENABLED context['region_has_images'] = BootResource.objects.exists() return context class ClusterEdit(UpdateView): model = NodeGroup template_name = 'maasserver/nodegroup_edit.html' form_class = NodeGroupEdit context_object_name = 'cluster' def get_form_kwargs(self): kwargs = super(ClusterEdit, self).get_form_kwargs() # The cluster form has a boolean checkbox. 
For those we need to know # whether a submission came in from the UI (where omitting the field # means "set to False") or from the API (where it means "leave # unchanged"). kwargs['ui_submission'] = True return kwargs def get_context_data(self, **kwargs): context = super(ClusterEdit, self).get_context_data(**kwargs) context['interfaces'] = ( self.object.nodegroupinterface_set.all().order_by('name')) return context def get_success_url(self): return reverse('cluster-list') def get_object(self): uuid = self.kwargs.get('uuid', None) return get_object_or_404(NodeGroup, uuid=uuid) def form_valid(self, form): messages.info(self.request, "Cluster updated.") return super(ClusterEdit, self).form_valid(form) class ClusterDelete(DeleteView): template_name = 'maasserver/nodegroup_confirm_delete.html' context_object_name = 'cluster_to_delete' def get_object(self): uuid = self.kwargs.get('uuid', None) return get_object_or_404(NodeGroup, uuid=uuid) def get_next_url(self): return reverse('cluster-list') def delete(self, request, *args, **kwargs): cluster = self.get_object() cluster.delete() messages.info(request, "Cluster %s deleted." % cluster.cluster_name) return HttpResponseRedirect(self.get_next_url()) class ClusterInterfaceDelete(DeleteView): template_name = 'maasserver/nodegroupinterface_confirm_delete.html' context_object_name = 'interface_to_delete' def get_object(self): uuid = self.kwargs.get('uuid', None) name = self.kwargs.get('name', None) return get_object_or_404( NodeGroupInterface, nodegroup__uuid=uuid, name=name) def get_next_url(self): uuid = self.kwargs.get('uuid', None) return reverse('cluster-edit', args=[uuid]) def delete(self, request, *args, **kwargs): interface = self.get_object() interface.delete() messages.info(request, "Interface %s deleted." 
% interface.name) return HttpResponseRedirect(self.get_next_url()) class ClusterInterfaceEdit(UpdateView): template_name = 'maasserver/nodegroupinterface_edit.html' form_class = NodeGroupInterfaceForm context_object_name = 'interface' def get_success_url(self): uuid = self.kwargs.get('uuid', None) return reverse('cluster-edit', args=[uuid]) def form_valid(self, form): messages.info(self.request, "Interface updated.") return super(ClusterInterfaceEdit, self).form_valid(form) def get_object(self): uuid = self.kwargs.get('uuid', None) name = self.kwargs.get('name', None) return get_object_or_404( NodeGroupInterface, nodegroup__uuid=uuid, name=name) class ClusterInterfaceCreate(CreateView): template_name = 'maasserver/nodegroupinterface_new.html' form_class = NodeGroupInterfaceForm context_object_name = 'interface' def get_form_kwargs(self): kwargs = super(ClusterInterfaceCreate, self).get_form_kwargs() assert kwargs.get('instance', None) is None kwargs['instance'] = NodeGroupInterface(nodegroup=self.get_nodegroup()) return kwargs def get_success_url(self): uuid = self.kwargs.get('uuid', None) return reverse('cluster-edit', args=[uuid]) def form_valid(self, form): self.object = form.save() messages.info(self.request, "Interface created.") return super(ClusterInterfaceCreate, self).form_valid(form) def get_nodegroup(self): nodegroup_uuid = self.kwargs.get('uuid', None) return get_object_or_404(NodeGroup, uuid=nodegroup_uuid) def get_context_data(self, **kwargs): context = super( ClusterInterfaceCreate, self).get_context_data(**kwargs) context['nodegroup'] = self.get_nodegroup() return context maas-1.9.5+bzr4599.orig/src/maasserver/views/combo.py0000644000000000000000000001700113056115004020417 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Combo view.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_combo_view', ] from functools import partial import os from convoy.combo import ( combine_files, parse_qs, ) from django.conf import settings from django.http import ( HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect, ) from maasserver.config import RegionConfiguration MERGE_VIEWS = { "jquery.js": { "location": settings.JQUERY_LOCATION, "content_type": "text/javascript; charset=UTF-8", "files": [ "jquery.min.js", ] }, "angular.js": { "location": settings.ANGULARJS_LOCATION, "content_type": "text/javascript; charset=UTF-8", "files": [ "angular.min.js", "angular-route.min.js", "angular-cookies.min.js", ] }, "ng-tags-input.js": { "content_type": "text/javascript; charset=UTF-8", "files": [ "js/angular/3rdparty/ng-tags-input.js", ] }, "maas-angular.js": { "content_type": "text/javascript; charset=UTF-8", "files": [ "js/angular/maas.js", "js/angular/factories/region.js", "js/angular/factories/nodes.js", "js/angular/factories/devices.js", "js/angular/factories/clusters.js", "js/angular/factories/zones.js", "js/angular/factories/general.js", "js/angular/factories/users.js", "js/angular/factories/events.js", "js/angular/factories/tags.js", "js/angular/factories/subnets.js", "js/angular/factories/spaces.js", "js/angular/factories/vlans.js", "js/angular/factories/fabrics.js", "js/angular/services/search.js", "js/angular/services/manager.js", "js/angular/services/managerhelper.js", "js/angular/services/error.js", "js/angular/services/validation.js", "js/angular/services/browser.js", "js/angular/services/converter.js", "js/angular/services/json.js", "js/angular/directives/error_overlay.js", "js/angular/directives/code_lines.js", "js/angular/directives/error_toggle.js", "js/angular/directives/call_to_action.js", "js/angular/directives/power_parameters.js", "js/angular/directives/os_select.js", "js/angular/directives/type.js", "js/angular/directives/accordion.js", "js/angular/directives/dbl_click_overlay.js", "js/angular/directives/contenteditable.js", "js/angular/directives/sticky_header.js", "js/angular/directives/placeholder.js", "js/angular/directives/enter_blur.js", "js/angular/directives/version_reloader.js", "js/angular/filters/nodes.js", "js/angular/filters/by_fabric.js", "js/angular/filters/by_vlan.js", "js/angular/filters/by_space.js", "js/angular/filters/remove_default_vlan.js", "js/angular/controllers/nodes_list.js", "js/angular/controllers/add_hardware.js", "js/angular/controllers/add_device.js", "js/angular/controllers/node_details.js", "js/angular/controllers/node_details_networking.js", "js/angular/controllers/node_details_storage.js", "js/angular/controllers/node_result.js", "js/angular/controllers/node_events.js", "js/angular/controllers/subnets_list.js", "js/angular/controllers/subnet_details.js", ] }, "yui.js": { "location": settings.YUI_LOCATION, "content_type": "text/javascript; charset=UTF-8", "files": [ "yui-base/yui-base-min.js", ] }, "maas-yui.js": { "content_type": "text/javascript; charset=UTF-8", "files": [ "js/io.js", "js/image.js", "js/image_views.js", "js/user_panel.js", "js/prefs.js", "js/shortpoll.js", "js/enums.js", "js/reveal.js", "js/os_distro_select.js", ] }, } def get_absolute_location(location=''): """Return the absolute location of a static resource. This utility exist to deal with the various places where MAAS can find static resources. 
If the given location is an absolute location, return it. If not, treat the location as a relative location. :param location: An optional absolute or relative location. :type location: unicode :return: The absolute path. :rtype: unicode """ if location.startswith(os.path.sep): return location else: with RegionConfiguration.open() as config: return os.path.join(config.static_root, location) def get_combo_view(location='', default_redirect=None): """Return a Django view to serve static resources using a combo loader. :param location: An optional absolute or relative location. :type location: unicode :param default_redirect: An optional address where requests for one file of an unknown file type will be redirected. If this parameter is omitted, such requests will lead to a "Bad request" response. :type location: unicode :return: A Django view method. :rtype: callable """ location = get_absolute_location(location) return partial( combo_view, location=location, default_redirect=default_redirect) def combo_view(request, location, default_redirect=None, encoding='utf8'): """Handle a request for combining a set of files. The files are searched in the absolute location `abs_location` (if defined) or in the relative location `rel_location`. """ fnames = parse_qs(request.META.get("QUERY_STRING", "")) if fnames: if fnames[0].endswith('.js'): content_type = 'text/javascript; charset=UTF-8' elif fnames[0].endswith('.css'): content_type = 'text/css' elif default_redirect is not None and len(fnames) == 1: return HttpResponseRedirect( "%s%s" % (default_redirect, fnames[0])) else: return HttpResponseBadRequest( "Invalid file type requested.", content_type="text/plain; charset=UTF-8") content = "".join( [content.decode(encoding) for content in combine_files( fnames, location, resource_prefix='/', rewrite_urls=True)]) return HttpResponse( content_type=content_type, status=200, content=content) return HttpResponseNotFound() def merge_view(request, filename): """Merge the `files` from `location` into one file. Return the HTTP response with `content_type`. """ merge_info = MERGE_VIEWS.get(filename, None) if merge_info is None: return HttpResponseNotFound() location = merge_info.get("location", None) if location is None: location = get_absolute_location() content = "".join( [content.decode('utf-8') for content in combine_files( merge_info["files"], location, resource_prefix='/', rewrite_urls=True)]) return HttpResponse( content_type=merge_info["content_type"], status=200, content=content) maas-1.9.5+bzr4599.orig/src/maasserver/views/images.py0000644000000000000000000006336413056115004020602 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Image views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ImagesView", "ImageDeleteView", ] from collections import defaultdict import json from distro_info import UbuntuDistroInfo from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.http import ( HttpResponse, HttpResponseForbidden, HttpResponseRedirect, ) from django.shortcuts import get_object_or_404 from django.views.generic.base import TemplateView from django.views.generic.edit import ( FormMixin, ProcessFormView, ) from maasserver.bootresources import ( import_resources, is_import_resources_running, ) from maasserver.bootsources import get_os_info_from_boot_sources from maasserver.clusterrpc.boot_images import ( get_common_available_boot_images, is_import_boot_images_running, ) from maasserver.clusterrpc.osystems import get_os_release_title from maasserver.enum import ( BOOT_RESOURCE_TYPE, NODE_STATUS, ) from maasserver.models import ( BootResource, BootSourceCache, BootSourceSelection, Config, LargeFile, Node, ) from maasserver.views import HelpfulDeleteView from requests import ConnectionError def format_size(size): """Formats the size into human readable.""" for x in ['bytes', 'KB', 'MB', 'GB']: if size < 1024.0: return "%3.1f %s" % (size, x) size /= 1024.0 return "%3.1f %s" % (size, ' TB') def get_distro_series_info_row(series): """Returns the distro series row information from python-distro-info. """ info = UbuntuDistroInfo() for row in info._avail(info._date): if row['series'] == series: return row return None def format_ubuntu_distro_series(series): """Formats the Ubuntu distro series into a version name.""" row = get_distro_series_info_row(series) if row is None: return series return row['version'] class ImagesView(TemplateView, FormMixin, ProcessFormView): template_name = 'maasserver/images.html' context_object_name = "images" status = None def __init__(self, *args, **kwargs): super(ImagesView, self).__init__(*args, **kwargs) # Load the Ubuntu info from the `BootSource`'s. This is done in # __init__ so that it is not done, more that once. try: sources, releases, arches = get_os_info_from_boot_sources('ubuntu') self.connection_error = False self.ubuntu_sources = sources self.ubuntu_releases = releases self.ubuntu_arches = arches except ConnectionError: self.connection_error = True self.ubuntu_sources = [] self.ubuntu_releases = set() self.ubuntu_arches = set() def get(self, request, *args, **kwargs): # Load all the nodes, so its not done on every call # to the method get_number_of_nodes_deployed_for. self.nodes = Node.objects.filter( status__in=[NODE_STATUS.DEPLOYED, NODE_STATUS.DEPLOYING]).only( 'osystem', 'distro_series') self.default_osystem = Config.objects.get_config( 'default_osystem') self.default_distro_series = Config.objects.get_config( 'default_distro_series') # Load list of boot resources that currently exist on all clusters. cluster_images = get_common_available_boot_images() self.clusters_syncing = is_import_boot_images_running() self.cluster_resources = ( BootResource.objects.get_resources_matching_boot_images( cluster_images)) # If the request is ajax, then return the list of resources as json. 
if request.is_ajax(): return self.ajax(request, *args, **kwargs) return super(ImagesView, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): """Return context data that is passed into the template.""" context = super(ImagesView, self).get_context_data(**kwargs) context['region_import_running'] = is_import_resources_running() context['cluster_import_running'] = self.clusters_syncing context['connection_error'] = self.connection_error context['ubuntu_streams_count'] = len(self.ubuntu_sources) context['ubuntu_releases'] = self.format_ubuntu_releases() context['ubuntu_arches'] = self.format_ubuntu_arches() context['other_resources'] = self.get_other_resources() context['generated_resources'] = self.get_generated_resources() context['uploaded_resources'] = self.get_uploaded_resources() return context def post(self, request, *args, **kwargs): """Handle a POST request.""" # Only administrators can change options on this page. if not self.request.user.is_superuser: return HttpResponseForbidden() if 'ubuntu_images' in request.POST: releases = request.POST.getlist('release') arches = request.POST.getlist('arch') self.update_source_selection( self.ubuntu_sources[0], 'ubuntu', releases, arches) return HttpResponseRedirect(reverse('images')) elif 'other_images' in request.POST: images = request.POST.getlist('image') self.update_other_images_source_selection(images) return HttpResponseRedirect(reverse('images')) else: # Unknown action: redirect to the images page (this # shouldn't happen). return HttpResponseRedirect(reverse('images')) def get_ubuntu_release_selections(self): """Return list of all selected releases for Ubuntu. If first item in tuple is true, then all releases are selected by wildcard.""" all_selected = False releases = set() for selection in BootSourceSelection.objects.all(): if selection.os == "ubuntu": if selection.release == "*": all_selected = True else: releases.add(selection.release) return all_selected, releases def format_ubuntu_releases(self): """Return formatted Ubuntu release selections for the template.""" releases = [] all_releases, selected_releases = self.get_ubuntu_release_selections() for release in sorted(list(self.ubuntu_releases), reverse=True): if all_releases or release in selected_releases: checked = True else: checked = False releases.append({ 'name': release, 'title': format_ubuntu_distro_series(release), 'checked': checked, }) return releases def get_ubuntu_arch_selections(self): """Return list of all selected arches for Ubuntu. 
If first item in tuple is true, then all arches are selected by wildcard.""" all_selected = False arches = set() for selection in BootSourceSelection.objects.all(): if selection.os == "ubuntu": for arch in selection.arches: if arch == "*": all_selected = True else: arches.add(arch) return all_selected, arches def format_ubuntu_arches(self): """Return formatted Ubuntu architecture selections for the template.""" arches = [] all_arches, selected_arches = self.get_ubuntu_arch_selections() for arch in sorted(list(self.ubuntu_arches)): if all_arches or arch in selected_arches: checked = True else: checked = False arches.append({ 'name': arch, 'title': arch, 'checked': checked, }) return arches def get_resource_title(self, resource): """Return the title for the resource based on the type and name.""" rtypes_with_split_names = [ BOOT_RESOURCE_TYPE.SYNCED, BOOT_RESOURCE_TYPE.GENERATED, ] if resource.rtype in rtypes_with_split_names: os, series = resource.name.split('/') if resource.name.startswith('ubuntu/'): return format_ubuntu_distro_series(series) else: title = get_os_release_title(os, series) if title is None: return resource.name else: return title else: if 'title' in resource.extra and len(resource.extra['title']) > 0: return resource.extra['title'] else: return resource.name def add_resource_template_attributes(self, resource): """Adds helper attributes to the resource.""" resource.title = self.get_resource_title(resource) resource.arch, resource.subarch = resource.split_arch() resource.number_of_nodes = self.get_number_of_nodes_deployed_for( resource) resource_set = resource.get_latest_set() if resource_set is None: resource.size = format_size(0) resource.last_update = resource.updated resource.complete = False resource.status = "Queued for download" resource.downloading = False else: resource.size = format_size(resource_set.total_size) resource.last_update = resource_set.updated resource.complete = resource_set.complete if not resource.complete: progress = resource_set.progress if progress > 0: resource.status = "Downloading %3.0f%%" % progress resource.downloading = True else: resource.status = "Queued for download" resource.downloading = False else: # See if the resource also exists on all the clusters. if resource in self.cluster_resources: resource.status = "Complete" resource.downloading = False else: resource.complete = False if self.clusters_syncing: resource.status = "Syncing to clusters" resource.downloading = True else: resource.status = "Waiting for clusters to sync" resource.downloading = False def node_has_architecture_for_resource(self, node, resource): """Return True if node is the same architecture as resource.""" arch, _ = resource.split_arch() node_arch, node_subarch = node.split_arch() return arch == node_arch and resource.supports_subarch(node_subarch) def get_number_of_nodes_deployed_for(self, resource): """Return number of nodes that are deploying the given os, series, and architecture.""" if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED: osystem = 'custom' distro_series = resource.name else: osystem, distro_series = resource.name.split('/') # Count the number of nodes with same os/release and architecture. count = 0 for node in self.nodes.filter( osystem=osystem, distro_series=distro_series): if self.node_has_architecture_for_resource(node, resource): count += 1 # Any node that is deployed without osystem and distro_series, # will be using the defaults. 
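        # For example (hypothetical values): with default_osystem='ubuntu'
        # and default_distro_series='trusty', a node deployed with blank
        # osystem/distro_series counts toward the ubuntu/trusty resource.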
        if (self.default_osystem == osystem and
                self.default_distro_series == distro_series):
            for node in self.nodes.filter(
                    osystem="", distro_series=""):
                if self.node_has_architecture_for_resource(node, resource):
                    count += 1
        return count

    def update_source_selection(self, boot_source, os, releases, arches):
        # Remove all selections that are not for one of the given releases.
        BootSourceSelection.objects.filter(
            boot_source=boot_source, os=os).exclude(
            release__in=releases).delete()

        if len(releases) > 0:
            # Create or update the selections.
            for release in releases:
                selection, _ = BootSourceSelection.objects.get_or_create(
                    boot_source=boot_source, os=os, release=release)
                selection.arches = arches
                selection.subarches = ["*"]
                selection.labels = ["*"]
                selection.save()
        else:
            # Create a selection that will cause nothing to be downloaded,
            # since no releases are selected.
            selection, _ = BootSourceSelection.objects.get_or_create(
                boot_source=boot_source, os=os, release="")
            selection.arches = arches
            selection.subarches = ["*"]
            selection.labels = ["*"]
            selection.save()

        # Start the import process, now that the selections have changed.
        import_resources()

    def get_other_synced_resources(self):
        """Return all synced resources that are not Ubuntu."""
        resources = list(BootResource.objects.filter(
            rtype=BOOT_RESOURCE_TYPE.SYNCED).exclude(
            name__startswith='ubuntu/').order_by('-name', 'architecture'))
        for resource in resources:
            self.add_resource_template_attributes(resource)
        return resources

    def check_if_image_matches_resource(self, resource, image):
        """Return True if the resource matches the image."""
        os, series = resource.name.split('/')
        arch, subarch = resource.split_arch()
        if os != image.os or series != image.release or arch != image.arch:
            return False
        if not resource.supports_subarch(subarch):
            return False
        return True

    def get_matching_resource_for_image(self, resources, image):
        """Return the resource that matches the image, or None if no
        resource matches."""
        for resource in resources:
            if self.check_if_image_matches_resource(resource, image):
                return resource
        return None

    def get_other_resources(self):
        """Return all other resources, whether they are synced or not."""
        # Get the resources that already exist in the region.
        resources = self.get_other_synced_resources()
        images = list(BootSourceCache.objects.exclude(os='ubuntu'))
        for image in images:
            resource = self.get_matching_resource_for_image(resources, image)
            if resource is None:
                image.exists = False
                image.complete = False
                image.size = '-'
                image.last_update = 'not synced'
                image.status = ""
                image.downloading = False
                image.number_of_nodes = '-'
            else:
                self.add_resource_template_attributes(resource)
                image.exists = True
                image.complete = resource.complete
                image.size = resource.size
                image.last_update = resource.last_update
                image.status = resource.status
                image.downloading = resource.downloading
                image.number_of_nodes = (
                    self.get_number_of_nodes_deployed_for(resource))
            image.title = get_os_release_title(image.os, image.release)
            if image.title is None:
                image.title = '%s/%s' % (image.os, image.release)

        # Only superusers can change selections about other images, so we
        # only show the images that already exist for standard users.
        if not self.request.user.is_superuser:
            images = [
                image
                for image in images
                if image.exists
            ]
        return images

    def update_other_images_source_selection(self, images):
        """Update `BootSourceSelection`s to only include the selected
        images."""
        # Remove all selections that are not Ubuntu.
        BootSourceSelection.objects.exclude(os='ubuntu').delete()

        # Break down the images into os/release with multiple arches.
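        # For example (hypothetical image string), 'centos/amd64/generic/cent7'
        # splits into os='centos', arch='amd64', release='cent7', giving
        # selections['centos/cent7'] == ['amd64'].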
        selections = defaultdict(list)
        for image in images:
            os, arch, _, release = image.split('/', 4)
            name = '%s/%s' % (os, release)
            selections[name].append(arch)

        # Create each selection for the source.
        for name, arches in selections.items():
            os, release = name.split('/')
            # Any one of the grouped arches is enough to locate the cache
            # entry (and therefore the boot source) for this os/release.
            cache = BootSourceCache.objects.filter(
                os=os, arch__in=arches, release=release).first()
            if cache is None:
                # It is possible the cache changed while waiting for the user
                # to perform an action. Ignore the selection as it's no longer
                # available.
                continue

            # Create the selection for the source.
            BootSourceSelection.objects.create(
                boot_source=cache.boot_source,
                os=os, release=release,
                arches=arches, subarches=["*"], labels=["*"])

        # Start the import process, now that the selections have changed.
        import_resources()

    def get_generated_resources(self):
        """Return all generated resources."""
        resources = list(BootResource.objects.filter(
            rtype=BOOT_RESOURCE_TYPE.GENERATED).order_by(
            '-name', 'architecture'))
        for resource in resources:
            self.add_resource_template_attributes(resource)
        return resources

    def get_uploaded_resources(self):
        """Return all uploaded resources, for usage in the template."""
        resources = list(BootResource.objects.filter(
            rtype=BOOT_RESOURCE_TYPE.UPLOADED).order_by(
            'name', 'architecture'))
        for resource in resources:
            self.add_resource_template_attributes(resource)
        return resources

    def pick_latest_datetime(self, time, other_time):
        """Return the datetime that is the latest."""
        if time is None:
            return other_time
        return max(time, other_time)

    def calculate_unique_size_for_resources(self, resources):
        """Return size of all unique largefiles for the given resources."""
        shas = set()
        size = 0
        for resource in resources:
            resource_set = resource.get_latest_set()
            if resource_set is None:
                continue
            for rfile in resource_set.files.all():
                try:
                    largefile = rfile.largefile
                except LargeFile.DoesNotExist:
                    continue
                if largefile.sha256 not in shas:
                    size += largefile.total_size
                    shas.add(largefile.sha256)
        return size

    def are_all_resources_complete(self, resources):
        """Return the complete status for all the given resources."""
        for resource in resources:
            resource_set = resource.get_latest_set()
            if resource_set is None:
                return False
            if not resource_set.complete:
                return False
        return True

    def get_last_update_for_resources(self, resources):
        """Return the latest updated time for all resources."""
        last_update = None
        for resource in resources:
            last_update = self.pick_latest_datetime(
                last_update, resource.updated)
            resource_set = resource.get_latest_set()
            if resource_set is not None:
                last_update = self.pick_latest_datetime(
                    last_update, resource_set.updated)
        return last_update

    def get_number_of_nodes_for_resources(self, resources):
        """Return the number of nodes used by all resources."""
        return sum(
            self.get_number_of_nodes_deployed_for(resource)
            for resource in resources)

    def get_progress_for_resources(self, resources):
        """Return the overall progress for all resources."""
        size = 0
        total_size = 0
        for resource in resources:
            resource_set = resource.get_latest_set()
            if resource_set is not None:
                size += resource_set.size
                total_size += resource_set.total_size
        if total_size <= 0:
            # Handle division by zero.
            return 0
        return 100.0 * (size / float(total_size))

    def resource_group_to_resource(self, group):
        """Convert the list of resources into one resource to be used in
        the UI."""
        # Calculate all of the values using all of the resources for
        # this combination.
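        # 'group' holds every subarchitecture variant of a single name/arch
        # pair (see combine_resources below). The aggregates computed here
        # are attached to the first variant, which stands in for the whole
        # group in the UI.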
        last_update = self.get_last_update_for_resources(group)
        unique_size = self.calculate_unique_size_for_resources(group)
        number_of_nodes = self.get_number_of_nodes_for_resources(group)
        complete = self.are_all_resources_complete(group)
        progress = self.get_progress_for_resources(group)

        # Set the computed attributes on the first resource as that will
        # be the only one returned to the UI.
        resource = group[0]
        resource.arch, resource.subarch = resource.split_arch()
        resource.title = self.get_resource_title(resource)
        resource.complete = complete
        resource.size = format_size(unique_size)
        resource.last_update = last_update
        resource.number_of_nodes = number_of_nodes
        if not complete:
            if progress > 0:
                resource.status = "Downloading %3.0f%%" % progress
                resource.downloading = True
            else:
                resource.status = "Queued for download"
                resource.downloading = False
        else:
            # See if any of the resources in this group have been synced
            # to the clusters.
            cluster_has_resources = any(
                res in group
                for res in self.cluster_resources)
            if cluster_has_resources:
                resource.status = "Complete"
                resource.downloading = False
            else:
                resource.complete = False
                if self.clusters_syncing:
                    resource.status = "Syncing to clusters"
                    resource.downloading = True
                else:
                    resource.status = "Waiting for clusters to sync"
                    resource.downloading = False
        return resource

    def combine_resources(self, resources):
        """Return a list of resources, combining all of the subarchitecture
        resources into one resource."""
        resource_group = defaultdict(list)
        for resource in resources:
            arch = resource.split_arch()[0]
            key = '%s/%s' % (resource.name, arch)
            resource_group[key].append(resource)
        return [
            self.resource_group_to_resource(group)
            for _, group in resource_group.items()
            ]

    def ajax(self, request, *args, **kwargs):
        """Return all resources in a json object.

        This is used by the image model list on the client side to update
        the status of images."""
        resources = self.combine_resources(BootResource.objects.all())
        json_resources = [
            dict(
                id=resource.id,
                rtype=resource.rtype, name=resource.name,
                title=resource.title, arch=resource.arch, size=resource.size,
                complete=resource.complete, status=resource.status,
                downloading=resource.downloading,
                numberOfNodes=resource.number_of_nodes,
                lastUpdate=resource.last_update.strftime(
                    "%a, %d %b. %Y %H:%M:%S")
                )
            for resource in resources
            ]
        data = dict(
            region_import_running=is_import_resources_running(),
            cluster_import_running=self.clusters_syncing,
            resources=json_resources)
        json_data = json.dumps(data)
        return HttpResponse(json_data, content_type='application/json')


class ImageDeleteView(HelpfulDeleteView):

    template_name = 'maasserver/image_confirm_delete.html'
    context_object_name = 'image_to_delete'
    model = BootResource

    def post(self, request, *args, **kwargs):
        if not request.user.is_superuser:
            raise PermissionDenied()
        return super(ImageDeleteView, self).post(request, *args, **kwargs)

    def get_object(self):
        resource_id = self.kwargs.get('resource_id', None)
        resource = get_object_or_404(BootResource, id=resource_id)
        if resource.rtype == BOOT_RESOURCE_TYPE.SYNCED:
            raise PermissionDenied()
        if resource.rtype == BOOT_RESOURCE_TYPE.UPLOADED:
            if 'title' in resource.extra:
                resource.title = resource.extra['title']
            else:
                resource.title = resource.name
        else:
            os, release = resource.name.split('/')
            title = get_os_release_title(os, release)
            if title is not None:
                resource.title = title
            else:
                resource.title = resource.name
        return resource

    def get_next_url(self):
        return reverse('images')

    def name_object(self, obj):
        """See `HelpfulDeleteView`."""
        title = ""
        if obj.rtype == BOOT_RESOURCE_TYPE.UPLOADED:
            if 'title' in obj.extra:
                title = obj.extra['title']
            else:
                title = obj.name
        else:
            os, release = obj.name.split('/')
            rpc_title = get_os_release_title(os, release)
            if rpc_title is not None:
                title = rpc_title
            else:
                title = obj.name
        return "%s (%s)" % (title, obj.architecture)
maas-1.9.5+bzr4599.orig/src/maasserver/views/index.py0000644000000000000000000000070313056115004020430 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Index view."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "IndexView",
    ]

from django.views.generic.base import TemplateView


class IndexView(TemplateView):
    template_name = 'maasserver/index.html'
maas-1.9.5+bzr4599.orig/src/maasserver/views/prefs.py0000644000000000000000000000716113056115004020445 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Preferences views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'SSHKeyCreateView', 'SSHKeyDeleteView', 'userprefsview', ] from django.contrib import messages from django.contrib.auth.forms import PasswordChangeForm from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.shortcuts import ( get_object_or_404, render_to_response, ) from django.template import RequestContext from django.views.generic import CreateView from maasserver.forms import ( ProfileForm, SSHKeyForm, SSLKeyForm, ) from maasserver.models import ( SSHKey, SSLKey, ) from maasserver.views import ( HelpfulDeleteView, process_form, ) class SSHKeyCreateView(CreateView): form_class = SSHKeyForm template_name = 'maasserver/prefs_add_sshkey.html' def get_form_kwargs(self): kwargs = super(SSHKeyCreateView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def form_valid(self, form): messages.info(self.request, "SSH key added.") return super(SSHKeyCreateView, self).form_valid(form) def get_success_url(self): return reverse('prefs') class SSLKeyCreateView(CreateView): form_class = SSLKeyForm template_name = 'maasserver/prefs_add_sslkey.html' def get_form_kwargs(self): kwargs = super(SSLKeyCreateView, self).get_form_kwargs() kwargs['user'] = self.request.user return kwargs def form_valid(self, form): messages.info(self.request, "SSL key added.") return super(SSLKeyCreateView, self).form_valid(form) def get_success_url(self): return reverse('prefs') class SSLKeyDeleteView(HelpfulDeleteView): template_name = 'maasserver/prefs_confirm_delete_sslkey.html' context_object_name = 'sslkey' model = SSLKey def get_object(self): keyid = self.kwargs.get('keyid', None) key = get_object_or_404(SSLKey, id=keyid) if key.user != self.request.user: raise PermissionDenied("Can't delete this key. It's not yours.") return key def get_next_url(self): return reverse('prefs') class SSHKeyDeleteView(HelpfulDeleteView): template_name = 'maasserver/prefs_confirm_delete_sshkey.html' context_object_name = 'key' model = SSHKey def get_object(self): keyid = self.kwargs.get('keyid', None) key = get_object_or_404(SSHKey, id=keyid) if key.user != self.request.user: raise PermissionDenied("Can't delete this key. It's not yours.") return key def get_next_url(self): return reverse('prefs') def userprefsview(request): user = request.user # Process the profile update form. profile_form, response = process_form( request, ProfileForm, reverse('prefs'), 'profile', "Profile updated.", {'instance': user}) if response is not None: return response # Process the password change form. password_form, response = process_form( request, PasswordChangeForm, reverse('prefs'), 'password', "Password updated.", {'user': user}) if response is not None: return response return render_to_response( 'maasserver/prefs.html', { 'profile_form': profile_form, 'password_form': password_form, }, context_instance=RequestContext(request)) maas-1.9.5+bzr4599.orig/src/maasserver/views/rpc.py0000644000000000000000000000362213056115004020110 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Views relating to the region<-->cluster RPC mechanism. Each region controller process starts its own RPC endpoint, and this provides the means for clusters to discover what they are. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "info", ] import json from django.http import HttpResponse from maasserver import eventloop def info(request): """View returning a JSON document with information about RPC endpoints. Currently the only information returned is a list of `(host, port)` tuples on which the region has listening RPC endpoints. When the `rpc-advertise` service is not running this returns `None` instead of the list of event-loop endpoints. This denotes something along the lines of "I don't know". The cluster should not act on this, and instead ask again later. """ try: advertiser = eventloop.services.getServiceNamed("rpc-advertise") except KeyError: # RPC advertising service has not been created, so we declare # that there are no endpoints *at all*. endpoints = None else: if advertiser.running: endpoints = {} for name, addr, port in advertiser.dump(): if name in endpoints: endpoints[name].append((addr, port)) else: endpoints[name] = [(addr, port)] else: # RPC advertising service is not running, so we declare that # there are no endpoints *at all*. endpoints = None # Each endpoint is an entry point into this event-loop. info = {"eventloops": endpoints} return HttpResponse(json.dumps(info), content_type="application/json") maas-1.9.5+bzr4599.orig/src/maasserver/views/settings.py0000644000000000000000000002261313056115004021165 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "AccountsAdd", "AccountsDelete", "AccountsEdit", "AccountsView", "settings", ] from django.contrib import messages from django.contrib.auth.forms import AdminPasswordChangeForm from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import ( get_object_or_404, render_to_response, ) from django.template import RequestContext from django.views.generic import ( CreateView, DeleteView, DetailView, ) from django.views.generic.base import TemplateView from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin from maasserver.clusterrpc.osystems import gen_all_known_operating_systems from maasserver.exceptions import CannotDeleteUserException from maasserver.forms import ( BootSourceSettingsForm, CommissioningForm, DeployForm, EditUserForm, GlobalKernelOptsForm, MAASAndNetworkForm, NewUserCreationForm, StorageSettingsForm, ThirdPartyDriversForm, UbuntuForm, WindowsForm, ) from maasserver.models import ( BootSource, LicenseKey, UserProfile, ) from maasserver.utils.osystems import ( get_osystem_from_osystems, get_release_from_osystem, ) from maasserver.views import process_form from metadataserver.models import CommissioningScript class AccountsView(DetailView): """Read-only view of user's account information.""" template_name = 'maasserver/user_view.html' context_object_name = 'view_user' def get_object(self): username = self.kwargs.get('username', None) user = get_object_or_404(User, username=username) return user class AccountsAdd(CreateView): """Add-user view.""" form_class = NewUserCreationForm template_name = 'maasserver/user_add.html' context_object_name = 'new_user' def get_success_url(self): return 
reverse('settings')

    def form_valid(self, form):
        messages.info(self.request, "User added.")
        return super(AccountsAdd, self).form_valid(form)


class AccountsDelete(DeleteView):

    template_name = 'maasserver/user_confirm_delete.html'
    context_object_name = 'user_to_delete'

    def get_object(self):
        username = self.kwargs.get('username', None)
        user = get_object_or_404(User, username=username)
        return user.userprofile

    def get_next_url(self):
        return reverse('settings')

    def delete(self, request, *args, **kwargs):
        profile = self.get_object()
        username = profile.user.username
        try:
            profile.delete()
            messages.info(request, "User %s deleted." % username)
        except CannotDeleteUserException as e:
            messages.info(request, unicode(e))
        return HttpResponseRedirect(self.get_next_url())


class AccountsEdit(TemplateView, ModelFormMixin,
                   SingleObjectTemplateResponseMixin):

    model = User
    template_name = 'maasserver/user_edit.html'

    def get_object(self):
        username = self.kwargs.get('username', None)
        return get_object_or_404(User, username=username)

    def respond(self, request, profile_form, password_form):
        """Generate a response."""
        return self.render_to_response({
            'profile_form': profile_form,
            'password_form': password_form,
            })

    def get(self, request, *args, **kwargs):
        """Called by `TemplateView`: handle a GET request."""
        self.object = user = self.get_object()
        profile_form = EditUserForm(instance=user, prefix='profile')
        password_form = AdminPasswordChangeForm(user=user, prefix='password')
        return self.respond(request, profile_form, password_form)

    def post(self, request, *args, **kwargs):
        """Called by `TemplateView`: handle a POST request."""
        self.object = user = self.get_object()
        next_page = reverse('settings')

        # Process the profile-editing form, if that's what was submitted.
        profile_form, response = process_form(
            request, EditUserForm, next_page, 'profile', "Profile updated.",
            {'instance': user})
        if response is not None:
            return response

        # Process the password change form, if that's what was submitted.
        password_form, response = process_form(
            request, AdminPasswordChangeForm, next_page, 'password',
            "Password updated.", {'user': user})
        if response is not None:
            return response

        return self.respond(request, profile_form, password_form)


def has_osystems_supporting_license_keys(osystems):
    """Return True if any of the given osystems support releases that
    require license keys.
    """
    for osystem in osystems:
        for release in osystem['releases']:
            if release['requires_license_key']:
                return True
    return False


def set_license_key_titles(license_key, osystems):
    """Set the osystem_title and distro_series_title fields on the
    license_key.

    Uses the given "osystems" to get the titles.
    """
    osystem = get_osystem_from_osystems(osystems, license_key.osystem)
    if osystem is None:
        license_key.osystem_title = license_key.osystem
        license_key.distro_series_title = license_key.distro_series
        return
    license_key.osystem_title = osystem['title']
    release = get_release_from_osystem(osystem, license_key.distro_series)
    if release is None:
        license_key.distro_series_title = license_key.distro_series
        return
    license_key.distro_series_title = release['title']


def settings(request):
    user_list = UserProfile.objects.all_users().order_by('username')

    # Process boot source settings form.
    show_boot_source = BootSource.objects.count() < 2
    boot_source_form, response = process_form(
        request, BootSourceSettingsForm, reverse('settings'), 'boot_source',
        "Configuration updated.")
    if response is not None:
        return response

    # Process Third Party Drivers form.
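    # Note: process_form() returns a (form, response) pair. The response is
    # a redirect back to the settings page when the posted form (identified
    # by its prefix) validated and saved, and None otherwise, in which case
    # the bound form is re-rendered below. The same pattern repeats for each
    # form on this page.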
    third_party_drivers_form, response = process_form(
        request, ThirdPartyDriversForm, reverse('settings'),
        'third_party_drivers', "Configuration updated.")
    if response is not None:
        return response

    # Process the storage settings form (disk erasing on release).
    storage_settings_form, response = process_form(
        request, StorageSettingsForm, reverse('settings'),
        'storage_settings', "Configuration updated.")
    if response is not None:
        return response

    # Process the MAAS & network form.
    maas_and_network_form, response = process_form(
        request, MAASAndNetworkForm, reverse('settings'), 'maas_and_network',
        "Configuration updated.")
    if response is not None:
        return response

    # Process the Commissioning form.
    commissioning_form, response = process_form(
        request, CommissioningForm, reverse('settings'), 'commissioning',
        "Configuration updated.")
    if response is not None:
        return response

    # Process the Deploy form.
    deploy_form, response = process_form(
        request, DeployForm, reverse('settings'), 'deploy',
        "Configuration updated.")
    if response is not None:
        return response

    # Process the Ubuntu form.
    ubuntu_form, response = process_form(
        request, UbuntuForm, reverse('settings'), 'ubuntu',
        "Configuration updated.")
    if response is not None:
        return response

    # Process the Windows form.
    windows_form, response = process_form(
        request, WindowsForm, reverse('settings'), 'windows',
        "Configuration updated.")
    if response is not None:
        return response

    # Process the Global Kernel Opts form.
    kernelopts_form, response = process_form(
        request, GlobalKernelOptsForm, reverse('settings'), 'kernelopts',
        "Configuration updated.")
    if response is not None:
        return response

    # Commissioning scripts.
    commissioning_scripts = CommissioningScript.objects.all()

    # License keys, with titles for osystem and distro_series.
    osystems = list(gen_all_known_operating_systems())
    show_license_keys = has_osystems_supporting_license_keys(osystems)
    license_keys = LicenseKey.objects.all()
    for license_key in license_keys:
        set_license_key_titles(license_key, osystems)

    return render_to_response(
        'maasserver/settings.html',
        {
            'user_list': user_list,
            'commissioning_scripts': commissioning_scripts,
            'show_license_keys': show_license_keys,
            'license_keys': license_keys,
            'maas_and_network_form': maas_and_network_form,
            'show_boot_source': show_boot_source,
            'boot_source_form': boot_source_form,
            'third_party_drivers_form': third_party_drivers_form,
            'storage_settings_form': storage_settings_form,
            'commissioning_form': commissioning_form,
            'deploy_form': deploy_form,
            'ubuntu_form': ubuntu_form,
            'windows_form': windows_form,
            'kernelopts_form': kernelopts_form,
        },
        context_instance=RequestContext(request))
maas-1.9.5+bzr4599.orig/src/maasserver/views/settings_commissioning_scripts.py0000644000000000000000000000376613056115004025676 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Commissioning Scripts Settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "CommissioningScriptCreate", "CommissioningScriptDelete", ] from django.contrib import messages from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.views.generic import ( CreateView, DeleteView, ) from maasserver.forms import CommissioningScriptForm from metadataserver.models import CommissioningScript # The anchor of the commissioning scripts slot on the settings page. COMMISSIONING_SCRIPTS_ANCHOR = 'commissioning_scripts' class CommissioningScriptDelete(DeleteView): template_name = ( 'maasserver/settings_confirm_delete_commissioning_script.html') context_object_name = 'script_to_delete' def get_object(self): id = self.kwargs.get('id', None) return get_object_or_404(CommissioningScript, id=id) def get_next_url(self): return reverse('settings') + '#' + COMMISSIONING_SCRIPTS_ANCHOR def delete(self, request, *args, **kwargs): script = self.get_object() script.delete() messages.info( request, "Commissioning script %s deleted." % script.name) return HttpResponseRedirect(self.get_next_url()) class CommissioningScriptCreate(CreateView): template_name = 'maasserver/settings_add_commissioning_script.html' form_class = CommissioningScriptForm context_object_name = 'commissioningscript' def get_success_url(self): return reverse('settings') + '#' + COMMISSIONING_SCRIPTS_ANCHOR def form_valid(self, form): messages.info(self.request, "Commissioning script created.") return super(CommissioningScriptCreate, self).form_valid(form) maas-1.9.5+bzr4599.orig/src/maasserver/views/settings_license_keys.py0000644000000000000000000000540513056115004023722 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """License Key Settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "LicenseKeyCreate", "LicenseKeyDelete", "LicenseKeyEdit", ] from django.contrib import messages from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.views.generic import ( CreateView, DeleteView, UpdateView, ) from maasserver.forms import LicenseKeyForm from maasserver.models import LicenseKey # The anchor of the license keys slot on the settings page. LICENSE_KEY_ANCHOR = 'license_keys' class LicenseKeyDelete(DeleteView): template_name = ( 'maasserver/settings_confirm_delete_license_key.html') context_object_name = 'license_key_to_delete' def get_object(self): osystem = self.kwargs.get('osystem', None) distro_series = self.kwargs.get('distro_series', None) return get_object_or_404( LicenseKey, osystem=osystem, distro_series=distro_series) def get_next_url(self): return reverse('settings') + '#' + LICENSE_KEY_ANCHOR def delete(self, request, *args, **kwargs): license_key = self.get_object() license_key.delete() messages.info( request, "License key %s/%s deleted." 
% (
                license_key.osystem,
                license_key.distro_series,
            ))
        return HttpResponseRedirect(self.get_next_url())


class LicenseKeyCreate(CreateView):

    template_name = 'maasserver/settings_add_license_key.html'
    form_class = LicenseKeyForm
    context_object_name = 'licensekey'

    def get_success_url(self):
        return reverse('settings') + '#' + LICENSE_KEY_ANCHOR

    def form_valid(self, form):
        messages.info(self.request, "License key created.")
        return super(LicenseKeyCreate, self).form_valid(form)


class LicenseKeyEdit(UpdateView):
    """View for editing a license key."""

    model = LicenseKey
    form_class = LicenseKeyForm
    template_name = 'maasserver/settings_edit_license_key.html'

    def get_object(self):
        osystem = self.kwargs.get('osystem', None)
        distro_series = self.kwargs.get('distro_series', None)
        return get_object_or_404(
            LicenseKey, osystem=osystem, distro_series=distro_series)

    def get_success_url(self):
        return reverse('settings') + '#' + LICENSE_KEY_ANCHOR

    def form_valid(self, form):
        messages.info(self.request, "License key updated.")
        return super(LicenseKeyEdit, self).form_valid(form)
maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/0000755000000000000000000000000013056115004020111 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/views/zones.py0000644000000000000000000000535113056115004020463 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Zones views."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'ZoneAdd',
    'ZoneDelete',
    'ZoneEdit',
    'ZoneListView',
    'ZoneView',
    ]

from apiclient.utils import urlencode
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import (
    CreateView,
    DetailView,
    UpdateView,
    )
from maasserver.forms import ZoneForm
from maasserver.models import Zone
from maasserver.views import (
    HelpfulDeleteView,
    PaginatedListView,
    )


class ZoneListView(PaginatedListView):

    context_object_name = "zone_list"

    def get_queryset(self):
        return Zone.objects.all().order_by('name')


class ZoneView(DetailView):
    """Detail view of a physical zone, fetched by name."""

    context_object_name = 'zone'

    def get_object(self):
        zone_name = self.kwargs.get('name', None)
        return get_object_or_404(Zone, name=zone_name)

    def get_context_data(self, **kwargs):
        context = super(ZoneView, self).get_context_data(**kwargs)
        query_string = urlencode(
            [('query', 'zone:(%s)' % self.get_object().name)])
        context["node_list_link"] = (
            reverse('index') + "#/nodes" + "?"
+ query_string) return context class ZoneAdd(CreateView): """View for creating a physical zone.""" form_class = ZoneForm template_name = 'maasserver/zone_add.html' context_object_name = 'new_zone' def get_success_url(self): return reverse('zone-list') def form_valid(self, form): messages.info(self.request, "Zone added.") return super(ZoneAdd, self).form_valid(form) class ZoneEdit(UpdateView): """View for editing a physical zone.""" model = Zone form_class = ZoneForm template_name = 'maasserver/zone_edit.html' def get_object(self): zone_name = self.kwargs.get('name', None) return get_object_or_404(Zone, name=zone_name) def get_success_url(self): return reverse('zone-list') class ZoneDelete(HelpfulDeleteView): """View for deleting a physical zone.""" template_name = 'maasserver/zone_confirm_delete.html' context_object_name = 'zone_to_delete' model = Zone def get_object(self): name = self.kwargs.get('name', None) return get_object_or_404(Zone, name=name) def get_next_url(self): return reverse('zone-list') def name_object(self, obj): """See `HelpfulDeleteView`.""" return "Zone %s" % obj.name maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/__init__.py0000644000000000000000000000000013056115004022210 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_account.py0000644000000000000000000000604013056115004023156 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver account views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.conf import settings from django.contrib.auth import SESSION_KEY from django.core.urlresolvers import reverse from lxml.html import ( fromstring, tostring, ) from maasserver.testing import ( extract_redirect, get_content_links, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class TestLogin(MAASServerTestCase): def test_login_contains_input_tags_if_user(self): factory.make_User() response = self.client.get('/accounts/login/') doc = fromstring(response.content) self.assertFalse(response.context['no_users']) self.assertEqual(1, len(doc.cssselect('input#id_username'))) self.assertEqual(1, len(doc.cssselect('input#id_password'))) def test_login_displays_createadmin_message_if_no_user(self): path = factory.make_string() self.patch(settings, 'MAAS_CLI', path) response = self.client.get('/accounts/login/') self.assertTrue(response.context['no_users']) self.assertEqual(path, response.context['create_command']) def test_login_redirects_when_authenticated(self): password = factory.make_string() user = factory.make_User(password=password) self.client.login(username=user.username, password=password) response = self.client.get('/accounts/login/') self.assertEqual('/', extract_redirect(response)) def test_login_sets_autocomplete_off_in_production(self): self.patch(settings, 'DEBUG', False) factory.make_User() response = self.client.get('/accounts/login/') doc = fromstring(response.content) form = doc.cssselect("form")[0] self.assertIn('autocomplete="off"', tostring(form)) def test_login_sets_autocomplete_on_in_debug_mode(self): self.patch(settings, 'DEBUG', True) factory.make_User() response = self.client.get('/accounts/login/') doc = fromstring(response.content) form = doc.cssselect("form")[0] self.assertNotIn('autocomplete="off"', tostring(form)) class TestLogout(MAASServerTestCase): 
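    """Tests for logging out through the web UI."""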
    def test_logout_link_present_on_homepage(self):
        self.client_log_in()
        response = self.client.get(reverse('index'))
        logout_link = reverse('logout')
        self.assertIn(
            logout_link,
            get_content_links(response, element='#user-options'))

    def test_logout_uses_POST(self):
        # Using POST for logging out, along with Django's csrf_token
        # tag, guarantees that we're protected against CSRF attacks on
        # the logout page.
        self.client_log_in()
        self.client.post(reverse('logout'))
        self.assertNotIn(SESSION_KEY, self.client.session.keys())
maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_clusters.py0000644000000000000000000002725313056115004023377 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Test maasserver clusters views."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

import httplib

from django.core.urlresolvers import reverse
from lxml.html import fromstring
from maasserver.enum import (
    NODEGROUP_STATE,
    NODEGROUP_STATUS,
    NODEGROUPINTERFACE_MANAGEMENT,
    )
from maasserver.models import (
    BootResource,
    NodeGroup,
    NodeGroupInterface,
    )
from maasserver.testing import (
    extract_redirect,
    get_content_links,
    )
from maasserver.testing.factory import factory
from maasserver.testing.orm import reload_object
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.views.clusters import ClusterListView
from netaddr import IPNetwork
from testtools.matchers import (
    ContainsAll,
    HasLength,
    MatchesStructure,
    )


class ClusterListingTest(MAASServerTestCase):

    def get_url(self):
        """Return the listing url used in this scenario."""
        return reverse('cluster-list')

    def make_listing_view(self, status):
        view = ClusterListView()
        view.status = status
        return view

    def test_listing_is_paginated(self):
        self.patch(ClusterListView, "paginate_by", 2)
        self.client_log_in(as_admin=True)
        for _ in range(3):
            factory.make_NodeGroup()
        response = self.client.get(self.get_url())
        self.assertEqual(httplib.OK, response.status_code)
        doc = fromstring(response.content)
        self.assertThat(
            doc.cssselect('div.pagination'), HasLength(1),
            "Couldn't find pagination tag.")


class ClusterListingStateTest(MAASServerTestCase):

    scenarios = [
        ('disconnected', {
            'state': NODEGROUP_STATE.DISCONNECTED,
            'text': '-',
            'connection': '✗',
            }),
        ('out-of-sync', {
            'state': NODEGROUP_STATE.OUT_OF_SYNC,
            'text': NODEGROUP_STATE.OUT_OF_SYNC,
            'connection': '✓',
            }),
        ('syncing', {
            'state': NODEGROUP_STATE.SYNCING,
            'text': NODEGROUP_STATE.SYNCING,
            'connection': '✓',
            }),
        ('synced', {
            'state': NODEGROUP_STATE.SYNCED,
            'text': NODEGROUP_STATE.SYNCED,
            'connection': '✓',
            }),
        ]

    def test_listing_displays_connected_image_status(self):
        self.client_log_in(as_admin=True)
        factory.make_BootResource()
        nodegroup = factory.make_NodeGroup(
            status=NODEGROUP_STATUS.ENABLED, name=self.state)

        def mock_get_state(self):
            # Return a state, which is set to the name of the nodegroup.
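            # The nodegroup above was created with name=self.state, so the
            # patched get_state() returns exactly the scenario's state value.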
return self.name self.patch(NodeGroup, 'get_state', mock_get_state) response = self.client.get( reverse('cluster-list')) document = fromstring(response.content) images_col = document.xpath( "//td[@id='%s_images']" % nodegroup.uuid)[0] connection_col = document.xpath( "//td[@id='%s_connection']" % nodegroup.uuid)[0] self.assertEqual( self.text, images_col.text_content().strip()) self.assertEqual( self.connection, connection_col.text_content().strip()) class ClusterListingNoImagesTest(MAASServerTestCase): def test_listing_displays_no_images_available(self): self.client_log_in(as_admin=True) BootResource.objects.all().delete() nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED) def mock_get_state(self): return NODEGROUP_STATE.OUT_OF_SYNC self.patch(NodeGroup, 'get_state', mock_get_state) response = self.client.get( reverse('cluster-list')) document = fromstring(response.content) images_col = document.xpath( "//td[@id='%s_images']" % nodegroup.uuid)[0] self.assertEqual( "No images available", images_col.text_content().strip()) class ClusterListingAccess(MAASServerTestCase): def test_admin_sees_cluster_tab(self): self.client_log_in(as_admin=True) links = get_content_links( self.client.get(reverse('index')), element='#main-nav') self.assertIn(reverse('cluster-list'), links) def test_non_admin_doesnt_see_cluster_tab(self): self.client_log_in(as_admin=False) links = get_content_links( self.client.get(reverse('index')), element='#main-nav') self.assertNotIn(reverse('cluster-list'), links) class ClusterDeleteTest(MAASServerTestCase): def test_can_delete_cluster(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup() delete_link = reverse('cluster-delete', args=[nodegroup.uuid]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( (httplib.FOUND, reverse('cluster-list')), (response.status_code, extract_redirect(response))) self.assertFalse( NodeGroup.objects.filter(uuid=nodegroup.uuid).exists()) class ClusterEditTest(MAASServerTestCase): def test_cluster_page_contains_links_to_edit_and_delete_interfaces(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup() interfaces = set() for _ in range(3): interfaces.add( factory.make_NodeGroupInterface( nodegroup=nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)) links = get_content_links( self.client.get(reverse('cluster-edit', args=[nodegroup.uuid]))) interface_edit_links = [ reverse( 'cluster-interface-edit', args=[nodegroup.uuid, interface.name]) for interface in interfaces] interface_delete_links = [ reverse( 'cluster-interface-delete', args=[nodegroup.uuid, interface.name]) for interface in interfaces] self.assertThat( links, ContainsAll(interface_edit_links + interface_delete_links)) def test_can_edit_cluster(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup() edit_link = reverse('cluster-edit', args=[nodegroup.uuid]) data = { 'cluster_name': factory.make_name('cluster_name'), 'name': factory.make_name('name'), 'status': factory.pick_enum(NODEGROUP_STATUS), } response = self.client.post(edit_link, data) self.assertEqual(httplib.FOUND, response.status_code, response.content) self.assertThat( reload_object(nodegroup), MatchesStructure.byEquality(**data)) def test_contains_link_to_add_interface(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup() links = get_content_links( self.client.get(reverse('cluster-edit', args=[nodegroup.uuid]))) self.assertIn( reverse('cluster-interface-create', args=[nodegroup.uuid]), 
links) def test_admin_can_disable_default_disable_ipv4_flag(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup(default_disable_ipv4=True) edit_link = reverse('cluster-edit', args=[nodegroup.uuid]) # In a UI submission, omitting a boolean means setting it to False. data = { 'ui_submission': True, } response = self.client.post(edit_link, data) self.assertEqual(httplib.FOUND, response.status_code) self.assertFalse(reload_object(nodegroup).default_disable_ipv4) class ClusterInterfaceDeleteTest(MAASServerTestCase): def test_can_delete_cluster_interface(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup() interface = factory.make_NodeGroupInterface( nodegroup=nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) delete_link = reverse( 'cluster-interface-delete', args=[nodegroup.uuid, interface.name]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])), (response.status_code, extract_redirect(response))) self.assertFalse( NodeGroupInterface.objects.filter(id=interface.id).exists()) def test_interface_delete_supports_interface_alias(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) interface = factory.make_NodeGroupInterface( nodegroup=nodegroup, name="eth0:0") delete_link = reverse( 'cluster-interface-delete', args=[nodegroup.uuid, interface.name]) # The real test is that reverse() does not blow up when the # interface's name contains an alias. self.assertIsInstance(delete_link, (bytes, unicode)) class ClusterInterfaceEditTest(MAASServerTestCase): def test_can_edit_cluster_interface(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) interface = factory.make_NodeGroupInterface( nodegroup=nodegroup) edit_link = reverse( 'cluster-interface-edit', args=[nodegroup.uuid, interface.name]) data = factory.get_interface_fields() del data['subnet'] response = self.client.post(edit_link, data) self.assertEqual( (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])), (response.status_code, extract_redirect(response))) interface = reload_object(interface) self.assertThat( interface, MatchesStructure.byEquality(**data)) cidr = unicode( IPNetwork("%s/%s" % (data['ip'], data['subnet_mask'])).cidr) self.assertThat( interface.subnet, MatchesStructure.byEquality(cidr=cidr)) def test_interface_edit_supports_interface_alias(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) interface = factory.make_NodeGroupInterface( nodegroup=nodegroup, name="eth0:0") edit_link = reverse( 'cluster-interface-edit', args=[nodegroup.uuid, interface.name]) # The real test is that reverse() does not blow up when the # interface's name contains an alias. 
self.assertIsInstance(edit_link, (bytes, unicode)) class ClusterInterfaceCreateTest(MAASServerTestCase): def test_can_create_cluster_interface(self): self.client_log_in(as_admin=True) nodegroup = factory.make_NodeGroup( management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED) create_link = reverse( 'cluster-interface-create', args=[nodegroup.uuid]) data = factory.get_interface_fields() del data['subnet'] response = self.client.post(create_link, data) self.assertEqual( (httplib.FOUND, reverse('cluster-edit', args=[nodegroup.uuid])), (response.status_code, extract_redirect(response))) interface = NodeGroupInterface.objects.get( nodegroup__uuid=nodegroup.uuid, name=data['name']) self.assertThat( reload_object(interface), MatchesStructure.byEquality(**data)) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_combo.py0000644000000000000000000001574313056115004022633 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test combo view.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import Callable import httplib import os from django.core.urlresolvers import reverse from django.test.client import RequestFactory from maasserver import config from maasserver.testing import extract_redirect from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.views.combo import ( get_absolute_location, get_combo_view, MERGE_VIEWS, ) from maastesting.fixtures import ImportErrorFixture class TestUtilities(MAASServerTestCase): def test_get_abs_location_returns_absolute_location_if_not_None(self): abs_location = '%s%s' % (os.path.sep, factory.make_string()) self.assertEqual( abs_location, get_absolute_location(location=abs_location)) def test_get_abs_location_returns_rel_loc_if_not_in_dev_environment(self): self.useFixture(RegionConfigurationFixture()) self.useFixture(ImportErrorFixture('maastesting', 'root')) static_root = factory.make_string() self.patch(config.RegionConfiguration, 'static_root', static_root) rel_location = os.path.join( factory.make_string(), factory.make_string()) expected_location = os.path.join(static_root, rel_location) observed = get_absolute_location(location=rel_location) self.assertEqual(expected_location, observed) def test_get_abs_location_returns_rel_loc_if_in_dev_environment(self): rel_location = os.path.join( factory.make_string(), factory.make_string()) rel_location_base = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static') expected_location = os.path.join(rel_location_base, rel_location) self.assertEqual( expected_location, get_absolute_location(location=rel_location)) def test_get_combo_view_returns_callable(self): rel_location = os.path.join( factory.make_string(), factory.make_string()) view = get_combo_view(rel_location) self.assertIsInstance(view, Callable) def test_get_combo_view_loads_from_disk(self): test_file_contents = factory.make_string() # Create a valid file with a proper extension (the combo loader only # serves JS or CSS files) test_file_name = "%s.js" % factory.make_string() test_file = self.make_file( name=test_file_name, contents=test_file_contents) directory = os.path.dirname(test_file) view = get_combo_view(directory) # Create a request for test file. 
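        # The '/test/' path below is arbitrary: the combo view picks the
        # requested file names out of the query string alone.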
        rf = RequestFactory()
        request = rf.get("/test/?%s" % test_file_name)
        response = view(request)
        expected_content = '/* %s */\n%s\n' % (
            test_file_name, test_file_contents)
        self.assertEqual(
            (httplib.OK, expected_content),
            (response.status_code, response.content))

    def test_get_combo_redirects_if_unknown_type(self):
        # The optional parameter 'default_redirect' allows configuring
        # a default address to which requests for files of unknown types
        # will be redirected.
        # Create a test file with an unknown extension.
        test_file_name = "%s.%s" % (
            factory.make_string(), factory.make_string())
        redirect_root = factory.make_string()
        view = get_combo_view(
            factory.make_string(), default_redirect=redirect_root)
        rf = RequestFactory()
        request = rf.get("/test/?%s" % test_file_name)
        response = view(request)
        self.assertEqual(
            '%s%s' % (redirect_root, test_file_name),
            extract_redirect(response))


# String used by convoy to replace missing files.
CONVOY_MISSING_FILE = "/* [missing] */"


class TestComboLoaderView(MAASServerTestCase):
    """Test combo loader views."""

    def test_yui_load_js(self):
        requested_files = [
            'oop/oop.js',
            'event-custom-base/event-custom-base.js'
            ]
        url = '%s?%s' % (reverse('combo-yui'), '&'.join(requested_files))
        response = self.client.get(url)
        self.assertIn('text/javascript', response['Content-Type'])
        for requested_file in requested_files:
            self.assertIn(requested_file, response.content)
        # No sign of a missing js file.
        self.assertNotIn(CONVOY_MISSING_FILE, response.content)
        # The file contains a link to YUI's licence.
        self.assertIn('http://yuilibrary.com/license/', response.content)

    def test_yui_load_css(self):
        requested_files = [
            'widget-base/assets/skins/sam/widget-base.css',
            'widget-stack/assets/skins/sam/widget-stack.css',
            ]
        url = '%s?%s' % (reverse('combo-yui'), '&'.join(requested_files))
        response = self.client.get(url)
        self.assertIn('text/css', response['Content-Type'])
        for requested_file in requested_files:
            self.assertIn(requested_file, response.content)
        # No sign of a missing css file.
        self.assertNotIn(CONVOY_MISSING_FILE, response.content)
        # The file contains a link to YUI's licence.
        self.assertIn('http://yuilibrary.com/license/', response.content)

    def test_yui_combo_no_file_returns_not_found(self):
        response = self.client.get(reverse('combo-yui'))
        self.assertEqual(httplib.NOT_FOUND, response.status_code)

    def test_yui_combo_other_file_extension_returns_bad_request(self):
        url = '%s?%s' % (reverse('combo-yui'), 'file.wrongextension')
        response = self.client.get(url)
        self.assertEqual(
            (httplib.BAD_REQUEST, "Invalid file type requested."),
            (response.status_code, response.content))


class TestMergeLoaderView(MAASServerTestCase):
    """Test merge loader views."""

    def test_loads_all_views_correctly(self):
        for filename, merge_info in MERGE_VIEWS.items():
            url = reverse('merge', args=[filename])
            response = self.client.get(url)
            self.assertEqual(
                merge_info["content_type"], response['Content-Type'],
                "Content-type for %s does not match." % filename)
            # Has all required files.
            for requested_file in merge_info["files"]:
                self.assertIn(
                    requested_file, response.content.decode("utf-8"))
            # No sign of a missing js file.
self.assertNotIn( CONVOY_MISSING_FILE, response.content.decode("utf-8")) def test_load_unknown_returns_302_blocked_by_middleware(self): response = self.client.get(reverse('merge', args=["unknown.js"])) self.assertEqual(httplib.FOUND, response.status_code) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_general.py0000644000000000000000000002632313056115004023145 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver API.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from random import randint from xmlrpclib import Fault from django.conf.urls import patterns from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.http import Http404 from django.test.client import RequestFactory from django.utils.html import escape from lxml.html import fromstring from maasserver.components import register_persistent_error from maasserver.testing import extract_redirect from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.views import ( HelpfulDeleteView, PaginatedListView, ) from testtools.matchers import ContainsAll class Test404500(MAASServerTestCase): """Test pages displayed when an error 404 or an error 500 occur.""" def test_404(self): self.client_log_in() response = self.client.get('/no-found-page/') doc = fromstring(response.content) self.assertIn( "Error: Page not found", doc.cssselect('title')[0].text) self.assertSequenceEqual( ['The requested URL /no-found-page/ was not found on this ' 'server.'], [elem.text.strip() for elem in doc.cssselect('h2')]) def test_500(self): self.client_log_in() from maasserver.urls import urlpatterns urlpatterns += patterns( '', (r'^500/$', 'django.views.defaults.server_error'), ) response = self.client.get('/500/') doc = fromstring(response.content) self.assertIn( "Internal server error", doc.cssselect('title')[0].text) self.assertSequenceEqual( ['Internal server error.'], [elem.text.strip() for elem in doc.cssselect('h2')]) class FakeDeletableModel: """A fake model class, with a delete method.""" class Meta: app_label = 'maasserver' object_name = 'fake' verbose_name = "fake object" _meta = Meta deleted = False def delete(self): self.deleted = True class FakeDeleteView(HelpfulDeleteView): """A fake `HelpfulDeleteView` instance. Goes through most of the motions. There are a few special features to help testing along: - If there's no object, get_object() raises Http404. - Info messages are captured in self.notices. """ model = FakeDeletableModel template_name = 'not-a-real-template' def __init__(self, obj=None, next_url=None, request=None): self.obj = obj self.next_url = next_url self.request = request self.notices = [] def get_object(self): if self.obj is None: raise Http404() else: return self.obj def get_next_url(self): return self.next_url def raise_permission_denied(self): """Helper to substitute for get_object.""" raise PermissionDenied() def show_notice(self, notice): self.notices.append(notice) class HelpfulDeleteViewTest(MAASServerTestCase): def test_delete_deletes_object(self): obj = FakeDeletableModel() # HttpResponseRedirect does not allow next_url to be None. 
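        # A random string is sufficient here: delete() only needs some value
        # to build its redirect, and the assertions below inspect the side
        # effects rather than the redirect target.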
view = FakeDeleteView(obj, next_url=factory.make_string()) view.delete() self.assertTrue(obj.deleted) self.assertEqual([view.compose_feedback_deleted(obj)], view.notices) def test_delete_is_gentle_with_missing_objects(self): # Deleting a nonexistent object is basically treated as successful. # HttpResponseRedirect does not allow next_url to be None. view = FakeDeleteView(next_url=factory.make_string()) response = view.delete() self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual([view.compose_feedback_nonexistent()], view.notices) def test_delete_is_not_gentle_with_permission_violations(self): view = FakeDeleteView() view.get_object = view.raise_permission_denied self.assertRaises(PermissionDenied, view.delete) def test_get_asks_for_confirmation_and_does_nothing_yet(self): obj = FakeDeletableModel() next_url = factory.make_string() request = RequestFactory().get('/foo') view = FakeDeleteView(obj, request=request, next_url=next_url) response = view.get(request) self.assertEqual(httplib.OK, response.status_code) self.assertNotIn(next_url, response.get('Location', '')) self.assertFalse(obj.deleted) self.assertEqual([], view.notices) def test_get_skips_confirmation_for_missing_objects(self): next_url = factory.make_string() request = RequestFactory().get('/foo') view = FakeDeleteView(next_url=next_url, request=request) response = view.get(request) self.assertEqual(next_url, extract_redirect(response)) self.assertEqual([view.compose_feedback_nonexistent()], view.notices) def test_compose_feedback_nonexistent_names_class(self): class_name = factory.make_string() self.patch(FakeDeletableModel.Meta, 'verbose_name', class_name) view = FakeDeleteView() self.assertEqual( "Not deleting: %s not found." % class_name, view.compose_feedback_nonexistent()) def test_compose_feedback_deleted_uses_name_object(self): object_name = factory.make_string() view = FakeDeleteView(FakeDeletableModel()) view.name_object = lambda _obj: object_name self.assertEqual( "%s deleted." % object_name.capitalize(), view.compose_feedback_deleted(view.obj)) class SimpleFakeModel: """Pretend model object for testing""" def __init__(self, counter): self.id = counter class SimpleListView(PaginatedListView): """Simple paginated view for testing""" paginate_by = 2 query_results = None def __init__(self, query_results): self.query_results = list(query_results) def get_queryset(self): """Return precanned list of objects Really this should return a QuerySet object, but for basic usage a list is close enough. 
""" return self.query_results class PaginatedListViewTests(MAASServerTestCase): """Check PaginatedListView page links inserted into context are correct""" def test_single_page(self): view = SimpleListView.as_view(query_results=[SimpleFakeModel(1)]) request = RequestFactory().get('/index') response = view(request) context = response.context_data self.assertEqual("", context["first_page_link"]) self.assertEqual("", context["previous_page_link"]) self.assertEqual("", context["next_page_link"]) self.assertEqual("", context["last_page_link"]) def test_on_first_page(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(5)]) request = RequestFactory().get('/index') response = view(request) context = response.context_data self.assertEqual("", context["first_page_link"]) self.assertEqual("", context["previous_page_link"]) self.assertEqual("?page=2", context["next_page_link"]) self.assertEqual("?page=3", context["last_page_link"]) def test_on_second_page(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(7)]) request = RequestFactory().get('/index?page=2') response = view(request) context = response.context_data self.assertEqual("index", context["first_page_link"]) self.assertEqual("index", context["previous_page_link"]) self.assertEqual("?page=3", context["next_page_link"]) self.assertEqual("?page=4", context["last_page_link"]) def test_on_final_page(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(5)]) request = RequestFactory().get('/index?page=3') response = view(request) context = response.context_data self.assertEqual("index", context["first_page_link"]) self.assertEqual("?page=2", context["previous_page_link"]) self.assertEqual("", context["next_page_link"]) self.assertEqual("", context["last_page_link"]) def test_relative_to_directory(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(6)]) request = RequestFactory().get('/index/?page=2') response = view(request) context = response.context_data self.assertEqual(".", context["first_page_link"]) self.assertEqual(".", context["previous_page_link"]) self.assertEqual("?page=3", context["next_page_link"]) self.assertEqual("?page=3", context["last_page_link"]) def test_preserves_query_string(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(6)]) request = RequestFactory().get('/index?lookup=value') response = view(request) context = response.context_data self.assertEqual("", context["first_page_link"]) self.assertEqual("", context["previous_page_link"]) # Does this depend on dict hash values for order or does django sort? self.assertEqual("?lookup=value&page=2", context["next_page_link"]) self.assertEqual("?lookup=value&page=3", context["last_page_link"]) def test_preserves_query_string_with_page(self): view = SimpleListView.as_view( query_results=[SimpleFakeModel(i) for i in range(8)]) request = RequestFactory().get('/index?page=3&lookup=value') response = view(request) context = response.context_data self.assertEqual("?lookup=value", context["first_page_link"]) # Does this depend on dict hash values for order or does django sort? 
self.assertEqual("?lookup=value&page=2", context["previous_page_link"]) self.assertEqual("?lookup=value&page=4", context["next_page_link"]) self.assertEqual("?lookup=value&page=4", context["last_page_link"]) class PermanentErrorDisplayTest(MAASServerTestCase): def test_permanent_error_displayed(self): self.client_log_in() fault_codes = [ randint(1, 100), randint(101, 200), ] errors = [] for fault in fault_codes: # Create component with make_string to be sure to display all # the errors. component = factory.make_name('component') error_message = factory.make_name('error') errors.append(Fault(fault, error_message)) register_persistent_error(component, error_message) links = [ reverse('index'), reverse('prefs'), ] for link in links: response = self.client.get(link) self.assertThat( response.content, ContainsAll( [escape(error.faultString) for error in errors])) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_images.py0000644000000000000000000011651513056115004023000 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver images views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import datetime import httplib import json import random from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.enum import ( BOOT_RESOURCE_TYPE, NODE_STATUS, ) from maasserver.models import ( BootResource, BootSourceCache, BootSourceSelection, Config, ) from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.testing import extract_redirect from maasserver.testing.factory import factory from maasserver.testing.orm import ( get_one, reload_object, ) from maasserver.testing.testcase import MAASServerTestCase from maasserver.views import images as images_view from maasserver.views.images import format_size from maastesting.matchers import ( MockCalledOnceWith, MockCalledWith, ) from requests import ConnectionError from testtools.matchers import ( ContainsAll, HasLength, ) class UbuntuImagesTest(MAASServerTestCase): def setUp(self): super(UbuntuImagesTest, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def patch_get_os_info_from_boot_sources( self, sources, releases=None, arches=None): if releases is None: releases = [factory.make_name('release') for _ in range(3)] if arches is None: arches = [factory.make_name('arch') for _ in range(3)] mock_get_os_info = self.patch( images_view, 'get_os_info_from_boot_sources') mock_get_os_info.return_value = (sources, releases, arches) return mock_get_os_info def test_shows_connection_error(self): self.client_log_in(as_admin=True) mock_get_os_info = self.patch( images_view, 'get_os_info_from_boot_sources') mock_get_os_info.side_effect = ConnectionError() response = self.client.get(reverse('images')) doc = fromstring(response.content) warnings = doc.cssselect('div#connection-error') self.assertEqual(1, len(warnings)) def test_shows_no_ubuntu_sources(self): self.client_log_in(as_admin=True) response = self.client.get(reverse('images')) doc = fromstring(response.content) warnings = doc.cssselect('div#no-ubuntu-sources') self.assertEqual(1, len(warnings)) def test_shows_too_many_ubuntu_sources(self): self.client_log_in(as_admin=True) sources = [factory.make_BootSource() for _ in range(2)] self.patch_get_os_info_from_boot_sources(sources) response = self.client.get(reverse('images')) doc = 
fromstring(response.content) warnings = doc.cssselect('div#too-many-ubuntu-sources') self.assertEqual(1, len(warnings)) def test_shows_release_options(self): self.client_log_in(as_admin=True) sources = [factory.make_BootSource()] releases = [factory.make_name('release') for _ in range(3)] self.patch_get_os_info_from_boot_sources(sources, releases=releases) response = self.client.get(reverse('images')) doc = fromstring(response.content) releases_content = doc.cssselect( 'ul#ubuntu-releases')[0].text_content() self.assertThat(releases_content, ContainsAll(releases)) def test_shows_architecture_options(self): self.client_log_in(as_admin=True) sources = [factory.make_BootSource()] arches = [factory.make_name('arch') for _ in range(3)] self.patch_get_os_info_from_boot_sources(sources, arches=arches) response = self.client.get(reverse('images')) doc = fromstring(response.content) arches_content = doc.cssselect( 'ul#ubuntu-arches')[0].text_content() self.assertThat(arches_content, ContainsAll(arches)) def test_shows_missing_images_warning_if_not_ubuntu_boot_resources(self): self.client_log_in() response = self.client.get(reverse('images')) doc = fromstring(response.content) warnings = doc.cssselect('div#missing-ubuntu-images') self.assertEqual(1, len(warnings)) def test_hides_import_button_if_not_admin(self): self.client_log_in() sources = [factory.make_BootSource()] self.patch_get_os_info_from_boot_sources(sources) response = self.client.get(reverse('images')) doc = fromstring(response.content) import_button = doc.cssselect( '#ubuntu-images')[0].cssselect('input[type="submit"]') self.assertEqual(0, len(import_button)) def test_shows_import_button_if_admin(self): self.client_log_in(as_admin=True) sources = [factory.make_BootSource()] self.patch_get_os_info_from_boot_sources(sources) response = self.client.get(reverse('images')) doc = fromstring(response.content) import_button = doc.cssselect( '#ubuntu-images')[0].cssselect('input[type="submit"]') self.assertEqual(1, len(import_button)) def test_post_returns_forbidden_if_not_admin(self): self.client_log_in() response = self.client.post( reverse('images'), {'ubuntu_images': 1}) self.assertEqual(httplib.FORBIDDEN, response.status_code) def test_import_calls_import_resources(self): self.client_log_in(as_admin=True) sources = [factory.make_BootSource()] self.patch_get_os_info_from_boot_sources(sources) mock_import = self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'ubuntu_images': 1}) self.assertEqual(httplib.FOUND, response.status_code) self.assertThat(mock_import, MockCalledOnceWith()) def test_import_sets_empty_selections(self): self.client_log_in(as_admin=True) source = factory.make_BootSource() self.patch_get_os_info_from_boot_sources([source]) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'ubuntu_images': 1}) self.assertEqual(httplib.FOUND, response.status_code) selections = BootSourceSelection.objects.filter(boot_source=source) self.assertThat(selections, HasLength(1)) self.assertEqual( (selections[0].os, selections[0].release, selections[0].arches, selections[0].subarches, selections[0].labels), ("ubuntu", "", [], ["*"], ["*"])) def test_import_sets_release_selections(self): self.client_log_in(as_admin=True) source = factory.make_BootSource() releases = [factory.make_name('release') for _ in range(3)] self.patch_get_os_info_from_boot_sources([source]) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), 
{'ubuntu_images': 1, 'release': releases}) self.assertEqual(httplib.FOUND, response.status_code) selections = BootSourceSelection.objects.filter(boot_source=source) self.assertThat(selections, HasLength(len(releases))) self.assertItemsEqual( releases, [selection.release for selection in selections]) def test_import_sets_arches_on_selections(self): self.client_log_in(as_admin=True) source = factory.make_BootSource() releases = [factory.make_name('release') for _ in range(3)] arches = [factory.make_name('arches') for _ in range(3)] self.patch_get_os_info_from_boot_sources([source]) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'ubuntu_images': 1, 'release': releases, 'arch': arches}) self.assertEqual(httplib.FOUND, response.status_code) selections = BootSourceSelection.objects.filter(boot_source=source) self.assertThat(selections, HasLength(len(releases))) self.assertItemsEqual( [arches, arches, arches], [selection.arches for selection in selections]) def test_import_removes_old_selections(self): self.client_log_in(as_admin=True) source = factory.make_BootSource() release = factory.make_name('release') delete_selection = BootSourceSelection.objects.create( boot_source=source, os='ubuntu', release=factory.make_name('release')) keep_selection = BootSourceSelection.objects.create( boot_source=source, os='ubuntu', release=release) self.patch_get_os_info_from_boot_sources([source]) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'ubuntu_images': 1, 'release': [release]}) self.assertEqual(httplib.FOUND, response.status_code) self.assertIsNone(reload_object(delete_selection)) self.assertIsNotNone(reload_object(keep_selection)) class OtherImagesTest(MAASServerTestCase): def setUp(self): super(OtherImagesTest, self).setUp() self.useFixture(UpdateBootSourceCacheDisconnected()) def make_other_resource(self, os=None, arch=None, subarch=None, release=None): if os is None: os = factory.make_name('os') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if release is None: release = factory.make_name('release') name = '%s/%s' % (os, release) architecture = '%s/%s' % (arch, subarch) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(resource_set) return resource def test_hides_other_synced_images_section(self): self.client_log_in() BootSourceCache.objects.all().delete() response = self.client.get(reverse('images')) doc = fromstring(response.content) section = doc.cssselect('div#other-sync-images') self.assertEqual( 0, len(section), "Didn't hide the other images section.") def test_shows_other_synced_images_section(self): self.client_log_in(as_admin=True) factory.make_BootSourceCache() response = self.client.get(reverse('images')) doc = fromstring(response.content) section = doc.cssselect('div#other-sync-images') self.assertEqual( 1, len(section), "Didn't show the other images section.") def test_hides_image_from_boot_source_cache_without_admin(self): self.client_log_in() factory.make_BootSourceCache() response = self.client.get(reverse('images')) doc = fromstring(response.content) rows = doc.cssselect('table#other-resources > tbody > tr') self.assertEqual( 0, len(rows), "Didn't hide unselected boot image from non-admin.") def test_shows_image_from_boot_source_cache_with_admin(self): 
self.client_log_in(as_admin=True) cache = factory.make_BootSourceCache() response = self.client.get(reverse('images')) doc = fromstring(response.content) title = doc.cssselect( 'table#other-resources > tbody > ' 'tr > td')[1].text_content().strip() self.assertEqual('%s/%s' % (cache.os, cache.release), title) def test_shows_checkbox_for_boot_source_cache(self): self.client_log_in(as_admin=True) factory.make_BootSourceCache() response = self.client.get(reverse('images')) doc = fromstring(response.content) checkbox = doc.cssselect( 'table#other-resources > tbody > tr > td > input') self.assertEqual( 1, len(checkbox), "Didn't show checkbox for boot image.") def test_shows_last_update_time_for_synced_resource(self): self.client_log_in(as_admin=True) cache = factory.make_BootSourceCache() self.make_other_resource( os=cache.os, arch=cache.arch, subarch=cache.subarch, release=cache.release) response = self.client.get(reverse('images')) doc = fromstring(response.content) last_update = doc.cssselect( 'table#other-resources > tbody > ' 'tr > td')[5].text_content().strip() self.assertNotEqual('not synced', last_update) def test_shows_number_of_nodes_for_synced_resource(self): self.client_log_in(as_admin=True) cache = factory.make_BootSourceCache() resource = self.make_other_resource( os=cache.os, arch=cache.arch, subarch=cache.subarch, release=cache.release) factory.make_Node( status=NODE_STATUS.DEPLOYED, osystem=cache.os, distro_series=cache.release, architecture=resource.architecture) response = self.client.get(reverse('images')) doc = fromstring(response.content) number_of_nodes = doc.cssselect( 'table#other-resources > tbody > ' 'tr > td')[4].text_content().strip() self.assertEqual( 1, int(number_of_nodes), "Incorrect number of deployed nodes for resource.") def test_shows_apply_button_if_admin(self): self.client_log_in(as_admin=True) factory.make_BootSourceCache() response = self.client.get(reverse('images')) doc = fromstring(response.content) apply_button = doc.cssselect( '#other-sync-images')[0].cssselect('input[type="submit"]') self.assertEqual( 1, len(apply_button), "Didn't show apply button for admin.") def test_hides_apply_button_if_import_running(self): self.client_log_in(as_admin=True) factory.make_BootSourceCache() self.patch( images_view, 'is_import_resources_running').return_value = True response = self.client.get(reverse('images')) doc = fromstring(response.content) apply_button = doc.cssselect( '#other-sync-images')[0].cssselect('input[type="submit"]') self.assertEqual( 0, len(apply_button), "Didn't hide apply button when import running.") def test_calls_get_os_release_title_for_other_resource(self): self.client_log_in() title = factory.make_name('title') cache = factory.make_BootSourceCache() resource = self.make_other_resource( os=cache.os, arch=cache.arch, subarch=cache.subarch, release=cache.release) mock_get_title = self.patch(images_view, 'get_os_release_title') mock_get_title.return_value = title response = self.client.get(reverse('images')) doc = fromstring(response.content) row_title = doc.cssselect( 'table#other-resources > tbody > ' 'tr > td')[1].text_content().strip() self.assertEqual(title, row_title) os, release = resource.name.split('/') self.assertThat(mock_get_title, MockCalledWith(os, release)) def test_post_returns_forbidden_if_not_admin(self): self.client_log_in() response = self.client.post( reverse('images'), {'other_images': 1}) self.assertEqual(httplib.FORBIDDEN, response.status_code) def test_post_clears_all_other_os_selections(self): 
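# (Editor's aside, a hedged sketch rather than documented behaviour:
# judging from the test after this one, the 'image' values posted to
# this view encode os/arch/subarch/release, e.g.
# 'centos/amd64/generic/centos70' (a hypothetical value), so posting an
# empty 'image' list deselects every non-Ubuntu image while leaving
# Ubuntu selections untouched.)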
self.client_log_in(as_admin=True) source = factory.make_BootSource() ubuntu_selection = BootSourceSelection.objects.create( boot_source=source, os='ubuntu') other_selection = BootSourceSelection.objects.create( boot_source=source, os=factory.make_name('os')) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'other_images': 1, 'image': []}) self.assertEqual(httplib.FOUND, response.status_code) self.assertIsNotNone(reload_object(ubuntu_selection)) self.assertIsNone(reload_object(other_selection)) def test_post_creates_selection_with_multiple_arches(self): self.client_log_in(as_admin=True) source = factory.make_BootSource() os = factory.make_name('os') release = factory.make_name('release') arches = [factory.make_name('arch') for _ in range(3)] images = [] for arch in arches: factory.make_BootSourceCache( boot_source=source, os=os, release=release, arch=arch) images.append('%s/%s/subarch/%s' % (os, arch, release)) self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'other_images': 1, 'image': images}) self.assertEqual(httplib.FOUND, response.status_code) selection = get_one(BootSourceSelection.objects.filter( boot_source=source, os=os, release=release)) self.assertIsNotNone(selection) self.assertItemsEqual(arches, selection.arches) def test_post_calls_import_resources(self): self.client_log_in(as_admin=True) mock_import = self.patch(images_view, 'import_resources') response = self.client.post( reverse('images'), {'other_images': 1, 'image': []}) self.assertEqual(httplib.FOUND, response.status_code) self.assertThat(mock_import, MockCalledOnceWith()) class GeneratedImagesTest(MAASServerTestCase): def make_generated_resource(self, os=None, arch=None, subarch=None, release=None): if os is None: os = factory.make_name('os') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if release is None: release = factory.make_name('release') name = '%s/%s' % (os, release) architecture = '%s/%s' % (arch, subarch) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.GENERATED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(resource_set) return resource def test_hides_generated_images_section(self): self.client_log_in() response = self.client.get(reverse('images')) doc = fromstring(response.content) section = doc.cssselect('div#generated-images') self.assertEqual( 0, len(section), "Didn't hide the generated images section.") def test_shows_generated_images_section(self): self.client_log_in() self.make_generated_resource() response = self.client.get(reverse('images')) doc = fromstring(response.content) section = doc.cssselect('div#generated-images') self.assertEqual( 1, len(section), "Didn't show the generated images section.") def test_shows_generated_resources(self): self.client_log_in() resources = [self.make_generated_resource() for _ in range(3)] names = [resource.name for resource in resources] response = self.client.get(reverse('images')) doc = fromstring(response.content) table_content = doc.cssselect( 'table#generated-resources')[0].text_content() self.assertThat(table_content, ContainsAll(names)) def test_shows_delete_button_for_generated_resource(self): self.client_log_in(as_admin=True) self.make_generated_resource() response = self.client.get(reverse('images')) doc = fromstring(response.content) delete_btn = doc.cssselect( 'table#generated-resources > tbody > tr 
> td > ' 'a[title="Delete image"]') self.assertEqual( 1, len(delete_btn), "Didn't show delete button for generated image.") def test_hides_delete_button_for_generated_resource_when_not_admin(self): self.client_log_in() self.make_generated_resource() response = self.client.get(reverse('images')) doc = fromstring(response.content) delete_btn = doc.cssselect( 'table#generated-resources > tbody > tr > td > ' 'a[title="Delete image"]') self.assertEqual( 0, len(delete_btn), "Didn't hide delete button for generated image when not admin.") def test_calls_get_os_release_title_for_generated_resource(self): self.client_log_in() title = factory.make_name('title') resource = self.make_generated_resource() mock_get_title = self.patch(images_view, 'get_os_release_title') mock_get_title.return_value = title response = self.client.get(reverse('images')) doc = fromstring(response.content) row_title = doc.cssselect( 'table#generated-resources > tbody > ' 'tr > td')[1].text_content().strip() self.assertEqual(title, row_title) os, release = resource.name.split('/') self.assertThat(mock_get_title, MockCalledOnceWith(os, release)) class UploadedImagesTest(MAASServerTestCase): def make_uploaded_resource(self, name=None): if name is None: name = factory.make_name('name') arch = factory.make_name('arch') subarch = factory.make_name('subarch') architecture = '%s/%s' % (arch, subarch) resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.UPLOADED, name=name, architecture=architecture) resource_set = factory.make_BootResourceSet(resource) factory.make_boot_resource_file_with_content(resource_set) return resource def test_shows_no_custom_images_message(self): self.client_log_in() response = self.client.get(reverse('images')) doc = fromstring(response.content) warnings = doc.cssselect('div#no-custom-images') self.assertEqual(1, len(warnings)) def test_shows_uploaded_resources(self): self.client_log_in() names = [factory.make_name('name') for _ in range(3)] [self.make_uploaded_resource(name) for name in names] response = self.client.get(reverse('images')) doc = fromstring(response.content) table_content = doc.cssselect( 'table#uploaded-resources')[0].text_content() self.assertThat(table_content, ContainsAll(names)) def test_shows_uploaded_resources_name_if_title_blank(self): self.client_log_in() name = factory.make_name('name') resource = self.make_uploaded_resource(name) resource.extra['title'] = '' resource.save() response = self.client.get(reverse('images')) doc = fromstring(response.content) name_col = doc.cssselect( 'table#uploaded-resources > tbody > tr > td')[1].text_content() self.assertEqual(name, name_col.strip()) def test_shows_delete_button_for_uploaded_resource(self): self.client_log_in(as_admin=True) self.make_uploaded_resource() response = self.client.get(reverse('images')) doc = fromstring(response.content) delete_btn = doc.cssselect( 'table#uploaded-resources > tbody > tr > td > ' 'a[title="Delete image"]') self.assertEqual(1, len(delete_btn)) def test_hides_delete_button_for_uploaded_resource_when_not_admin(self): self.client_log_in() self.make_uploaded_resource() response = self.client.get(reverse('images')) doc = fromstring(response.content) delete_btn = doc.cssselect( 'table#uploaded-resources > tbody > tr > td > ' 'a[title="Delete image"]') self.assertEqual(0, len(delete_btn)) class TestImageAjax(MAASServerTestCase): def get_images_ajax(self): return self.client.get( reverse('images'), HTTP_X_REQUESTED_WITH='XMLHttpRequest') def test__returns_json(self): self.client_log_in() response = 
self.get_images_ajax() self.assertEqual('application/json', response['Content-Type']) def test__returns_region_import_running_True(self): self.client_log_in() self.patch( images_view, 'is_import_resources_running').return_value = True response = self.get_images_ajax() json_obj = json.loads(response.content) self.assertTrue(json_obj['region_import_running']) def test__returns_region_import_running_False(self): self.client_log_in() self.patch( images_view, 'is_import_resources_running').return_value = False response = self.get_images_ajax() json_obj = json.loads(response.content) self.assertFalse(json_obj['region_import_running']) def test__returns_cluster_import_running_True(self): self.client_log_in() self.patch( images_view, 'is_import_boot_images_running').return_value = True response = self.get_images_ajax() json_obj = json.loads(response.content) self.assertTrue(json_obj['cluster_import_running']) def test__returns_cluster_import_running_False(self): self.client_log_in() self.patch( images_view, 'is_import_boot_images_running').return_value = False response = self.get_images_ajax() json_obj = json.loads(response.content) self.assertFalse(json_obj['cluster_import_running']) def test_returns_resources(self): self.client_log_in() resources = [factory.make_usable_boot_resource() for _ in range(3)] resource_ids = [resource.id for resource in resources] response = self.get_images_ajax() json_obj = json.loads(response.content) json_ids = [ json_resource['id'] for json_resource in json_obj['resources'] ] self.assertItemsEqual(resource_ids, json_ids) def test_returns_resources_datetime_format(self): """Ensure the date/time format is correct""" self.client_log_in() resource = factory.make_usable_boot_resource() response = self.get_images_ajax() json_obj = json.loads(response.content) json_updated = datetime.datetime.strptime( json_obj['resources'][0]['lastUpdate'], "%a, %d %b. 
%Y %H:%M:%S") self.assertEqual(resource.updated.timetuple(), json_updated.timetuple()) def test_returns_resource_attributes(self): self.client_log_in() factory.make_usable_boot_resource() response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertThat( json_resource, ContainsAll([ 'id', 'rtype', 'name', 'title', 'arch', 'size', 'complete', 'status', 'downloading', 'numberOfNodes', 'lastUpdate'])) def test_returns_ubuntu_release_version_name(self): self.client_log_in() # Use trusty as known to map to "14.04 LTS" version = '14.04 LTS' name = 'ubuntu/trusty' factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual(version, json_resource['title']) def test_shows_number_of_nodes_deployed_for_resource(self): self.client_log_in() resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED) os_name, series = resource.name.split('/') number_of_nodes = random.randint(1, 4) for _ in range(number_of_nodes): factory.make_Node( status=NODE_STATUS.DEPLOYED, osystem=os_name, distro_series=series, architecture=resource.architecture) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) def test_shows_number_of_nodes_deployed_for_resource_with_defaults(self): self.client_log_in() resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED) os_name, series = resource.name.split('/') Config.objects.set_config('default_osystem', os_name) Config.objects.set_config('default_distro_series', series) number_of_nodes = random.randint(1, 4) for _ in range(number_of_nodes): factory.make_Node( status=NODE_STATUS.DEPLOYED, architecture=resource.architecture) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) def test_shows_number_of_nodes_deployed_for_ubuntu_subarch_resource(self): self.client_log_in() resource = factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED) arch, subarch = resource.split_arch() extra_subarch = factory.make_name('subarch') resource.extra['subarches'] = ','.join([subarch, extra_subarch]) resource.save() os_name, series = resource.name.split('/') node_architecture = '%s/%s' % (arch, extra_subarch) number_of_nodes = random.randint(1, 4) for _ in range(number_of_nodes): factory.make_Node( status=NODE_STATUS.DEPLOYED, osystem=os_name, distro_series=series, architecture=node_architecture) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) def test_combines_subarch_resources_into_one_resource(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] for subarch in subarches: factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) response = self.get_images_ajax() json_obj = json.loads(response.content) self.assertEqual( 1, len(json_obj['resources']), 'More than one resource was returned.') def test_combined_subarch_resource_calculates_unique_size(self): self.client_log_in() name = 
'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] largefile = factory.make_LargeFile() for subarch in subarches: resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) resource_set = factory.make_BootResourceSet(resource) factory.make_BootResourceFile(resource_set, largefile) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual( format_size(largefile.total_size), json_resource['size']) def test_combined_subarch_resource_calculates_num_of_nodes_deployed(self): self.client_log_in() osystem = 'ubuntu' series = factory.make_name('series') name = '%s/%s' % (osystem, series) arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] for subarch in subarches: factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) number_of_nodes = random.randint(1, 4) for _ in range(number_of_nodes): subarch = random.choice(subarches) node_architecture = '%s/%s' % (arch, subarch) factory.make_Node( status=NODE_STATUS.DEPLOYED, osystem=osystem, distro_series=series, architecture=node_architecture) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual(number_of_nodes, json_resource['numberOfNodes']) def test_combined_subarch_resource_calculates_complete_True(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] resources = [ factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) for subarch in subarches ] self.patch( BootResource.objects, 'get_resources_matching_boot_images').return_value = resources response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertTrue(json_resource['complete']) def test_combined_subarch_resource_calculates_complete_False(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] incomplete_subarch = subarches.pop() factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, incomplete_subarch)) for subarch in subarches: factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertFalse(json_resource['complete']) def test_combined_subarch_resource_calculates_progress(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] largefile = factory.make_LargeFile() largefile.total_size = largefile.total_size * 2 largefile.save() for subarch in subarches: resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) resource_set = factory.make_BootResourceSet(resource) factory.make_BootResourceFile(resource_set, largefile) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] 
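# The expected figure below is simple arithmetic: the three subarch
# resources share a single LargeFile whose total_size was doubled above
# while the stored content kept its original size, so the combined
# resource reports size / total_size = 50% downloaded.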
self.assertEqual("Downloading 50%", json_resource['status']) def test_combined_subarch_resource_shows_queued_if_no_progress(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] largefile = factory.make_LargeFile(content="") for subarch in subarches: resource = factory.make_BootResource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) resource_set = factory.make_BootResourceSet(resource) factory.make_BootResourceFile(resource_set, largefile) response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual("Queued for download", json_resource['status']) def test_combined_subarch_resource_shows_complete_status(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] resources = [ factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) for subarch in subarches ] self.patch( BootResource.objects, 'get_resources_matching_boot_images').return_value = resources response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual("Complete", json_resource['status']) def test_combined_subarch_resource_shows_waiting_for_cluster_to_sync(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] for subarch in subarches: factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) self.patch( BootResource.objects, 'get_resources_matching_boot_images').return_value = [] response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual( "Waiting for clusters to sync", json_resource['status']) def test_combined_subarch_resource_shows_clusters_syncing(self): self.client_log_in() name = 'ubuntu/%s' % factory.make_name('series') arch = factory.make_name('arch') subarches = [factory.make_name('subarch') for _ in range(3)] for subarch in subarches: factory.make_usable_boot_resource( rtype=BOOT_RESOURCE_TYPE.SYNCED, name=name, architecture='%s/%s' % (arch, subarch)) self.patch( BootResource.objects, 'get_resources_matching_boot_images').return_value = [] self.patch( images_view, 'is_import_boot_images_running').return_value = True response = self.get_images_ajax() json_obj = json.loads(response.content) json_resource = json_obj['resources'][0] self.assertEqual( "Syncing to clusters", json_resource['status']) class TestImageDelete(MAASServerTestCase): def test_non_admin_cannot_delete(self): self.client_log_in() resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) response = self.client.post( reverse('image-delete', args=[resource.id])) self.assertEqual(httplib.FORBIDDEN, response.status_code) self.assertIsNotNone(reload_object(resource)) def test_deletes_resource(self): self.client_log_in(as_admin=True) resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) response = self.client.post( reverse('image-delete', args=[resource.id]), {'post': 'yes'}) self.assertEqual(httplib.FOUND, response.status_code) self.assertIsNone(reload_object(resource)) def test_redirects_to_images(self): 
self.client_log_in(as_admin=True) resource = factory.make_BootResource(rtype=BOOT_RESOURCE_TYPE.UPLOADED) response = self.client.post( reverse('image-delete', args=[resource.id]), {'post': 'yes'}) self.assertEqual(reverse('images'), extract_redirect(response)) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_prefs.py0000644000000000000000000002173513056115004022651 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver preferences views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from apiclient.creds import convert_tuple_to_string from django.contrib.auth.models import User from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.models import SSHKey from maasserver.models.user import get_creds_tuple from maasserver.testing import ( extract_redirect, get_content_links, get_data, get_prefixed_form_data, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase class UserPrefsViewTest(MAASServerTestCase): def test_prefs_GET_profile(self): # The preferences page displays a form with the user's personal # information. self.client_log_in() user = self.logged_in_user user.last_name = 'Steve Bam' user.save() response = self.client.get('/account/prefs/') doc = fromstring(response.content) self.assertSequenceEqual( ['Steve Bam'], [elem.value for elem in doc.cssselect('input#id_profile-last_name')]) def test_prefs_GET_api(self): # The preferences page displays the API access tokens. self.client_log_in() user = self.logged_in_user # Create a few tokens. for _ in range(3): user.userprofile.create_authorisation_token() response = self.client.get('/account/prefs/') doc = fromstring(response.content) # The OAuth tokens are displayed. for token in user.userprofile.get_authorisation_tokens(): # The token string is a compact representation of the keys. self.assertSequenceEqual( [convert_tuple_to_string(get_creds_tuple(token))], [elem.value.strip() for elem in doc.cssselect('input#%s' % token.key)]) def test_prefs_POST_profile(self): # The preferences page allows the user to update their profile # information. self.client_log_in() params = { 'last_name': 'John Doe', 'email': 'jon@example.com', } response = self.client.post( '/account/prefs/', get_prefixed_form_data('profile', params)) self.assertEqual(httplib.FOUND, response.status_code) user = User.objects.get(id=self.logged_in_user.id) self.assertAttributes(user, params) def test_prefs_POST_password(self): # The preferences page allows the user to change their password. self.client_log_in() self.logged_in_user.set_password('password') old_pw = self.logged_in_user.password response = self.client.post( '/account/prefs/', get_prefixed_form_data( 'password', { 'old_password': 'test', 'new_password1': 'new', 'new_password2': 'new', })) self.assertEqual(httplib.FOUND, response.status_code) user = User.objects.get(id=self.logged_in_user.id) # The password is stored hashed; we just make sure that it has changed.
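# (Illustration only, not taken from this suite: a stored Django
# password field looks like "<algorithm>$<iterations>$<salt>$<hash>",
# e.g. "pbkdf2_sha256$24000$seasalt$Xq0e...=" (a made-up value), so
# comparing the raw field before and after the POST is enough to
# detect that it changed.)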
self.assertNotEqual(old_pw, user.password) def test_prefs_displays_message_when_no_public_keys_are_configured(self): self.client_log_in() response = self.client.get('/account/prefs/') self.assertIn("No SSH key configured.", response.content) def test_prefs_displays_add_ssh_key_button(self): self.client_log_in() response = self.client.get('/account/prefs/') add_key_link = reverse('prefs-add-sshkey') self.assertIn(add_key_link, get_content_links(response)) def test_prefs_displays_compact_representation_of_users_keys(self): self.client_log_in() _, keys = factory.make_user_with_keys(user=self.logged_in_user) response = self.client.get('/account/prefs/') for key in keys: self.assertIn(key.display_html(), response.content) def test_prefs_displays_link_to_delete_ssh_keys(self): self.client_log_in() _, keys = factory.make_user_with_keys(user=self.logged_in_user) response = self.client.get('/account/prefs/') links = get_content_links(response) for key in keys: del_key_link = reverse('prefs-delete-sshkey', args=[key.id]) self.assertIn(del_key_link, links) class KeyManagementTest(MAASServerTestCase): def test_add_key_GET(self): # The 'Add key' page displays a form to add a key. self.client_log_in() response = self.client.get(reverse('prefs-add-sshkey')) doc = fromstring(response.content) self.assertEqual(1, len(doc.cssselect('textarea#id_key'))) # The page features a form that submits to itself. self.assertSequenceEqual( ['.'], [elem.get('action').strip() for elem in doc.cssselect( '#content form')]) def test_add_key_POST_adds_key(self): self.client_log_in() key_string = get_data('data/test_rsa0.pub') response = self.client.post( reverse('prefs-add-sshkey'), {'key': key_string}) self.assertEqual(httplib.FOUND, response.status_code) self.assertTrue(SSHKey.objects.filter(key=key_string).exists()) def test_add_key_POST_fails_if_key_already_exists_for_the_user(self): self.client_log_in() key_string = get_data('data/test_rsa0.pub') key = SSHKey(user=self.logged_in_user, key=key_string) key.save() response = self.client.post( reverse('prefs-add-sshkey'), {'key': key_string}) self.assertEqual(httplib.OK, response.status_code) self.assertIn( "This key has already been added for this user.", response.content) self.assertItemsEqual([key], SSHKey.objects.filter(key=key_string)) def test_key_can_be_added_if_same_key_already_setup_for_other_user(self): self.client_log_in() key_string = get_data('data/test_rsa0.pub') key = SSHKey(user=factory.make_User(), key=key_string) key.save() response = self.client.post( reverse('prefs-add-sshkey'), {'key': key_string}) new_key = SSHKey.objects.get(key=key_string, user=self.logged_in_user) self.assertEqual(httplib.FOUND, response.status_code) self.assertItemsEqual( [key, new_key], SSHKey.objects.filter(key=key_string)) def test_delete_key_GET(self): # The 'Delete key' page displays a confirmation page with a form. self.client_log_in() key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.get(del_link) doc = fromstring(response.content) self.assertIn( "Are you sure you want to delete the following key?", response.content) # The page features a form that submits to itself. 
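# (An action of '.' makes the form post back to the page's own URL.)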
self.assertSequenceEqual( ['.'], [elem.get('action').strip() for elem in doc.cssselect( '#content form')]) def test_delete_key_GET_cannot_access_someone_elses_key(self): self.client_log_in() key = factory.make_SSHKey(factory.make_User()) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.get(del_link) self.assertEqual(httplib.FORBIDDEN, response.status_code) def test_delete_key_GET_nonexistent_key_redirects_to_prefs(self): # Deleting a nonexistent key requires no confirmation. It just # "succeeds" instantaneously. self.client_log_in() key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) key.delete() response = self.client.get(del_link) self.assertEqual('/account/prefs/', extract_redirect(response)) def test_delete_key_POST(self): # A POST request deletes the key, and redirects to the prefs. self.client_log_in() key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) response = self.client.post(del_link, {'post': 'yes'}) self.assertEqual('/account/prefs/', extract_redirect(response)) self.assertFalse(SSHKey.objects.filter(id=key.id).exists()) def test_delete_key_POST_ignores_nonexistent_key(self): # Deleting a key that's already been deleted? Basically that's # success. self.client_log_in() key = factory.make_SSHKey(self.logged_in_user) del_link = reverse('prefs-delete-sshkey', args=[key.id]) key.delete() response = self.client.post(del_link, {'post': 'yes'}) self.assertEqual('/account/prefs/', extract_redirect(response)) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_rpc.py0000644000000000000000000000654013056115004022313 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
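# Editor's sketch of the payload this module exercises; the shape is
# inferred from the assertions below, not from the view's own
# documentation, and the addresses are hypothetical:
#
#   {"eventloops": {"<hostname>:<pid>": [["10.0.0.2", 5250]]}}
#
# or {"eventloops": null} while the rpc-advertise service is absent or
# not yet running.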
"""Test maasserver RPC views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import json from crochet import run_in_reactor from django.core.urlresolvers import reverse from maasserver import eventloop from maasserver.testing.eventloop import RegionEventLoopFixture from maasserver.utils.threads import deferToDatabase from maastesting.djangotestcase import DjangoTransactionTestCase from netaddr import IPAddress from provisioningserver.utils.network import get_all_interface_addresses from testtools.matchers import ( Equals, GreaterThan, IsInstance, KeysEqual, LessThan, MatchesAll, MatchesDict, MatchesListwise, MatchesSetwise, ) from twisted.internet.defer import inlineCallbacks is_valid_port = MatchesAll( IsInstance(int), GreaterThan(0), LessThan(2 ** 16)) class RPCViewTest(DjangoTransactionTestCase): def test_rpc_info_when_rpc_advertise_not_present(self): getServiceNamed = self.patch_autospec( eventloop.services, "getServiceNamed") getServiceNamed.side_effect = KeyError response = self.client.get(reverse('rpc-info')) self.assertEqual("application/json", response["Content-Type"]) info = json.loads(response.content) self.assertEqual({"eventloops": None}, info) def test_rpc_info_when_rpc_advertise_not_running(self): response = self.client.get(reverse('rpc-info')) self.assertEqual("application/json", response["Content-Type"]) info = json.loads(response.content) self.assertEqual({"eventloops": None}, info) def test_rpc_info_when_rpc_advertise_running(self): self.useFixture(RegionEventLoopFixture("rpc", "rpc-advertise")) eventloop.start().wait(5) self.addCleanup(lambda: eventloop.reset().wait(5)) getServiceNamed = eventloop.services.getServiceNamed @run_in_reactor @inlineCallbacks def wait_for_startup(): # Wait for the rpc and the rpc-advertise services to start. yield getServiceNamed("rpc").starting yield getServiceNamed("rpc-advertise").starting # Force an update, because it's very hard to track when the # first iteration of the rpc-advertise service has completed. yield deferToDatabase(getServiceNamed("rpc-advertise").update) wait_for_startup().wait(5) response = self.client.get(reverse('rpc-info')) self.assertEqual("application/json", response["Content-Type"]) info = json.loads(response.content) self.assertThat(info, KeysEqual("eventloops")) self.assertThat(info["eventloops"], MatchesDict({ # Each entry in the endpoints dict is a mapping from an # event loop to a list of (host, port) tuples. Each tuple is # a potential endpoint for connecting into that event loop. eventloop.loop.name: MatchesSetwise(*( MatchesListwise((Equals(addr), is_valid_port)) for addr in get_all_interface_addresses() if not IPAddress(addr).is_link_local() and IPAddress(addr).version == 4 )), })) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_settings.py0000644000000000000000000004335713056115004023376 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test maasserver settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from django.conf import settings from django.contrib.auth.models import User from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.models import ( BootSource, Config, UserProfile, ) from maasserver.models.testing import UpdateBootSourceCacheDisconnected from maasserver.storage_layouts import get_storage_layout_choices from maasserver.testing import ( extract_redirect, get_prefixed_form_data, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import ( make_usable_osystem, patch_usable_osystems, ) from maasserver.testing.testcase import MAASServerTestCase from maasserver.views import settings as settings_view class SettingsTest(MAASServerTestCase): def test_settings_list_users(self): # The settings page displays a list of the users with links to view, # delete or edit each user. Note that the link to delete the the # logged-in user is not display. self.client_log_in(as_admin=True) [factory.make_User() for _ in range(3)] users = UserProfile.objects.all_users() response = self.client.get(reverse('settings')) doc = fromstring(response.content) tab = doc.cssselect('#users')[0] all_links = [elem.get('href') for elem in tab.cssselect('a')] # "Add a user" link. self.assertIn(reverse('accounts-add'), all_links) for user in users: # Use the longhand way of matching an ID here - instead of tr#id - # because the ID may contain non [a-zA-Z-]+ characters. These are # not allowed in symbols, which is how cssselect treats text # following "#" in a selector. rows = tab.cssselect('tr[id="%s"]' % user.username) # Only one row for the user. self.assertEqual(1, len(rows)) row = rows[0] links = [elem.get('href') for elem in row.cssselect('a')] # The username is shown... self.assertSequenceEqual( [user.username], [link.text.strip() for link in row.cssselect('a.user')]) # ...with a link to view the user's profile. self.assertSequenceEqual( [reverse('accounts-view', args=[user.username])], [link.get('href') for link in row.cssselect('a.user')]) # A link to edit the user is shown. self.assertIn( reverse('accounts-edit', args=[user.username]), links) if user != self.logged_in_user: # A link to delete the user is shown. self.assertIn( reverse('accounts-del', args=[user.username]), links) else: # No link to delete the user is shown if the user is the # logged-in user. self.assertNotIn( reverse('accounts-del', args=[user.username]), links) def test_settings_maas_and_network_POST(self): self.client_log_in(as_admin=True) # Disable the DNS machinery so that we can skip the required # setup. 
self.patch(settings, "DNS_CONNECT", False) new_name = factory.make_string() new_proxy = "http://%s.example.com:1234/" % factory.make_string() response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='maas_and_network', data={ 'maas_name': new_name, 'http_proxy': new_proxy, })) self.assertEqual(httplib.FOUND, response.status_code, response.content) self.assertEqual( (new_name, new_proxy), (Config.objects.get_config('maas_name'), Config.objects.get_config('http_proxy'))) def test_settings_commissioning_POST(self): self.client_log_in(as_admin=True) release = make_rpc_release(can_commission=True) osystem = make_rpc_osystem('ubuntu', releases=[release]) patch_usable_osystems(self, [osystem]) new_commissioning = release['name'] response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='commissioning', data={ 'commissioning_distro_series': ( new_commissioning), })) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual( ( new_commissioning, ), ( Config.objects.get_config('commissioning_distro_series'), )) def test_settings_hides_license_keys_if_no_OS_supporting_keys(self): self.client_log_in(as_admin=True) response = self.client.get(reverse('settings')) doc = fromstring(response.content) license_keys = doc.cssselect('#license_keys') self.assertEqual( 0, len(license_keys), "Didn't hide the license key section.") def test_settings_shows_license_keys_if_OS_supporting_keys(self): self.client_log_in(as_admin=True) release = make_rpc_release(requires_license_key=True) osystem = make_rpc_osystem(releases=[release]) self.patch( settings_view, 'gen_all_known_operating_systems').return_value = [osystem] response = self.client.get(reverse('settings')) doc = fromstring(response.content) license_keys = doc.cssselect('#license_keys') self.assertEqual( 1, len(license_keys), "Didn't show the license key section.") def test_settings_third_party_drivers_POST(self): self.client_log_in(as_admin=True) new_enable_third_party_drivers = factory.pick_bool() response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='third_party_drivers', data={ 'enable_third_party_drivers': ( new_enable_third_party_drivers), })) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual( ( new_enable_third_party_drivers, ), ( Config.objects.get_config('enable_third_party_drivers'), )) def test_settings_storage_POST(self): self.client_log_in(as_admin=True) new_storage_layout = factory.pick_choice(get_storage_layout_choices()) new_enable_disk_erasing_on_release = factory.pick_bool() response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='storage_settings', data={ 'default_storage_layout': new_storage_layout, 'enable_disk_erasing_on_release': ( new_enable_disk_erasing_on_release), })) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual( ( new_storage_layout, new_enable_disk_erasing_on_release, ), ( Config.objects.get_config('default_storage_layout'), Config.objects.get_config('enable_disk_erasing_on_release'), )) def test_settings_deploy_POST(self): self.client_log_in(as_admin=True) osystem = make_usable_osystem(self) osystem_name = osystem['name'] release_name = osystem['default_release'] response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='deploy', data={ 'default_osystem': osystem_name, 'default_distro_series': '%s/%s' % ( osystem_name, release_name, ), })) self.assertEqual(httplib.FOUND, response.status_code, response.content) self.assertEqual( ( osystem_name, 
release_name, ), ( Config.objects.get_config('default_osystem'), Config.objects.get_config('default_distro_series'), )) def test_settings_ubuntu_POST(self): self.client_log_in(as_admin=True) new_main_archive = 'http://test.example.com/archive' new_ports_archive = 'http://test2.example.com/archive' response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='ubuntu', data={ 'main_archive': new_main_archive, 'ports_archive': new_ports_archive, })) self.assertEqual(httplib.FOUND, response.status_code, response.content) self.assertEqual( ( new_main_archive, new_ports_archive, ), ( Config.objects.get_config('main_archive'), Config.objects.get_config('ports_archive'), )) def test_settings_kernelopts_POST(self): self.client_log_in(as_admin=True) new_kernel_opts = "--new='arg' --flag=1 other" response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='kernelopts', data={ 'kernel_opts': new_kernel_opts, })) self.assertEqual(httplib.FOUND, response.status_code) self.assertEqual( new_kernel_opts, Config.objects.get_config('kernel_opts')) def test_settings_boot_source_is_shown(self): self.client_log_in(as_admin=True) response = self.client.get(reverse('settings')) doc = fromstring(response.content) boot_source = doc.cssselect('#boot_source') self.assertEqual( 1, len(boot_source), "Didn't show boot image settings section.") def test_settings_boot_source_is_not_shown(self): self.useFixture(UpdateBootSourceCacheDisconnected()) self.client_log_in(as_admin=True) for _ in range(2): factory.make_BootSource() response = self.client.get(reverse('settings')) doc = fromstring(response.content) boot_source = doc.cssselect('#boot_source') self.assertEqual( 0, len(boot_source), "Didn't hide boot image settings section.") def test_settings_boot_source_POST_creates_new_source(self): self.useFixture(UpdateBootSourceCacheDisconnected()) self.client_log_in(as_admin=True) url = "http://test.example.com/archive" keyring = "/usr/local/testing/path.gpg" response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='boot_source', data={ 'boot_source_url': url, 'boot_source_keyring': keyring, })) self.assertEqual(httplib.FOUND, response.status_code, response.content) boot_source = BootSource.objects.first() self.assertIsNotNone(boot_source) self.assertEqual( (url, keyring), (boot_source.url, boot_source.keyring_filename)) def test_settings_boot_source_POST_updates_source(self): self.useFixture(UpdateBootSourceCacheDisconnected()) self.client_log_in(as_admin=True) boot_source = factory.make_BootSource() url = "http://test.example.com/archive" keyring = "/usr/local/testing/path.gpg" response = self.client.post( reverse('settings'), get_prefixed_form_data( prefix='boot_source', data={ 'boot_source_url': url, 'boot_source_keyring': keyring, })) self.assertEqual(httplib.FOUND, response.status_code, response.content) boot_source = reload_object(boot_source) self.assertEqual( (url, keyring), (boot_source.url, boot_source.keyring_filename)) class NonAdminSettingsTest(MAASServerTestCase): def test_settings_import_boot_images_reserved_to_admin(self): self.client_log_in() response = self.client.post( reverse('settings'), {'import_all_boot_images': 1}) self.assertEqual(reverse('login'), extract_redirect(response)) # Settable attributes on User. user_attributes = [ 'email', 'is_superuser', 'last_name', 'username', ] def make_user_attribute_params(user): """Compose a dict of form parameters for a user's account data. 
By default, each attribute in the dict maps to the user's existing value for that attribute. """ return { attr: getattr(user, attr) for attr in user_attributes } def make_password_params(password): """Create a dict of parameters for setting a given password.""" return { 'password1': password, 'password2': password, } def subset_dict(input_dict, keys_subset): """Return a subset of `input_dict` restricted to `keys_subset`. All keys in `keys_subset` must be in `input_dict`. """ return {key: input_dict[key] for key in keys_subset} class UserManagementTest(MAASServerTestCase): def test_add_user_POST(self): self.client_log_in(as_admin=True) params = { 'username': factory.make_string(), 'last_name': factory.make_string(30), 'email': factory.make_email_address(), 'is_superuser': factory.pick_bool(), } password = factory.make_string() params.update(make_password_params(password)) response = self.client.post(reverse('accounts-add'), params) self.assertEqual(httplib.FOUND, response.status_code) user = User.objects.get(username=params['username']) self.assertAttributes(user, subset_dict(params, user_attributes)) self.assertTrue(user.check_password(password)) def test_edit_user_POST_profile_updates_attributes(self): self.client_log_in(as_admin=True) user = factory.make_User() params = make_user_attribute_params(user) params.update({ 'last_name': factory.make_name('Newname'), 'email': 'new-%s@example.com' % factory.make_string(), 'is_superuser': True, 'username': factory.make_name('newname'), }) response = self.client.post( reverse('accounts-edit', args=[user.username]), get_prefixed_form_data('profile', params)) self.assertEqual(httplib.FOUND, response.status_code) self.assertAttributes( reload_object(user), subset_dict(params, user_attributes)) def test_edit_user_POST_updates_password(self): self.client_log_in(as_admin=True) user = factory.make_User() new_password = factory.make_string() params = make_password_params(new_password) response = self.client.post( reverse('accounts-edit', args=[user.username]), get_prefixed_form_data('password', params)) self.assertEqual(httplib.FOUND, response.status_code) self.assertTrue(reload_object(user).check_password(new_password)) def test_delete_user_GET(self): # The user delete page displays a confirmation page with a form. self.client_log_in(as_admin=True) user = factory.make_User() del_link = reverse('accounts-del', args=[user.username]) response = self.client.get(del_link) doc = fromstring(response.content) confirmation_message = ( 'Are you sure you want to delete the user "%s"?' % user.username) self.assertSequenceEqual( [confirmation_message], [elem.text.strip() for elem in doc.cssselect('h2')]) # The page features a form that submits to itself. self.assertSequenceEqual( ['.'], [elem.get('action').strip() for elem in doc.cssselect( '#content form')]) def test_delete_user_POST(self): # A POST request to the user delete page deletes the user. self.client_log_in(as_admin=True) user = factory.make_User() user_id = user.id del_link = reverse('accounts-del', args=[user.username]) response = self.client.post(del_link, {'post': 'yes'}) self.assertEqual(httplib.FOUND, response.status_code) self.assertItemsEqual([], User.objects.filter(id=user_id)) def test_view_user(self): # The user page features the basic information about the user.
self.client_log_in(as_admin=True) user = factory.make_User() view_link = reverse('accounts-view', args=[user.username]) response = self.client.get(view_link) doc = fromstring(response.content) content_text = doc.cssselect('#content')[0].text_content() self.assertIn(user.username, content_text) self.assertIn(user.email, content_text) def test_account_views_are_routable_for_full_range_of_usernames(self): # Usernames can include characters in the regex [\w.@+-]. self.client_log_in(as_admin=True) user = factory.make_User(username="abc-123@example.com") for view in "edit", "view", "del": path = reverse("accounts-%s" % view, args=[user.username]) self.assertIsInstance(path, (bytes, unicode)) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_settings_commissioning_scripts.py0000644000000000000000000000762713056115004030073 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver commissioning scripts settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.testing import ( extract_redirect, get_content_links, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.views.settings_commissioning_scripts import ( COMMISSIONING_SCRIPTS_ANCHOR, ) from metadataserver.models import CommissioningScript from testtools.matchers import ( ContainsAll, MatchesStructure, ) class CommissioningScriptListingTest(MAASServerTestCase): def test_settings_contains_names_and_content_of_scripts(self): self.client_log_in(as_admin=True) scripts = { factory.make_CommissioningScript(), factory.make_CommissioningScript(), } response = self.client.get(reverse('settings')) names = [script.name for script in scripts] contents = [script.content for script in scripts] self.assertThat(response.content, ContainsAll(names + contents)) def test_settings_link_to_upload_script(self): self.client_log_in(as_admin=True) links = get_content_links(self.client.get(reverse('settings'))) script_add_link = reverse('commissioning-script-add') self.assertIn(script_add_link, links) def test_settings_contains_links_to_delete_scripts(self): self.client_log_in(as_admin=True) scripts = { factory.make_CommissioningScript(), factory.make_CommissioningScript(), } links = get_content_links(self.client.get(reverse('settings'))) script_delete_links = [ reverse('commissioning-script-delete', args=[script.id]) for script in scripts] self.assertThat(links, ContainsAll(script_delete_links)) def test_settings_contains_commissioning_scripts_slot_anchor(self): self.client_log_in(as_admin=True) response = self.client.get(reverse('settings')) document = fromstring(response.content) slots = document.xpath( "//div[@id='%s']" % COMMISSIONING_SCRIPTS_ANCHOR) self.assertEqual( 1, len(slots), "Missing anchor '%s'" % COMMISSIONING_SCRIPTS_ANCHOR) class CommissioningScriptDeleteTest(MAASServerTestCase): def test_can_delete_commissioning_script(self): self.client_log_in(as_admin=True) script = factory.make_CommissioningScript() delete_link = reverse('commissioning-script-delete', args=[script.id]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( (httplib.FOUND, reverse('settings')), (response.status_code, extract_redirect(response))) self.assertFalse( 
CommissioningScript.objects.filter(id=script.id).exists()) class CommissioningScriptUploadTest(MAASServerTestCase): def test_can_create_commissioning_script(self): self.client_log_in(as_admin=True) content = factory.make_string() name = factory.make_name('filename') create_link = reverse('commissioning-script-add') filepath = self.make_file(name=name, contents=content) with open(filepath) as fp: response = self.client.post( create_link, {'name': name, 'content': fp}) self.assertEqual( (httplib.FOUND, reverse('settings')), (response.status_code, extract_redirect(response))) new_script = CommissioningScript.objects.get(name=name) self.assertThat( new_script, MatchesStructure.byEquality(name=name, content=content)) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_settings_license_keys.py0000644000000000000000000001550613056115004026126 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test maasserver license key settings views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver import forms from maasserver.clusterrpc.testing.osystems import ( make_rpc_osystem, make_rpc_release, ) from maasserver.models import LicenseKey from maasserver.testing import ( extract_redirect, get_content_links, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import patch_usable_osystems from maasserver.testing.testcase import MAASServerTestCase from maasserver.views import settings as settings_view from maasserver.views.settings_license_keys import LICENSE_KEY_ANCHOR from testtools.matchers import ContainsAll def make_osystem_requiring_license_key(osystem=None, distro_series=None): if osystem is None: osystem = factory.make_name('osystem') if distro_series is None: distro_series = factory.make_name('distro_series') rpc_release = make_rpc_release( distro_series, requires_license_key=True) rpc_osystem = make_rpc_osystem(osystem, releases=[rpc_release]) return rpc_osystem class LicenseKeyListingTest(MAASServerTestCase): def make_license_key_with_os(self, osystem=None, distro_series=None, license_key=None): license_key = factory.make_LicenseKey( osystem=osystem, distro_series=distro_series, license_key=license_key) osystem = make_osystem_requiring_license_key( license_key.osystem, license_key.distro_series) return license_key, osystem def make_license_keys(self, count): keys = [] osystems = [] for _ in range(count): key, osystem = self.make_license_key_with_os() keys.append(key) osystems.append(osystem) patch_usable_osystems(self, osystems=osystems) self.patch( settings_view, 'gen_all_known_operating_systems').return_value = osystems return keys, osystems def test_settings_contains_osystem_and_distro_series(self): self.client_log_in(as_admin=True) keys, _ = self.make_license_keys(3) response = self.client.get(reverse('settings')) os_titles = [key.osystem for key in keys] series_titles = [key.distro_series for key in keys] self.assertThat( response.content, ContainsAll(os_titles + series_titles)) def test_settings_link_to_add_license_key(self): self.client_log_in(as_admin=True) self.make_license_keys(3) links = get_content_links(self.client.get(reverse('settings'))) script_add_link = reverse('license-key-add') 
self.assertIn(script_add_link, links) def test_settings_contains_links_to_delete(self): self.client_log_in(as_admin=True) keys, _ = self.make_license_keys(3) links = get_content_links(self.client.get(reverse('settings'))) license_key_delete_links = [ reverse( 'license-key-delete', args=[key.osystem, key.distro_series]) for key in keys] self.assertThat(links, ContainsAll(license_key_delete_links)) def test_settings_contains_links_to_edit(self): self.client_log_in(as_admin=True) keys, _ = self.make_license_keys(3) links = get_content_links(self.client.get(reverse('settings'))) license_key_delete_links = [ reverse( 'license-key-edit', args=[key.osystem, key.distro_series]) for key in keys] self.assertThat(links, ContainsAll(license_key_delete_links)) def test_settings_contains_commissioning_scripts_slot_anchor(self): self.client_log_in(as_admin=True) self.make_license_keys(3) response = self.client.get(reverse('settings')) document = fromstring(response.content) slots = document.xpath( "//div[@id='%s']" % LICENSE_KEY_ANCHOR) self.assertEqual( 1, len(slots), "Missing anchor '%s'" % LICENSE_KEY_ANCHOR) class LicenseKeyAddTest(MAASServerTestCase): def test_can_create_license_key(self): self.client_log_in(as_admin=True) osystem = make_osystem_requiring_license_key() patch_usable_osystems(self, osystems=[osystem]) self.patch(forms, 'validate_license_key').return_value = True series = osystem['default_release'] key = factory.make_name('key') add_link = reverse('license-key-add') definition = { 'osystem': osystem['name'], 'distro_series': series, 'license_key': key, } response = self.client.post(add_link, definition) self.assertEqual( (httplib.FOUND, reverse('settings')), (response.status_code, extract_redirect(response))) new_license_key = LicenseKey.objects.get( osystem=osystem['name'], distro_series=series) self.assertAttributes(new_license_key, definition) class LicenseKeyEditTest(MAASServerTestCase): def test_can_update_license_key(self): self.client_log_in(as_admin=True) key = factory.make_LicenseKey() osystem = make_osystem_requiring_license_key( key.osystem, key.distro_series) patch_usable_osystems(self, osystems=[osystem]) self.patch(forms, 'validate_license_key').return_value = True new_key = factory.make_name('key') edit_link = reverse( 'license-key-edit', args=[key.osystem, key.distro_series]) definition = { 'osystem': key.osystem, 'distro_series': key.distro_series, 'license_key': new_key, } response = self.client.post(edit_link, definition) self.assertEqual( (httplib.FOUND, reverse('settings')), (response.status_code, extract_redirect(response))) self.assertAttributes(reload_object(key), definition) class LicenseKeyDeleteTest(MAASServerTestCase): def test_can_delete_license_key(self): self.client_log_in(as_admin=True) key = factory.make_LicenseKey() delete_link = reverse( 'license-key-delete', args=[key.osystem, key.distro_series]) response = self.client.post(delete_link, {'post': 'yes'}) self.assertEqual( (httplib.FOUND, reverse('settings')), (response.status_code, extract_redirect(response))) self.assertFalse( LicenseKey.objects.filter( osystem=key.osystem, distro_series=key.distro_series).exists()) maas-1.9.5+bzr4599.orig/src/maasserver/views/tests/test_zones.py0000644000000000000000000003523013056115004022663 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test maasserver zones views.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import httplib from urllib import urlencode from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from lxml.html import fromstring from maasserver.models import Zone from maasserver.models.zone import DEFAULT_ZONE_NAME from maasserver.testing import ( extract_redirect, get_content_links, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.views.zones import ( ZoneAdd, ZoneListView, ) from testtools.matchers import ( Contains, ContainsAll, Equals, MatchesAll, Not, ) class ZoneListingViewTest(MAASServerTestCase): def test_zone_list_link_present_on_homepage(self): self.client_log_in() response = self.client.get(reverse('index')) zone_list_link = reverse('zone-list') self.assertIn( zone_list_link, get_content_links(response, element='#main-nav')) def test_zone_list_displays_zone_details(self): # Zone listing displays the zone name and the zone description. self.client_log_in() [factory.make_Zone() for _ in range(3)] zones = Zone.objects.all() response = self.client.get(reverse('zone-list')) zone_names = [zone.name for zone in zones] truncated_zone_descriptions = [ zone.description[:20] for zone in zones] self.assertThat(response.content, ContainsAll(zone_names)) self.assertThat( response.content, ContainsAll(truncated_zone_descriptions)) def test_zone_list_displays_sorted_list_of_zones(self): # Zones are alphabetically sorted on the zone list page. self.client_log_in() [factory.make_Zone(sortable_name=True) for _ in range(3)] zones = Zone.objects.all() sorted_zones = sorted(zones, key=lambda x: x.name.lower()) response = self.client.get(reverse('zone-list')) zone_links = [ reverse('zone-view', args=[zone.name]) for zone in sorted_zones] self.assertEqual( zone_links, [link for link in get_content_links(response) if link.startswith('/zones/')]) def test_zone_list_displays_links_to_zone_query(self): """Ensures zone list displays links to the right node/device lists""" self.client_log_in() [factory.make_Zone(sortable_name=True) for _ in range(3)] zones = Zone.objects.all() sorted_zones = sorted(zones, key=lambda x: x.name.lower()) response = self.client.get(reverse('zone-list')) zone_node_links = [ reverse('index') + "#/nodes" + "?" + urlencode({'query': 'zone:(%s)' % zone.name}) for zone in sorted_zones] zone_device_links = [reverse('index') + "#/nodes" + "?" 
+ urlencode({'query': 'zone:(%s)' % zone.name, 'tab': 'devices'}) for zone in sorted_zones] node_links_on_page = [link for link in get_content_links(response) if link.startswith('/#/nodes') and '&tab=devices' not in link] device_links_on_page = [link for link in get_content_links(response) if link.startswith('/#/nodes') and '&tab=devices' in link] self.assertEqual(zone_device_links, device_links_on_page) self.assertEqual(zone_node_links, node_links_on_page) class ZoneListingViewTestNonAdmin(MAASServerTestCase): def test_zone_list_does_not_contain_edit_and_delete_links(self): self.client_log_in() zones = [factory.make_Zone() for _ in range(3)] response = self.client.get(reverse('zone-list')) zone_edit_links = [ reverse('zone-edit', args=[zone.name]) for zone in zones] zone_delete_links = [ reverse('zone-del', args=[zone.name]) for zone in zones] all_links = get_content_links(response) self.assertThat( all_links, MatchesAll(*[Not(Equals(link)) for link in zone_edit_links])) self.assertThat( all_links, MatchesAll(*[Not(Equals(link)) for link in zone_delete_links])) def test_zone_list_does_not_contain_add_link(self): self.client_log_in() response = self.client.get(reverse('zone-list')) add_link = reverse('zone-add') self.assertNotIn(add_link, get_content_links(response)) def test_zone_listing_is_paginated(self): self.patch(ZoneListView, "paginate_by", 3) self.client_log_in(as_admin=True) # Create 4 zones. [factory.make_Zone() for _ in range(4)] response = self.client.get(reverse('zone-list')) self.assertEqual(httplib.OK, response.status_code) doc = fromstring(response.content) self.assertEqual( 1, len(doc.cssselect('div.pagination')), "Couldn't find pagination tag.") class ZoneListingViewTestAdmin(MAASServerTestCase): def test_zone_list_contains_edit_links(self): self.client_log_in(as_admin=True) zones = [factory.make_Zone() for _ in range(3)] default_zone = Zone.objects.get_default_zone() zone_edit_links = [ reverse('zone-edit', args=[zone.name]) for zone in zones] zone_delete_links = [ reverse('zone-del', args=[zone.name]) for zone in zones] zone_default_edit = reverse('zone-edit', args=[default_zone]) zone_default_delete = reverse('zone-del', args=[default_zone]) response = self.client.get(reverse('zone-list')) all_links = get_content_links(response) self.assertThat(all_links, ContainsAll( zone_edit_links + zone_delete_links)) self.assertThat(all_links, Not(Contains(zone_default_edit))) self.assertThat(all_links, Not(Contains(zone_default_delete))) def test_zone_list_contains_add_link(self): self.client_log_in(as_admin=True) response = self.client.get(reverse('zone-list')) add_link = reverse('zone-add') self.assertIn(add_link, get_content_links(response)) class ZoneAddTestNonAdmin(MAASServerTestCase): def test_cannot_add_zone(self): self.client_log_in() name = factory.make_name('zone') response = self.client.post(reverse('zone-add'), {'name': name}) # This returns an inappropriate response (302 FOUND, redirect to the # login page; should be 403 FORBIDDEN) but does not actually create the # zone, and that's the main thing. 
self.assertEqual(reverse('login'), extract_redirect(response)) self.assertEqual([], list(Zone.objects.filter(name=name))) class ZoneAddTestAdmin(MAASServerTestCase): def test_adds_zone(self): self.client_log_in(as_admin=True) definition = { 'name': factory.make_name('zone'), 'description': factory.make_string(), } response = self.client.post(reverse('zone-add'), definition) self.assertEqual(httplib.FOUND, response.status_code) zone = Zone.objects.get(name=definition['name']) self.assertEqual(definition['description'], zone.description) self.assertEqual(reverse('zone-list'), extract_redirect(response)) def test_description_is_optional(self): self.client_log_in(as_admin=True) name = factory.make_name('zone') response = self.client.post(reverse('zone-add'), {'name': name}) self.assertEqual(httplib.FOUND, response.status_code) zone = Zone.objects.get(name=name) self.assertEqual('', zone.description) def test_get_success_url_returns_valid_url(self): self.client_log_in(as_admin=True) url = ZoneAdd().get_success_url() self.assertIn("/zones", url) class ZoneDetailViewTest(MAASServerTestCase): def test_zone_detail_displays_zone_detail(self): # The Zone detail view displays the zone name and the zone # description. self.client_log_in() zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) self.assertThat(response.content, Contains(zone.name)) self.assertThat( response.content, Contains(zone.description)) def test_zone_detail_displays_node_count(self): self.client_log_in() zone = factory.make_Zone() node = factory.make_Node() node.zone = zone response = self.client.get(reverse('zone-view', args=[zone.name])) document = fromstring(response.content) count_text = document.get_element_by_id("#nodecount").text_content() self.assertThat( count_text, Contains(unicode(zone.node_set.count()))) def test_zone_detail_links_to_node_list(self): self.client_log_in() zone = factory.make_Zone() node = factory.make_Node() node.zone = zone response = self.client.get(reverse('zone-view', args=[zone.name])) zone_node_link = ( reverse('index') + "#/nodes" + "?" 
+ urlencode({'query': 'zone:(%s)' % zone.name})) all_links = get_content_links(response) self.assertIn(zone_node_link, all_links) class ZoneDetailViewNonAdmin(MAASServerTestCase): def test_zone_detail_does_not_contain_edit_link(self): self.client_log_in() zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_edit_link = reverse('zone-edit', args=[zone.name]) self.assertNotIn(zone_edit_link, get_content_links(response)) def test_zone_detail_does_not_contain_delete_link(self): self.client_log_in() zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_delete_link = reverse('zone-del', args=[zone.name]) self.assertNotIn(zone_delete_link, get_content_links(response)) class ZoneDetailViewAdmin(MAASServerTestCase): def test_zone_detail_contains_edit_link(self): self.client_log_in(as_admin=True) zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_edit_link = reverse('zone-edit', args=[zone.name]) self.assertIn(zone_edit_link, get_content_links(response)) def test_zone_detail_contains_delete_link(self): self.client_log_in(as_admin=True) zone = factory.make_Zone() response = self.client.get(reverse('zone-view', args=[zone.name])) zone_delete_link = reverse('zone-del', args=[zone.name]) self.assertIn(zone_delete_link, get_content_links(response)) def test_zone_detail_for_default_zone_does_not_contain_delete_link(self): self.client_log_in(as_admin=True) response = self.client.get( reverse('zone-view', args=[DEFAULT_ZONE_NAME])) zone_delete_link = reverse('zone-del', args=[DEFAULT_ZONE_NAME]) self.assertNotIn(zone_delete_link, get_content_links(response)) class ZoneEditNonAdminTest(MAASServerTestCase): def test_cannot_access_zone_edit(self): self.client_log_in() zone = factory.make_Zone() response = self.client.post(reverse('zone-edit', args=[zone.name])) self.assertEqual(reverse('login'), extract_redirect(response)) class ZoneEditAdminTest(MAASServerTestCase): def test_zone_edit(self): self.client_log_in(as_admin=True) zone = factory.make_Zone() new_name = factory.make_name('name') new_description = factory.make_name('description') response = self.client.post( reverse('zone-edit', args=[zone.name]), data={ 'name': new_name, 'description': new_description, }) self.assertEqual( reverse('zone-list'), extract_redirect(response), response.content) zone = reload_object(zone) self.assertEqual( (new_name, new_description), (zone.name, zone.description), ) class ZoneDeleteNonAdminTest(MAASServerTestCase): def test_cannot_delete(self): self.client_log_in() zone = factory.make_Zone() response = self.client.post(reverse('zone-del', args=[zone.name])) self.assertEqual(reverse('login'), extract_redirect(response)) self.assertIsNotNone(reload_object(zone)) class ZoneDeleteAdminTest(MAASServerTestCase): def test_deletes_zone(self): self.client_log_in(as_admin=True) zone = factory.make_Zone() response = self.client.post( reverse('zone-del', args=[zone.name]), {'post': 'yes'}) self.assertEqual(httplib.FOUND, response.status_code) self.assertIsNone(reload_object(zone)) def test_rejects_deletion_of_default_zone(self): self.client_log_in(as_admin=True) try: self.client.post( reverse('zone-del', args=[DEFAULT_ZONE_NAME]), {'post': 'yes'}) except ValidationError: # Right now, this generates an error because the deletion # is prevented in the model code and not at the form level. # This is not so bad because we make sure that the deletion link # for the default zone isn't shown anywhere. 
            # If we move validation to the form level, this exception goes
            # away and we'll have to check the HTTP response for a validation
            # failure.
            pass
        # The default zone is still there.
        self.assertIsNotNone(Zone.objects.get_default_zone())

    def test_redirects_to_listing(self):
        self.client_log_in(as_admin=True)
        zone = factory.make_Zone()
        response = self.client.post(
            reverse('zone-del', args=[zone.name]), {'post': 'yes'})
        self.assertEqual(reverse('zone-list'), extract_redirect(response))

    def test_does_not_delete_nodes(self):
        self.client_log_in(as_admin=True)
        zone = factory.make_Zone()
        node = factory.make_Node(zone=zone)
        response = self.client.post(
            reverse('zone-del', args=[zone.name]), {'post': 'yes'})
        self.assertEqual(httplib.FOUND, response.status_code)
        self.assertIsNone(reload_object(zone))
        node = reload_object(node)
        self.assertIsNotNone(node)
        self.assertEqual(Zone.objects.get_default_zone(), node.zone)
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/__init__.py0000644000000000000000000000000013056115004022062 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/base.py0000644000000000000000000004226013056115004021253 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""The base class that all handlers must extend."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "HandlerError",
    "HandlerPKError",
    "HandlerValidationError",
    "Handler",
    ]

from operator import attrgetter

from django.core.exceptions import ValidationError
from django.http import HttpRequest
from django.utils.encoding import is_protected_type
from djorm_pgarray.fields import ArrayField
from maasserver import concurrency
from maasserver.utils.forms import get_QueryDict
from maasserver.utils.orm import transactional
from maasserver.utils.threads import deferToDatabase
from provisioningserver.utils.twisted import (
    asynchronous,
    IAsynchronous,
    )


class HandlerError(Exception):
    """Generic exception a handler can raise."""


class HandlerNoSuchMethodError(HandlerError):
    """Raised when a handler doesn't have that method."""


class HandlerPKError(HandlerError):
    """Raised when object is missing its primary key."""


class HandlerValidationError(HandlerError, ValidationError):
    """Raised when object fails to validate on create or update."""


class HandlerDoesNotExistError(HandlerError):
    """Raised when an object by its `pk` doesn't exist."""


class HandlerPermissionError(HandlerError):
    """Raised when permission is denied for the user of a given action."""


class HandlerOptions(object):
    """Configuration class for `Handler`.

    Provides the needed defaults to the internal `Meta` class used on
    the handler.
    """
    abstract = False
    allowed_methods = [
        'list',
        'get',
        'create',
        'update',
        'delete',
        'set_active',
        ]
    handler_name = None
    object_class = None
    queryset = None
    pk = 'id'
    pk_type = int
    fields = None
    exclude = None
    list_fields = None
    list_exclude = None
    non_changeable = None
    form = None
    listen_channels = []

    def __new__(cls, meta=None):
        overrides = {}

        # Meta class will override the defaults based on the values it
        # already has set.
        if meta:
            for override_name in dir(meta):
                # Skip over internal field names.
                if not override_name.startswith('_'):
                    overrides[override_name] = getattr(meta, override_name)

        # Construct the new object with the overrides from meta.
return object.__new__( type(b'HandlerOptions', (cls,), overrides)) class HandlerMetaclass(type): """Sets up the _meta field on the created class.""" def __new__(cls, name, bases, attrs): # Construct the class with the _meta field. new_class = super( HandlerMetaclass, cls).__new__(cls, name, bases, attrs) new_class._meta = HandlerOptions(getattr(new_class, 'Meta', None)) # Setup the handlers name based on the naming of the class. if not getattr(new_class._meta, 'handler_name', None): class_name = new_class.__name__ name_bits = [bit for bit in class_name.split('Handler') if bit] handler_name = ''.join(name_bits).lower() new_class._meta.handler_name = handler_name # Setup the object_class if the queryset is provided. if new_class._meta.queryset is not None: new_class._meta.object_class = new_class._meta.queryset.model # Copy the fields and exclude to list_fields and list_exclude # if empty. if new_class._meta.list_fields is None: new_class._meta.list_fields = new_class._meta.fields if new_class._meta.list_exclude is None: new_class._meta.list_exclude = new_class._meta.exclude return new_class class Handler: """Base handler for all handlers in the WebSocket protocol. Each handler should extend this class to get the basic implementation of exposing a collection over the WebSocket protocol. The classes that extend this class must be present in `maasserver.websockets.handlers` for it to be exposed. Example: class SampleHandler(Handler): class Meta: queryset = Sample.objects.all() """ __metaclass__ = HandlerMetaclass def __init__(self, user, cache): self.user = user self.cache = cache # Holds a set of all pks that the client has loaded and has on their # end of the connection. This is used to inform the client of the # correct notifications based on what items the client has. if "loaded_pks" not in self.cache: self.cache["loaded_pks"] = set() def full_dehydrate(self, obj, for_list=False): """Convert the given object into a dictionary. :param for_list: True when the object is being converted to belong in a list. """ if for_list: allowed_fields = self._meta.list_fields exclude_fields = self._meta.list_exclude else: allowed_fields = self._meta.fields exclude_fields = self._meta.exclude data = {} for field in self._meta.object_class._meta.fields: # Convert the field name to unicode as some are stored in bytes. field_name = unicode(field.name) # Skip fields that are not allowed. if allowed_fields is not None and field_name not in allowed_fields: continue if exclude_fields is not None and field_name in exclude_fields: continue # Get the value from the field and set it in data. The value # will pass through the dehydrate method if present. field_obj = getattr(obj, field_name) dehydrate_method = getattr( self, "dehydrate_%s" % field_name, None) if dehydrate_method is not None: data[field_name] = dehydrate_method(field_obj) else: value = field._get_val_from_obj(obj) if is_protected_type(value): data[field_name] = value elif isinstance(field, ArrayField): data[field_name] = field.to_python(value) else: data[field_name] = field.value_to_string(obj) # Return the data after the final dehydrate. return self.dehydrate(obj, data, for_list=for_list) def dehydrate(self, obj, data, for_list=False): """Add any extra info to the `data` before finalizing the final object. :param obj: object being dehydrated. :param data: dictionary to place extra info. :param for_list: True when the object is being converted to belong in a list. 
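
        For example, a subclass can attach a computed value that is not a
        model field (a sketch only; the ``Sample`` model and the
        ``owner_name`` key are illustrative, not part of this module)::

            class SampleHandler(Handler):

                class Meta:
                    queryset = Sample.objects.all()

                def dehydrate(self, obj, data, for_list=False):
                    # Annotate the serialized dict; this runs for both
                    # the list and the detail forms of the object.
                    data["owner_name"] = obj.owner.username
                    return data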
""" return data def full_hydrate(self, obj, data): """Convert the given dictionary to a object.""" allowed_fields = self._meta.fields exclude_fields = self._meta.exclude non_changeable_fields = self._meta.non_changeable for field in self._meta.object_class._meta.fields: field_name = field.name # Skip fields that are not allowed. if field_name == self._meta.pk: continue if allowed_fields is not None and field_name not in allowed_fields: continue if exclude_fields is not None and field_name in exclude_fields: continue if (non_changeable_fields is not None and field_name in non_changeable_fields): continue # Update the field if its in the provided data. Passing the value # through its hydrate method if present. if field_name in data: value = data[field_name] hydrate_method = getattr(self, "hydrate_%s" % field_name, None) if hydrate_method is not None: value = hydrate_method(value) setattr(obj, field_name, value) # Return the hydrated object once its done the final hydrate. return self.hydrate(obj, data) def hydrate(self, obj, data): """Add any extra info to the `obj` before finalizing the finale object. :param obj: obj being hydrated. :param data: dictionary to use to set object. """ return obj def get_object(self, params): """Get object by using the `pk` in `params`.""" if self._meta.pk not in params: raise HandlerPKError("Missing %s in params" % self._meta.pk) pk = params[self._meta.pk] try: obj = self._meta.object_class.objects.get(**{ self._meta.pk: pk, }) except self._meta.object_class.DoesNotExist: raise HandlerDoesNotExistError(pk) return obj def get_queryset(self): """Return `QuerySet` used by this handler. Override if you need to modify the queryset based on the current user. """ return self._meta.queryset def get_form_class(self, action): """Return the form class used for `action`. Override if you need to provide a form based on the current user. """ return self._meta.form def preprocess_form(self, action, params): """Process the `params` to before passing the data to the form. Default implementation just converts `params` to a `QueryDict`. """ return get_QueryDict(params) @asynchronous def execute(self, method_name, params): """Execute the given method on the handler. Checks to make sure the method is valid and allowed perform executing the method. """ if method_name in self._meta.allowed_methods: try: method = getattr(self, method_name) except AttributeError: raise HandlerNoSuchMethodError(method_name) else: # Handler methods are predominantly transactional and thus # blocking/synchronous. Genuinely non-blocking/asynchronous # methods must out themselves explicitly. if IAsynchronous.providedBy(method): # The @asynchronous decorator will DTRT. return method(params) else: # This is going to block and hold a database connection so # we limit its concurrency. return concurrency.webapp.run( deferToDatabase, transactional(method), params) else: raise HandlerNoSuchMethodError(method_name) def list(self, params): """List objects. :param offset: Offset into the queryset to return. :param limit: Maximum number of objects to return. """ queryset = self.get_queryset() queryset = queryset.order_by(self._meta.pk) if "start" in params: queryset = queryset.filter(**{ "%s__gt" % self._meta.pk: params["start"] }) if "limit" in params: queryset = queryset[:params["limit"]] objs = list(queryset) getpk = attrgetter(self._meta.pk) self.cache["loaded_pks"].update(getpk(obj) for obj in objs) return [ self.full_dehydrate(obj, for_list=True) for obj in objs ] def get(self, params): """Get object. 
:param pk: Object with primary key to return. """ obj = self.get_object(params) getpk = attrgetter(self._meta.pk) self.cache["loaded_pks"].add(getpk(obj)) return self.full_dehydrate(obj) def create(self, params): """Create the object from data.""" # Create by using form form_class = self.get_form_class("create") if form_class is not None: data = self.preprocess_form("create", params) request = HttpRequest() request.user = self.user form = form_class(request=request, data=data) if form.is_valid(): try: obj = form.save() except ValidationError as e: raise HandlerValidationError(e.error_dict) return self.full_dehydrate(obj) else: raise HandlerValidationError(form.errors) # Create by updating the fields on the object. obj = self._meta.object_class() obj = self.full_hydrate(obj, params) obj.save() return self.full_dehydrate(obj) def update(self, params): """Update the object.""" obj = self.get_object(params) # Update by using form. form_class = self.get_form_class("update") if form_class is not None: data = self.preprocess_form("update", params) form = form_class(data=data, instance=obj) if form.is_valid(): try: obj = form.save() except ValidationError as e: raise HandlerValidationError(e.error_dict) return self.full_dehydrate(obj) else: raise HandlerValidationError(form.errors) # Update by updating the fields on the object. obj = self.full_hydrate(obj, params) obj.save() return self.full_dehydrate(obj) def delete(self, params): """Delete the object.""" obj = self.get_object(params) obj.delete() def set_active(self, params): """Set the active node for this connection. This is the node that is being viewed in detail by the client. """ # Calling this method without a primary key will clear the currently # active object. if self._meta.pk not in params: if 'active_pk' in self.cache: del self.cache['active_pk'] return # Get the object data and set it as active. obj_data = self.get(params) self.cache['active_pk'] = obj_data[self._meta.pk] return obj_data def on_listen(self, channel, action, pk): """Called by the protocol when a channel notification occurs. Do not override this method instead override `listen`. """ pk = self._meta.pk_type(pk) if action == "delete": if pk in self.cache['loaded_pks']: self.cache['loaded_pks'].remove(pk) return (self._meta.handler_name, action, pk) else: return None try: obj = self.listen(channel, action, pk) except HandlerDoesNotExistError: obj = None if action == "create" and obj is not None: if pk in self.cache['loaded_pks']: # The user already knows about this node, so its not a create # to the user but an update. return self.on_listen_for_active_pk("update", pk, obj) else: self.cache['loaded_pks'].add(pk) return self.on_listen_for_active_pk(action, pk, obj) elif action == "update": if pk in self.cache['loaded_pks']: if obj is None: # The user no longer has access to this object. To the # client this is a delete action. self.cache['loaded_pks'].remove(pk) return (self._meta.handler_name, "delete", pk) else: # Just a normal update to the client. return self.on_listen_for_active_pk(action, pk, obj) elif obj is not None: # User just got access to this new object. Send the message to # the client as a create action instead of an update. self.cache['loaded_pks'].add(pk) return self.on_listen_for_active_pk("create", pk, obj) else: # User doesn't have access to this object, so do nothing. pass else: # Unknown action or the user doesn't have permission to view the # newly created object, so do nothing. 
            pass
        return None

    def on_listen_for_active_pk(self, action, pk, obj):
        """Return the correct data for `obj` depending on whether it's the
        active primary key."""
        if 'active_pk' in self.cache and pk == self.cache['active_pk']:
            # Active so send all the data for the object.
            return (
                self._meta.handler_name,
                action,
                self.full_dehydrate(obj, for_list=False),
                )
        else:
            # Not active so only send the data like it was coming from
            # the list call.
            return (
                self._meta.handler_name,
                action,
                self.full_dehydrate(obj, for_list=True),
                )

    def listen(self, channel, action, pk):
        """Called when the handler listens for events on channels with
        `Meta.listen_channels`.

        :param channel: Channel event occurred on.
        :param action: Action that caused this event.
        :param pk: Id of the object.
        """
        return self.get_object({
            self._meta.pk: pk
            })
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/0000755000000000000000000000000013056115004021563 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/listener.py0000644000000000000000000003251513056115004022170 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Listens for NOTIFY events from the postgres database."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    ]

from collections import defaultdict
from contextlib import closing
from errno import ENOENT

from django.db import connections
from django.db.utils import load_backend
from provisioningserver.utils.enum import map_enum
from provisioningserver.utils.twisted import (
    callOut,
    synchronous,
    )
from twisted.internet import (
    defer,
    error,
    interfaces,
    reactor,
    task,
    )
from twisted.internet.defer import (
    CancelledError,
    Deferred,
    succeed,
    )
from twisted.internet.task import deferLater
from twisted.internet.threads import deferToThread
from twisted.python import log
from twisted.python.failure import Failure
from zope.interface import implements


class ACTIONS:
    """Notify action types."""
    CREATE = "create"
    UPDATE = "update"
    DELETE = "delete"


class PostgresListenerNotifyError(Exception):
    """Error raised when the listener gets a notify message that cannot be
    decoded or is not being handled."""


class PostgresListener:
    """Listens for NOTIFY messages from postgres.

    A new connection is made to postgres with the isolation level of
    autocommit. This connection is only used for listening for notifications.
    Any query that needs to take place because of a notification should use
    its own connection. This class runs inside of the reactor. Any long
    running action that occurs based on a notification should defer its
    action to a thread to not block the reactor.

    :ivar connection: A database connection within one of Django's wrappers.
    :ivar connectionFileno: The fileno of the underlying database connection.
    :ivar connecting: a :class:`Deferred` while connecting, `None` at all
        other times.
    :ivar disconnecting: a :class:`Deferred` while disconnecting, `None`
        at all other times.
    """
    implements(interfaces.IReadDescriptor)

    # Seconds to wait to handle new notifications. When the notifications set
    # is empty it will wait this amount of time to check again for new
    # notifications.
HANDLE_NOTIFY_DELAY = 0.5 def __init__(self, alias="default"): self.alias = alias self.listeners = defaultdict(list) self.autoReconnect = False self.connection = None self.connectionFileno = None self.notifications = set() self.notifier = task.LoopingCall(self.handleNotifies) self.connecting = None self.disconnecting = None def start(self): """Start the listener.""" self.autoReconnect = True return self.tryConnection() def stop(self): """Stop the listener.""" self.autoReconnect = False return self.loseConnection() def connected(self): """Return True if connected.""" if self.connection is None: return False if self.connection.connection is None: return False return self.connection.connection.closed == 0 def logPrefix(self): """Return nice name for twisted logging.""" return "maas.websocket.listener" def logMsg(self, *args, **kwargs): """Helper to log message with the correct logPrefix.""" kwargs['system'] = self.logPrefix() log.msg(*args, **kwargs) def logErr(self, *args, **kwargs): """Helper to log error with the correct logPrefix.""" kwargs['system'] = self.logPrefix() log.err(*args, **kwargs) def doRead(self): """Poll the connection and process any notifications.""" try: self.connection.connection.poll() except: # If the connection goes down then `OperationalError` is raised. # It contains no pgcode or pgerror to identify the reason so no # special consideration can be made for it. Hence all errors are # treated the same, and we assume that the connection is broken. # # We do NOT return a failure, which would signal to the reactor # that the connection is broken in some way, because the reactor # will end up removing this instance from its list of selectables # but not from its list of readable fds, or something like that. # The point is that the reactor's accounting gets muddled. Things # work correctly if we manage the disconnection ourselves. # self.loseConnection(Failure(error.ConnectionLost())) else: # Add each notify to to the notifications set. This removes # duplicate notifications when one entity in the database is # updated multiple times in a short interval. Accumulating # notifications and allowing the listener to pick them up in # batches is imperfect but good enough, and simple. notifies = self.connection.connection.notifies if len(notifies) != 0: for notify in notifies: self.notifications.add((notify.channel, notify.payload)) # Delete the contents of the connection's notifies list so # that we don't process them a second time. del notifies[:] def fileno(self): """Return the fileno of the connection.""" return self.connectionFileno def startReading(self): """Add this listener to the reactor.""" self.connectionFileno = self.connection.connection.fileno() reactor.addReader(self) def stopReading(self): """Remove this listener from the reactor.""" try: reactor.removeReader(self) except IOError as error: # ENOENT here means that the fd has already been unregistered # from the underlying poller. It is as yet unclear how we get # into this state, so for now we ignore it. See epoll_ctl(2). if error.errno != ENOENT: raise finally: self.connectionFileno = None def register(self, channel, handler): """Register listening for notifications from a channel. When a notification is received for that `channel` the `handler` will be called with the action and object id. 
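
        A minimal sketch of wiring up a handler by hand (the "node" channel
        name and the handler below are illustrative only; in MAAS the
        websocket handlers register the real channels)::

            def on_node_change(action, obj_id):
                # Called in the reactor with the action ("create",
                # "update" or "delete") and the notify payload.
                print(action, obj_id)

            listener = PostgresListener()
            listener.register("node", on_node_change)
            listener.start()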
""" self.listeners[channel].append(handler) @synchronous def createConnection(self): """Create new database connection.""" db = connections.databases[self.alias] backend = load_backend(db['ENGINE']) return backend.DatabaseWrapper( db, self.alias, allow_thread_sharing=True) @synchronous def startConnection(self): """Start the database connection.""" self.connection = self.createConnection() self.connection.connect() self.connection.enter_transaction_management() self.connection.set_autocommit(True) @synchronous def stopConnection(self): """Stop database connection.""" # The connection is often in an unexpected state here -- for # unexplained reasons -- so be careful when unpealing layers. connection_wrapper, self.connection = self.connection, None if connection_wrapper is not None: connection = connection_wrapper.connection if connection is not None and not connection.closed: connection_wrapper.commit() connection_wrapper.leave_transaction_management() connection_wrapper.close() def tryConnection(self): """Keep retrying to make the connection.""" if self.connecting is None: if self.disconnecting is not None: raise RuntimeError( "Cannot attempt to make new connection before " "pending disconnection has finished.") def cb_connect(_): self.logMsg("Listening for database notifications.") def eb_connect(failure): msgFormat = "Unable to connect to database: %(error)s" self.logMsg(format=msgFormat, error=failure.getErrorMessage()) if failure.check(CancelledError): return failure elif self.autoReconnect: return deferLater(reactor, 3, connect) else: return failure def connect(interval=self.HANDLE_NOTIFY_DELAY): d = deferToThread(self.startConnection) d.addCallback(callOut, deferToThread, self.registerChannels) d.addCallback(callOut, self.startReading) d.addCallback(callOut, self.runHandleNotify, interval) # On failure ensure that the database connection is stopped. d.addErrback(callOut, deferToThread, self.stopConnection) d.addCallbacks(cb_connect, eb_connect) return d def done(): self.connecting = None self.connecting = connect().addBoth(callOut, done) return self.connecting def loseConnection(self, reason=Failure(error.ConnectionDone())): """Request that the connection be dropped.""" if self.disconnecting is None: d = self.disconnecting = Deferred() d.addBoth(callOut, self.stopReading) d.addBoth(callOut, self.cancelHandleNotify) d.addBoth(callOut, deferToThread, self.stopConnection) d.addBoth(callOut, self.connectionLost, reason) def done(): self.disconnecting = None d.addBoth(callOut, done) if self.connecting is None: # Already/never connected: begin shutdown now. self.disconnecting.callback(None) else: # Still connecting: cancel before disconnect. 
            self.connecting.addErrback(Failure.trap, CancelledError)
            self.connecting.chainDeferred(self.disconnecting)
            self.connecting.cancel()

        return self.disconnecting

    def connectionLost(self, reason):
        """Reconnect when the connection is lost."""
        self.connection = None
        if reason.check(error.ConnectionDone):
            self.logMsg("Connection closed.")
        elif reason.check(error.ConnectionLost):
            self.logMsg("Connection lost.")
        else:
            self.logErr(reason, "Connection lost.")
        if self.autoReconnect:
            reactor.callLater(3, self.tryConnection)

    def registerChannels(self):
        """Register all the channels."""
        for channel in self.listeners.keys():
            with closing(self.connection.cursor()) as cursor:
                for action in map_enum(ACTIONS).values():
                    cursor.execute("LISTEN %s_%s;" % (channel, action))

    def convertChannel(self, channel):
        """Convert the postgres channel to a registered channel and action.

        The postgres channel is structured as {channel}_{action}. This is
        split to match the correct handler and action for that handler.

        :raise PostgresListenerNotifyError: When {channel} is not registered
            or {action} is not in `ACTIONS`.
        """
        channel, action = channel.split('_', 1)
        if channel not in self.listeners:
            raise PostgresListenerNotifyError(
                "%s is not a registered channel." % channel)
        if action not in map_enum(ACTIONS).values():
            raise PostgresListenerNotifyError(
                "%s action is not supported." % action)
        return channel, action

    def runHandleNotify(self, delay=0, clock=reactor):
        """Schedule `handleNotify` to run later."""
        if not self.notifier.running:
            self.notifier.start(delay, now=False)

    def cancelHandleNotify(self):
        """Cancel the deferred `handleNotify` call."""
        if self.notifier.running:
            done = self.notifier.deferred
            self.notifier.stop()
            return done
        else:
            return succeed(None)

    def handleNotifies(self, clock=reactor):
        """Process all notify messages in the notifications set."""

        def gen_notifications(notifications):
            while len(notifications) != 0:
                yield notifications.pop()

        return task.coiterate(
            self.handleNotify(notification, clock=clock)
            for notification in gen_notifications(self.notifications))

    def handleNotify(self, notification, clock=reactor):
        """Process a notify message in the notifications set."""
        channel, payload = notification
        try:
            channel, action = self.convertChannel(channel)
        except PostgresListenerNotifyError:
            # Log the error and continue processing the remaining
            # notifications.
            self.logErr()
        else:
            defers = []
            handlers = self.listeners[channel]
            # XXX: There could be an arbitrary number of listeners. Should we
            # limit concurrency here? Perhaps even do one at a time.
            for handler in handlers:
                d = defer.maybeDeferred(handler, action, payload)
                d.addErrback(self.logErr)
                defers.append(d)
            return defer.DeferredList(defers)
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/protocol.py0000644000000000000000000003771713056115004022205 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The MAAS WebSockets protocol.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ ] from collections import deque from Cookie import SimpleCookie from functools import partial import json from urlparse import ( parse_qs, urlparse, ) from django.conf import settings from django.contrib.auth import ( BACKEND_SESSION_KEY, load_backend, SESSION_KEY, ) from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.utils.importlib import import_module from maasserver.eventloop import services from maasserver.models.nodegroup import NodeGroup from maasserver.utils.orm import transactional from maasserver.utils.threads import deferToDatabase from maasserver.websockets import handlers from maasserver.websockets.listener import PostgresListener from maasserver.websockets.websockets import STATUSES from provisioningserver.utils.twisted import ( deferred, synchronous, ) from twisted.internet.defer import inlineCallbacks from twisted.internet.protocol import ( Factory, Protocol, ) from twisted.python import log from twisted.web.server import NOT_DONE_YET class MSG_TYPE: #: Request made from client. REQUEST = 0 #: Response from server. RESPONSE = 1 #: Notify message from server. NOTIFY = 2 class RESPONSE_TYPE: #: SUCCESS = 0 #: ERROR = 1 def get_cookie(cookies, cookie_name): """Return the sessionid value from `cookies`.""" if cookies is None: return None cookies = SimpleCookie(cookies.encode('utf-8')) if cookie_name in cookies: return cookies[cookie_name].value else: return None class WebSocketProtocol(Protocol): """The web-socket protocol that supports the web UI. :ivar factory: Set by the factory that spawned this protocol. """ def __init__(self): self.messages = deque() self.user = None self.cache = {} def connectionMade(self): """Connection has been made to client.""" # Using the provided cookies on the connection request, authenticate # the client. If this fails or if the CSRF token can't be found, it # will call loseConnection. A websocket connection is only allowed # from an authenticated user. cookies = self.transport.cookies d = self.authenticate( get_cookie(cookies, 'sessionid'), get_cookie(cookies, 'csrftoken'), ) # Only add the client to the list of known clients if/when the # authentication succeeds. def authenticated(user): if user is None: # This user could not be authenticated. No further interaction # should take place. The connection is already being dropped. pass else: # This user is a keeper. Record it and process any message # that have already been received. self.user = user self.processMessages() self.factory.clients.append(self) d.addCallback(authenticated) def connectionLost(self, reason): """Connection to the client has been lost.""" # If the connection is lost before the authentication happens, the # 'client' will not have been added to the list. if self in self.factory.clients: self.factory.clients.remove(self) def loseConnection(self, status, reason): """Close connection with status and reason.""" msgFormat = "Closing connection: %(status)r (%(reason)r)" log.msg(format=msgFormat, status=status, reason=reason) self.transport._receiver._transport.loseConnection( status, reason.encode("utf-8")) def getMessageField(self, message, field): """Get `field` value from `message`. Closes connection with `PROTOCOL_ERROR` if `field` doesn't exist in `message`. 
""" if field not in message: self.loseConnection( STATUSES.PROTOCOL_ERROR, "Missing %s field in the received message." % field) return None return message[field] @synchronous @transactional def getUserFromSessionId(self, session_id): """Return the user from `session_id`.""" session_engine = self.factory.getSessionEngine() session_wrapper = session_engine.SessionStore(session_id) user_id = session_wrapper.get(SESSION_KEY) backend = session_wrapper.get(BACKEND_SESSION_KEY) if backend is None: return None auth_backend = load_backend(backend) if user_id is not None and auth_backend is not None: user = auth_backend.get_user(user_id) # Get the user again prefetching the SSHKey for the user. This is # done so a query is not made for each action that is possible on # a node in the node listing. return User.objects.filter( id=user.id).prefetch_related('sshkey_set').first() else: return None @deferred def authenticate(self, session_id, csrftoken): """Authenticate the connection. - Check that the CSRF token is valid. - Authenticate the user using the session id. This returns the authenticated user or ``None``. The latter means that the connection is being dropped, and that processing should cease. """ # Check the CSRF token. tokens = parse_qs( urlparse(self.transport.uri).query).get('csrftoken') if tokens is None or csrftoken not in tokens: self.loseConnection( STATUSES.PROTOCOL_ERROR, "Invalid CSRF token.") return None # Authenticate user. def got_user(user): if user is None: self.loseConnection( STATUSES.PROTOCOL_ERROR, "Failed to authenticate user.") return None else: return user def got_user_error(failure): self.loseConnection( STATUSES.PROTOCOL_ERROR, "Error authenticating user: %s" % failure.getErrorMessage()) return None d = deferToDatabase(self.getUserFromSessionId, session_id) d.addCallbacks(got_user, got_user_error) return d def dataReceived(self, data): """Received message from client and queue up the message.""" try: message = json.loads(data) except ValueError: # Only accept JSON data over the protocol. Close the connect # with invalid data. self.loseConnection( STATUSES.PROTOCOL_ERROR, "Invalid data expecting JSON object.") return "" self.messages.append(message) self.processMessages() return NOT_DONE_YET def processMessages(self): """Process all the queued messages.""" if self.user is None: # User is not authenticated yet, don't process messages. Once the # user is authenticated this method will be called to process the # queued messages. return [] # Process all the messages in the queue. handledMessages = [] while len(self.messages) > 0: message = self.messages.popleft() handledMessages.append(message) msg_type = self.getMessageField(message, "type") if msg_type is None: return handledMessages if msg_type != MSG_TYPE.REQUEST: # Only support request messages from the client. self.loseConnection( STATUSES.PROTOCOL_ERROR, "Invalid message type.") return handledMessages if self.handleRequest(message) is None: # Handling of request has failed, stop processing the messages # in the queue because the connection will be lost. return handledMessages return handledMessages def handleRequest(self, message): """Handle the request message.""" # Get the required request_id. request_id = self.getMessageField(message, "request_id") if request_id is None: return None # Decode the method to be called. msg_method = self.getMessageField(message, "method") if msg_method is None: return None try: handler_name, method = msg_method.split(".", 1) except ValueError: # Invalid method. 
Method format is "handler.method". self.loseConnection( STATUSES.PROTOCOL_ERROR, "Invalid method formatting.") return None # Create the handler for the call. handler_class = self.factory.getHandler(handler_name) if handler_class is None: self.loseConnection( STATUSES.PROTOCOL_ERROR, "Handler %s does not exist." % handler_name) return None handler = self.buildHandler(handler_class) d = handler.execute(method, message.get("params", {})) d.addCallbacks( partial(self.sendResult, request_id), partial(self.sendError, request_id, handler, method)) return d def sendResult(self, request_id, result): """Send final result to client.""" result_msg = { "type": MSG_TYPE.RESPONSE, "request_id": request_id, "rtype": RESPONSE_TYPE.SUCCESS, "result": result, } self.transport.write(json.dumps(result_msg).encode("utf-8")) return result def sendError(self, request_id, handler, method, failure): """Log and send error to client.""" if isinstance(failure.value, ValidationError): # When the error is a validation issue, send the error as a JSON # object. The client will use this to JSON to render the error # messages for the correct fields. error = json.dumps(failure.value.error_dict) else: error = failure.getErrorMessage() why = "Error on request (%s) %s.%s: %s" % ( request_id, handler._meta.handler_name, method, error) log.err(failure, _why=why) error_msg = { "type": MSG_TYPE.RESPONSE, "request_id": request_id, "rtype": RESPONSE_TYPE.ERROR, "error": error, } self.transport.write(json.dumps(error_msg).encode("utf-8")) return None def sendNotify(self, name, action, data): """Send the notify message with data.""" notify_msg = { "type": MSG_TYPE.NOTIFY, "name": name, "action": action, "data": data, } self.transport.write(json.dumps(notify_msg).encode("utf-8")) def buildHandler(self, handler_class): """Return an initialised instance of `handler_class`.""" handler_name = handler_class._meta.handler_name handler_cache = self.cache.setdefault(handler_name, {}) return handler_class(self.user, handler_cache) class WebSocketFactory(Factory): """Factory for WebSocketProtocol.""" protocol = WebSocketProtocol def __init__(self): self.handlers = {} self.clients = [] self.listener = PostgresListener() self.cacheHandlers() self.registerNotifiers() def startFactory(self): """Start the thread pool and the listener.""" self.registerRPCEvents() return self.listener.start() def stopFactory(self): """Stop the thread pool and the listener.""" stopped = self.listener.stop() self.unregisterRPCEvents() return stopped def getSessionEngine(self): """Returns the session engine being used by Django. Used by the protocol to validate the sessionid. """ return import_module(settings.SESSION_ENGINE) def cacheHandlers(self): """Cache all the websocket handlers.""" for name in dir(handlers): # Ignore internals if name.startswith("_"): continue # Only care about class that have _meta attribute, as that # means its a handler. cls = getattr(handlers, name) if not hasattr(cls, '_meta'): continue meta = cls._meta # Skip over abstract handlers as they only provide helpers for # children classes and should not be exposed over the channel. 
if meta.abstract: continue if (meta.handler_name is not None and meta.handler_name not in self.handlers): self.handlers[meta.handler_name] = cls def getHandler(self, name): """Return handler by name from the handler cache.""" return self.handlers.get(name) def registerNotifiers(self): """Registers all of the postgres channels in the handlers.""" for handler in self.handlers.values(): for channel in handler._meta.listen_channels: self.listener.register( channel, partial(self.onNotify, handler, channel)) @inlineCallbacks def onNotify(self, handler_class, channel, action, obj_id): for client in self.clients: handler = client.buildHandler(handler_class) data = yield deferToDatabase( self.processNotify, handler, channel, action, obj_id) if data is not None: (name, client_action, data) = data client.sendNotify(name, client_action, data) @transactional def processNotify(self, handler, channel, action, obj_id): return handler.on_listen(channel, action, obj_id) def registerRPCEvents(self): """Register for connected and disconnected events from the RPC service.""" rpc_service = services.getServiceNamed("rpc") rpc_service.events.connected.registerHandler( self.updateCluster) rpc_service.events.disconnected.registerHandler( self.updateCluster) def unregisterRPCEvents(self): """Unregister from connected and disconnected events from the RPC service.""" rpc_service = services.getServiceNamed("rpc") rpc_service.events.connected.unregisterHandler( self.updateCluster) rpc_service.events.disconnected.unregisterHandler( self.updateCluster) def updateCluster(self, ident): """Called when a cluster connects or disconnects from this region over the RPC connection. This is hard-coded to call the `ClusterHandler` as at the moment it is the only handler that needs this event. """ # The `ClusterHandler` expects the `on_listen` call to use the `id` # of the `Cluster` object, not the uuid. The `uuid` for the cluster # is converted into its `id`, and send to the onNotify call for the # `ClusterHandler`. d = deferToDatabase(self.getCluster, ident) d.addCallback(self.sendOnNotifyToCluster) d.addErrback( log.err, "Failed to send 'update' notification for cluster(%s) when " "RPC event fired." % ident) return d @synchronous @transactional def getCluster(self, cluster_uuid): """Return `NodeGroup` with `cluster_uuid`.""" try: return NodeGroup.objects.get(uuid=cluster_uuid) except NodeGroup.DoesNotExist: return None def sendOnNotifyToCluster(self, cluster): """Send onNotify to the `ClusterHandler` for `cluster`.""" cluster_handler = self.getHandler("cluster") if cluster_handler is None or cluster is None: return else: return self.onNotify( cluster_handler, "cluster", "update", cluster.id) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/0000755000000000000000000000000013056115004021125 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/websockets.py0000644000000000000000000005372513056115004022522 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). # # Copyright (c) Twisted Matrix Laboratories. # # http://twistedmatrix.com/trac/ticket/4173 """ The WebSockets protocol (RFC 6455), provided as a resource which wraps a factory. 
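
A sketch of typical use, wrapping a protocol factory so that it speaks over
WebSockets frames (``EchoProtocol`` is a stand-in for any Twisted protocol;
only this module's public names are otherwise assumed)::

    from twisted.internet.protocol import Factory
    from twisted.web.server import Site

    factory = Factory.forProtocol(EchoProtocol)
    resource = WebSocketsResource(lookupProtocolForFactory(factory))
    site = Site(resource)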
""" __all__ = ["WebSocketsResource", "IWebSocketsFrameReceiver", "lookupProtocolForFactory", "WebSocketsProtocol", "WebSocketsProtocolWrapper", "CONTROLS", "STATUSES"] from hashlib import sha1 from struct import ( pack, unpack, ) from twisted.internet.protocol import Protocol from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.python import log from twisted.python.constants import ( ValueConstant, Values, ) from twisted.web.resource import IResource from twisted.web.server import NOT_DONE_YET from zope.interface import ( directlyProvides, implementer, Interface, providedBy, ) class _WSException(Exception): """ Internal exception for control flow inside the WebSockets frame parser. """ class CONTROLS(Values): """ Control frame specifiers. @since: 13.2 """ CONTINUE = ValueConstant(0) TEXT = ValueConstant(1) BINARY = ValueConstant(2) CLOSE = ValueConstant(8) PING = ValueConstant(9) PONG = ValueConstant(10) class STATUSES(Values): """ Closing status codes. @since: 13.2 """ NORMAL = ValueConstant(1000) GOING_AWAY = ValueConstant(1001) PROTOCOL_ERROR = ValueConstant(1002) UNSUPPORTED_DATA = ValueConstant(1003) NONE = ValueConstant(1005) ABNORMAL_CLOSE = ValueConstant(1006) INVALID_PAYLOAD = ValueConstant(1007) POLICY_VIOLATION = ValueConstant(1008) MESSAGE_TOO_BIG = ValueConstant(1009) MISSING_EXTENSIONS = ValueConstant(1010) INTERNAL_ERROR = ValueConstant(1011) TLS_HANDSHAKE_FAILED = ValueConstant(1056) # The GUID for WebSockets, from RFC 6455. _WS_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" def _makeAccept(key): """ Create an B{accept} response for a given key. @type key: C{str} @param key: The key to respond to. @rtype: C{str} @return: An encoded response. """ return sha1("%s%s" % (key, _WS_GUID)).digest().encode("base64").strip() def _mask(buf, key): """ Mask or unmask a buffer of bytes with a masking key. @type buf: C{str} @param buf: A buffer of bytes. @type key: C{str} @param key: The masking key. Must be exactly four bytes. @rtype: C{str} @return: A masked buffer of bytes. """ key = [ord(i) for i in key] buf = list(buf) for i, char in enumerate(buf): buf[i] = chr(ord(char) ^ key[i % 4]) return "".join(buf) def _makeFrame(buf, opcode, fin, mask=None): """ Make a frame. This function always creates unmasked frames, and attempts to use the smallest possible lengths. @type buf: C{str} @param buf: A buffer of bytes. @type opcode: C{CONTROLS} @param opcode: Which type of frame to create. @type fin: C{bool} @param fin: Whether or not we're creating a final frame. @type mask: C{int} or C{NoneType} @param mask: If specified, the masking key to apply on the created frame. @rtype: C{str} @return: A packed frame. """ bufferLength = len(buf) if mask is not None: lengthMask = 0x80 else: lengthMask = 0 if bufferLength > 0xffff: length = "%s%s" % (chr(lengthMask | 0x7f), pack(">Q", bufferLength)) elif bufferLength > 0x7d: length = "%s%s" % (chr(lengthMask | 0x7e), pack(">H", bufferLength)) else: length = chr(lengthMask | bufferLength) if fin: header = 0x80 else: header = 0x01 header = chr(header | opcode.value) if mask is not None: buf = "%s%s" % (mask, _mask(buf, mask)) frame = "%s%s%s" % (header, length, buf) return frame def _parseFrames(frameBuffer, needMask=True): """ Parse frames in a highly compliant manner. It modifies C{frameBuffer} removing the parsed content from it. @param frameBuffer: A buffer of bytes. @type frameBuffer: C{list} @param needMask: If C{True}, refuse any frame which is not masked. 
@type needMask: C{bool} """ start = 0 payload = "".join(frameBuffer) while True: # If there's not at least two bytes in the buffer, bail. if len(payload) - start < 2: break # Grab the header. This single byte holds some flags and an opcode header = ord(payload[start]) if header & 0x70: # At least one of the reserved flags is set. Pork chop sandwiches! raise _WSException("Reserved flag in frame (%d)" % (header,)) fin = header & 0x80 # Get the opcode, and translate it to a local enum which we actually # care about. opcode = header & 0xf try: opcode = CONTROLS.lookupByValue(opcode) except ValueError: raise _WSException("Unknown opcode %d in frame" % opcode) # Get the payload length and determine whether we need to look for an # extra length. length = ord(payload[start + 1]) masked = length & 0x80 if not masked and needMask: # The client must mask the data sent raise _WSException("Received data not masked") length &= 0x7f # The offset we'll be using to walk through the frame. We use this # because the offset is variable depending on the length and mask. offset = 2 # Extra length fields. if length == 0x7e: if len(payload) - start < 4: break length = payload[start + 2:start + 4] length = unpack(">H", length)[0] offset += 2 elif length == 0x7f: if len(payload) - start < 10: break # Protocol bug: The top bit of this long long *must* be cleared; # that is, it is expected to be interpreted as signed. length = payload[start + 2:start + 10] length = unpack(">Q", length)[0] offset += 8 if masked: if len(payload) - (start + offset) < 4: # This is not strictly necessary, but it's more explicit so # that we don't create an invalid key. break key = payload[start + offset:start + offset + 4] offset += 4 if len(payload) - (start + offset) < length: break data = payload[start + offset:start + offset + length] if masked: data = _mask(data, key) if opcode == CONTROLS.CLOSE: if len(data) >= 2: # Gotta unpack the opcode and return usable data here. code = STATUSES.lookupByValue(unpack(">H", data[:2])[0]) data = code, data[2:] else: # No reason given; use generic data. data = STATUSES.NONE, "" yield opcode, data, bool(fin) start += offset + length if len(payload) > start: frameBuffer[:] = [payload[start:]] else: frameBuffer[:] = [] class IWebSocketsFrameReceiver(Interface): """ An interface for receiving WebSockets frames. @since: 13.2 """ def makeConnection(transport): """ Notification about the connection. @param transport: A L{WebSocketsTransport} instance wrapping an underlying transport. @type transport: L{WebSocketsTransport}. """ def frameReceived(opcode, data, fin): """ Callback when a frame is received. @type opcode: C{CONTROLS} @param opcode: The type of frame received. @type data: C{bytes} @param data: The content of the frame received. @type fin: C{bool} @param fin: Whether or not the frame is final. """ class WebSocketsTransport(object): """ A frame transport for WebSockets. @ivar _transport: A reference to the real transport. @since: 13.2 """ _disconnecting = False def __init__(self, transport): self._transport = transport def sendFrame(self, opcode, data, fin): """ Build a frame packet and send it over the wire. @type opcode: C{CONTROLS} @param opcode: The type of frame to send. @type data: C{bytes} @param data: The content of the frame to send. @type fin: C{bool} @param fin: Whether or not we're sending a final frame. """ packet = _makeFrame(data, opcode, fin) self._transport.write(packet) def loseConnection(self, code=STATUSES.NORMAL, reason=""): """ Close the connection. 
This includes telling the other side we're closing the connection. If the other side didn't signal that the connection is being closed, then we might not see their last message, but since their last message should, according to the spec, be a simple acknowledgement, it shouldn't be a problem. @param code: The closing frame status code. @type code: L{STATUSES} @param reason: Optionally, a utf-8 encoded text explaining why the connection was closed. @param reason: C{bytes} """ # Send a closing frame. It's only polite. (And might keep the browser # from hanging.) if not self._disconnecting: data = "%s%s" % (pack(">H", code.value), reason) frame = _makeFrame(data, CONTROLS.CLOSE, True) self._transport.write(frame) self._disconnecting = True self._transport.loseConnection() class WebSocketsProtocol(Protocol): """ A protocol parsing WebSockets frames and interacting with a L{IWebSocketsFrameReceiver} instance. @ivar _receiver: The L{IWebSocketsFrameReceiver} provider handling the frames. @type _receiver: L{IWebSocketsFrameReceiver} provider @ivar _buffer: The pending list of frames not processed yet. @type _buffer: C{list} @since: 13.2 """ _buffer = None def __init__(self, receiver): self._receiver = receiver def connectionMade(self): """ Log the new connection and initialize the buffer list. """ peer = self.transport.getPeer() log.msg(format="Opening connection with %(peer)s", peer=peer) self._buffer = [] self._receiver.makeConnection(WebSocketsTransport(self.transport)) def _parseFrames(self): """ Find frames in incoming data and pass them to the underlying protocol. """ for opcode, data, fin in _parseFrames(self._buffer): self._receiver.frameReceived(opcode, data, fin) if opcode == CONTROLS.CLOSE: # The other side wants us to close. code, reason = data msgFormat = "Closing connection: %(code)r" if reason: msgFormat += " (%(reason)r)" log.msg(format=msgFormat, reason=reason, code=code) # Close the connection. self.transport.loseConnection() return elif opcode == CONTROLS.PING: # 5.5.2 PINGs must be responded to with PONGs. # 5.5.3 PONGs must contain the data that was sent with the # provoking PING. self.transport.write(_makeFrame(data, CONTROLS.PONG, True)) def dataReceived(self, data): """ Append the data to the buffer list and parse the whole. @type data: C{bytes} @param data: The buffer received. """ self._buffer.append(data) try: self._parseFrames() except _WSException: # Couldn't parse all the frames, something went wrong, let's bail. log.err() self.transport.loseConnection() @implementer(IWebSocketsFrameReceiver) class _WebSocketsProtocolWrapperReceiver(): """ A L{IWebSocketsFrameReceiver} which accumulates data frames and forwards the payload to its C{wrappedProtocol}. @ivar _wrappedProtocol: The connected protocol @type _wrappedProtocol: C{IProtocol} provider. @ivar _transport: A reference to the L{WebSocketsTransport} @type _transport: L{WebSocketsTransport} @ivar _messages: The pending list of payloads received. @types _messages: C{list} """ def __init__(self, wrappedProtocol): self._wrappedProtocol = wrappedProtocol def makeConnection(self, transport): """ Keep a reference to the given C{transport} and instantiate the list of messages. """ self._transport = transport self._messages = [] def frameReceived(self, opcode, data, fin): """ For each frame received, accumulate the data (ignoring the opcode), and forwarding the messages if C{fin} is set. @type opcode: C{CONTROLS} @param opcode: The type of frame received. @type data: C{bytes} @param data: The content of the frame received. 
@type fin: C{bool} @param fin: Whether or not the frame is final. """ if opcode not in (CONTROLS.BINARY, CONTROLS.TEXT, CONTROLS.CONTINUE): return self._messages.append(data) if fin: content = "".join(self._messages) self._messages[:] = [] self._wrappedProtocol.dataReceived(content) class WebSocketsProtocolWrapper(WebSocketsProtocol): """ A L{WebSocketsProtocol} which wraps a regular C{IProtocol} provider, ignoring the frame mechanism. @ivar _wrappedProtocol: The connected protocol @type _wrappedProtocol: C{IProtocol} provider. @ivar defaultOpcode: The opcode used when C{transport.write} is called. Defaults to L{CONTROLS.TEXT}, can be L{CONTROLS.BINARY}. @type defaultOpcode: L{CONTROLS} @since: 13.2 """ def __init__(self, wrappedProtocol, defaultOpcode=CONTROLS.TEXT): self.wrappedProtocol = wrappedProtocol self.defaultOpcode = defaultOpcode WebSocketsProtocol.__init__( self, _WebSocketsProtocolWrapperReceiver(wrappedProtocol)) def makeConnection(self, transport): """ Upon connection, provides the transport interface, and forwards ourself as the transport to C{self.wrappedProtocol}. @type transport: L{twisted.internet.interfaces.ITransport} provider. @param transport: The transport to use for the protocol. """ directlyProvides(self, providedBy(transport)) WebSocketsProtocol.makeConnection(self, transport) self.wrappedProtocol.makeConnection(self) def write(self, data): """ Write to the websocket protocol, transforming C{data} in a frame. @type data: C{bytes} @param data: Data buffer used for the frame content. """ self._receiver._transport.sendFrame(self.defaultOpcode, data, True) def writeSequence(self, data): """ Send all chunks from C{data} using C{write}. @type data: C{list} of C{bytes} @param data: Data buffers used for the frames content. """ for chunk in data: self.write(chunk) def loseConnection(self): """ Try to lose the connection gracefully when closing by sending a close frame. """ self._receiver._transport.loseConnection() def __getattr__(self, name): """ Forward all non-local attributes and methods to C{self.transport}. """ return getattr(self.transport, name) def connectionLost(self, reason): """ Forward C{connectionLost} to C{self.wrappedProtocol}. @type reason: L{twisted.python.failure.Failure} @param reason: A failure instance indicating the reason why the connection was lost. """ self.wrappedProtocol.connectionLost(reason) @implementer(IResource) class WebSocketsResource(object): """ A resource for serving a protocol through WebSockets. This class wraps a factory and connects it to WebSockets clients. Each connecting client will be connected to a new protocol of the factory. Due to unresolved questions of logistics, this resource cannot have children. @param lookupProtocol: A callable returning a tuple of (protocol instance, matched protocol name or C{None}) when called with a valid connection. It's called with a list of asked protocols from the client and the connecting client request. If the returned protocol name is specified, it is used as I{Sec-WebSocket-Protocol} value. If the protocol is a L{WebSocketsProtocol} instance, it will be connected directly, otherwise it will be wrapped by L{WebSocketsProtocolWrapper}. For simple use cases using a factory, you can use L{lookupProtocolForFactory}. @type lookupProtocol: C{callable}. @since: 13.2 """ isLeaf = True def __init__(self, lookupProtocol): self._lookupProtocol = lookupProtocol def getChildWithDefault(self, name, request): """ Reject attempts to retrieve a child resource. 
All path segments beyond the one which refers to this resource are handled by the WebSocket connection. @type name: C{bytes} @param name: A single path component from a requested URL. @type request: L{twisted.web.iweb.IRequest} provider @param request: The request received. """ raise RuntimeError( "Cannot get IResource children from WebSocketsResource") def putChild(self, path, child): """ Reject attempts to add a child resource to this resource. The WebSocket connection handles all path segments beneath this resource, so L{IResource} children can never be found. @type path: C{bytes} @param path: A single path component. @type child: L{IResource} provider @param child: A resource to put underneat this one. """ raise RuntimeError( "Cannot put IResource children under WebSocketsResource") def render(self, request): """ Render a request. We're not actually rendering a request. We are secretly going to handle a WebSockets connection instead. @param request: The connecting client request. @type request: L{Request} @return: a string if the request fails, otherwise C{NOT_DONE_YET}. """ request.defaultContentType = None # If we fail at all, we'll fail with 400 and no response. failed = False if request.method != "GET": # 4.2.1.1 GET is required. failed = True upgrade = request.getHeader("Upgrade") if upgrade is None or "websocket" not in upgrade.lower(): # 4.2.1.3 Upgrade: WebSocket is required. failed = True connection = request.getHeader("Connection") if connection is None or "upgrade" not in connection.lower(): # 4.2.1.4 Connection: Upgrade is required. failed = True key = request.getHeader("Sec-WebSocket-Key") if key is None: # 4.2.1.5 The challenge key is required. failed = True version = request.getHeader("Sec-WebSocket-Version") if version != "13": # 4.2.1.6 Only version 13 works. failed = True # 4.4 Forward-compatible version checking. request.setHeader("Sec-WebSocket-Version", "13") if failed: request.setResponseCode(400) return "" askedProtocols = request.requestHeaders.getRawHeaders( "Sec-WebSocket-Protocol") protocol, protocolName = self._lookupProtocol(askedProtocols, request) # If a protocol is not created, we deliver an error status. if not protocol: request.setResponseCode(502) return "" # We are going to finish this handshake. We will return a valid status # code. # 4.2.2.5.1 101 Switching Protocols request.setResponseCode(101) # 4.2.2.5.2 Upgrade: websocket request.setHeader("Upgrade", "WebSocket") # 4.2.2.5.3 Connection: Upgrade request.setHeader("Connection", "Upgrade") # 4.2.2.5.4 Response to the key challenge request.setHeader("Sec-WebSocket-Accept", _makeAccept(key)) # 4.2.2.5.5 Optional codec declaration if protocolName: request.setHeader("Sec-WebSocket-Protocol", protocolName) # Provoke request into flushing headers and finishing the handshake. request.write("") # And now take matters into our own hands. We shall manage the # transport's lifecycle. transport, request.transport = request.transport, None # Set the cookies on the transport. So the protocol can view the # cookies. transport.cookies = request.getHeader("cookie") # Set the uri on the transport. This allows the protocol to view the # uri. transport.uri = request.uri if not isinstance(protocol, WebSocketsProtocol): protocol = WebSocketsProtocolWrapper(protocol) # Connect the transport to our factory, and make things go. We need to # do some stupid stuff here; see #3204, which could fix it. 
if isinstance(transport.protocol, TLSMemoryBIOProtocol): transport.protocol.wrappedProtocol = protocol else: transport.protocol = protocol protocol.makeConnection(transport) return NOT_DONE_YET def lookupProtocolForFactory(factory): """ Return a suitable C{lookupProtocol} argument for L{WebSocketsResource} which ignores the protocol names and just returns a protocol instance built by C{factory}. @since: 13.2 """ def lookupProtocol(protocolNames, request): protocol = factory.buildProtocol(request.transport.getPeer()) return protocol, None return lookupProtocol maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/__init__.py0000644000000000000000000000320013056115004023677 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Handlers for the WebSocket connections.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "NodeHandler", "DeviceHandler", "GeneralHandler", "ClusterHandler", "UserHandler", "ZoneHandler", "FabricHandler", "SpaceHandler", "SubnetHandler", "VLANHandler", ] from maasserver.utils import ignore_unused from maasserver.websockets.handlers.cluster import ClusterHandler from maasserver.websockets.handlers.device import DeviceHandler from maasserver.websockets.handlers.event import EventHandler from maasserver.websockets.handlers.fabric import FabricHandler from maasserver.websockets.handlers.general import GeneralHandler from maasserver.websockets.handlers.node import NodeHandler from maasserver.websockets.handlers.space import SpaceHandler from maasserver.websockets.handlers.subnet import SubnetHandler from maasserver.websockets.handlers.tag import TagHandler from maasserver.websockets.handlers.user import UserHandler from maasserver.websockets.handlers.vlan import VLANHandler from maasserver.websockets.handlers.zone import ZoneHandler ignore_unused(ClusterHandler) ignore_unused(DeviceHandler) ignore_unused(EventHandler) ignore_unused(FabricHandler) ignore_unused(GeneralHandler) ignore_unused(NodeHandler) ignore_unused(SpaceHandler) ignore_unused(SubnetHandler) ignore_unused(TagHandler) ignore_unused(UserHandler) ignore_unused(VLANHandler) ignore_unused(ZoneHandler) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/cluster.py0000644000000000000000000000621013056115004023615 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE).
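# --- Illustrative example (not part of MAAS) --------------------------------
# A minimal, standalone sketch of the Sec-WebSocket-Accept computation that
# _makeAccept() and render() in websockets.py above perform during the
# RFC 6455 handshake. It is written against Python 3's hashlib/base64 rather
# than the Python 2 str.encode("base64") idiom used in that module; the
# key/accept pair is the worked example from RFC 6455, section 1.3.
from base64 import b64encode
from hashlib import sha1

WS_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

def make_accept_example(key):
    # Concatenate the client's Sec-WebSocket-Key with the fixed GUID,
    # SHA-1 hash the result, and base64-encode the raw digest.
    digest = sha1((key + WS_GUID).encode("ascii")).digest()
    return b64encode(digest).decode("ascii")

assert make_accept_example(
    "dGhlIHNhbXBsZSBub25jZQ==") == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
# -----------------------------------------------------------------------------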
"""The cluster handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ClusterHandler", ] from maasserver.clusterrpc.power_parameters import ( get_all_power_types_from_clusters, ) from maasserver.models.nodegroup import NodeGroup from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) def dehydrate_ip_address(ip_address): """Dehydrate `IPAddress` to string.""" if ip_address is None: return None else: return "%s" % ip_address class ClusterHandler(TimestampedModelHandler): class Meta: queryset = ( NodeGroup.objects.all() .prefetch_related('nodegroupinterface_set') .prefetch_related('nodegroupinterface_set__subnet')) pk = 'id' allowed_methods = ['list', 'get', 'set_active'] exclude = [ "api_token", "api_key", "dhcp_key", "maas_url", ] listen_channels = [ "nodegroup", ] def dehydrate(self, obj, data, for_list=False): """Add extra fields to `data`.""" data["connected"] = obj.is_connected() data["state"] = obj.get_state() data["power_types"] = self.dehydrate_power_types(obj) data["interfaces"] = self.dehydrate_interfaces(obj) return data def dehydrate_power_types(self, obj): """Return all the power types.""" return get_all_power_types_from_clusters(nodegroups=[obj]) def dehydrate_interface(self, interface): """Dehydrate a `NodeGroupInterface`.""" return { "id": interface.id, "ip": "%s" % interface.ip, "name": interface.name, "management": interface.management, "interface": interface.interface, "subnet_mask": dehydrate_ip_address(interface.subnet_mask), "broadcast_ip": dehydrate_ip_address(interface.broadcast_ip), "router_ip": dehydrate_ip_address(interface.router_ip), "dynamic_range": { "low": dehydrate_ip_address(interface.ip_range_low), "high": dehydrate_ip_address(interface.ip_range_high), }, "static_range": { "low": dehydrate_ip_address( interface.static_ip_range_low), "high": dehydrate_ip_address( interface.static_ip_range_high), }, "foreign_dhcp_ip": dehydrate_ip_address( interface.foreign_dhcp_ip), "network": ( "%s" % interface.network if interface.network is not None else None), } def dehydrate_interfaces(self, obj): """Dehydrate all `NodeGroupInterface` for obj.""" return [ self.dehydrate_interface(interface) for interface in obj.nodegroupinterface_set.all() ] maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/device.py0000644000000000000000000003061213056115004023376 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""The device handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DeviceHandler", ] from maasserver.clusterrpc import dhcp from maasserver.enum import ( IPADDRESS_TYPE, NODE_PERMISSION, ) from maasserver.exceptions import NodeActionError from maasserver.forms import ( DeviceForm, DeviceWithMACsForm, ) from maasserver.models.node import Device from maasserver.models.nodegroupinterface import NodeGroupInterface from maasserver.models.staticipaddress import StaticIPAddress from maasserver.models.subnet import Subnet from maasserver.node_action import compile_node_actions from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerError, ) from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) from provisioningserver.logger import get_maas_logger maaslog = get_maas_logger("websockets.device") class DEVICE_IP_ASSIGNMENT: """The vocabulary of a `Device`'s possible IP assignment type. This value is calculated by looking at the overall model for a `Device`. This is not set directly on the model.""" #: Device is outside of MAAS control. EXTERNAL = "external" #: Device receives ip address from `NodeGroupInterface` dynamic range. DYNAMIC = "dynamic" #: Device has ip address assigned from `NodeGroupInterface` static range. STATIC = "static" def update_host_maps(static_mappings, nodegroups): """Helper to call update_host_maps will static mappings for all `nodegroups`.""" static_mappings = { nodegroup: dict(static_mappings) for nodegroup in nodegroups } return list(dhcp.update_host_maps(static_mappings)) def get_Interface_from_list(interfaces, mac): """Return the `Interface` object based on the mac value.""" for obj in interfaces: if obj.mac_address == mac: return obj return None def log_static_allocations(device, external_static_ips, assigned_sticky_ips): """Log the allocation of the static ip address.""" all_ips = [ static_ip.ip for static_ip, _ in external_static_ips ] all_ips.extend([ static_ip.ip for static_ip, _ in assigned_sticky_ips ]) if len(all_ips) > 0: maaslog.info( "%s: Sticky IP address(es) allocated: %s", device.hostname, ', '.join(all_ips)) class DeviceHandler(TimestampedModelHandler): class Meta: queryset = ( Device.devices.filter(installable=False, parent=None) .select_related('nodegroup', 'owner') .prefetch_related('interface_set__ip_addresses__subnet') .prefetch_related('nodegroup__nodegroupinterface_set') .prefetch_related('zone') .prefetch_related('tags')) pk = 'system_id' pk_type = unicode allowed_methods = ['list', 'get', 'set_active', 'create', 'action'] exclude = [ "id", "installable", "boot_interface", "boot_cluster_ip", "boot_disk", "token", "netboot", "agent_name", "cpu_count", "memory", "power_state", "routers", "architecture", "boot_type", "bios_boot_method", "status", "power_parameters", "power_state_updated", "disable_ipv4", "osystem", "power_type", "error_description", "error", "license_key", "distro_series", "min_hwe_kernel", "hwe_kernel", "gateway_link_ipv4", "gateway_link_ipv6", "enable_ssh", "skip_networking", "skip_storage", ] list_fields = [ "system_id", "hostname", "owner", "zone", "parent", "pxe_mac", ] listen_channels = [ "device", ] def get_queryset(self): """Return `QuerySet` for devices only vewable by `user`.""" nodes = super(DeviceHandler, self).get_queryset() return Device.devices.get_nodes( self.user, NODE_PERMISSION.VIEW, from_nodes=nodes) def dehydrate_owner(self, user): """Return owners 
username.""" if user is None: return "" else: return user.username def dehydrate_zone(self, zone): """Return zone name.""" return { "id": zone.id, "name": zone.name, } def dehydrate_nodegroup(self, nodegroup): """Return the nodegroup name.""" if nodegroup is None: return None else: return { "id": nodegroup.id, "uuid": nodegroup.uuid, "name": nodegroup.name, "cluster_name": nodegroup.cluster_name, } def dehydrate(self, obj, data, for_list=False): """Add extra fields to `data`.""" data["fqdn"] = obj.fqdn data["actions"] = compile_node_actions(obj, self.user).keys() boot_interface = obj.get_boot_interface() data["primary_mac"] = ( "%s" % boot_interface.mac_address if boot_interface is not None else "") data["extra_macs"] = [ "%s" % interface.mac_address for interface in obj.interface_set.all() if interface != boot_interface ] data["ip_assignment"] = self.dehydrate_ip_assignment( obj, boot_interface) data["ip_address"] = self.dehydrate_ip_address( obj, boot_interface) data["tags"] = [ tag.name for tag in obj.tags.all() ] return data def _get_first_none_discovered_ip(self, ip_addresses): for ip in ip_addresses: if ip.alloc_type != IPADDRESS_TYPE.DISCOVERED: return ip def _get_first_discovered_ip_with_ip(self, ip_addresses): for ip in ip_addresses: if ip.alloc_type == IPADDRESS_TYPE.DISCOVERED and ip.ip: return ip def dehydrate_ip_assignment(self, obj, interface): """Return the calculated `DEVICE_IP_ASSIGNMENT` based on the model.""" if interface is None: return "" # We get the IP address from the all() so the cache is used. ip_addresses = list(interface.ip_addresses.all()) first_ip = self._get_first_none_discovered_ip(ip_addresses) if first_ip is not None: if first_ip.alloc_type == IPADDRESS_TYPE.DHCP: return DEVICE_IP_ASSIGNMENT.DYNAMIC elif first_ip.subnet is None: return DEVICE_IP_ASSIGNMENT.EXTERNAL else: return DEVICE_IP_ASSIGNMENT.STATIC return DEVICE_IP_ASSIGNMENT.DYNAMIC def dehydrate_ip_address(self, obj, interface): """Return the IP address for the device.""" if interface is None: return None # Get ip address from StaticIPAddress if available. ip_addresses = list(interface.ip_addresses.all()) first_ip = self._get_first_none_discovered_ip(ip_addresses) if first_ip is not None: if first_ip.alloc_type == IPADDRESS_TYPE.DHCP: discovered_ip = self._get_first_discovered_ip_with_ip( ip_addresses) if discovered_ip: return "%s" % discovered_ip.ip elif first_ip.ip: return "%s" % first_ip.ip # Currently has no assigned IP address. return None def get_object(self, params): """Get object by using the `pk` in `params`.""" obj = super(DeviceHandler, self).get_object(params) if self.user.is_superuser or obj.owner == self.user: return obj raise HandlerDoesNotExistError(params[self._meta.pk]) def get_form_class(self, action): """Return the form class used for `action`.""" if action == "create": return DeviceWithMACsForm elif action == "update": return DeviceForm else: raise HandlerError("Unknown action: %s" % action) def get_mac_addresses(self, data): """Convert the given `data` into a list of mac addresses. This is used by the create method and the hydrate method. The `primary_mac` will always be the first entry in the list. """ macs = data.get("extra_macs", []) if "primary_mac" in data: macs.insert(0, data["primary_mac"]) return macs def preprocess_form(self, action, params): """Process the `params` to before passing the data to the form.""" new_params = { "mac_addresses": self.get_mac_addresses(params), "hostname": params.get("hostname"), } # Cleanup any fields that have a None value. 
new_params = { key: value for key, value in new_params.viewitems() if value is not None } return super(DeviceHandler, self).preprocess_form(action, new_params) def create(self, params): """Create the object from params.""" # XXX blake_r 03-04-15 bug=1440102: This is very ugly and a repeat # of code in other places. Needs to be refactored. # Create the object with the form and then create all of the interfaces # based on the users choices. data = super(DeviceHandler, self).create(params) device_obj = Device.objects.get(system_id=data['system_id']) interfaces = list(device_obj.interface_set.all()) external_static_ips = [] assigned_sticky_ips = [] # Acquire all of the needed ip address based on the user selection. for nic in params["interfaces"]: interface = get_Interface_from_list(interfaces, nic["mac"]) ip_assignment = nic["ip_assignment"] if ip_assignment == DEVICE_IP_ASSIGNMENT.EXTERNAL: subnet = Subnet.objects.get_best_subnet_for_ip( nic["ip_address"]) sticky_ip = StaticIPAddress.objects.create( alloc_type=IPADDRESS_TYPE.USER_RESERVED, ip=nic["ip_address"], subnet=subnet, user=self.user) interface.ip_addresses.add(sticky_ip) external_static_ips.append( (sticky_ip, interface)) elif ip_assignment == DEVICE_IP_ASSIGNMENT.DYNAMIC: dhcp_ip = StaticIPAddress.objects.create( alloc_type=IPADDRESS_TYPE.DHCP, ip=None) interface.ip_addresses.add(dhcp_ip) elif ip_assignment == DEVICE_IP_ASSIGNMENT.STATIC: # Link the MAC address to the cluster interface. cluster_interface = NodeGroupInterface.objects.get( id=nic["interface"]) ip = StaticIPAddress.objects.create( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=None, subnet=cluster_interface.subnet) interface.ip_addresses.add(ip) # Convert an empty string to None. ip_address = nic.get("ip_address") if not ip_address: ip_address = None # Claim the static ip. sticky_ips = interface.claim_static_ips( requested_address=ip_address) assigned_sticky_ips.extend([ (static_ip, interface) for static_ip in sticky_ips ]) log_static_allocations( device_obj, external_static_ips, assigned_sticky_ips) return self.full_dehydrate(device_obj) def action(self, params): """Perform the action on the object.""" obj = self.get_object(params) action_name = params.get("action") actions = compile_node_actions(obj, self.user) action = actions.get(action_name) if action is None: raise NodeActionError( "%s action is not available for this device." % action_name) extra_params = params.get("extra", {}) return action.execute(**extra_params) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/event.py0000644000000000000000000001070313056115004023257 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
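# --- Illustrative example (not part of MAAS) --------------------------------
# A hedged sketch of the `params` payload a websocket client would send for
# "device.create", based on the fields DeviceHandler.create() and
# preprocess_form() read above. The concrete values (hostname, MACs, the
# NodeGroupInterface id) are invented for illustration.
example_create_params = {
    "hostname": "lamp-controller",
    "primary_mac": "00:16:3e:aa:bb:cc",   # becomes mac_addresses[0]
    "extra_macs": [],
    "interfaces": [
        {
            "mac": "00:16:3e:aa:bb:cc",
            # One of DEVICE_IP_ASSIGNMENT.{EXTERNAL,DYNAMIC,STATIC}:
            "ip_assignment": "static",
            "ip_address": "",             # empty string is converted to None
            "interface": 4,               # NodeGroupInterface id (STATIC only)
        },
    ],
}
# -----------------------------------------------------------------------------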
"""The event handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "EventHandler", ] import datetime from maasserver.models.event import Event from maasserver.models.eventtype import LOGGING_LEVELS from maasserver.models.node import Node from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerPKError, ) from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) def dehydrate_event_type_level(level): """Dehydrate the `EventType.level`.""" return LOGGING_LEVELS[level].lower() class EventHandler(TimestampedModelHandler): class Meta: queryset = Event.objects.all().select_related("type") pk = 'id' allowed_methods = ['list', 'clear'] exclude = ["node"] listen_channels = [ "event", ] def __init__(self, *args, **kwargs): super(EventHandler, self).__init__(*args, **kwargs) if "node_ids" not in self.cache: self.cache["node_ids"] = [] def dehydrate_type(self, event_type): """Dehydrate the `EventType` on this event.""" return { "level": dehydrate_event_type_level(event_type.level), "name": event_type.name, "description": event_type.description, } def dehydrate(self, obj, data, for_list=False): """Add extra fields to `data`.""" data['node_id'] = obj.node_id return data def get_node(self, params): """Get node object from params""" if "node_id" not in params: raise HandlerPKError("Missing node_id in params") node_id = params["node_id"] try: node = Node.objects.get(id=node_id) except Node.DoesNotExist: raise HandlerDoesNotExistError(node_id) return node def list(self, params): """List objects. :param system_id: `Node.system_id` for the events. :param offset: Offset into the queryset to return. :param limit: Maximum number of objects to return. """ node = self.get_node(params) self.cache['node_ids'].append(node.id) queryset = self.get_queryset() queryset = queryset.filter(node=node) queryset = queryset.order_by('-id') # List events that where created in the past maximum number of days. max_days = params.get("max_days", 30) created_after = datetime.datetime.now() - datetime.timedelta(max_days) queryset = queryset.filter(created__gte=created_after) if "start" in params: queryset = queryset.filter(id__lt=params["start"]) if "limit" in params: queryset = queryset[:params["limit"]] return [ self.full_dehydrate(obj, for_list=True) for obj in queryset ] def clear(self, params): """Clears the current node for events. Called by the client to inform the region it no longer cares about events for this node. """ node = self.get_node(params) if node.id in self.cache["node_ids"]: self.cache["node_ids"].remove(node.id) return None def on_listen(self, channel, action, pk): """Called by the protocol when a channel notification occurs.""" pk = self._meta.pk_type(pk) if action == "delete": # Possible to get a delete for an event that is currently, not # being viewed by the user because it belongs to a differnet node. # Since there is no way to get the deleted event, we just send the # primary key, it will only be removed if the client has an event # with that id. return (self._meta.handler_name, action, pk) obj = self.listen(channel, action, pk) if obj is None: return None if obj.node_id not in self.cache["node_ids"]: # Notification is not for a node that is being listed, # do nothing with the notification. return None # Client is listening for events for this node, send the new event. 
return ( self._meta.handler_name, action, self.full_dehydrate(obj, for_list=True), ) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/fabric.py0000644000000000000000000000234413056115004023366 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The fabric handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "FabricHandler", ] from maasserver.models.fabric import Fabric from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) class FabricHandler(TimestampedModelHandler): class Meta: queryset = ( Fabric.objects.all().prefetch_related( "vlan_set__interface_set")) pk = 'id' allowed_methods = ['list', 'get', 'set_active'] listen_channels = [ "fabric", ] def dehydrate(self, obj, data, for_list=False): data["name"] = obj.get_name() data["vlan_ids"] = [ vlan.id for vlan in obj.vlan_set.all() ] data["nodes_count"] = len({ interface.node_id for vlan in obj.vlan_set.all() for interface in vlan.interface_set.all() if interface.node_id is not None }) return data maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/general.py0000644000000000000000000001116413056115004023555 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The general handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "GeneralHandler", ] from maasserver.enum import ( BOND_LACP_RATE_CHOICES, BOND_MODE_CHOICES, BOND_XMIT_HASH_POLICY_CHOICES, NODE_PERMISSION, ) from maasserver.models.bootresource import BootResource from maasserver.models.candidatename import gen_candidate_names from maasserver.models.config import Config from maasserver.models.node import Node from maasserver.node_action import ACTIONS_DICT from maasserver.utils.osystems import ( list_all_usable_hwe_kernels, list_all_usable_osystems, list_all_usable_releases, list_hwe_kernel_choices, list_osystem_choices, list_release_choices, ) from maasserver.utils.version import get_maas_version_ui from maasserver.websockets.base import Handler class GeneralHandler(Handler): """Provides general methods that can be called from the client.""" class Meta: allowed_methods = [ 'architectures', 'hwe_kernels', 'default_min_hwe_kernel', 'osinfo', 'node_actions', 'device_actions', 'random_hostname', 'bond_options', 'version', ] def architectures(self, params): """Return all supported architectures.""" return BootResource.objects.get_usable_architectures() def hwe_kernels(self, params): """Return all supported hwe_kernels.""" return list_hwe_kernel_choices( BootResource.objects.get_usable_hwe_kernels()) def default_min_hwe_kernel(self, params): """Return the default_min_hwe_kernel.""" return Config.objects.get_config('default_min_hwe_kernel') def osinfo(self, params): """Return all available operating systems and releases information.""" osystems = list_all_usable_osystems() releases = list_all_usable_releases(osystems) kernels = list_all_usable_hwe_kernels(releases) return { "osystems": list_osystem_choices(osystems, include_default=False), "releases": list_release_choices(releases, include_default=False), "kernels": kernels, "default_osystem": Config.objects.get_config("default_osystem"), "default_release": 
Config.objects.get_config( "default_distro_series"), } def dehydrate_actions(self, actions): """Dehydrate all the actions.""" return [ { "name": name, "title": action.display, "sentence": action.display_sentence, } for name, action in actions.items() ] def node_actions(self, params): """Return all possible node actions.""" if self.user.is_superuser: actions = ACTIONS_DICT else: # Standard users will not be able to use any admin actions. Hide # them as they will never be actionable on any node. actions = dict() for name, action in ACTIONS_DICT.items(): permission = action.permission if action.installable_permission is not None: permission = action.installable_permission if permission != NODE_PERMISSION.ADMIN: actions[name] = action return self.dehydrate_actions(actions) def device_actions(self, params): """Return all possible device actions.""" # Remove the actions that can only be performed on installable nodes. actions = { name: action for name, action in ACTIONS_DICT.items() if not action.installable_only } return self.dehydrate_actions(actions) def random_hostname(self, params): """Return a random hostname.""" for new_hostname in gen_candidate_names(): try: Node.objects.get(hostname=new_hostname) except Node.DoesNotExist: return new_hostname return "" def bond_options(self, params): """Return all the possible bond options.""" return { "modes": BOND_MODE_CHOICES, "lacp_rates": BOND_LACP_RATE_CHOICES, "xmit_hash_policies": BOND_XMIT_HASH_POLICY_CHOICES, } def version(self, params): """Return the MAAS version.""" return get_maas_version_ui() maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/node.py0000644000000000000000000013375213056115004023075 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
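# --- Illustrative example (not part of MAAS) --------------------------------
# A hedged sketch of the request/response envelope used when a client calls
# one of the GeneralHandler methods above over the websocket. The envelope
# keys and the "handler.method" format come from protocol.py; the numeric
# values of MSG_TYPE.REQUEST, MSG_TYPE.RESPONSE and RESPONSE_TYPE.SUCCESS are
# assumptions, not values confirmed by this source.
example_request = {
    "type": 0,                    # assumed value of MSG_TYPE.REQUEST
    "request_id": 1,              # echoed back in the response
    "method": "general.version",  # "handler.method" format
    "params": {},
}
example_response = {
    "type": 1,                    # assumed value of MSG_TYPE.RESPONSE
    "request_id": 1,
    "rtype": 0,                   # assumed value of RESPONSE_TYPE.SUCCESS
    "result": "1.9.5",            # whatever get_maas_version_ui() returns
}
# -----------------------------------------------------------------------------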
"""The node handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "NodeHandler", ] import logging from operator import itemgetter from django.core.exceptions import ValidationError from lxml import etree from maasserver.enum import ( FILESYSTEM_FORMAT_TYPE_CHOICES, FILESYSTEM_FORMAT_TYPE_CHOICES_DICT, INTERFACE_LINK_TYPE, IPADDRESS_TYPE, NODE_PERMISSION, NODE_STATUS, ) from maasserver.exceptions import NodeActionError from maasserver.forms import ( AddPartitionForm, AdminNodeWithMACAddressesForm, CreateBcacheForm, CreateCacheSetForm, CreateLogicalVolumeForm, CreateRaidForm, CreateVolumeGroupForm, FormatBlockDeviceForm, FormatPartitionForm, MountBlockDeviceForm, MountPartitionForm, UpdatePhysicalBlockDeviceForm, UpdateVirtualBlockDeviceForm, ) from maasserver.forms_interface import ( BondInterfaceForm, InterfaceForm, PhysicalInterfaceForm, VLANInterfaceForm, ) from maasserver.forms_interface_link import InterfaceLinkForm from maasserver.models.blockdevice import BlockDevice from maasserver.models.cacheset import CacheSet from maasserver.models.config import Config from maasserver.models.event import Event from maasserver.models.filesystemgroup import VolumeGroup from maasserver.models.interface import Interface from maasserver.models.node import Node from maasserver.models.nodegroup import NodeGroup from maasserver.models.nodeprobeddetails import get_single_probed_details from maasserver.models.partition import Partition from maasserver.models.physicalblockdevice import PhysicalBlockDevice from maasserver.models.subnet import Subnet from maasserver.models.tag import Tag from maasserver.models.virtualblockdevice import VirtualBlockDevice from maasserver.node_action import compile_node_actions from maasserver.rpc import getClientFor from maasserver.third_party_drivers import get_third_party_driver from maasserver.utils.converters import ( human_readable_bytes, XMLToYAML, ) from maasserver.utils.orm import transactional from maasserver.utils.osystems import make_hwe_kernel_ui_text from maasserver.utils.threads import deferToDatabase from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerError, HandlerPermissionError, ) from maasserver.websockets.handlers.event import dehydrate_event_type_level from maasserver.websockets.handlers.timestampedmodel import ( dehydrate_datetime, TimestampedModelHandler, ) from metadataserver.enum import RESULT_TYPE from metadataserver.models import NodeResult from provisioningserver.drivers.power import POWER_QUERY_TIMEOUT from provisioningserver.logger import get_maas_logger from provisioningserver.power.poweraction import ( PowerActionFail, UnknownPowerType, ) from provisioningserver.rpc.cluster import PowerQuery from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.tags import merge_details_cleanly from provisioningserver.utils.twisted import ( asynchronous, deferWithTimeout, ) from twisted.internet.defer import ( CancelledError, inlineCallbacks, returnValue, ) maaslog = get_maas_logger("websockets.node") class NodeHandler(TimestampedModelHandler): class Meta: queryset = ( Node.nodes.filter(installable=True) .select_related('nodegroup', 'pxe_mac', 'owner') .prefetch_related( 'interface_set__ip_addresses__subnet__vlan__fabric') .prefetch_related('interface_set__ip_addresses__subnet__space') .prefetch_related('nodegroup__nodegroupinterface_set__subnet') 
.prefetch_related('interface_set__vlan__fabric') .prefetch_related('zone') .prefetch_related('tags') .prefetch_related('blockdevice_set__physicalblockdevice') .prefetch_related('blockdevice_set__virtualblockdevice')) pk = 'system_id' pk_type = unicode allowed_methods = [ 'list', 'get', 'create', 'update', 'action', 'set_active', 'check_power', 'create_physical', 'create_vlan', 'create_bond', 'update_interface', 'delete_interface', 'link_subnet', 'unlink_subnet', 'update_filesystem', 'update_disk_tags', 'update_disk', 'delete_disk', 'delete_partition', 'delete_volume_group', 'delete_cache_set', 'create_partition', 'create_cache_set', 'create_bcache', 'create_raid', 'create_volume_group', 'create_logical_volume', 'set_boot_disk', ] form = AdminNodeWithMACAddressesForm exclude = [ "installable", "parent", "boot_interface", "boot_cluster_ip", "token", "netboot", "agent_name", "power_state_updated", "gateway_link_ipv4", "gateway_link_ipv6", "enable_ssh", "skip_networking", "skip_storage", ] list_fields = [ "system_id", "hostname", "owner", "cpu_count", "memory", "power_state", "zone", ] listen_channels = [ "node", ] def get_queryset(self): """Return `QuerySet` for nodes only vewable by `user`.""" nodes = super(NodeHandler, self).get_queryset() return Node.nodes.get_nodes( self.user, NODE_PERMISSION.VIEW, from_nodes=nodes) def dehydrate_owner(self, user): """Return owners username.""" if user is None: return "" else: return user.username def dehydrate_zone(self, zone): """Return zone name.""" return { "id": zone.id, "name": zone.name, } def dehydrate_nodegroup(self, nodegroup): """Return the nodegroup name.""" if nodegroup is None: return None else: return { "id": nodegroup.id, "uuid": nodegroup.uuid, "name": nodegroup.name, "cluster_name": nodegroup.cluster_name, } def dehydrate_routers(self, routers): """Return list of routers.""" if routers is None: return [] return [ "%s" % router for router in routers ] def dehydrate_power_parameters(self, power_parameters): """Return power_parameters None if empty.""" if power_parameters == '': return None else: return power_parameters def dehydrate(self, obj, data, for_list=False): """Add extra fields to `data`.""" data["fqdn"] = obj.fqdn data["status"] = obj.display_status() data["actions"] = compile_node_actions(obj, self.user).keys() data["memory"] = obj.display_memory() data["extra_macs"] = [ "%s" % mac_address for mac_address in obj.get_extra_macs() ] boot_interface = obj.get_boot_interface() if boot_interface is not None: data["pxe_mac"] = "%s" % boot_interface.mac_address data["pxe_mac_vendor"] = obj.get_pxe_mac_vendor() else: data["pxe_mac"] = data["pxe_mac_vendor"] = "" blockdevices = self.get_blockdevices_for(obj) physical_blockdevices = [ blockdevice for blockdevice in blockdevices if isinstance(blockdevice, PhysicalBlockDevice) ] data["physical_disk_count"] = len(physical_blockdevices) data["storage"] = "%3.1f" % ( sum([ blockdevice.size for blockdevice in physical_blockdevices ]) / (1000 ** 3)) data["storage_tags"] = self.get_all_storage_tags(blockdevices) subnets = self.get_all_subnets(obj) data["subnets"] = [subnet.cidr for subnet in subnets] data["fabrics"] = self.get_all_fabric_names(obj, subnets) data["spaces"] = self.get_all_space_names(subnets) data["tags"] = [ tag.name for tag in obj.tags.all() ] if not for_list: data["show_os_info"] = self.dehydrate_show_os_info(obj) data["osystem"] = obj.get_osystem() data["distro_series"] = obj.get_distro_series() data["hwe_kernel"] = make_hwe_kernel_ui_text(obj.hwe_kernel) # Network 
data["interfaces"] = [ self.dehydrate_interface(interface, obj) for interface in obj.interface_set.all().order_by('name') ] data["on_network"] = obj.on_network() # Devices devices = [ self.dehydrate_device(device) for device in obj.children.all() ] data["devices"] = sorted( devices, key=itemgetter("fqdn")) # Storage data["disks"] = [ self.dehydrate_blockdevice(blockdevice, obj) for blockdevice in blockdevices ] data["disks"] = data["disks"] + [ self.dehydrate_volume_group(volume_group) for volume_group in VolumeGroup.objects.filter_by_node(obj) ] + [ self.dehydrate_cache_set(cache_set) for cache_set in CacheSet.objects.get_cache_sets_for_node(obj) ] data["disks"] = sorted(data["disks"], key=itemgetter("name")) data["supported_filesystems"] = [ {'key': key, 'ui': ui} for key, ui in FILESYSTEM_FORMAT_TYPE_CHOICES ] data["storage_layout_issues"] = obj.storage_layout_issues() # Events data["events"] = self.dehydrate_events(obj) # Machine output data = self.dehydrate_summary_output(obj, data) data["commissioning_results"] = self.dehydrate_node_results( obj, RESULT_TYPE.COMMISSIONING) data["installation_results"] = self.dehydrate_node_results( obj, RESULT_TYPE.INSTALLATION) # Third party drivers if Config.objects.get_config('enable_third_party_drivers'): driver = get_third_party_driver(obj) if "module" in driver and "comment" in driver: data["third_party_driver"] = { "module": driver["module"], "comment": driver["comment"], } return data def dehydrate_show_os_info(self, obj): """Return True if OS information should show in the UI.""" return ( obj.status == NODE_STATUS.DEPLOYING or obj.status == NODE_STATUS.FAILED_DEPLOYMENT or obj.status == NODE_STATUS.DEPLOYED or obj.status == NODE_STATUS.RELEASING or obj.status == NODE_STATUS.FAILED_RELEASING or obj.status == NODE_STATUS.DISK_ERASING or obj.status == NODE_STATUS.FAILED_DISK_ERASING) def dehydrate_device(self, device): """Return the `Device` formatted for JSON encoding.""" return { "fqdn": device.fqdn, "interfaces": [ self.dehydrate_interface(interface, device) for interface in device.interface_set.all().order_by('id') ], } def dehydrate_blockdevice(self, blockdevice, obj): """Return `BlockDevice` formatted for JSON encoding.""" # model and serial are currently only avalible on physical block # devices if isinstance(blockdevice, PhysicalBlockDevice): model = blockdevice.model serial = blockdevice.serial else: serial = model = "" partition_table = blockdevice.get_partitiontable() if partition_table is not None: partition_table_type = partition_table.table_type else: partition_table_type = "" is_boot = blockdevice.id == obj.get_boot_disk().id data = { "id": blockdevice.id, "is_boot": is_boot, "name": blockdevice.get_name(), "tags": blockdevice.tags, "type": blockdevice.type, "path": blockdevice.path, "size": blockdevice.size, "size_human": human_readable_bytes(blockdevice.size), "used_size": blockdevice.used_size, "used_size_human": human_readable_bytes( blockdevice.used_size), "available_size": blockdevice.available_size, "available_size_human": human_readable_bytes( blockdevice.available_size), "block_size": blockdevice.block_size, "model": model, "serial": serial, "partition_table_type": partition_table_type, "used_for": blockdevice.used_for, "filesystem": self.dehydrate_filesystem( blockdevice.get_effective_filesystem()), "partitions": self.dehydrate_partitions( blockdevice.get_partitiontable()), } if isinstance(blockdevice, VirtualBlockDevice): data["parent"] = { "id": blockdevice.filesystem_group.id, "uuid": 
blockdevice.filesystem_group.uuid, "type": blockdevice.filesystem_group.group_type, } return data def dehydrate_volume_group(self, volume_group): """Return `VolumeGroup` formatted for JSON encoding.""" size = volume_group.get_size() available_size = volume_group.get_lvm_free_space() used_size = volume_group.get_lvm_allocated_size() return { "id": volume_group.id, "name": volume_group.name, "tags": [], "type": volume_group.group_type, "path": "", "size": size, "size_human": human_readable_bytes(size), "used_size": used_size, "used_size_human": human_readable_bytes(used_size), "available_size": available_size, "available_size_human": human_readable_bytes(available_size), "block_size": volume_group.get_virtual_block_device_block_size(), "model": "", "serial": "", "partition_table_type": "", "used_for": "volume group", "filesystem": None, "partitions": None, } def dehydrate_cache_set(self, cache_set): """Return `CacheSet` formatted for JSON encoding.""" device = cache_set.get_device() used_size = device.get_used_size() available_size = device.get_available_size() bcache_devices = sorted([ bcache.name for bcache in cache_set.filesystemgroup_set.all() ]) return { "id": cache_set.id, "name": cache_set.name, "tags": [], "type": "cache-set", "path": "", "size": device.size, "size_human": human_readable_bytes(device.size), "used_size": used_size, "used_size_human": human_readable_bytes(used_size), "available_size": available_size, "available_size_human": human_readable_bytes(available_size), "block_size": device.get_block_size(), "model": "", "serial": "", "partition_table_type": "", "used_for": ", ".join(bcache_devices), "filesystem": None, "partitions": None, } def dehydrate_partitions(self, partition_table): """Return `PartitionTable` formatted for JSON encoding.""" if partition_table is None: return None partitions = [] for partition in partition_table.partitions.all(): partitions.append({ "filesystem": self.dehydrate_filesystem( partition.get_effective_filesystem()), "name": partition.get_name(), "path": partition.path, "type": partition.type, "id": partition.id, "size": partition.size, "size_human": human_readable_bytes(partition.size), "used_for": partition.used_for, }) return partitions def dehydrate_filesystem(self, filesystem): """Return `Filesystem` formatted for JSON encoding.""" if filesystem is None: return None return { "label": filesystem.label, "mount_point": filesystem.mount_point, "fstype": filesystem.fstype, "is_format_fstype": ( filesystem.fstype in FILESYSTEM_FORMAT_TYPE_CHOICES_DICT), } def dehydrate_interface(self, interface, obj): """Dehydrate a `interface` into a interface definition.""" # Sort the links by ID that way they show up in the same order in # the UI. links = sorted(interface.get_links(), key=itemgetter("id")) for link in links: # Replace the subnet object with the subnet_id. The client will # use this information to pull the subnet information from the # websocket. subnet = link.pop("subnet", None) if subnet is not None: link["subnet_id"] = subnet.id data = { "id": interface.id, "type": interface.type, "name": interface.get_name(), "enabled": interface.is_enabled(), "is_boot": interface == obj.boot_interface, "mac_address": "%s" % interface.mac_address, "vlan_id": interface.vlan_id, "parents": [ nic.id for nic in interface.parents.all() ], "children": [ nic.child.id for nic in interface.children_relationships.all() ], "links": links, } # When the node is commissioning display the discovered IP address for # this interface. 
This will only be shown on interfaces that are # connected to a MAAS managed subnet. if obj.status == NODE_STATUS.COMMISSIONING: discovereds = interface.get_discovered() if discovereds is not None: for discovered in discovereds: # Replace the subnet object with the subnet_id. The client # will use this information to pull the subnet information # from the websocket. discovered["subnet_id"] = discovered.pop("subnet").id data["discovered"] = discovereds return data def dehydrate_summary_output(self, obj, data): """Dehydrate the machine summary output.""" # Produce a "clean" composite details document. probed_details = merge_details_cleanly( get_single_probed_details(obj.system_id)) # We check here if there's something to show instead of after # the call to get_single_probed_details() because here the # details will be guaranteed well-formed. if len(probed_details.xpath('/*/*')) == 0: data['summary_xml'] = None data['summary_yaml'] = None else: data['summary_xml'] = etree.tostring( probed_details, encoding=unicode, pretty_print=True) data['summary_yaml'] = XMLToYAML( etree.tostring( probed_details, encoding=unicode, pretty_print=True)).convert() return data def dehydrate_node_results(self, obj, result_type): """Dehydrate node results with the given `result_type`.""" return [ { "id": result.id, "result": result.script_result, "name": result.name, "data": result.data, "line_count": len(result.data.splitlines()), "created": dehydrate_datetime(result.created), } for result in NodeResult.objects.filter( node=obj, result_type=result_type) ] def dehydrate_events(self, obj): """Dehydrate the node events. The latests 50 not including DEBUG events will be dehydrated. The `EventsHandler` needs to be used if more are required. """ events = ( Event.objects.filter(node=obj) .exclude(type__level=logging.DEBUG) .select_related("type") .order_by('-id')[:50]) return [ { "id": event.id, "type": { "id": event.type.id, "name": event.type.name, "description": event.type.description, "level": dehydrate_event_type_level(event.type.level), }, "description": event.description, "created": dehydrate_datetime(event.created), } for event in events ] def get_all_storage_tags(self, blockdevices): """Return list of all storage tags in `blockdevices`.""" tags = set() for blockdevice in blockdevices: tags = tags.union(blockdevice.tags) return list(tags) def get_all_subnets(self, obj): subnets = set() for interface in obj.interface_set.all(): for ip_address in interface.ip_addresses.all(): if ip_address.subnet is not None: subnets.add(ip_address.subnet) return list(subnets) def get_all_fabric_names(self, obj, subnets): fabric_names = set() for interface in obj.interface_set.all(): fabric_names.add(interface.vlan.fabric.name) for subnet in subnets: fabric_names.add(subnet.vlan.fabric.name) return list(fabric_names) def get_all_space_names(self, subnets): space_names = set() for subnet in subnets: space_names.add(subnet.space.name) return list(space_names) def get_blockdevices_for(self, obj): """Return only `BlockDevice`s using the prefetched query.""" return [ blockdevice.actual_instance for blockdevice in obj.blockdevice_set.all() ] def get_object(self, params): """Get object by using the `pk` in `params`.""" obj = super(NodeHandler, self).get_object(params) if self.user.is_superuser: return obj if obj.owner is None or obj.owner == self.user: return obj raise HandlerDoesNotExistError(params[self._meta.pk]) def get_mac_addresses(self, data): """Convert the given `data` into a list of mac addresses. 
This is used by the create method and the hydrate method. The `pxe_mac` will always be the first entry in the list. """ macs = data.get("extra_macs", []) if "pxe_mac" in data: macs.insert(0, data["pxe_mac"]) return macs def get_form_class(self, action): """Return the form class used for `action`.""" if action in ("create", "update"): return AdminNodeWithMACAddressesForm else: raise HandlerError("Unknown action: %s" % action) def preprocess_form(self, action, params): """Process the `params` before passing the data to the form.""" new_params = {} # Only copy the allowed fields into `new_params` to be passed into # the form. new_params["mac_addresses"] = self.get_mac_addresses(params) new_params["hostname"] = params.get("hostname") new_params["architecture"] = params.get("architecture") new_params["power_type"] = params.get("power_type") if "zone" in params: new_params["zone"] = params["zone"]["name"] if "nodegroup" in params: new_params["nodegroup"] = params["nodegroup"]["uuid"] if "min_hwe_kernel" in params: new_params["min_hwe_kernel"] = params["min_hwe_kernel"] # Clean up any fields that have a None value. new_params = { key: value for key, value in new_params.viewitems() if value is not None } return super(NodeHandler, self).preprocess_form(action, new_params) def create(self, params): """Create the object from params.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() # Create the object, then save the power parameters because the # form will not save this information. data = super(NodeHandler, self).create(params) node_obj = Node.objects.get(system_id=data['system_id']) node_obj.power_parameters = params.get("power_parameters", {}) node_obj.save() # Start the commissioning process right away, which has the # desired side effect of initializing the node's power state. node_obj.start_commissioning(self.user) return self.full_dehydrate(node_obj) def update(self, params): """Update the object from params.""" # Only admin users can perform update. if not self.user.is_superuser: raise HandlerPermissionError() # Update the node with the form. The form will not update the # nodegroup or power_parameters, so we perform that logic here. data = super(NodeHandler, self).update(params) node_obj = Node.objects.get(system_id=data['system_id']) node_obj.nodegroup = NodeGroup.objects.get( uuid=params['nodegroup']['uuid']) node_obj.power_parameters = params.get("power_parameters") if node_obj.power_parameters is None: node_obj.power_parameters = {} # Update the tags for the node and disks.
self.update_tags(node_obj, params['tags']) node_obj.save() return self.full_dehydrate(node_obj) def update_filesystem(self, params): node = self.get_object(params) block_id = params.get('block_id') partition_id = params.get('partition_id') fstype = params.get('fstype') mount_point = params.get('mount_point') if node.status not in [NODE_STATUS.ALLOCATED, NODE_STATUS.READY]: raise HandlerError( "Node must be allocated or ready to edit storage") if not self.user.is_superuser and node.owner_id != self.user.id: raise HandlerPermissionError() if partition_id: self.update_partition_filesystem( node, block_id, partition_id, fstype, mount_point) else: self.update_blockdevice_filesystem( node, block_id, fstype, mount_point) def update_partition_filesystem( self, node, block_id, partition_id, fstype, mount_point): partition = Partition.objects.get( id=partition_id, partition_table__block_device__node=node) fs = partition.get_effective_filesystem() if not fstype: if fs: fs.delete() return if fs is None or fstype != fs.fstype: form = FormatPartitionForm(partition, {'fstype': fstype}) if not form.is_valid(): raise HandlerError(form.errors) form.save() fs = partition.get_effective_filesystem() if mount_point != fs.mount_point: if not mount_point: fs.mount_point = None fs.save() else: form = MountPartitionForm( partition, {'mount_point': mount_point}) if not form.is_valid(): raise HandlerError(form.errors) else: form.save() def update_blockdevice_filesystem( self, node, block_id, fstype, mount_point): blockdevice = BlockDevice.objects.get(id=block_id, node=node) fs = blockdevice.get_effective_filesystem() if not fstype: if fs: fs.delete() return if fs is None or fstype != fs.fstype: form = FormatBlockDeviceForm(blockdevice, {'fstype': fstype}) if not form.is_valid(): raise HandlerError(form.errors) form.save() fs = blockdevice.get_effective_filesystem() if mount_point != fs.mount_point: if not mount_point: fs.mount_point = None fs.save() else: form = MountBlockDeviceForm( blockdevice, {'mount_point': mount_point}) if not form.is_valid(): raise HandlerError(form.errors) else: form.save() def update_tags(self, node_obj, tags): # Loop through the node's current tags. If the tag exists in `tags` then # nothing needs to be done, so it is removed from `tags`. If it does not # exist then the tag was removed from the node and should be removed # from the node's set of tags. for tag in node_obj.tags.all(): if tag.name not in tags: node_obj.tags.remove(tag) else: tags.remove(tag.name) # All the tags remaining in `tags` are tags that are not yet linked to # the node. Get or create each tag and add the node to the tag's node set. for tag_name in tags: tag_obj, _ = Tag.objects.get_or_create(name=tag_name) if tag_obj.is_defined: raise HandlerError( "Cannot add tag %s to node because it has a " "definition." % tag_name) tag_obj.node_set.add(node_obj) tag_obj.save() def update_disk_tags(self, params): """Update all the tags on a disk.""" node = self.get_object(params) disk_obj = BlockDevice.objects.get(id=params['block_id'], node=node) disk_obj.tags = params['tags'] disk_obj.save(update_fields=['tags']) def update_disk(self, params): """Update disk information.""" # Only admin users can perform update.
if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) device = BlockDevice.objects.get( id=params['block_id'], node=node).actual_instance if device.type == 'physical': form = UpdatePhysicalBlockDeviceForm( instance=device, data=params) elif device.type == 'virtual': form = UpdateVirtualBlockDeviceForm( instance=device, data=params) else: raise HandlerError( 'Cannot update block device of type %s' % device.type) if not form.is_valid(): raise HandlerError(form.errors) else: form.save() def delete_disk(self, params): # Only admin users can perform delete. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) block_id = params.get('block_id') if block_id is not None: block_device = BlockDevice.objects.get(id=block_id, node=node) block_device.delete() def delete_partition(self, params): # Only admin users can perform delete. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) partition_id = params.get('partition_id') if partition_id is not None: partition = Partition.objects.get( id=partition_id, partition_table__block_device__node=node) partition.delete() def delete_volume_group(self, params): # Only admin users can perform delete. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) volume_group_id = params.get('volume_group_id') if volume_group_id is not None: volume_group = VolumeGroup.objects.get(id=volume_group_id) if volume_group.get_node() != node: raise VolumeGroup.DoesNotExist() volume_group.delete() def delete_cache_set(self, params): # Only admin users can perform delete. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) cache_set_id = params.get('cache_set_id') if cache_set_id is not None: cache_set = CacheSet.objects.get(id=cache_set_id) if cache_set.get_node() != node: raise CacheSet.DoesNotExist() cache_set.delete() def create_partition(self, params): """Create a partition.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) disk_obj = BlockDevice.objects.get(id=params['block_id'], node=node) form = AddPartitionForm( disk_obj, { 'size': params['partition_size'], }) if not form.is_valid(): raise HandlerError(form.errors) else: partition = form.save() if 'fstype' in params: self.update_partition_filesystem( node, disk_obj.id, partition.id, params.get("fstype"), params.get("mount_point")) def create_cache_set(self, params): """Create a cache set.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) block_id = params.get('block_id') partition_id = params.get('partition_id') data = {} if partition_id is not None: data["cache_partition"] = partition_id elif block_id is not None: data["cache_device"] = block_id else: raise HandlerError( "Either block_id or partition_id is required.") form = CreateCacheSetForm(node=node, data=data) if not form.is_valid(): raise HandlerError(form.errors) else: form.save() def create_bcache(self, params): """Create a bcache.""" # Only admin users can perform create.
if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) block_id = params.get('block_id') partition_id = params.get('partition_id') data = { "name": params["name"], "cache_set": params["cache_set"], "cache_mode": params["cache_mode"], } if partition_id is not None: data["backing_partition"] = partition_id elif block_id is not None: data["backing_device"] = block_id else: raise HandlerError( "Either block_id or partition_id is required.") form = CreateBcacheForm(node=node, data=data) if not form.is_valid(): raise HandlerError(form.errors) else: bcache = form.save() if 'fstype' in params: self.update_blockdevice_filesystem( node, bcache.virtual_device.id, params.get("fstype"), params.get("mount_point")) def create_raid(self, params): """Create a RAID.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) form = CreateRaidForm(node=node, data=params) if not form.is_valid(): raise HandlerError(form.errors) else: raid = form.save() if 'fstype' in params: self.update_blockdevice_filesystem( node, raid.virtual_device.id, params.get("fstype"), params.get("mount_point")) def create_volume_group(self, params): """Create a volume group.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) form = CreateVolumeGroupForm(node=node, data=params) if not form.is_valid(): raise HandlerError(form.errors) else: form.save() def create_logical_volume(self, params): """Create a logical volume.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) volume_group = VolumeGroup.objects.get(id=params['volume_group_id']) if volume_group.get_node() != node: raise VolumeGroup.DoesNotExist() form = CreateLogicalVolumeForm( volume_group, { 'name': params['name'], 'size': params['size'], }) if not form.is_valid(): raise HandlerError(form.errors) else: logical_volume = form.save() if 'fstype' in params: self.update_blockdevice_filesystem( node, logical_volume.id, params.get("fstype"), params.get("mount_point")) def set_boot_disk(self, params): """Set the disk as the boot disk.""" # Only admin users can set the boot disk. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) device = BlockDevice.objects.get( id=params['block_id'], node=node).actual_instance if device.type != 'physical': raise HandlerError( "Only a physical disk can be set as the boot disk.") node.boot_disk = device node.save() def action(self, params): """Perform the action on the object.""" obj = self.get_object(params) action_name = params.get("action") actions = compile_node_actions(obj, self.user) action = actions.get(action_name) if action is None: raise NodeActionError( "%s action is not available for this node."
% action_name) extra_params = params.get("extra", {}) return action.execute(**extra_params) def _create_link_on_interface(self, interface, params): """Create a link on a new interface.""" mode = params.get("mode", None) subnet_id = params.get("subnet", None) if mode is not None: if mode != INTERFACE_LINK_TYPE.LINK_UP: link_form = InterfaceLinkForm(instance=interface, data=params) if link_form.is_valid(): link_form.save() else: raise ValidationError(link_form.errors) elif subnet_id is not None: link_ip = interface.ip_addresses.get( alloc_type=IPADDRESS_TYPE.STICKY, ip__isnull=True) link_ip.subnet = Subnet.objects.get(id=subnet_id) link_ip.save() def create_physical(self, params): """Create physical interface.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) form = PhysicalInterfaceForm(node=node, data=params) if form.is_valid(): interface = form.save() self._create_link_on_interface(interface, params) else: raise ValidationError(form.errors) def create_vlan(self, params): """Create VLAN interface.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) params['parents'] = [params.pop('parent')] form = VLANInterfaceForm(node=node, data=params) if form.is_valid(): interface = form.save() self._create_link_on_interface(interface, params) else: raise ValidationError(form.errors) def create_bond(self, params): """Create bond interface.""" # Only admin users can perform create. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) form = BondInterfaceForm(node=node, data=params) if form.is_valid(): interface = form.save() self._create_link_on_interface(interface, params) else: raise ValidationError(form.errors) def update_interface(self, params): """Update the interface.""" # Only admin users can perform update. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) interface = Interface.objects.get(node=node, id=params["interface_id"]) interface_form = InterfaceForm.get_interface_form(interface.type) form = interface_form(instance=interface, data=params) if form.is_valid(): form.save() else: raise ValidationError(form.errors) def delete_interface(self, params): """Delete the interface.""" # Only admin users can perform delete. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) interface = Interface.objects.get(node=node, id=params["interface_id"]) interface.delete() def link_subnet(self, params): """Create or update the link.""" # Only admin users can perform update. if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) interface = Interface.objects.get(node=node, id=params["interface_id"]) subnet = None if "subnet" in params: subnet = Subnet.objects.get(id=params["subnet"]) if "link_id" in params: # We are updating an already existing link. interface.update_link_by_id( params["link_id"], params["mode"], subnet, ip_address=params.get("ip_address", None)) else: # We are creating a new link. interface.link_subnet( params["mode"], subnet, ip_address=params.get("ip_address", None)) def unlink_subnet(self, params): """Delete the link.""" # Only admin users can perform unlink. 
if not self.user.is_superuser: raise HandlerPermissionError() node = self.get_object(params) interface = Interface.objects.get(node=node, id=params["interface_id"]) interface.unlink_subnet_by_id(params["link_id"]) @asynchronous @inlineCallbacks def check_power(self, params): """Check the power state of the node.""" # XXX: This is largely the same function as # update_power_state_of_node. @transactional def get_node_cluster_and_power_info(): obj = self.get_object(params) if obj.installable: node_info = obj.system_id, obj.hostname nodegroup_info = obj.nodegroup.cluster_name, obj.nodegroup.uuid try: power_info = obj.get_effective_power_info() except UnknownPowerType: return node_info, nodegroup_info, None else: return node_info, nodegroup_info, power_info else: raise HandlerError( "%s: Unable to query power state; not an " "installable node" % obj.hostname) @transactional def update_power_state(state): obj = self.get_object(params) obj.update_power_state(state) # Grab info about the node, its cluster, and its power parameters from # the database. If it can't be queried we can return early, but first # update the node's power state with what we know we don't know. node_info, cluster_info, power_info = ( yield deferToDatabase(get_node_cluster_and_power_info)) if power_info is None or not power_info.can_be_queried: yield deferToDatabase(update_power_state, "unknown") returnValue("unknown") # Get a client to talk to the node's cluster. If we're not connected # we can return early, albeit with an exception. cluster_name, cluster_uuid = cluster_info try: client = yield getClientFor(cluster_uuid) except NoConnectionsAvailable: maaslog.error( "Unable to get RPC connection for cluster '%s' (%s)", cluster_name, cluster_uuid) raise HandlerError("Unable to connect to cluster controller") # Query the power state via the node's cluster. node_id, node_hostname = node_info try: response = yield deferWithTimeout( POWER_QUERY_TIMEOUT, client, PowerQuery, system_id=node_id, hostname=node_hostname, power_type=power_info.power_type, context=power_info.power_parameters) except CancelledError: # We got fed up waiting. The query may later discover the node's # power state but by then we won't be paying attention. maaslog.error("%s: Timed-out querying power.", node_hostname) state = "error" except PowerActionFail: # We discard the reason. That will have been logged elsewhere. # Here we're signalling something very simple back to the user. state = "error" except NotImplementedError: # The power driver has declared that it doesn't after all know how # to query the power for this node, so "unknown" seems appropriate. state = "unknown" else: state = response["state"] yield deferToDatabase(update_power_state, state) returnValue(state) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/space.py0000644000000000000000000000250313056115004023230 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
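# Editor's note: ``check_power`` in node.py above illustrates the pattern
# used by asynchronous handler methods: ORM work is wrapped in
# ``@transactional`` functions and moved off the reactor with
# ``deferToDatabase``, while RPC happens inside the ``inlineCallbacks``
# generator. A minimal sketch of that pattern, assuming an illustrative
# ``example_state`` method that is not part of MAAS:
#
#     @asynchronous
#     @inlineCallbacks
#     def example_state(self, params):
#         @transactional
#         def get_hostname():
#             # Runs in a database thread, inside a transaction.
#             return self.get_object(params).hostname
#         hostname = yield deferToDatabase(get_hostname)
#         returnValue(hostname)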
"""The space handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "SpaceHandler", ] from maasserver.models.space import Space from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) class SpaceHandler(TimestampedModelHandler): class Meta: queryset = ( Space.objects.all().prefetch_related( "subnet_set__staticipaddress_set__interface_set")) pk = 'id' allowed_methods = ['list', 'get', 'set_active'] listen_channels = [ "space", ] def dehydrate(self, obj, data, for_list=False): data["name"] = obj.get_name() data["subnet_ids"] = [ subnet.id for subnet in obj.subnet_set.all() ] data["nodes_count"] = len({ interface.node_id for subnet in obj.subnet_set.all() for ipaddress in subnet.staticipaddress_set.all() for interface in ipaddress.interface_set.all() if interface.node_id is not None }) return data maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/subnet.py0000644000000000000000000000301413056115004023433 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The subnet handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "SubnetHandler", ] from maasserver.models.subnet import Subnet from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) from provisioningserver.utils.network import IPRangeStatistics class SubnetHandler(TimestampedModelHandler): class Meta: queryset = ( Subnet.objects.all() .select_related('space', 'vlan') .prefetch_related('vlan__fabric') .prefetch_related('nodegroupinterface_set__nodegroup') .prefetch_related('staticipaddress_set__user') .prefetch_related( 'staticipaddress_set__interface_set__node')) pk = 'id' allowed_methods = ['list', 'get', 'set_active'] listen_channels = [ "subnet", ] def dehydrate(self, subnet, data, for_list=False): full_range = subnet.get_iprange_usage() metadata = IPRangeStatistics(full_range) data['statistics'] = metadata.render_json() if not for_list: data["ip_addresses"] = subnet.render_json_for_related_ips( with_username=True, with_node_summary=True) return data maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tag.py0000644000000000000000000000130513056115004022707 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The tag handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "TagHandler", ] from maasserver.models.tag import Tag from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) class TagHandler(TimestampedModelHandler): class Meta: queryset = Tag.objects.all() pk = 'id' allowed_methods = ['list', 'get'] listen_channels = [ "tag", ] maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/0000755000000000000000000000000013056115004022725 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/timestampedmodel.py0000644000000000000000000000237713056115004025503 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""The nodes handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "dehydrate_datetime", "TimestampedModelHandler", ] from maasserver.websockets.base import Handler DATETIME_FORMAT = "%a, %d %b. %Y %H:%M:%S" def dehydrate_datetime(datetime): """Convert the `datetime` to string with `DATETIME_FORMAT`.""" return datetime.strftime(DATETIME_FORMAT) class TimestampedModelHandler(Handler): class Meta: abstract = True def __new__(cls, *args, **kwargs): cls = super(TimestampedModelHandler, cls).__new__(cls, *args, **kwargs) if cls._meta.non_changeable is None: cls._meta.non_changeable = [] for field in ["created", "updated"]: if field not in cls._meta.non_changeable: cls._meta.non_changeable.append(field) return cls def dehydrate_created(self, datetime): return dehydrate_datetime(datetime) def dehydrate_updated(self, datetime): return dehydrate_datetime(datetime) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/user.py0000644000000000000000000000426713056115004023124 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The user handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "UserHandler", ] from django.contrib.auth.models import User from maasserver.models.user import SYSTEM_USERS from maasserver.websockets.base import ( Handler, HandlerDoesNotExistError, ) class UserHandler(Handler): class Meta: queryset = User.objects.filter(is_active=True) pk = 'id' allowed_methods = ['list', 'get', 'auth_user'] fields = [ "id", "username", "first_name", "last_name", "email", "is_superuser", "sshkeys_count", ] listen_channels = [ "user", ] def get_queryset(self): """Return `QuerySet` for users only viewable by `user`.""" users = super(UserHandler, self).get_queryset() if self.user.is_superuser: # Super users can view all users, except for the built-in users return users.exclude(username__in=SYSTEM_USERS) else: # Standard users can only view their self. We filter by username # so a queryset is still returned instead of just a list with # only the user in it. return users.filter(username=self.user.username) def get_object(self, params): """Get object by using the `pk` in `params`.""" obj = super(UserHandler, self).get_object(params) if self.user.is_superuser: # Super user can get any user. return obj elif obj == self.user: # Standard user can only get self. return obj else: raise HandlerDoesNotExistError(params[self._meta.pk]) def dehydrate(self, obj, data, for_list=False): data["sshkeys_count"] = obj.sshkey_set.count() return data def auth_user(self, params): """Return the authenticated user.""" return self.full_dehydrate(self.user) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/vlan.py0000644000000000000000000000233013056115004023073 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""The vlan handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "VLANHandler", ] from maasserver.models.vlan import VLAN from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) class VLANHandler(TimestampedModelHandler): class Meta: queryset = ( VLAN.objects.all() .prefetch_related("interface_set") .prefetch_related("subnet_set")) pk = 'id' allowed_methods = ['list', 'get', 'set_active'] listen_channels = [ "vlan", ] def dehydrate(self, obj, data, for_list=False): data["name"] = obj.get_name() data["subnet_ids"] = [ subnet.id for subnet in obj.subnet_set.all() ] data["nodes_count"] = len({ interface.node_id for interface in obj.interface_set.all() if interface.node_id is not None }) return data maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/zone.py0000644000000000000000000000133213056115004023107 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The zone handler for the WebSocket connection.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ZoneHandler", ] from maasserver.models.zone import Zone from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) class ZoneHandler(TimestampedModelHandler): class Meta: queryset = Zone.objects.all() pk = 'id' allowed_methods = ['list', 'get', 'set_active'] listen_channels = [ "zone", ] maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/__init__.py0000644000000000000000000000000013056115004025024 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_cluster.py0000644000000000000000000001130713056115004026021 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.websockets.handlers.cluster`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.clusterrpc.power_parameters import ( get_all_power_types_from_clusters, ) from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.cluster import ( ClusterHandler, dehydrate_ip_address, ) from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maastesting.djangotestcase import count_queries class TestClusterHandler(MAASServerTestCase): def dehydrate_interface(self, interface): """Dehydrate a `NodeGroupInterface`.""" return { "id": interface.id, "ip": "%s" % interface.ip, "name": interface.name, "management": interface.management, "interface": interface.interface, "subnet_mask": dehydrate_ip_address(interface.subnet_mask), "broadcast_ip": dehydrate_ip_address(interface.broadcast_ip), "router_ip": dehydrate_ip_address(interface.router_ip), "dynamic_range": { "low": dehydrate_ip_address(interface.ip_range_low), "high": dehydrate_ip_address(interface.ip_range_high), }, "static_range": { "low": dehydrate_ip_address( interface.static_ip_range_low), "high": dehydrate_ip_address( interface.static_ip_range_high), }, "foreign_dhcp_ip": dehydrate_ip_address( interface.foreign_dhcp_ip), "network": ( "%s" % interface.network if interface.network is not None else None), } def dehydrate_cluster(self, cluster): power_types = get_all_power_types_from_clusters(nodegroups=[cluster]) data = { "id": cluster.id, "cluster_name": cluster.cluster_name, "name": cluster.name, "status": cluster.status, "uuid": cluster.uuid, "default_disable_ipv4": cluster.default_disable_ipv4, "connected": cluster.is_connected(), "state": cluster.get_state(), "power_types": power_types, "updated": dehydrate_datetime(cluster.updated), "created": dehydrate_datetime(cluster.created), "interfaces": [ self.dehydrate_interface(interface) for interface in cluster.nodegroupinterface_set.all() ], } return data def make_nodegroup(self, number): """Create `number` of new nodegroups.""" for counter in range(number): nodegroup = factory.make_NodeGroup() for _ in range(3): factory.make_NodeGroupInterface(nodegroup) def test_get(self): user = factory.make_User() handler = ClusterHandler(user, {}) nodegroup = factory.make_NodeGroup() for _ in range(3): factory.make_NodeGroupInterface(nodegroup) self.assertEquals( self.dehydrate_cluster(nodegroup), handler.get({"id": nodegroup.id})) def test_list(self): user = factory.make_User() handler = ClusterHandler(user, {}) nodegroup = factory.make_NodeGroup() for _ in range(3): factory.make_NodeGroupInterface(nodegroup) self.assertItemsEqual( [self.dehydrate_cluster(nodegroup)], handler.list({})) def test_list_num_queries_is_independent_of_num_clusters(self): user = factory.make_User() handler = ClusterHandler(user, {}) self.make_nodegroup(10) query_10_count, _ = count_queries(handler.list, {}) self.make_nodegroup(10) query_20_count, _ = count_queries(handler.list, {}) # This check is to notify the developer that a change was made that # affects the number of queries performed when doing a cluster listing. # It is important to keep this number as low as possible. A larger # number means regiond has to do more work slowing down its process # and slowing down the client waiting for the response. 
self.assertEquals( query_10_count, 3, "Number of queries has changed; make sure this is expected.") self.assertEquals( query_10_count, query_20_count, "Number of queries is not independent of the number of clusters.") maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_device.py0000644000000000000000000005124113056115004025600 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.device`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import re from maasserver.enum import ( IPADDRESS_TYPE, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.exceptions import NodeActionError from maasserver.fields import MAC from maasserver.forms import ( DeviceForm, DeviceWithMACsForm, ) from maasserver.models import interface as interface_module from maasserver.models.interface import Interface from maasserver.models.staticipaddress import StaticIPAddress from maasserver.node_action import compile_node_actions from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerError, HandlerValidationError, ) from maasserver.websockets.handlers.device import ( DEVICE_IP_ASSIGNMENT, DeviceHandler, ) from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maastesting.djangotestcase import count_queries from testtools import ExpectedException from testtools.matchers import ( Equals, Is, ) class TestDeviceHandler(MAASServerTestCase): def dehydrate_ip_assignment(self, device): boot_interface = device.get_boot_interface() if boot_interface is None: return "" ip_address = boot_interface.ip_addresses.exclude( alloc_type=IPADDRESS_TYPE.DISCOVERED).first() if ip_address is not None: if ip_address.alloc_type == IPADDRESS_TYPE.DHCP: return DEVICE_IP_ASSIGNMENT.DYNAMIC elif ip_address.subnet is None: return DEVICE_IP_ASSIGNMENT.EXTERNAL else: return DEVICE_IP_ASSIGNMENT.STATIC return DEVICE_IP_ASSIGNMENT.DYNAMIC def dehydrate_ip_address(self, device): """Return the IP address for the device.""" boot_interface = device.get_boot_interface() if boot_interface is None: return None static_ip = boot_interface.ip_addresses.exclude( alloc_type=IPADDRESS_TYPE.DISCOVERED).first() if static_ip is not None: ip = static_ip.get_ip() if ip: return "%s" % ip return None def dehydrate_device(self, node, user, for_list=False): boot_interface = node.get_boot_interface() data = { "actions": compile_node_actions(node, user).keys(), "created": dehydrate_datetime(node.created), "extra_macs": [ "%s" % mac_address.mac_address for mac_address in node.get_extra_macs() ], "fqdn": node.fqdn, "hostname": node.hostname, "primary_mac": ( "" if boot_interface is None else "%s" % boot_interface.mac_address), "parent": ( node.parent.system_id if node.parent is not None else None), "ip_address": self.dehydrate_ip_address(node), "ip_assignment": self.dehydrate_ip_assignment(node), "nodegroup": { "id": node.nodegroup.id, "uuid": node.nodegroup.uuid, "name": node.nodegroup.name, "cluster_name": node.nodegroup.cluster_name, }, "owner": "" if node.owner is None else node.owner.username, "swap_size": node.swap_size, "system_id": node.system_id, "tags": [ tag.name for tag in node.tags.all() ], "updated":
dehydrate_datetime(node.updated), "zone": { "id": node.zone.id, "name": node.zone.name, }, } if for_list: allowed_fields = DeviceHandler.Meta.list_fields + [ "actions", "fqdn", "extra_macs", "tags", "primary_mac", "ip_address", "ip_assignment", ] for key in data.keys(): if key not in allowed_fields: del data[key] return data def make_device_with_ip_address( self, nodegroup=None, ip_assignment=None, owner=None): """The `DEVICE_IP_ASSIGNMENT` is based on what data exists in the model for a device. This will set up the model to make sure the device will match `ip_assignment`.""" if nodegroup is None: nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) if ip_assignment is None: ip_assignment = factory.pick_enum(DEVICE_IP_ASSIGNMENT) if owner is None: owner = factory.make_User() device = factory.make_Node( nodegroup=nodegroup, installable=False, interface=True, owner=owner) interface = device.get_boot_interface() if ip_assignment == DEVICE_IP_ASSIGNMENT.EXTERNAL: subnet = factory.make_Subnet() factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.USER_RESERVED, ip=factory.pick_ip_in_network(subnet.get_ipnetwork()), subnet=subnet, user=owner) elif ip_assignment == DEVICE_IP_ASSIGNMENT.DYNAMIC: factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DHCP, ip="", interface=interface) else: self.patch_autospec(interface_module, "update_host_maps") subnet = factory.make_Subnet(vlan=interface.vlan) factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP, subnet=subnet) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip="", interface=interface, subnet=subnet) interface.claim_static_ips() return device def make_devices(self, nodegroup, number, owner=None): """Create `number` of new devices.""" for counter in range(number): self.make_device_with_ip_address(nodegroup=nodegroup, owner=owner) def test_get(self): owner = factory.make_User() handler = DeviceHandler(owner, {}) device = self.make_device_with_ip_address(owner=owner) self.assertEquals( self.dehydrate_device(device, owner), handler.get({"system_id": device.system_id})) def test_list(self): owner = factory.make_User() handler = DeviceHandler(owner, {}) device = self.make_device_with_ip_address(owner=owner) self.assertItemsEqual( [self.dehydrate_device(device, owner, for_list=True)], handler.list({})) def test_list_ignores_devices_with_parents(self): owner = factory.make_User() handler = DeviceHandler(owner, {}) device = self.make_device_with_ip_address(owner=owner) # Create a device with parent. node = factory.make_Node(owner=owner) device_with_parent = self.make_device_with_ip_address(owner=owner) device_with_parent.parent = node device_with_parent.save() self.assertItemsEqual( [self.dehydrate_device(device, owner, for_list=True)], handler.list({})) def test_list_ignores_nodes(self): owner = factory.make_User() handler = DeviceHandler(owner, {}) device = self.make_device_with_ip_address(owner=owner) # Create a node.
factory.make_Node(owner=owner) self.assertItemsEqual( [self.dehydrate_device(device, owner, for_list=True)], handler.list({})) def test_list_num_queries_is_independent_of_num_devices(self): self.patch(interface_module, "update_host_maps") owner = factory.make_User() handler = DeviceHandler(owner, {}) nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) self.make_devices(nodegroup, 10, owner=owner) query_10_count, _ = count_queries(handler.list, {}) self.make_devices(nodegroup, 10, owner=owner) query_20_count, _ = count_queries(handler.list, {}) # This check is to notify the developer that a change was made that # affects the number of queries performed when doing a node listing. # It is important to keep this number as low as possible. A larger # number means regiond has to do more work slowing down its process # and slowing down the client waiting for the response. self.assertEquals( query_10_count, 7, "Number of queries has changed; make sure this is expected.") self.assertEquals( query_10_count, query_20_count, "Number of queries is not independent of the number of nodes.") def test_list_returns_devices_only_viewable_by_user(self): self.patch(interface_module, "update_host_maps") user = factory.make_User() # Create another user. factory.make_User() device = self.make_device_with_ip_address(owner=user) # Create another device not owned by user. self.make_device_with_ip_address() handler = DeviceHandler(user, {}) self.assertItemsEqual([ self.dehydrate_device(device, user, for_list=True), ], handler.list({})) def test_get_object_returns_device_if_super_user(self): admin = factory.make_admin() owner = factory.make_User() device = self.make_device_with_ip_address(owner=owner) handler = DeviceHandler(admin, {}) self.assertEquals( device.system_id, handler.get_object({"system_id": device.system_id}).system_id) def test_get_object_returns_node_if_owner(self): owner = factory.make_User() device = self.make_device_with_ip_address(owner=owner) handler = DeviceHandler(owner, {}) self.assertEquals( device.system_id, handler.get_object({"system_id": device.system_id}).system_id) def test_get_object_raises_exception_if_owned_by_another_user(self): user = factory.make_User() device = self.make_device_with_ip_address() handler = DeviceHandler(user, {}) with ExpectedException(HandlerDoesNotExistError): handler.get_object({"system_id": device.system_id}) def test_get_form_class_returns_DeviceWithMACsForm_for_create(self): user = factory.make_User() handler = DeviceHandler(user, {}) self.assertIs(DeviceWithMACsForm, handler.get_form_class("create")) def test_get_form_class_returns_DeviceForm_for_update(self): user = factory.make_User() handler = DeviceHandler(user, {}) self.assertIs(DeviceForm, handler.get_form_class("update")) def test_get_form_class_raises_error_for_unknown_action(self): user = factory.make_User() handler = DeviceHandler(user, {}) self.assertRaises( HandlerError, handler.get_form_class, factory.make_name()) def test_create_raises_validation_error_for_missing_macs(self): user = factory.make_User() handler = DeviceHandler(user, {}) params = { "hostname": factory.make_name("hostname"), } with ExpectedException( HandlerValidationError, re.escape("{u'mac_addresses': [u'This field is required.']}")): handler.create(params) def test_create_creates_device_with_dynamic_ip_assignment(self): user = factory.make_User() handler = DeviceHandler(user, {}) mac = factory.make_mac_address() hostname = factory.make_name("hostname") created_device = handler.create({ "hostname": hostname,
"primary_mac": mac, "interfaces": [{ "mac": mac, "ip_assignment": DEVICE_IP_ASSIGNMENT.DYNAMIC, }], }) self.expectThat(created_device["hostname"], Equals(hostname)) self.expectThat(created_device["primary_mac"], Equals(mac)) self.expectThat(created_device["extra_macs"], Equals([])) self.expectThat( created_device["ip_assignment"], Equals(DEVICE_IP_ASSIGNMENT.DYNAMIC)) self.expectThat(created_device["ip_address"], Is(None)) self.expectThat(created_device["owner"], Equals(user.username)) def test_create_creates_device_with_external_ip_assignment(self): user = factory.make_User() handler = DeviceHandler(user, {}) mac = factory.make_mac_address() hostname = factory.make_name("hostname") ip_address = factory.make_ipv4_address() created_device = handler.create({ "hostname": hostname, "primary_mac": mac, "interfaces": [{ "mac": mac, "ip_assignment": DEVICE_IP_ASSIGNMENT.EXTERNAL, "ip_address": ip_address, }], }) self.expectThat( created_device["ip_assignment"], Equals(DEVICE_IP_ASSIGNMENT.EXTERNAL)) self.expectThat(created_device["ip_address"], Equals(ip_address)) self.expectThat( StaticIPAddress.objects.filter(ip=ip_address).count(), Equals(1), "StaticIPAddress was not created.") def test_create_creates_device_with_static_ip_assignment_implicit(self): self.patch(interface_module, "update_host_maps") user = factory.make_User() handler = DeviceHandler(user, {}) mac = factory.make_mac_address() hostname = factory.make_name("hostname") nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) nodegroup_interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) created_device = handler.create({ "hostname": hostname, "primary_mac": mac, "interfaces": [{ "mac": mac, "ip_assignment": DEVICE_IP_ASSIGNMENT.STATIC, "interface": nodegroup_interface.id, }], }) self.expectThat( created_device["ip_assignment"], Equals(DEVICE_IP_ASSIGNMENT.STATIC)) static_interface = Interface.objects.get(mac_address=MAC(mac)) subnet = static_interface.ip_addresses.first().subnet linked_ngi = subnet.nodegroupinterface_set.first() self.expectThat( linked_ngi, Equals(nodegroup_interface), "Link between Interface and NodeGroupInterface was not created.") ip_address = created_device["ip_address"] self.expectThat( StaticIPAddress.objects.filter(ip=ip_address).count(), Equals(1), "StaticIPAddress was not created.") def test_create_creates_device_with_static_ip_assignment_explicit(self): self.patch(interface_module, "update_host_maps") user = factory.make_User() handler = DeviceHandler(user, {}) mac = factory.make_mac_address() hostname = factory.make_name("hostname") nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) nodegroup_interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) ip_address = nodegroup_interface.static_ip_range_low created_device = handler.create({ "hostname": hostname, "primary_mac": mac, "interfaces": [{ "mac": mac, "ip_assignment": DEVICE_IP_ASSIGNMENT.STATIC, "interface": nodegroup_interface.id, "ip_address": ip_address, }], }) self.expectThat( created_device["ip_assignment"], Equals(DEVICE_IP_ASSIGNMENT.STATIC)) self.expectThat(created_device["ip_address"], Equals(ip_address)) static_interface = Interface.objects.get(mac_address=MAC(mac)) subnet = static_interface.ip_addresses.first().subnet linked_ngi = subnet.nodegroupinterface_set.first() self.expectThat( linked_ngi, Equals(nodegroup_interface), "Link between Interface and NodeGroupInterface was not created.") self.expectThat( 
StaticIPAddress.objects.filter(ip=ip_address).count(), Equals(1), "StaticIPAddress was not created.") def test_create_creates_device_with_static_and_external_ip(self): self.patch(interface_module, "update_host_maps") user = factory.make_User() handler = DeviceHandler(user, {}) hostname = factory.make_name("hostname") nodegroup = factory.make_NodeGroup(status=NODEGROUP_STATUS.ENABLED) nodegroup_interface = factory.make_NodeGroupInterface( nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) mac_static = factory.make_mac_address() static_ip_address = nodegroup_interface.static_ip_range_low mac_external = factory.make_mac_address() external_ip_address = factory.make_ipv4_address() created_device = handler.create({ "hostname": hostname, "primary_mac": mac_static, "extra_macs": [ mac_external ], "interfaces": [ { "mac": mac_static, "ip_assignment": DEVICE_IP_ASSIGNMENT.STATIC, "interface": nodegroup_interface.id, "ip_address": static_ip_address, }, { "mac": mac_external, "ip_assignment": DEVICE_IP_ASSIGNMENT.EXTERNAL, "ip_address": external_ip_address, }, ], }) self.expectThat( created_device["primary_mac"], Equals(mac_static)) self.expectThat( created_device["extra_macs"], Equals([mac_external])) self.expectThat( created_device["ip_assignment"], Equals(DEVICE_IP_ASSIGNMENT.STATIC)) self.expectThat( created_device["ip_address"], Equals(static_ip_address)) static_interface = Interface.objects.get(mac_address=MAC(mac_static)) subnet = static_interface.ip_addresses.first().subnet linked_ngi = subnet.nodegroupinterface_set.first() self.expectThat( linked_ngi, Equals(nodegroup_interface), "Link between Interface and NodeGroupInterface was not created.") self.expectThat( StaticIPAddress.objects.filter(ip=static_ip_address).count(), Equals(1), "Static StaticIPAddress was not created.") self.expectThat( StaticIPAddress.objects.filter(ip=external_ip_address).count(), Equals(1), "External StaticIPAddress was not created.") def test_missing_action_raises_error(self): user = factory.make_User() device = self.make_device_with_ip_address(owner=user) handler = DeviceHandler(user, {}) with ExpectedException(NodeActionError): handler.action({"system_id": device.system_id}) def test_invalid_action_raises_error(self): user = factory.make_User() device = self.make_device_with_ip_address(owner=user) handler = DeviceHandler(user, {}) self.assertRaises( NodeActionError, handler.action, {"system_id": device.system_id, "action": "unknown"}) def test_not_available_action_raises_error(self): user = factory.make_User() device = self.make_device_with_ip_address(owner=user) handler = DeviceHandler(user, {}) self.assertRaises( NodeActionError, handler.action, {"system_id": device.system_id, "action": "unknown"}) def test_action_performs_action(self): user = factory.make_User() device = factory.make_Node(owner=user, installable=False) handler = DeviceHandler(user, {}) handler.action({"system_id": device.system_id, "action": "delete"}) self.assertIsNone(reload_object(device)) def test_action_performs_action_passing_extra(self): user = factory.make_User() device = self.make_device_with_ip_address(owner=user) zone = factory.make_Zone() handler = DeviceHandler(user, {}) handler.action({ "system_id": device.system_id, "action": "set-zone", "extra": { "zone_id": zone.id, }}) device = reload_object(device) self.expectThat(device.zone, Equals(zone)) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_event.py0000644000000000000000000002120013056115004025452 0ustar 00000000000000# Copyright 2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.event`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import datetime import random from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerPKError, ) from maasserver.websockets.handlers.event import ( dehydrate_event_type_level, EventHandler, ) from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maastesting.matchers import MockCalledOnceWith from mock import sentinel from testtools.matchers import ( Equals, Is, ) class TestEventHandler(MAASServerTestCase): def dehydrate_event(self, event): data = { "id": event.id, "node_id": event.node.id, "action": event.action, "description": event.description, "type": { "level": dehydrate_event_type_level(event.type.level), "name": event.type.name, "description": event.type.description, }, "updated": dehydrate_datetime(event.updated), "created": dehydrate_datetime(event.created), } return data def dehydrate_events(self, events): return [ self.dehydrate_event(event) for event in events ] def make_event_in_the_past(self, node, days_old): event = factory.make_Event(node=node) event.created -= datetime.timedelta(days_old) event.save() return event def test_list_raises_error_if_missing_node_id(self): user = factory.make_User() handler = EventHandler(user, {}) self.assertRaises(HandlerPKError, handler.list, {}) def test_list_raises_error_if_node_doesnt_exist(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() node.delete() self.assertRaises( HandlerDoesNotExistError, handler.list, {"node_id": node.id}) def test_list_places_node_id_in_cache(self): user = factory.make_User() cache = {} handler = EventHandler(user, cache) node = factory.make_Node() handler.list({"node_id": node.id}) self.assertEquals([node.id], cache["node_ids"]) def test_list_only_returns_events_for_node(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = [ factory.make_Event(node=node) for _ in range(3) ] # Other events. for _ in range(3): factory.make_Event() self.assertItemsEqual( self.dehydrate_events(events), handler.list({"node_id": node.id})) def test_list_returns_newest_event_first(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = [ factory.make_Event(node=node) for _ in range(3) ] # Other events. for _ in range(3): factory.make_Event() self.assertEquals( self.dehydrate_events(reversed(events)), handler.list({"node_id": node.id})) def test_list_default_max_days_of_30(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = [ factory.make_Event(node=node) for _ in range(3) ] # Event older than 30 days. 
self.make_event_in_the_past(node, 31) self.assertItemsEqual( self.dehydrate_events(events), handler.list({"node_id": node.id})) def test_list_uses_max_days(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() maxdays = random.randint(3, 50) events = [ self.make_event_in_the_past(node, maxdays - 1) for _ in range(3) ] for _ in range(3): self.make_event_in_the_past(node, maxdays + 1) self.assertItemsEqual( self.dehydrate_events(events), handler.list({"node_id": node.id, "max_days": maxdays})) def test_list_start(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = list(reversed([ factory.make_Event(node=node) for _ in range(6) ])) expected_output = self.dehydrate_events(events[3:]) self.assertItemsEqual( expected_output, handler.list({"node_id": node.id, "start": events[2].id})) def test_list_limit(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = list(reversed([ factory.make_Event(node=node) for _ in range(6) ])) expected_output = self.dehydrate_events(events[:3]) self.assertItemsEqual( expected_output, handler.list({"node_id": node.id, "limit": 3})) def test_list_start_and_limit(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() events = list(reversed([ factory.make_Event(node=node) for _ in range(9) ])) expected_output = self.dehydrate_events(events[3:6]) self.assertItemsEqual( expected_output, handler.list( {"node_id": node.id, "start": events[2].id, "limit": 3})) def test_clear_raises_error_if_missing_node_id(self): user = factory.make_User() handler = EventHandler(user, {}) self.assertRaises(HandlerPKError, handler.clear, {}) def test_clear_raises_error_if_node_id_doesnt_exist(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() node.delete() self.assertRaises( HandlerDoesNotExistError, handler.clear, {"node_id": node.id}) def test_clear_removes_node_id_from_cache(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() handler.cache["node_ids"].append(node.id) self.expectThat(handler.clear({"node_id": node.id}), Is(None)) self.expectThat(handler.cache["node_ids"], Equals([])) def test_on_listen_calls_listen(self): user = factory.make_User() handler = EventHandler(user, {}) mock_listen = self.patch(handler, "listen") mock_listen.return_value = None pk = random.randint(1, 1000) handler.on_listen(sentinel.channel, sentinel.action, pk) self.assertThat( mock_listen, MockCalledOnceWith( sentinel.channel, sentinel.action, pk)) def test_on_listen_returns_None_if_listen_returns_None(self): user = factory.make_User() handler = EventHandler(user, {}) mock_listen = self.patch(handler, "listen") mock_listen.return_value = None self.assertIsNone( handler.on_listen( sentinel.channel, sentinel.action, random.randint(1, 1000))) def test_on_listen_delete_returns_handler_name_and_pk(self): user = factory.make_User() pk = random.randint(1, 1000) handler = EventHandler(user, {}) self.assertEquals( (handler._meta.handler_name, "delete", pk), handler.on_listen( sentinel.channel, "delete", pk)) def test_on_listen_returns_None_if_event_node_id_not_in_cache(self): user = factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() event = factory.make_Event(node=node) self.assertIsNone( handler.on_listen( sentinel.channel, "create", event.id)) def test_on_listen_returns_handler_name_action_and_event(self): user = 
factory.make_User() handler = EventHandler(user, {}) node = factory.make_Node() event = factory.make_Event(node=node) handler.cache["node_ids"].append(node.id) self.assertEquals( ( handler._meta.handler_name, "create", self.dehydrate_event(event) ), handler.on_listen(sentinel.channel, "create", event.id)) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_fabric.py0000644000000000000000000000422613056115004025570 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.fabric`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.models.fabric import Fabric from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.fabric import FabricHandler from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime class TestFabricHandler(MAASServerTestCase): def dehydrate_fabric(self, fabric): data = { "id": fabric.id, "name": fabric.get_name(), "class_type": fabric.class_type, "updated": dehydrate_datetime(fabric.updated), "created": dehydrate_datetime(fabric.created), "vlan_ids": [ vlan.id for vlan in fabric.vlan_set.all() ], "nodes_count": len({ interface.node_id for vlan in fabric.vlan_set.all() for interface in vlan.interface_set.all() if interface.node_id is not None }), } return data def test_get(self): user = factory.make_User() handler = FabricHandler(user, {}) fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() for _ in range(3): node = factory.make_Node(interface=True) interface = node.get_boot_interface() interface.vlan = vlan interface.save() self.assertEquals( self.dehydrate_fabric(fabric), handler.get({"id": fabric.id})) def test_list(self): user = factory.make_User() handler = FabricHandler(user, {}) factory.make_Fabric() expected_fabrics = [ self.dehydrate_fabric(fabric) for fabric in Fabric.objects.all() ] self.assertItemsEqual( expected_fabrics, handler.list({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_general.py0000644000000000000000000001557513056115004025770 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
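# Editor's note: the cluster and device suites above guard against N+1
# query regressions by counting the queries a listing takes at two sizes
# and asserting the counts match. The core of that idiom, condensed
# (``make_things`` is an illustrative stand-in for the suite's factory
# helper):
#
#     self.make_things(10)
#     query_10_count, _ = count_queries(handler.list, {})
#     self.make_things(10)
#     query_20_count, _ = count_queries(handler.list, {})
#     self.assertEquals(query_10_count, query_20_count)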
"""Tests for `maasserver.websockets.handlers.general`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from distro_info import UbuntuDistroInfo from maasserver.enum import ( BOND_LACP_RATE_CHOICES, BOND_MODE_CHOICES, BOND_XMIT_HASH_POLICY_CHOICES, BOOT_RESOURCE_TYPE, NODE_PERMISSION, ) from maasserver.models import BootSourceCache from maasserver.models.config import Config from maasserver.node_action import ACTIONS_DICT from maasserver.testing.factory import factory from maasserver.testing.osystems import make_osystem_with_releases from maasserver.testing.testcase import MAASServerTestCase from maasserver.utils.orm import post_commit_hooks from maasserver.websockets.handlers import general from maasserver.websockets.handlers.general import GeneralHandler from mock import sentinel class TestGeneralHandler(MAASServerTestCase): def dehydrate_actions(self, actions): return [ { "name": name, "title": action.display, "sentence": action.display_sentence, } for name, action in actions.items() ] def test_architectures(self): arches = [ "%s/%s" % (factory.make_name("arch"), factory.make_name("subarch")) for _ in range(3) ] for arch in arches: factory.make_usable_boot_resource(architecture=arch) handler = GeneralHandler(factory.make_User(), {}) self.assertEquals(sorted(arches), handler.architectures({})) def test_hwe_kernels(self): ubuntu_releases = UbuntuDistroInfo() expected_output = [] # Stub out the post commit tasks otherwise the test fails due to # unrun post-commit tasks at the end of the test. self.patch(BootSourceCache, "post_commit_do") # Start with the first release MAAS supported. We do this # because the lookup between hwe- kernel and release can fail # when multiple releases start with the same letter. For # example both warty(4.10) and wily(15.10) will have an hwe-w # kernel. Because of this the mapping between kernel and # release will pick the release which was downloaded # first. Since precise no release has used the same first # letter so we do not have this problem with supported # releases. 
for release in ubuntu_releases.all[ ubuntu_releases.all.index('precise'):]: release = release.decode("utf-8") kernel = 'hwe-' + release[0] arch = factory.make_name('arch') architecture = "%s/%s" % (arch, kernel) factory.make_usable_boot_resource( name="ubuntu/" + release, extra={'subarches': kernel}, architecture=architecture, rtype=BOOT_RESOURCE_TYPE.SYNCED) # Force run the post commit tasks as we make new boot sources with post_commit_hooks: factory.make_BootSourceCache( os="ubuntu", arch=arch, subarch=kernel, release=release) expected_output.append((kernel, '%s (%s)' % (release, kernel))) handler = GeneralHandler(factory.make_User(), {}) self.assertItemsEqual( sorted(expected_output, key=lambda choice: choice[0]), sorted(handler.hwe_kernels({}), key=lambda choice: choice[0])) def test_osinfo(self): handler = GeneralHandler(factory.make_User(), {}) osystem = make_osystem_with_releases(self) releases = [ ("%s/%s" % (osystem["name"], release["name"]), release["title"]) for release in osystem["releases"] ] expected_osinfo = { "osystems": [ (osystem["name"], osystem["title"]), ], "releases": releases, "kernels": None, "default_osystem": Config.objects.get_config("default_osystem"), "default_release": Config.objects.get_config( "default_distro_series"), } self.assertItemsEqual(expected_osinfo, handler.osinfo({})) def test_node_actions_for_admin(self): handler = GeneralHandler(factory.make_admin(), {}) actions_expected = self.dehydrate_actions(ACTIONS_DICT) self.assertItemsEqual(actions_expected, handler.node_actions({})) def test_node_actions_for_non_admin(self): handler = GeneralHandler(factory.make_User(), {}) actions_expected = dict() for name, action in ACTIONS_DICT.items(): permission = action.permission if action.installable_permission is not None: permission = action.installable_permission if permission != NODE_PERMISSION.ADMIN: actions_expected[name] = action actions_expected = self.dehydrate_actions(actions_expected) self.assertItemsEqual(actions_expected, handler.node_actions({})) def test_device_actions_for_non_admin(self): handler = GeneralHandler(factory.make_User(), {}) actions_expected = self.dehydrate_actions({ name: action for name, action in ACTIONS_DICT.items() if not action.installable_only }) self.assertItemsEqual(actions_expected, handler.device_actions({})) def test_random_hostname_checks_hostname_existence(self): existing_node = factory.make_Node(hostname="hostname") hostnames = [existing_node.hostname, "new-hostname"] self.patch( general, "gen_candidate_names", lambda: iter(hostnames)) handler = GeneralHandler(factory.make_User(), {}) self.assertEqual("new-hostname", handler.random_hostname({})) def test_random_hostname_returns_empty_string_if_all_used(self): existing_node = factory.make_Node(hostname='hostname') hostnames = [existing_node.hostname] self.patch( general, "gen_candidate_names", lambda: iter(hostnames)) handler = GeneralHandler(factory.make_User(), {}) self.assertEqual("", handler.random_hostname({})) def test_bond_options(self): handler = GeneralHandler(factory.make_User(), {}) self.assertEquals({ "modes": BOND_MODE_CHOICES, "lacp_rates": BOND_LACP_RATE_CHOICES, "xmit_hash_policies": BOND_XMIT_HASH_POLICY_CHOICES, }, handler.bond_options({})) def test_version(self): handler = GeneralHandler(factory.make_User(), {}) self.patch_autospec( general, "get_maas_version_ui").return_value = sentinel.version self.assertEquals(sentinel.version, handler.version({})) 
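# For reference (illustrative shape only): dehydrate_actions() above
# flattens ACTIONS_DICT into entries of the form
#     {"name": <action name>, "title": <action.display>,
#      "sentence": <action.display_sentence>}
# which is the form node_actions({}) and device_actions({}) return.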
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_node.py0000644000000000000000000026761513056115004025304 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.node`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import logging from operator import itemgetter import random import re from django.contrib.auth.models import User from django.core.exceptions import ValidationError from lxml import etree from maasserver.enum import ( BOND_MODE, CACHE_MODE_TYPE, FILESYSTEM_FORMAT_TYPE_CHOICES, FILESYSTEM_FORMAT_TYPE_CHOICES_DICT, FILESYSTEM_GROUP_TYPE, FILESYSTEM_TYPE, INTERFACE_LINK_TYPE, INTERFACE_TYPE, IPADDRESS_TYPE, NODE_STATUS, ) from maasserver.exceptions import NodeActionError from maasserver.forms import AdminNodeWithMACAddressesForm from maasserver.models import interface as interface_module from maasserver.models.blockdevice import BlockDevice from maasserver.models.cacheset import CacheSet from maasserver.models.config import Config from maasserver.models.filesystemgroup import ( Bcache, RAID, VolumeGroup, ) from maasserver.models.interface import Interface from maasserver.models.node import Node from maasserver.models.nodeprobeddetails import get_single_probed_details from maasserver.models.partition import ( Partition, PARTITION_ALIGNMENT_SIZE, ) from maasserver.node_action import compile_node_actions from maasserver.rpc.testing.fixtures import MockLiveRegionToClusterRPCFixture from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.eventloop import ( RegionEventLoopFixture, RunningEventLoopFixture, ) from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.osystems import make_usable_osystem from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.third_party_drivers import get_third_party_driver from maasserver.utils.converters import ( human_readable_bytes, round_size_to_nearest_block, XMLToYAML, ) from maasserver.utils.orm import ( get_one, transactional, ) from maasserver.utils.osystems import make_hwe_kernel_ui_text from maasserver.utils.threads import deferToDatabase from maasserver.websockets.base import ( HandlerDoesNotExistError, HandlerError, HandlerPermissionError, HandlerValidationError, ) from maasserver.websockets.handlers import node as node_module from maasserver.websockets.handlers.event import dehydrate_event_type_level from maasserver.websockets.handlers.node import ( Node as node_model, NodeHandler, ) from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maastesting.djangotestcase import count_queries from maastesting.matchers import MockCalledOnceWith from maastesting.twisted import ( always_fail_with, always_succeed_with, ) from metadataserver.enum import RESULT_TYPE from metadataserver.models.commissioningscript import ( LIST_MODALIASES_OUTPUT_NAME, LLDP_OUTPUT_NAME, ) from mock import ( ANY, sentinel, ) from netaddr import IPAddress from provisioningserver.power.poweraction import PowerActionFail from provisioningserver.rpc.cluster import PowerQuery from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.tags import merge_details_cleanly from provisioningserver.utils.twisted import asynchronous 
from testtools import ExpectedException from testtools.matchers import Equals from twisted.internet.defer import CancelledError class TestNodeHandler(MAASServerTestCase): def dehydrate_node( self, node, handler, for_list=False, include_summary=False): boot_interface = node.get_boot_interface() pxe_mac_vendor = node.get_pxe_mac_vendor() blockdevices = [ blockdevice.actual_instance for blockdevice in node.blockdevice_set.all() ] driver = get_third_party_driver(node) disks = [ handler.dehydrate_blockdevice(blockdevice, node) for blockdevice in blockdevices ] disks = disks + [ handler.dehydrate_volume_group(volume_group) for volume_group in VolumeGroup.objects.filter_by_node(node) ] + [ handler.dehydrate_cache_set(cache_set) for cache_set in CacheSet.objects.get_cache_sets_for_node(node) ] disks = sorted(disks, key=itemgetter("name")) subnets = handler.get_all_subnets(node) data = { "actions": compile_node_actions(node, handler.user).keys(), "architecture": node.architecture, "boot_type": node.boot_type, "boot_disk": node.boot_disk, "bios_boot_method": node.bios_boot_method, "commissioning_results": handler.dehydrate_node_results( node, RESULT_TYPE.COMMISSIONING), "cpu_count": node.cpu_count, "created": dehydrate_datetime(node.created), "devices": sorted([ { "fqdn": device.fqdn, "interfaces": [ handler.dehydrate_interface(interface, device) for interface in device.interface_set.all().order_by( 'id') ], } for device in node.children.all().order_by('id') ], key=itemgetter('fqdn')), "disable_ipv4": node.disable_ipv4, "physical_disk_count": node.physicalblockdevice_set.count(), "disks": disks, "storage_layout_issues": node.storage_layout_issues(), "supported_filesystems": [ {'key': key, 'ui': ui} for key, ui in FILESYSTEM_FORMAT_TYPE_CHOICES], "distro_series": node.get_distro_series(), "error": node.error, "error_description": node.error_description, "events": handler.dehydrate_events(node), "extra_macs": [ "%s" % mac_address for mac_address in node.get_extra_macs() ], "fqdn": node.fqdn, "hwe_kernel": make_hwe_kernel_ui_text(node.hwe_kernel), "hostname": node.hostname, "id": node.id, "installation_results": handler.dehydrate_node_results( node, RESULT_TYPE.INSTALLATION), "interfaces": [ handler.dehydrate_interface(interface, node) for interface in node.interface_set.all().order_by('name') ], "on_network": node.on_network(), "license_key": node.license_key, "memory": node.display_memory(), "min_hwe_kernel": node.min_hwe_kernel, "nodegroup": handler.dehydrate_nodegroup(node.nodegroup), "osystem": node.get_osystem(), "owner": handler.dehydrate_owner(node.owner), "power_parameters": handler.dehydrate_power_parameters( node.power_parameters), "power_state": node.power_state, "power_type": node.power_type, "pxe_mac": ( "" if boot_interface is None else "%s" % boot_interface.mac_address), "pxe_mac_vendor": "" if pxe_mac_vendor is None else pxe_mac_vendor, "routers": handler.dehydrate_routers(node.routers), "show_os_info": handler.dehydrate_show_os_info(node), "status": node.display_status(), "storage": "%3.1f" % (sum([ blockdevice.size for blockdevice in node.physicalblockdevice_set.all() ]) / (1000 ** 3)), "storage_tags": handler.get_all_storage_tags(blockdevices), "subnets": [subnet.cidr for subnet in subnets], "fabrics": handler.get_all_fabric_names(node, subnets), "spaces": handler.get_all_space_names(subnets), "swap_size": node.swap_size, "system_id": node.system_id, "tags": [ tag.name for tag in node.tags.all() ], "third_party_driver": { "module": driver["module"] if "module" in driver else "", 
"comment": driver["comment"] if "comment" in driver else "", }, "updated": dehydrate_datetime(node.updated), "zone": handler.dehydrate_zone(node.zone), } if for_list: allowed_fields = NodeHandler.Meta.list_fields + [ "actions", "fqdn", "status", "pxe_mac", "pxe_mac_vendor", "extra_macs", "tags", "subnets", "fabrics", "spaces", "physical_disk_count", "storage", "storage_tags", ] for key in data.keys(): if key not in allowed_fields: del data[key] if include_summary: data = handler.dehydrate_summary_output(node, data) return data def make_nodes(self, nodegroup, number): """Create `number` of new nodes.""" for counter in range(number): node = factory.make_Node( nodegroup=nodegroup, interface=True, status=NODE_STATUS.READY) factory.make_PhysicalBlockDevice(node) # Make some devices. for _ in range(3): factory.make_Node( installable=False, parent=node, interface=True) def test_dehydrate_owner_empty_when_None(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertEquals("", handler.dehydrate_owner(None)) def test_dehydrate_owner_username(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertEquals(owner.username, handler.dehydrate_owner(owner)) def test_dehydrate_zone(self): owner = factory.make_User() handler = NodeHandler(owner, {}) zone = factory.make_Zone() self.assertEquals({ "id": zone.id, "name": zone.name, }, handler.dehydrate_zone(zone)) def test_dehydrate_nodegroup_returns_None_when_None(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertIsNone(handler.dehydrate_nodegroup(None)) def test_dehydrate_nodegroup(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) self.assertEquals({ "id": node.nodegroup.id, "uuid": node.nodegroup.uuid, "name": node.nodegroup.name, "cluster_name": node.nodegroup.cluster_name, }, handler.dehydrate_nodegroup(node.nodegroup)) def test_dehydrate_routers_returns_empty_list_when_None(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertEquals([], handler.dehydrate_routers(None)) def test_dehydrate_routers_returns_list_of_strings(self): owner = factory.make_User() handler = NodeHandler(owner, {}) routers = [ IPAddress(factory.make_ipv4_address()) for _ in range(3) ] expected = [ "%s" % router for router in routers ] self.assertEquals(expected, handler.dehydrate_routers(routers)) def test_dehydrate_power_parameters_returns_None_when_empty(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertIsNone(handler.dehydrate_power_parameters('')) def test_dehydrate_power_parameters_returns_params(self): owner = factory.make_User() handler = NodeHandler(owner, {}) params = { factory.make_name("key"): factory.make_name("value") for _ in range(3) } self.assertEquals(params, handler.dehydrate_power_parameters(params)) def test_dehydrate_show_os_info_returns_true(self): owner = factory.make_User() node = factory.make_Node(owner=owner, status=NODE_STATUS.DEPLOYED) handler = NodeHandler(owner, {}) self.assertTrue(handler.dehydrate_show_os_info(node)) def test_dehydrate_show_os_info_returns_false(self): owner = factory.make_User() node = factory.make_Node(owner=owner, status=NODE_STATUS.READY) handler = NodeHandler(owner, {}) self.assertFalse(handler.dehydrate_show_os_info(node)) def test_dehydrate_device(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) device = factory.make_Node(installable=False, parent=node) interface = factory.make_Interface( 
INTERFACE_TYPE.PHYSICAL, node=device) self.assertEquals({ "fqdn": device.fqdn, "interfaces": [handler.dehydrate_interface(interface, device)], }, handler.dehydrate_device(device)) def test_dehydrate_block_device_with_PhysicalBlockDevice_with_ptable(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) blockdevice = factory.make_PhysicalBlockDevice(node=node) partition_table = factory.make_PartitionTable(block_device=blockdevice) is_boot = blockdevice.id == node.get_boot_disk().id self.assertEquals({ "id": blockdevice.id, "is_boot": is_boot, "name": blockdevice.get_name(), "tags": blockdevice.tags, "type": blockdevice.type, "path": blockdevice.path, "size": blockdevice.size, "size_human": human_readable_bytes(blockdevice.size), "used_size": blockdevice.used_size, "used_size_human": human_readable_bytes(blockdevice.used_size), "available_size": blockdevice.available_size, "available_size_human": human_readable_bytes( blockdevice.available_size), "block_size": blockdevice.block_size, "model": blockdevice.model, "serial": blockdevice.serial, "partition_table_type": partition_table.table_type, "used_for": blockdevice.used_for, "filesystem": handler.dehydrate_filesystem( blockdevice.get_effective_filesystem()), "partitions": handler.dehydrate_partitions( blockdevice.get_partitiontable()), }, handler.dehydrate_blockdevice(blockdevice, node)) def test_dehydrate_block_device_with_PhysicalBlockDevice_wo_ptable(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) blockdevice = factory.make_PhysicalBlockDevice(node=node) is_boot = blockdevice.id == node.get_boot_disk().id self.assertEquals({ "id": blockdevice.id, "is_boot": is_boot, "name": blockdevice.get_name(), "tags": blockdevice.tags, "type": blockdevice.type, "path": blockdevice.path, "size": blockdevice.size, "size_human": human_readable_bytes(blockdevice.size), "used_size": blockdevice.used_size, "used_size_human": human_readable_bytes(blockdevice.used_size), "available_size": blockdevice.available_size, "available_size_human": human_readable_bytes( blockdevice.available_size), "block_size": blockdevice.block_size, "model": blockdevice.model, "serial": blockdevice.serial, "partition_table_type": "", "used_for": blockdevice.used_for, "filesystem": handler.dehydrate_filesystem( blockdevice.get_effective_filesystem()), "partitions": handler.dehydrate_partitions( blockdevice.get_partitiontable()), }, handler.dehydrate_blockdevice(blockdevice, node)) def test_dehydrate_block_device_with_VirtualBlockDevice(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) blockdevice = factory.make_VirtualBlockDevice(node=node) self.assertEquals({ "id": blockdevice.id, "is_boot": False, "name": blockdevice.get_name(), "tags": blockdevice.tags, "type": blockdevice.type, "path": blockdevice.path, "size": blockdevice.size, "size_human": human_readable_bytes(blockdevice.size), "used_size": blockdevice.used_size, "used_size_human": human_readable_bytes(blockdevice.used_size), "available_size": blockdevice.available_size, "available_size_human": human_readable_bytes( blockdevice.available_size), "block_size": blockdevice.block_size, "model": "", "serial": "", "partition_table_type": "", "used_for": blockdevice.used_for, "filesystem": handler.dehydrate_filesystem( blockdevice.get_effective_filesystem()), "partitions": handler.dehydrate_partitions( blockdevice.get_partitiontable()), "parent": { "id": 
blockdevice.filesystem_group.id, "type": blockdevice.filesystem_group.group_type, "uuid": blockdevice.filesystem_group.uuid, }, }, handler.dehydrate_blockdevice(blockdevice, node)) def test_dehydrate_volume_group(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) volume_group = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, node=node) self.assertEquals({ "id": volume_group.id, "name": volume_group.name, "tags": [], "type": volume_group.group_type, "path": "", "size": volume_group.get_size(), "size_human": human_readable_bytes(volume_group.get_size()), "used_size": volume_group.get_lvm_allocated_size(), "used_size_human": human_readable_bytes( volume_group.get_lvm_allocated_size()), "available_size": volume_group.get_lvm_free_space(), "available_size_human": human_readable_bytes( volume_group.get_lvm_free_space()), "block_size": volume_group.get_virtual_block_device_block_size(), "model": "", "serial": "", "partition_table_type": "", "used_for": "volume group", "filesystem": None, "partitions": None, }, handler.dehydrate_volume_group(volume_group)) def test_dehydrate_cache_set(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) cache_set = factory.make_CacheSet(node=node) backings = [] for _ in range(3): backing = factory.make_PhysicalBlockDevice(node=node) fs = factory.make_Filesystem( block_device=backing, fstype=FILESYSTEM_TYPE.BCACHE_BACKING) backings.append( factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.BCACHE, filesystems=[fs], cache_set=cache_set)) self.assertEquals({ "id": cache_set.id, "name": cache_set.name, "tags": [], "type": "cache-set", "path": "", "size": cache_set.get_device().size, "size_human": human_readable_bytes( cache_set.get_device().size), "used_size": cache_set.get_device().get_used_size(), "used_size_human": human_readable_bytes( cache_set.get_device().get_used_size()), "available_size": cache_set.get_device().get_available_size(), "available_size_human": human_readable_bytes( cache_set.get_device().get_available_size()), "block_size": cache_set.get_device().get_block_size(), "model": "", "serial": "", "partition_table_type": "", "used_for": ", ".join(sorted([ backing_device.name for backing_device in backings ])), "filesystem": None, "partitions": None, }, handler.dehydrate_cache_set(cache_set)) def test_dehydrate_partitions_returns_None(self): owner = factory.make_User() handler = NodeHandler(owner, {}) self.assertIsNone(handler.dehydrate_partitions(None)) def test_dehydrate_partitions_returns_list_of_partitions(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) blockdevice = factory.make_PhysicalBlockDevice( node=node, size=10 * 1024 ** 3, block_size=512) partition_table = factory.make_PartitionTable(block_device=blockdevice) partitions = [ factory.make_Partition( partition_table=partition_table, size=1 * 1024 ** 3) for _ in range(3) ] expected = [] for partition in partitions: expected.append({ "filesystem": handler.dehydrate_filesystem( partition.get_effective_filesystem()), "name": partition.get_name(), "path": partition.path, "type": partition.type, "id": partition.id, "size": partition.size, "size_human": human_readable_bytes(partition.size), "used_for": partition.used_for, }) self.assertEquals( expected, handler.dehydrate_partitions(partition_table)) def test_dehydrate_filesystem_returns_None(self): owner = factory.make_User() handler = NodeHandler(owner, 
{}) self.assertIsNone(handler.dehydrate_filesystem(None)) def test_dehydrate_filesystem(self): owner = factory.make_User() handler = NodeHandler(owner, {}) filesystem = factory.make_Filesystem() self.assertEquals({ "label": filesystem.label, "mount_point": filesystem.mount_point, "fstype": filesystem.fstype, "is_format_fstype": ( filesystem.fstype in FILESYSTEM_FORMAT_TYPE_CHOICES_DICT), }, handler.dehydrate_filesystem(filesystem)) def test_dehydrate_interface_for_ready_node(self): owner = factory.make_User() node = factory.make_Node(owner=owner, status=NODE_STATUS.READY) handler = NodeHandler(owner, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.AUTO, ip="", subnet=factory.make_Subnet(), interface=interface) expected_links = interface.get_links() for link in expected_links: link["subnet_id"] = link.pop("subnet").id self.assertEquals({ "id": interface.id, "type": interface.type, "name": interface.get_name(), "enabled": interface.is_enabled(), "is_boot": interface == node.boot_interface, "mac_address": "%s" % interface.mac_address, "vlan_id": interface.vlan_id, "parents": [ nic.id for nic in interface.parents.all() ], "children": [ nic.child.id for nic in interface.children_relationships.all() ], "links": expected_links, }, handler.dehydrate_interface(interface, node)) def test_dehydrate_interface_for_commissioning_node(self): owner = factory.make_User() node = factory.make_Node(owner=owner, status=NODE_STATUS.COMMISSIONING) handler = NodeHandler(owner, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.AUTO, ip="", subnet=factory.make_Subnet(), interface=interface) expected_links = interface.get_links() for link in expected_links: link["subnet_id"] = link.pop("subnet").id discovered_subnet = factory.make_Subnet() factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=factory.pick_ip_in_network(discovered_subnet.get_ipnetwork()), subnet=discovered_subnet, interface=interface) expected_discovered = interface.get_discovered() for discovered in expected_discovered: discovered["subnet_id"] = discovered.pop("subnet").id self.assertEquals({ "id": interface.id, "type": interface.type, "name": interface.get_name(), "enabled": interface.is_enabled(), "is_boot": interface == node.boot_interface, "mac_address": "%s" % interface.mac_address, "vlan_id": interface.vlan_id, "parents": [ nic.id for nic in interface.parents.all() ], "children": [ nic.child.id for nic in interface.children_relationships.all() ], "links": expected_links, "discovered": expected_discovered, }, handler.dehydrate_interface(interface, node)) def test_dehydrate_summary_output_returns_None(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) observed = handler.dehydrate_summary_output(node, {}) self.assertEquals({ "summary_xml": None, "summary_yaml": None, }, observed) def test_dehydrate_summary_output_returns_data(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) lldp_data = "bar".encode("utf-8") factory.make_NodeResult_for_commissioning( node=node, name=LLDP_OUTPUT_NAME, script_result=0, data=lldp_data) observed = handler.dehydrate_summary_output(node, {}) probed_details = merge_details_cleanly( get_single_probed_details(node.system_id)) self.assertEquals({ "summary_xml": etree.tostring( probed_details, encoding=unicode, pretty_print=True), 
"summary_yaml": XMLToYAML( etree.tostring( probed_details, encoding=unicode, pretty_print=True)).convert(), }, observed) def test_dehydrate_node_results(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) lldp_data = "bar".encode("utf-8") result = factory.make_NodeResult_for_commissioning( node=node, name=LLDP_OUTPUT_NAME, script_result=0, data=lldp_data) self.assertEquals([{ "id": result.id, "result": result.script_result, "name": result.name, "data": result.data, "line_count": 1, "created": dehydrate_datetime(result.created), }], handler.dehydrate_node_results(node, RESULT_TYPE.COMMISSIONING)) def test_dehydrate_events_only_includes_lastest_50(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) event_type = factory.make_EventType(level=logging.INFO) events = [ factory.make_Event(node=node, type=event_type) for _ in range(100) ] expected = [ { "id": event.id, "type": { "id": event_type.id, "name": event_type.name, "description": event_type.description, "level": dehydrate_event_type_level(event_type.level), }, "description": event.description, "created": dehydrate_datetime(event.created), } for event in list(reversed(events))[:50] ] self.assertEquals(expected, handler.dehydrate_events(node)) def test_dehydrate_events_doesnt_include_debug(self): owner = factory.make_User() node = factory.make_Node(owner=owner) handler = NodeHandler(owner, {}) event_type = factory.make_EventType(level=logging.DEBUG) for _ in range(5): factory.make_Event(node=node, type=event_type) self.assertEquals([], handler.dehydrate_events(node)) def make_node_with_subnets(self): user = factory.make_User() handler = NodeHandler(user, {}) space1 = factory.make_Space() fabric1 = factory.make_Fabric(name=factory.make_name("fabric")) vlan1 = factory.make_VLAN(fabric=fabric1) subnet1 = factory.make_Subnet(space=space1, vlan=vlan1) node = factory.make_Node_with_Interface_on_Subnet( subnet=subnet1, vlan=vlan1) node.save() # Bond interface with a VLAN on top. With the bond set to STATIC # and the VLAN set to AUTO. fabric2 = factory.make_Fabric(name=factory.make_name("fabric")) vlan2 = factory.make_VLAN(fabric=fabric2) space2 = factory.make_Space() bond_subnet = factory.make_Subnet(space=space1, vlan=vlan1) vlan_subnet = factory.make_Subnet(space=space2, vlan=vlan2) nic1 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=vlan1) nic2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=vlan2) bond = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[nic1, nic2], vlan=vlan1) vlan_int = factory.make_Interface( INTERFACE_TYPE.VLAN, vlan=vlan2, parents=[bond]) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip=factory.pick_ip_in_network(bond_subnet.get_ipnetwork()), subnet=bond_subnet, interface=bond) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=vlan_subnet, interface=vlan_int) # LINK_UP interface with no subnet. 
fabric3 = factory.make_Fabric(name=factory.make_name("fabric")) vlan3 = factory.make_VLAN(fabric=fabric3) nic3 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, vlan=vlan3, node=node) nic3_ip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=None, interface=nic3) nic3_ip.subnet = None nic3_ip.save() self.patch_autospec(interface_module, "update_host_maps") boot_interface = node.get_boot_interface() boot_interface.claim_static_ips() node.boot_interface = boot_interface node.save() subnets = [subnet1, bond_subnet, vlan_subnet] fabrics = [fabric1, fabric2, fabric3] spaces = [space1, space2] return (handler, node, subnets, fabrics, spaces) def test_get_all_subnets(self): (handler, node, subnets, _, _) = self.make_node_with_subnets() self.assertItemsEqual(subnets, handler.get_all_subnets(node)) def test_get_all_fabric_names(self): (handler, node, _, fabrics, _) = self.make_node_with_subnets() fabric_names = [fabric.name for fabric in fabrics] node_subnets = handler.get_all_subnets(node) self.assertItemsEqual( fabric_names, handler.get_all_fabric_names(node, node_subnets)) def test_get_all_space_names(self): (handler, node, _, _, spaces) = self.make_node_with_subnets() space_names = [space.name for space in spaces] node_subnets = handler.get_all_subnets(node) self.assertItemsEqual( space_names, handler.get_all_space_names(node_subnets)) def test_get(self): user = factory.make_User() handler = NodeHandler(user, {}) node = factory.make_Node_with_Interface_on_Subnet() factory.make_FilesystemGroup(node=node) node.owner = user node.save() for _ in range(100): factory.make_Event(node=node) lldp_data = "bar".encode("utf-8") factory.make_NodeResult_for_commissioning( node=node, name=LLDP_OUTPUT_NAME, script_result=0, data=lldp_data) factory.make_PhysicalBlockDevice(node) Config.objects.set_config( name='enable_third_party_drivers', value=True) data = "pci:v00001590d00000047sv00001590sd00000047bc*sc*i*" factory.make_NodeResult_for_commissioning( node=node, name=LIST_MODALIASES_OUTPUT_NAME, script_result=0, data=data.encode("utf-8")) # Bond interface with a VLAN on top, with the bond set to STATIC # and the VLAN set to AUTO. bond_subnet = factory.make_Subnet() vlan_subnet = factory.make_Subnet() nic1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) nic2 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) bond = factory.make_Interface( INTERFACE_TYPE.BOND, parents=[nic1, nic2]) vlan = factory.make_Interface(INTERFACE_TYPE.VLAN, parents=[bond]) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip=factory.pick_ip_in_network(bond_subnet.get_ipnetwork()), subnet=bond_subnet, interface=bond) factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=vlan_subnet, interface=vlan) # LINK_UP interface with no subnet. nic3 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) nic3_ip = factory.make_StaticIPAddress( alloc_type=IPADDRESS_TYPE.STICKY, ip="", subnet=None, interface=nic3) nic3_ip.subnet = None nic3_ip.save() # Make some devices.
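# (For clarity: devices are modelled as non-installable child nodes,
# i.e. factory.make_Node(installable=False, parent=node).)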
for _ in range(3): factory.make_Node( installable=False, parent=node, interface=True) self.patch_autospec(interface_module, "update_host_maps") boot_interface = node.get_boot_interface() boot_interface.claim_static_ips() node.boot_interface = boot_interface node.save() self.assertEquals( self.dehydrate_node(node, handler, include_summary=True), handler.get({"system_id": node.system_id})) def test_list(self): user = factory.make_User() handler = NodeHandler(user, {}) node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=user) factory.make_PhysicalBlockDevice(node) self.assertItemsEqual( [self.dehydrate_node(node, handler, for_list=True)], handler.list({})) def test_list_ignores_devices(self): owner = factory.make_User() handler = NodeHandler(owner, {}) # Create a device. factory.make_Node(owner=owner, installable=False) node = factory.make_Node(owner=owner) self.assertItemsEqual( [self.dehydrate_node(node, handler, for_list=True)], handler.list({})) def test_list_num_queries_is_independent_of_num_nodes(self): user = factory.make_User() user_ssh_prefetch = User.objects.filter( id=user.id).prefetch_related('sshkey_set').first() handler = NodeHandler(user_ssh_prefetch, {}) nodegroup = factory.make_NodeGroup() self.make_nodes(nodegroup, 10) query_10_count, _ = count_queries(handler.list, {}) self.make_nodes(nodegroup, 10) query_20_count, _ = count_queries(handler.list, {}) # This check is to notify the developer that a change was made that # affects the number of queries performed when doing a node listing. # It is important to keep this number as low as possible. A larger # number means regiond has to do more work, slowing down its process # and the client waiting for the response. self.assertEquals( query_10_count, 11, "Number of queries has changed; make sure this is expected.") self.assertEquals( query_10_count, query_20_count, "Number of queries is not independent of the number of nodes.") def test_list_returns_nodes_only_viewable_by_user(self): user = factory.make_User() other_user = factory.make_User() node = factory.make_Node(status=NODE_STATUS.READY) owned_node = factory.make_Node( owner=user, status=NODE_STATUS.ALLOCATED) factory.make_Node( owner=other_user, status=NODE_STATUS.ALLOCATED) handler = NodeHandler(user, {}) self.assertItemsEqual([ self.dehydrate_node(node, handler, for_list=True), self.dehydrate_node(owned_node, handler, for_list=True), ], handler.list({})) def test_get_object_returns_node_if_super_user(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) self.assertEquals( node, handler.get_object({"system_id": node.system_id})) def test_get_object_returns_node_if_owner(self): user = factory.make_User() node = factory.make_Node(owner=user) handler = NodeHandler(user, {}) self.assertEquals( node, handler.get_object({"system_id": node.system_id})) def test_get_object_returns_node_if_owner_empty(self): user = factory.make_User() node = factory.make_Node() handler = NodeHandler(user, {}) self.assertEquals( node, handler.get_object({"system_id": node.system_id})) def test_get_object_raises_error_if_owned_by_another_user(self): user = factory.make_User() node = factory.make_Node(owner=factory.make_User()) handler = NodeHandler(user, {}) self.assertRaises( HandlerDoesNotExistError, handler.get_object, {"system_id": node.system_id}) def test_get_form_class_for_create(self): user = factory.make_admin() handler = NodeHandler(user, {}) self.assertEquals( AdminNodeWithMACAddressesForm, handler.get_form_class("create"))
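# A minimal sketch of the count_queries contract assumed by the
# listing test above (from maastesting.djangotestcase): it calls the
# given callable and returns the number of SQL queries issued along
# with the callable's own result:
#     query_count, result = count_queries(handler.list, {})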
def test_get_form_class_for_update(self): user = factory.make_admin() handler = NodeHandler(user, {}) self.assertEquals( AdminNodeWithMACAddressesForm, handler.get_form_class("update")) def test_get_form_class_raises_error_for_unknown_action(self): user = factory.make_User() handler = NodeHandler(user, {}) self.assertRaises( HandlerError, handler.get_form_class, factory.make_name()) def test_create_raise_permissions_error_for_non_admin(self): user = factory.make_User() handler = NodeHandler(user, {}) self.assertRaises( HandlerPermissionError, handler.create, {}) def test_create_raises_validation_error_for_missing_pxe_mac(self): user = factory.make_admin() handler = NodeHandler(user, {}) nodegroup = factory.make_NodeGroup() zone = factory.make_Zone() params = { "architecture": make_usable_architecture(self), "zone": { "name": zone.name, }, "nodegroup": { "uuid": nodegroup.uuid, }, } with ExpectedException( HandlerValidationError, re.escape("{u'mac_addresses': [u'This field is required.']}")): handler.create(params) def test_create_raises_validation_error_for_missing_architecture(self): user = factory.make_admin() handler = NodeHandler(user, {}) nodegroup = factory.make_NodeGroup() zone = factory.make_Zone() params = { "pxe_mac": factory.make_mac_address(), "zone": { "name": zone.name, }, "nodegroup": { "uuid": nodegroup.uuid, }, } with ExpectedException( HandlerValidationError, re.escape( "{u'architecture': [u'Architecture must be " "defined for installable nodes.']}")): handler.create(params) def test_create_creates_node(self): user = factory.make_admin() handler = NodeHandler(user, {}) nodegroup = factory.make_NodeGroup() zone = factory.make_Zone() mac = factory.make_mac_address() hostname = factory.make_name("hostname") architecture = make_usable_architecture(self) self.patch(node_model, "start_commissioning") created_node = handler.create({ "hostname": hostname, "pxe_mac": mac, "architecture": architecture, "zone": { "name": zone.name, }, "nodegroup": { "uuid": nodegroup.uuid, }, "power_type": "ether_wake", "power_parameters": { "mac_address": mac, }, }) self.expectThat(created_node["hostname"], Equals(hostname)) self.expectThat(created_node["pxe_mac"], Equals(mac)) self.expectThat(created_node["extra_macs"], Equals([])) self.expectThat(created_node["architecture"], Equals(architecture)) self.expectThat(created_node["zone"]["id"], Equals(zone.id)) self.expectThat(created_node["nodegroup"]["id"], Equals(nodegroup.id)) self.expectThat(created_node["power_type"], Equals("ether_wake")) self.expectThat(created_node["power_parameters"], Equals({ "mac_address": mac, })) def test_create_starts_auto_commissioning(self): user = factory.make_admin() handler = NodeHandler(user, {}) nodegroup = factory.make_NodeGroup() zone = factory.make_Zone() mac = factory.make_mac_address() hostname = factory.make_name("hostname") architecture = make_usable_architecture(self) mock_start_commissioning = self.patch(node_model, "start_commissioning") handler.create({ "hostname": hostname, "pxe_mac": mac, "architecture": architecture, "zone": { "name": zone.name, }, "nodegroup": { "uuid": nodegroup.uuid, }, "power_type": "ether_wake", "power_parameters": { "mac_address": mac, }, }) self.assertThat(mock_start_commissioning, MockCalledOnceWith(user)) def test_update_raise_permissions_error_for_non_admin(self): user = factory.make_User() handler = NodeHandler(user, {}) self.assertRaises( HandlerPermissionError, handler.update, {}) def test_update_raises_validation_error_for_invalid_architecture(self): user = 
factory.make_admin() handler = NodeHandler(user, {}) node = factory.make_Node(interface=True) node_data = self.dehydrate_node(node, handler) arch = factory.make_name("arch") node_data["architecture"] = arch with ExpectedException( HandlerValidationError, re.escape( "{u'architecture': [u\"'%s' is not a valid architecture. " "It should be one of: ''.\"]}" % arch)): handler.update(node_data) def test_update_updates_node(self): user = factory.make_admin() handler = NodeHandler(user, {}) node = factory.make_Node(interface=True) node_data = self.dehydrate_node(node, handler) new_nodegroup = factory.make_NodeGroup() new_zone = factory.make_Zone() new_hostname = factory.make_name("hostname") new_architecture = make_usable_architecture(self) node_data["hostname"] = new_hostname node_data["architecture"] = new_architecture node_data["zone"] = { "name": new_zone.name, } node_data["nodegroup"] = { "uuid": new_nodegroup.uuid, } node_data["power_type"] = "ether_wake" power_mac = factory.make_mac_address() node_data["power_parameters"] = { "mac_address": power_mac, } updated_node = handler.update(node_data) self.expectThat(updated_node["hostname"], Equals(new_hostname)) self.expectThat(updated_node["architecture"], Equals(new_architecture)) self.expectThat(updated_node["zone"]["id"], Equals(new_zone.id)) self.expectThat( updated_node["nodegroup"]["id"], Equals(new_nodegroup.id)) self.expectThat(updated_node["power_type"], Equals("ether_wake")) self.expectThat(updated_node["power_parameters"], Equals({ "mac_address": power_mac, })) def test_update_adds_tags_to_node(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) tags = [ factory.make_Tag(definition='').name for _ in range(3) ] node_data = self.dehydrate_node(node, handler) node_data["tags"] = tags updated_node = handler.update(node_data) self.assertItemsEqual(tags, updated_node["tags"]) def test_update_removes_tag_from_node(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) tags = [] for _ in range(3): tag = factory.make_Tag(definition='') tag.node_set.add(node) tag.save() tags.append(tag.name) node_data = self.dehydrate_node(node, handler) removed_tag = tags.pop() node_data["tags"].remove(removed_tag) updated_node = handler.update(node_data) self.assertItemsEqual(tags, updated_node["tags"]) def test_update_creates_tag_for_node(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) tag_name = factory.make_name("tag") node_data = self.dehydrate_node(node, handler) node_data["tags"].append(tag_name) updated_node = handler.update(node_data) self.assertItemsEqual([tag_name], updated_node["tags"]) def test_unmount_blockdevice_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) fs = factory.make_Filesystem(block_device=block_device) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': block_device.id, 'fstype': fs.fstype, 'mount_point': None }) self.assertEquals( None, block_device.get_effective_filesystem().mount_point) def 
test_unmount_partition_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) partition = factory.make_Partition(node=node) fs = factory.make_Filesystem(partition=partition) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': partition.partition_table.block_device.id, 'partition_id': partition.id, 'fstype': fs.fstype, 'mount_point': None }) self.assertEquals( None, partition.get_effective_filesystem().mount_point) def test_mount_blockdevice_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) fs = factory.make_Filesystem(block_device=block_device) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': block_device.id, 'fstype': fs.fstype, 'mount_point': '/mnt' }) self.assertEquals( '/mnt', block_device.get_effective_filesystem().mount_point) def test_mount_partition_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) partition = factory.make_Partition(node=node) fs = factory.make_Filesystem(partition=partition) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': partition.partition_table.block_device.id, 'partition_id': partition.id, 'fstype': fs.fstype, 'mount_point': '/mnt' }) self.assertEquals( '/mnt', partition.get_effective_filesystem().mount_point) def test_change_blockdevice_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) fs = factory.make_Filesystem(block_device=block_device) new_fstype = factory.pick_choice( FILESYSTEM_FORMAT_TYPE_CHOICES, [fs.fstype]) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': block_device.id, 'fstype': new_fstype, 'mount_point': None }) self.assertEquals( new_fstype, block_device.get_effective_filesystem().fstype) def test_change_partition_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) partition = factory.make_Partition(node=node) fs = factory.make_Filesystem(partition=partition) new_fstype = factory.pick_choice( FILESYSTEM_FORMAT_TYPE_CHOICES, [fs.fstype]) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': partition.partition_table.block_device.id, 'partition_id': partition.id, 'fstype': new_fstype, 'mount_point': None }) self.assertEquals( new_fstype, partition.get_effective_filesystem().fstype) def test_new_blockdevice_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) handler.update_filesystem({ 'system_id':
node.system_id, 'block_id': block_device.id, 'fstype': fstype, 'mount_point': None }) self.assertEquals( fstype, block_device.get_effective_filesystem().fstype) def test_new_partition_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) partition = factory.make_Partition(node=node) fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': partition.partition_table.block_device.id, 'partition_id': partition.id, 'fstype': fstype, 'mount_point': None }) self.assertEquals( fstype, partition.get_effective_filesystem().fstype) def test_delete_blockdevice_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.READY) block_device = factory.make_PhysicalBlockDevice(node=node) factory.make_Filesystem(block_device=block_device) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': block_device.id, 'fstype': '', 'mount_point': None }) self.assertEquals( None, block_device.get_effective_filesystem()) def test_delete_partition_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.READY) partition = factory.make_Partition(node=node) factory.make_Filesystem(partition=partition) handler.update_filesystem({ 'system_id': node.system_id, 'block_id': partition.partition_table.block_device.id, 'partition_id': partition.id, 'fstype': '', 'mount_point': None }) self.assertEquals( None, partition.get_effective_filesystem()) def test_update_disk_for_physical_block_device(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) new_name = factory.make_name("new") handler.update_disk({ 'system_id': node.system_id, 'block_id': block_device.id, 'name': new_name, }) self.assertEquals(new_name, reload_object(block_device).name) def test_update_disk_for_virtual_block_device(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_VirtualBlockDevice(node=node) new_name = factory.make_name("new") handler.update_disk({ 'system_id': node.system_id, 'block_id': block_device.id, 'name': new_name, }) self.assertEquals(new_name, reload_object(block_device).name) def test_delete_disk(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_PhysicalBlockDevice(node=node) handler.delete_disk({ 'system_id': node.system_id, 'block_id': block_device.id, }) self.assertIsNone(reload_object(block_device)) def test_delete_partition(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, 
status=NODE_STATUS.ALLOCATED) partition = factory.make_Partition(node=node) handler.delete_partition({ 'system_id': node.system_id, 'partition_id': partition.id, }) self.assertIsNone(reload_object(partition)) def test_delete_volume_group(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) volume_group = factory.make_FilesystemGroup( node=node, group_type=FILESYSTEM_GROUP_TYPE.LVM_VG) handler.delete_volume_group({ 'system_id': node.system_id, 'volume_group_id': volume_group.id, }) self.assertIsNone(reload_object(volume_group)) def test_delete_cache_set(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) cache_set = factory.make_CacheSet(node=node) handler.delete_cache_set({ 'system_id': node.system_id, 'cache_set_id': cache_set.id, }) self.assertIsNone(reload_object(cache_set)) def test_create_partition(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_BlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device, node=node) size = partition_table.block_device.size / 2 handler.create_partition({ 'system_id': node.system_id, 'block_id': partition_table.block_device_id, 'partition_size': size }) partition = partition_table.partitions.first() self.assertEquals( 2, Partition.objects.count()) self.assertEquals( human_readable_bytes( round_size_to_nearest_block( size, PARTITION_ALIGNMENT_SIZE, False)), human_readable_bytes(partition.size)) def test_create_partition_with_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node( interface=True, architecture=architecture, status=NODE_STATUS.ALLOCATED) block_device = factory.make_BlockDevice(node=node) partition_table = factory.make_PartitionTable( block_device=block_device, node=node) partition = partition_table.partitions.first() size = partition_table.block_device.size / 2 fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) mount_point = factory.make_absolute_path() handler.create_partition({ 'system_id': node.system_id, 'block_id': partition_table.block_device_id, 'partition_size': size, 'fstype': fstype, 'mount_point': mount_point, }) partition = partition_table.partitions.first() self.assertEquals( 2, Partition.objects.count()) self.assertEquals( human_readable_bytes( round_size_to_nearest_block( size, PARTITION_ALIGNMENT_SIZE, False)), human_readable_bytes(partition.size)) self.assertEquals( fstype, partition.get_effective_filesystem().fstype) self.assertEquals( mount_point, partition.get_effective_filesystem().mount_point) def test_create_cache_set_for_partition(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) partition = factory.make_Partition(node=node) handler.create_cache_set({ 'system_id': node.system_id, 'partition_id': partition.id }) cache_set = CacheSet.objects.get_cache_sets_for_node(node).first() self.assertIsNotNone(cache_set) self.assertEquals(partition, 
cache_set.get_filesystem().partition) def test_create_cache_set_for_block_device(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) block_device = factory.make_PhysicalBlockDevice(node=node) handler.create_cache_set({ 'system_id': node.system_id, 'block_id': block_device.id }) cache_set = CacheSet.objects.get_cache_sets_for_node(node).first() self.assertIsNotNone(cache_set) self.assertEquals( block_device.id, cache_set.get_filesystem().block_device.id) def test_create_bcache_for_partition(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) partition = factory.make_Partition(node=node) name = factory.make_name("bcache") cache_set = factory.make_CacheSet(node=node) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) handler.create_bcache({ 'system_id': node.system_id, 'partition_id': partition.id, 'block_id': partition.partition_table.block_device.id, 'name': name, 'cache_set': cache_set.id, 'cache_mode': cache_mode, }) bcache = Bcache.objects.filter_by_node(node).first() self.assertIsNotNone(bcache) self.assertEquals(name, bcache.name) self.assertEquals(cache_set, bcache.cache_set) self.assertEquals(cache_mode, bcache.cache_mode) self.assertEquals( partition, bcache.get_bcache_backing_filesystem().partition) def test_create_bcache_for_partition_with_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) partition = factory.make_Partition(node=node) name = factory.make_name("bcache") cache_set = factory.make_CacheSet(node=node) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) mount_point = factory.make_absolute_path() handler.create_bcache({ 'system_id': node.system_id, 'partition_id': partition.id, 'block_id': partition.partition_table.block_device.id, 'name': name, 'cache_set': cache_set.id, 'cache_mode': cache_mode, 'fstype': fstype, 'mount_point': mount_point, }) bcache = Bcache.objects.filter_by_node(node).first() self.assertIsNotNone(bcache) self.assertEquals(name, bcache.name) self.assertEquals(cache_set, bcache.cache_set) self.assertEquals(cache_mode, bcache.cache_mode) self.assertEquals( partition, bcache.get_bcache_backing_filesystem().partition) self.assertEquals( fstype, bcache.virtual_device.get_effective_filesystem().fstype) self.assertEquals( mount_point, bcache.virtual_device.get_effective_filesystem().mount_point) def test_create_bcache_for_block_device(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) block_device = factory.make_PhysicalBlockDevice(node=node) name = factory.make_name("bcache") cache_set = factory.make_CacheSet(node=node) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) handler.create_bcache({ 'system_id': node.system_id, 'block_id': block_device.id, 'name': name, 'cache_set': cache_set.id, 'cache_mode': cache_mode, }) bcache = Bcache.objects.filter_by_node(node).first() self.assertIsNotNone(bcache) self.assertEquals(name, bcache.name) self.assertEquals(cache_set, bcache.cache_set) self.assertEquals(cache_mode, bcache.cache_mode) self.assertEquals( block_device.id, 
bcache.get_bcache_backing_filesystem().block_device.id) def test_create_bcache_for_block_device_with_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) block_device = factory.make_PhysicalBlockDevice(node=node) name = factory.make_name("bcache") cache_set = factory.make_CacheSet(node=node) cache_mode = factory.pick_enum(CACHE_MODE_TYPE) fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) mount_point = factory.make_absolute_path() handler.create_bcache({ 'system_id': node.system_id, 'block_id': block_device.id, 'name': name, 'cache_set': cache_set.id, 'cache_mode': cache_mode, 'fstype': fstype, 'mount_point': mount_point, }) bcache = Bcache.objects.filter_by_node(node).first() self.assertIsNotNone(bcache) self.assertEquals(name, bcache.name) self.assertEquals(cache_set, bcache.cache_set) self.assertEquals(cache_mode, bcache.cache_mode) self.assertEquals( block_device.id, bcache.get_bcache_backing_filesystem().block_device.id) self.assertEquals( fstype, bcache.virtual_device.get_effective_filesystem().fstype) self.assertEquals( mount_point, bcache.virtual_device.get_effective_filesystem().mount_point) def test_create_raid(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) disk0 = factory.make_PhysicalBlockDevice(node=node) disk1 = factory.make_PhysicalBlockDevice(node=node) disk2 = factory.make_PhysicalBlockDevice(node=node) spare_disk = factory.make_PhysicalBlockDevice(node=node) name = factory.make_name("md") handler.create_raid({ 'system_id': node.system_id, 'name': name, 'level': 'raid-5', 'block_devices': [disk0.id, disk1.id, disk2.id], 'spare_devices': [spare_disk.id], }) raid = RAID.objects.filter_by_node(node).first() self.assertIsNotNone(raid) self.assertEquals(name, raid.name) self.assertEquals("raid-5", raid.group_type) def test_create_raid_with_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) disk0 = factory.make_PhysicalBlockDevice(node=node) disk1 = factory.make_PhysicalBlockDevice(node=node) disk2 = factory.make_PhysicalBlockDevice(node=node) spare_disk = factory.make_PhysicalBlockDevice(node=node) name = factory.make_name("md") fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) mount_point = factory.make_absolute_path() handler.create_raid({ 'system_id': node.system_id, 'name': name, 'level': 'raid-5', 'block_devices': [disk0.id, disk1.id, disk2.id], 'spare_devices': [spare_disk.id], 'fstype': fstype, 'mount_point': mount_point, }) raid = RAID.objects.filter_by_node(node).first() self.assertIsNotNone(raid) self.assertEquals(name, raid.name) self.assertEquals("raid-5", raid.group_type) self.assertEquals( fstype, raid.virtual_device.get_effective_filesystem().fstype) self.assertEquals( mount_point, raid.virtual_device.get_effective_filesystem().mount_point) def test_create_volume_group(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) disk = factory.make_PhysicalBlockDevice(node=node) partition = factory.make_Partition(node=node) name = factory.make_name("vg") handler.create_volume_group({ 'system_id': node.system_id, 
'name': name, 'block_devices': [disk.id], 'partitions': [partition.id], }) volume_group = VolumeGroup.objects.filter_by_node(node).first() self.assertIsNotNone(volume_group) self.assertEquals(name, volume_group.name) def test_create_logical_volume(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) volume_group = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, node=node) name = factory.make_name("lv") size = volume_group.get_lvm_free_space() handler.create_logical_volume({ 'system_id': node.system_id, 'name': name, 'volume_group_id': volume_group.id, 'size': size, }) logical_volume = volume_group.virtual_devices.first() self.assertIsNotNone(logical_volume) self.assertEquals( "%s-%s" % (volume_group.name, name), logical_volume.get_name()) self.assertEquals(size, logical_volume.size) def test_create_logical_volume_with_filesystem(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) volume_group = factory.make_FilesystemGroup( group_type=FILESYSTEM_GROUP_TYPE.LVM_VG, node=node) name = factory.make_name("lv") size = volume_group.get_lvm_free_space() fstype = factory.pick_choice(FILESYSTEM_FORMAT_TYPE_CHOICES) mount_point = factory.make_absolute_path() handler.create_logical_volume({ 'system_id': node.system_id, 'name': name, 'volume_group_id': volume_group.id, 'size': size, 'fstype': fstype, 'mount_point': mount_point, }) logical_volume = volume_group.virtual_devices.first() self.assertIsNotNone(logical_volume) self.assertEquals( "%s-%s" % (volume_group.name, name), logical_volume.get_name()) self.assertEquals(size, logical_volume.size) self.assertEquals( fstype, logical_volume.get_effective_filesystem().fstype) self.assertEquals( mount_point, logical_volume.get_effective_filesystem().mount_point) def test_set_boot_disk(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) boot_disk = factory.make_PhysicalBlockDevice(node=node) handler.set_boot_disk({ 'system_id': node.system_id, 'block_id': boot_disk.id, }) self.assertEquals(boot_disk.id, reload_object(node).get_boot_disk().id) def test_set_boot_disk_raises_error_for_none_physical(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) boot_disk = factory.make_VirtualBlockDevice(node=node) error = self.assertRaises(HandlerError, handler.set_boot_disk, { 'system_id': node.system_id, 'block_id': boot_disk.id, }) self.assertEquals( error.message, "Only a physical disk can be set as the boot disk.") def test_update_raise_HandlerError_if_tag_has_definition(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) tag = factory.make_Tag() node_data = self.dehydrate_node(node, handler) node_data["tags"].append(tag.name) self.assertRaises(HandlerError, handler.update, node_data) def test_update_tags_on_block_device(self): user = factory.make_admin() handler = NodeHandler(user, {}) architecture = make_usable_architecture(self) node = factory.make_Node(interface=True, architecture=architecture) block_device = 
factory.make_PhysicalBlockDevice(node=node) tags = [ factory.make_name("tag") for _ in range(3) ] handler.update_disk_tags({ 'system_id': node.system_id, 'block_id': block_device.id, 'tags': tags }) # Refresh the block_device to check that the values were updated block_device = BlockDevice.objects.get(id=block_device.id) self.assertItemsEqual( tags, block_device.tags) def test_missing_action_raises_error(self): user = factory.make_User() node = factory.make_Node() handler = NodeHandler(user, {}) self.assertRaises( NodeActionError, handler.action, {"system_id": node.system_id}) def test_invalid_action_raises_error(self): user = factory.make_User() node = factory.make_Node() handler = NodeHandler(user, {}) self.assertRaises( NodeActionError, handler.action, {"system_id": node.system_id, "action": "unknown"}) def test_not_available_action_raises_error(self): user = factory.make_User() node = factory.make_Node(status=NODE_STATUS.DEPLOYED, owner=user) handler = NodeHandler(user, {}) self.assertRaises( NodeActionError, handler.action, {"system_id": node.system_id, "action": "unknown"}) def test_action_performs_action(self): admin = factory.make_admin() factory.make_SSHKey(admin) node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=admin) handler = NodeHandler(admin, {}) handler.action({"system_id": node.system_id, "action": "delete"}) self.assertIsNone(reload_object(node)) def test_action_performs_action_passing_extra(self): user = factory.make_User() factory.make_SSHKey(user) self.patch(Node, 'on_network').return_value = True node = factory.make_Node(status=NODE_STATUS.ALLOCATED, owner=user) osystem = make_usable_osystem(self) handler = NodeHandler(user, {}) handler.action({ "system_id": node.system_id, "action": "deploy", "extra": { "osystem": osystem["name"], "distro_series": osystem["releases"][0]["name"], }}) node = reload_object(node) self.expectThat(node.osystem, Equals(osystem["name"])) self.expectThat( node.distro_series, Equals(osystem["releases"][0]["name"])) def test_create_physical_creates_interface(self): user = factory.make_admin() node = factory.make_Node(interface=False) handler = NodeHandler(user, {}) name = factory.make_name("eth") mac_address = factory.make_mac_address() fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() handler.create_physical({ "system_id": node.system_id, "name": name, "mac_address": mac_address, "vlan": vlan.id, }) self.assertEquals( 1, node.interface_set.count(), "Should have one interface on the node.") def test_create_physical_creates_link_auto(self): user = factory.make_admin() node = factory.make_Node(interface=False) handler = NodeHandler(user, {}) name = factory.make_name("eth") mac_address = factory.make_mac_address() fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() subnet = factory.make_Subnet(vlan=vlan) handler.create_physical({ "system_id": node.system_id, "name": name, "mac_address": mac_address, "vlan": vlan.id, "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": subnet.id, }) new_interface = node.interface_set.first() self.assertIsNotNone(new_interface) auto_ip = new_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.AUTO, subnet=subnet) self.assertIsNotNone(auto_ip) def test_create_physical_creates_link_up(self): user = factory.make_admin() node = factory.make_Node(interface=False) handler = NodeHandler(user, {}) name = factory.make_name("eth") mac_address = factory.make_mac_address() fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() handler.create_physical({ "system_id": node.system_id, 
"name": name, "mac_address": mac_address, "vlan": vlan.id, "mode": INTERFACE_LINK_TYPE.LINK_UP, }) new_interface = node.interface_set.first() self.assertIsNotNone(new_interface) link_up_ip = new_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, subnet=None) self.assertIsNotNone(link_up_ip) def test_create_physical_creates_link_up_with_subnet(self): user = factory.make_admin() node = factory.make_Node(interface=False) handler = NodeHandler(user, {}) name = factory.make_name("eth") mac_address = factory.make_mac_address() fabric = factory.make_Fabric() vlan = fabric.get_default_vlan() subnet = factory.make_Subnet(vlan=vlan) handler.create_physical({ "system_id": node.system_id, "name": name, "mac_address": mac_address, "vlan": vlan.id, "mode": INTERFACE_LINK_TYPE.LINK_UP, "subnet": subnet.id, }) new_interface = node.interface_set.first() self.assertIsNotNone(new_interface) link_up_ip = new_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=None, subnet=subnet) self.assertIsNotNone(link_up_ip) def test_create_vlan_creates_vlan(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_vlan = factory.make_VLAN(fabric=interface.vlan.fabric) handler.create_vlan({ "system_id": node.system_id, "parent": interface.id, "vlan": new_vlan.id, }) vlan_interface = get_one( Interface.objects.filter( node=node, type=INTERFACE_TYPE.VLAN, parents=interface)) self.assertIsNotNone(vlan_interface) def test_create_vlan_creates_link_auto(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_vlan = factory.make_VLAN(fabric=interface.vlan.fabric) new_subnet = factory.make_Subnet(vlan=new_vlan) handler.create_vlan({ "system_id": node.system_id, "parent": interface.id, "vlan": new_vlan.id, "mode": INTERFACE_LINK_TYPE.AUTO, "subnet": new_subnet.id, }) vlan_interface = get_one( Interface.objects.filter( node=node, type=INTERFACE_TYPE.VLAN, parents=interface)) self.assertIsNotNone(vlan_interface) auto_ip = vlan_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.AUTO, subnet=new_subnet) self.assertIsNotNone(auto_ip) def test_create_vlan_creates_link_up(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_vlan = factory.make_VLAN(fabric=interface.vlan.fabric) handler.create_vlan({ "system_id": node.system_id, "parent": interface.id, "vlan": new_vlan.id, "mode": INTERFACE_LINK_TYPE.LINK_UP, }) vlan_interface = get_one( Interface.objects.filter( node=node, type=INTERFACE_TYPE.VLAN, parents=interface)) self.assertIsNotNone(vlan_interface) link_up_ip = vlan_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=None) self.assertIsNotNone(link_up_ip) def test_create_vlan_creates_link_up_with_subnet(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_vlan = factory.make_VLAN(fabric=interface.vlan.fabric) new_subnet = factory.make_Subnet(vlan=new_vlan) handler.create_vlan({ "system_id": node.system_id, "parent": interface.id, "vlan": new_vlan.id, "mode": INTERFACE_LINK_TYPE.LINK_UP, "subnet": new_subnet.id, }) vlan_interface = get_one( Interface.objects.filter( node=node, type=INTERFACE_TYPE.VLAN, parents=interface)) 
self.assertIsNotNone(vlan_interface) link_up_ip = vlan_interface.ip_addresses.filter( alloc_type=IPADDRESS_TYPE.STICKY, ip=None, subnet=new_subnet) self.assertIsNotNone(link_up_ip) def test_create_bond_creates_bond(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) nic1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) nic2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=nic1.vlan) bond_mode = factory.pick_enum(BOND_MODE) name = factory.make_name("bond") handler.create_bond({ "system_id": node.system_id, "name": name, "parents": [nic1.id, nic2.id], "mac_address": "%s" % nic1.mac_address, "vlan": nic1.vlan.id, "bond_mode": bond_mode }) bond_interface = get_one( Interface.objects.filter( node=node, type=INTERFACE_TYPE.BOND, parents=nic1, name=name, vlan=nic1.vlan)) self.assertIsNotNone(bond_interface) self.assertEquals(bond_mode, bond_interface.params["bond_mode"]) def test_create_bond_raises_ValidationError(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) nic1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) nic2 = factory.make_Interface( INTERFACE_TYPE.PHYSICAL, node=node, vlan=nic1.vlan) with ExpectedException(ValidationError): handler.create_bond({ "system_id": node.system_id, "parents": [nic1.id, nic2.id], }) def test_update_interface(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_name = factory.make_name("name") new_fabric = factory.make_Fabric() new_vlan = new_fabric.get_default_vlan() handler.update_interface({ "system_id": node.system_id, "interface_id": interface.id, "name": new_name, "vlan": new_vlan.id, }) interface = reload_object(interface) self.assertEquals(new_name, interface.name) self.assertEquals(new_vlan, interface.vlan) def test_update_interface_raises_ValidationError(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) new_name = factory.make_name("name") with ExpectedException(ValidationError): handler.update_interface({ "system_id": node.system_id, "interface_id": interface.id, "name": new_name, "vlan": random.randint(1000, 5000), }) def test_delete_interface(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) handler.delete_interface({ "system_id": node.system_id, "interface_id": interface.id, }) self.assertIsNone(reload_object(interface)) def test_link_subnet_calls_update_link_by_id_if_link_id(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node) subnet = factory.make_Subnet() link_id = random.randint(0, 100) mode = factory.pick_enum(INTERFACE_LINK_TYPE) ip_address = factory.make_ip_address() self.patch_autospec(Interface, "update_link_by_id") handler.link_subnet({ "system_id": node.system_id, "interface_id": interface.id, "link_id": link_id, "subnet": subnet.id, "mode": mode, "ip_address": ip_address, }) self.assertThat( Interface.update_link_by_id, MockCalledOnceWith( ANY, link_id, mode, subnet, ip_address=ip_address)) def test_link_subnet_calls_link_subnet_if_not_link_id(self): user = factory.make_admin() node = factory.make_Node() handler = NodeHandler(user, {}) interface = 
factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node)
        subnet = factory.make_Subnet()
        mode = factory.pick_enum(INTERFACE_LINK_TYPE)
        ip_address = factory.make_ip_address()
        self.patch_autospec(Interface, "link_subnet")
        handler.link_subnet({
            "system_id": node.system_id,
            "interface_id": interface.id,
            "subnet": subnet.id,
            "mode": mode,
            "ip_address": ip_address,
            })
        self.assertThat(
            Interface.link_subnet,
            MockCalledOnceWith(
                ANY, mode, subnet, ip_address=ip_address))

    def test_unlink_subnet(self):
        user = factory.make_admin()
        node = factory.make_Node()
        handler = NodeHandler(user, {})
        interface = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, node=node)
        link_ip = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.AUTO, ip="", interface=interface)
        # Unlink the IP link itself rather than deleting the whole
        # interface, so only the link should go away.
        handler.unlink_subnet({
            "system_id": node.system_id,
            "interface_id": interface.id,
            "link_id": link_ip.id,
            })
        self.assertIsNone(reload_object(link_ip))


class TestNodeHandlerCheckPower(MAASTransactionServerTestCase):

    @asynchronous
    def make_node(self, power_type="ipmi"):
        """Makes a node that is committed in the database."""
        return deferToDatabase(
            transactional(factory.make_Node), power_type=power_type)

    def make_handler_with_user(self):
        user = factory.make_User()
        return NodeHandler(user, {})

    def call_check_power(self, node):
        params = {"system_id": node.system_id}
        handler = self.make_handler_with_user()
        return handler.check_power(params).wait()

    def prepare_rpc(self, nodegroup, side_effect=None):
        self.useFixture(RegionEventLoopFixture("rpc"))
        self.useFixture(RunningEventLoopFixture())
        self.rpc_fixture = self.useFixture(
            MockLiveRegionToClusterRPCFixture())
        protocol = self.rpc_fixture.makeCluster(nodegroup, PowerQuery)
        if side_effect is None:
            protocol.PowerQuery.side_effect = always_succeed_with({})
        else:
            protocol.PowerQuery.side_effect = side_effect

    def assertCheckPower(self, node, state):
        result_state = self.call_check_power(node)
        self.expectThat(result_state, Equals(state))
        self.expectThat(reload_object(node).power_state, Equals(state))

    def test__raises_HandlerError_when_NoConnectionsAvailable(self):
        node = self.make_node().wait()
        user = factory.make_User()
        handler = NodeHandler(user, {})
        mock_getClientFor = self.patch(node_module, "getClientFor")
        mock_getClientFor.side_effect = NoConnectionsAvailable()
        with ExpectedException(HandlerError):
            handler.check_power({"system_id": node.system_id}).wait()

    def test__sets_power_state_to_unknown_when_no_power_type(self):
        node = self.make_node(power_type="").wait()
        self.prepare_rpc(
            node.nodegroup,
            side_effect=always_succeed_with({"state": "on"}))
        self.assertCheckPower(node, "unknown")

    def test__sets_power_state_to_unknown_when_power_cannot_be_started(self):
        node = self.make_node(power_type="ether_wake").wait()
        self.prepare_rpc(
            node.nodegroup,
            side_effect=always_succeed_with({"state": "on"}))
        self.assertCheckPower(node, "unknown")

    def test__sets_power_state_to_PowerQuery_result(self):
        node = self.make_node().wait()
        power_state = random.choice(["on", "off"])
        self.prepare_rpc(
            node.nodegroup,
            side_effect=always_succeed_with({"state": power_state}))
        self.assertCheckPower(node, power_state)

    def test__sets_power_state_to_error_on_time_out(self):
        node = self.make_node().wait()
        getClientFor = self.patch(node_module, 'getClientFor')
        getClientFor.return_value = sentinel.client
        deferWithTimeout = self.patch(node_module, 'deferWithTimeout')
        deferWithTimeout.side_effect = always_fail_with(CancelledError())
        self.assertCheckPower(node, "error")

    def test__sets_power_state_to_unknown_on_NotImplementedError(self):
        node = self.make_node().wait()
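        # NotImplementedError from PowerQuery is taken to mean the power
        # driver cannot report a state at all, hence "unknown" rather than
        # "error" below (an inference from this test, not from the power
        # driver documentation).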
        self.prepare_rpc(node.nodegroup, side_effect=NotImplementedError())
        self.assertCheckPower(node, "unknown")

    def test__sets_power_state_to_error_on_PowerActionFail(self):
        node = self.make_node().wait()
        self.prepare_rpc(node.nodegroup, side_effect=PowerActionFail())
        self.assertCheckPower(node, "error")
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_space.py0000644000000000000000000000432013056115004025430 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for `maasserver.websockets.handlers.space`"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = []

from maasserver.models.space import Space
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.websockets.handlers.space import SpaceHandler
from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime


class TestSpaceHandler(MAASServerTestCase):

    def dehydrate_space(self, space):
        data = {
            "id": space.id,
            "name": space.get_name(),
            "updated": dehydrate_datetime(space.updated),
            "created": dehydrate_datetime(space.created),
            "subnet_ids": [
                subnet.id
                for subnet in space.subnet_set.all()
            ],
            "nodes_count": len({
                interface.node_id
                for subnet in space.subnet_set.all()
                for ipaddress in subnet.staticipaddress_set.all()
                for interface in ipaddress.interface_set.all()
                if interface.node_id is not None
            }),
        }
        return data

    def test_get(self):
        user = factory.make_User()
        handler = SpaceHandler(user, {})
        space = factory.make_Space()
        for _ in range(3):
            node = factory.make_Node(interface=True)
            interface = node.get_boot_interface()
            subnet = factory.make_Subnet(space=space, vlan=interface.vlan)
            factory.make_StaticIPAddress(subnet=subnet, interface=interface)
        self.assertEquals(
            self.dehydrate_space(space),
            handler.get({"id": space.id}))

    def test_list(self):
        user = factory.make_User()
        handler = SpaceHandler(user, {})
        factory.make_Space()
        expected_spaces = [
            self.dehydrate_space(space)
            for space in Space.objects.all()
        ]
        self.assertItemsEqual(
            expected_spaces,
            handler.list({}))
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_subnet.py0000644000000000000000000000432413056115004025641 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `maasserver.websockets.handlers.subnet`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.models.subnet import Subnet from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.subnet import SubnetHandler from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from provisioningserver.utils.network import IPRangeStatistics from testtools.matchers import Equals class TestSubnetHandler(MAASServerTestCase): def dehydrate_subnet(self, subnet, for_list=False): data = { "id": subnet.id, "updated": dehydrate_datetime(subnet.updated), "created": dehydrate_datetime(subnet.created), "name": subnet.name, "dns_servers": [server for server in subnet.dns_servers], "vlan": subnet.vlan_id, "space": subnet.space_id, "cidr": subnet.cidr, "gateway_ip": subnet.gateway_ip, } full_range = subnet.get_iprange_usage() metadata = IPRangeStatistics(full_range) data['statistics'] = metadata.render_json() if not for_list: data["ip_addresses"] = subnet.render_json_for_related_ips( with_username=True, with_node_summary=True) return data def test_get(self): user = factory.make_User() handler = SubnetHandler(user, {}) subnet = factory.make_Subnet() expected_data = self.dehydrate_subnet(subnet) result = handler.get({"id": subnet.id}) self.assertThat(result, Equals(expected_data)) def test_list(self): user = factory.make_User() handler = SubnetHandler(user, {}) factory.make_Subnet() expected_subnets = [ self.dehydrate_subnet(subnet, for_list=True) for subnet in Subnet.objects.all() ] self.assertItemsEqual( expected_subnets, handler.list({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_tag.py0000644000000000000000000000310613056115004025111 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.tag`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.models.tag import Tag from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.tag import TagHandler from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime class TestTagHandler(MAASServerTestCase): def dehydrate_tag(self, tag): data = { "id": tag.id, "name": tag.name, "definition": tag.definition, "comment": tag.comment, "kernel_opts": tag.kernel_opts, "updated": dehydrate_datetime(tag.updated), "created": dehydrate_datetime(tag.created), } return data def test_get(self): user = factory.make_User() handler = TagHandler(user, {}) tag = factory.make_Tag() self.assertEquals( self.dehydrate_tag(tag), handler.get({"id": tag.id})) def test_list(self): user = factory.make_User() handler = TagHandler(user, {}) factory.make_Tag() expected_tags = [ self.dehydrate_tag(tag) for tag in Tag.objects.all() ] self.assertItemsEqual( expected_tags, handler.list({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_timestampedmodel.py0000644000000000000000000000345213056115004027677 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.websockets.handlers.timestampedmodel`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import datetime from maasserver.websockets.handlers.timestampedmodel import ( TimestampedModelHandler, ) from maastesting.testcase import MAASTestCase class TestTimeStampedModelHandler(MAASTestCase): def test_has_abstract_set_to_true(self): handler = TimestampedModelHandler(None, {}) self.assertTrue(handler._meta.abstract) def test_adds_created_and_updated_to_non_changeable(self): handler = TimestampedModelHandler(None, {}) self.assertItemsEqual( ["created", "updated"], handler._meta.non_changeable) def test_doesnt_overwrite_other_non_changeable_fields(self): class TestHandler(TimestampedModelHandler): class Meta: non_changeable = ["other", "extra"] handler = TestHandler(None, {}) self.assertItemsEqual( ["other", "extra", "created", "updated"], handler._meta.non_changeable) def test_dehydrate_created_converts_datetime_to_string(self): now = datetime.now() handler = TimestampedModelHandler(None, {}) self.assertEquals( now.strftime('%a, %d %b. %Y %H:%M:%S'), handler.dehydrate_created(now)) def test_dehydrate_updated_converts_datetime_to_string(self): now = datetime.now() handler = TimestampedModelHandler(None, {}) self.assertEquals( now.strftime('%a, %d %b. %Y %H:%M:%S'), handler.dehydrate_updated(now)) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_user.py0000644000000000000000000000523413056115004025320 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.user`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib.auth.models import User from maasserver.models.user import SYSTEM_USERS from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.base import HandlerDoesNotExistError from maasserver.websockets.handlers.user import UserHandler class TestUserHandler(MAASServerTestCase): def dehydrate_user(self, user, sshkeys_count=0): data = { "id": user.id, "username": user.username, "first_name": user.first_name, "last_name": user.last_name, "email": user.email, "is_superuser": user.is_superuser, "sshkeys_count": sshkeys_count, } return data def test_get_for_admin(self): user = factory.make_User() admin = factory.make_admin() handler = UserHandler(admin, {}) self.assertEquals( self.dehydrate_user(user), handler.get({"id": user.id})) def test_get_for_user_getting_self(self): user = factory.make_User() handler = UserHandler(user, {}) self.assertEquals( self.dehydrate_user(user), handler.get({"id": user.id})) def test_get_for_user_not_getting_self(self): user = factory.make_User() other_user = factory.make_User() handler = UserHandler(user, {}) self.assertRaises( HandlerDoesNotExistError, handler.get, {"id": other_user.id}) def test_list_for_admin(self): admin = factory.make_admin() handler = UserHandler(admin, {}) factory.make_User() expected_users = [ self.dehydrate_user(user) for user in User.objects.exclude(username__in=SYSTEM_USERS) ] self.assertItemsEqual( expected_users, handler.list({})) def test_list_for_standard_user(self): user = factory.make_User() handler = UserHandler(user, {}) # Other users for _ in range(3): factory.make_User() 
self.assertItemsEqual( [self.dehydrate_user(user)], handler.list({})) def test_auth_user(self): user = factory.make_User() handler = UserHandler(user, {}) self.assertEquals( self.dehydrate_user(user), handler.auth_user({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_vlan.py0000644000000000000000000000420613056115004025300 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.handlers.vlan`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.models.vlan import VLAN from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maasserver.websockets.handlers.vlan import VLANHandler class TestVLANHandler(MAASServerTestCase): def dehydrate_vlan(self, vlan): data = { "id": vlan.id, "name": vlan.get_name(), "vid": vlan.vid, "mtu": vlan.mtu, "fabric": vlan.fabric_id, "updated": dehydrate_datetime(vlan.updated), "created": dehydrate_datetime(vlan.created), "subnet_ids": [ subnet.id for subnet in vlan.subnet_set.all() ], "nodes_count": len({ interface.node_id for interface in vlan.interface_set.all() if interface.node_id is not None }), } return data def test_get(self): user = factory.make_User() handler = VLANHandler(user, {}) vlan = factory.make_VLAN() for _ in range(3): factory.make_Subnet(vlan=vlan) for _ in range(3): node = factory.make_Node(interface=True) interface = node.get_boot_interface() interface.vlan = vlan interface.save() self.assertEquals( self.dehydrate_vlan(vlan), handler.get({"id": vlan.id})) def test_list(self): user = factory.make_User() handler = VLANHandler(user, {}) factory.make_VLAN() expected_vlans = [ self.dehydrate_vlan(vlan) for vlan in VLAN.objects.all() ] self.assertItemsEqual( expected_vlans, handler.list({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/handlers/tests/test_zone.py0000644000000000000000000000302313056115004025307 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maasserver.websockets.handlers.zone`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.models.zone import Zone from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets.handlers.timestampedmodel import dehydrate_datetime from maasserver.websockets.handlers.zone import ZoneHandler class TestZoneHandler(MAASServerTestCase): def dehydrate_zone(self, zone): data = { "id": zone.id, "name": zone.name, "description": zone.description, "updated": dehydrate_datetime(zone.updated), "created": dehydrate_datetime(zone.created), } return data def test_get(self): user = factory.make_User() handler = ZoneHandler(user, {}) zone = factory.make_Zone() self.assertEquals( self.dehydrate_zone(zone), handler.get({"id": zone.id})) def test_list(self): user = factory.make_User() handler = ZoneHandler(user, {}) factory.make_Zone() expected_zones = [ self.dehydrate_zone(zone) for zone in Zone.objects.all() ] self.assertItemsEqual( expected_zones, handler.list({})) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/__init__.py0000644000000000000000000000000013056115004023224 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/test_base.py0000644000000000000000000007175613056115004023470 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.base`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from operator import attrgetter from django.db.models.query import QuerySet from maasserver.forms import ( AdminNodeForm, AdminNodeWithMACAddressesForm, ) from maasserver.models.node import Node from maasserver.models.zone import Zone from maasserver.testing.architecture import make_usable_architecture from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maasserver.websockets import base from maasserver.websockets.base import ( Handler, HandlerDoesNotExistError, HandlerNoSuchMethodError, HandlerPKError, HandlerValidationError, ) from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, MagicMock, sentinel, ) from provisioningserver.utils.twisted import asynchronous from testtools.matchers import ( Equals, Is, IsInstance, MatchesStructure, ) from testtools.testcase import ExpectedException def make_handler(name, **kwargs): meta = type(b"Meta", (object,), kwargs) return object.__new__( type(name.encode("utf-8"), (Handler,), {"Meta": meta})) class TestHandlerMeta(MAASTestCase): def test_creates_handler_with_default_meta(self): handler = Handler(None, {}) self.assertThat(handler._meta, MatchesStructure( abstract=Is(False), allowed_methods=Equals( ["list", "get", "create", "update", "delete", "set_active"]), handler_name=Equals(""), object_class=Is(None), queryset=Is(None), pk=Equals("id"), fields=Is(None), exclude=Is(None), list_fields=Is(None), list_exclude=Is(None), non_changeable=Is(None), form=Is(None))) def test_creates_handler_with_options(self): handler = make_handler( "TestHandler", abstract=True, allowed_methods=["list"], handler_name="testing", queryset=Node.objects.all(), pk="system_id", fields=["hostname", 
"distro_series"], exclude=["system_id"], list_fields=["hostname"], list_exclude=["hostname"], non_changeable=["system_id"], form=sentinel.form) self.assertThat(handler._meta, MatchesStructure( abstract=Is(True), allowed_methods=Equals(["list"]), handler_name=Equals("testing"), object_class=Is(Node), queryset=IsInstance(QuerySet), pk=Equals("system_id"), fields=Equals(["hostname", "distro_series"]), exclude=Equals(["system_id"]), list_fields=Equals(["hostname"]), list_exclude=Equals(["hostname"]), non_changeable=Equals(["system_id"]), form=Is(sentinel.form))) def test_sets_handler_name_based_on_class_name(self): names = [ ("TestHandler", "test"), ("TestHandlerNew", "testnew"), ("AlwaysLowerHandler", "alwayslower") ] for class_name, handler_name in names: obj = make_handler(class_name) self.expectThat(obj._meta.handler_name, Equals(handler_name)) def test_sets_object_class_based_on_queryset(self): handler = make_handler( "TestHandler", queryset=Node.objects.all()) self.assertIs(Node, handler._meta.object_class) def test_copy_fields_and_excludes_to_list_fields_and_list_excludes(self): fields = [factory.make_name("field") for _ in range(3)] exclude = [factory.make_name("field") for _ in range(3)] handler = make_handler( "TestHandler", fields=fields, exclude=exclude) self.assertEquals(fields, handler._meta.list_fields) self.assertEquals(exclude, handler._meta.list_exclude) def test_copy_fields_and_excludes_doesnt_overwrite_lists_if_set(self): fields = [factory.make_name("field") for _ in range(3)] exclude = [factory.make_name("field") for _ in range(3)] list_fields = [factory.make_name("field") for _ in range(3)] list_exclude = [factory.make_name("field") for _ in range(3)] handler = make_handler( "TestHandler", fields=fields, exclude=exclude, list_fields=list_fields, list_exclude=list_exclude) self.assertEquals(list_fields, handler._meta.list_fields) self.assertEquals(list_exclude, handler._meta.list_exclude) class TestHandler(MAASServerTestCase): def make_nodes_handler(self, **kwargs): kwargs["queryset"] = Node.objects.all() kwargs["object_class"] = Node kwargs["pk"] = "system_id" kwargs["pk_type"] = unicode handler = make_handler("TestNodesHandler", **kwargs) handler.__init__(factory.make_User(), {}) return handler def make_mock_node_with_fields(self, **kwargs): return object.__new__( type(b"MockNode", (object,), kwargs)) def test_full_dehydrate_only_includes_allowed_fields(self): handler = self.make_nodes_handler(fields=["hostname", "power_type"]) node = factory.make_Node() self.assertEquals({ "hostname": node.hostname, "power_type": node.power_type, }, handler.full_dehydrate(node)) def test_full_dehydrate_excludes_fields(self): handler = self.make_nodes_handler( fields=["hostname", "power_type"], exclude=["power_type"]) node = factory.make_Node() self.assertEquals({ "hostname": node.hostname, }, handler.full_dehydrate(node)) def test_full_dehydrate_only_includes_list_fields_when_for_list(self): handler = self.make_nodes_handler( list_fields=["power_type", "power_state"]) node = factory.make_Node() self.assertEquals({ "power_type": node.power_type, "power_state": node.power_state, }, handler.full_dehydrate(node, for_list=True)) def test_full_dehydrate_excludes_list_fields_when_for_list(self): handler = self.make_nodes_handler( list_fields=["power_type", "power_state"], list_exclude=["power_type"]) node = factory.make_Node() self.assertEquals({ "power_state": node.power_state, }, handler.full_dehydrate(node, for_list=True)) def 
test_full_dehydrate_calls_field_dehydrate_method_if_exists(self): handler = self.make_nodes_handler(fields=["hostname"]) mock_dehydrate_hostname = self.patch( handler, "dehydrate_hostname") mock_dehydrate_hostname.return_value = sentinel.hostname node = factory.make_Node() self.expectThat({ "hostname": sentinel.hostname, }, Equals(handler.full_dehydrate(node))) self.expectThat( mock_dehydrate_hostname, MockCalledOnceWith(node.hostname)) def test_full_dehydrate_calls_final_dehydrate_method(self): handler = self.make_nodes_handler(fields=["hostname"]) mock_dehydrate = self.patch_autospec(handler, "dehydrate") mock_dehydrate.return_value = sentinel.final_dehydrate node = factory.make_Node() self.expectThat( sentinel.final_dehydrate, Equals(handler.full_dehydrate(node))) self.expectThat( mock_dehydrate, MockCalledOnceWith( node, {"hostname": node.hostname}, for_list=False)) def test_dehydrate_does_nothing(self): handler = self.make_nodes_handler() self.assertEquals( sentinel.nothing, handler.dehydrate(sentinel.obj, sentinel.nothing)) def test_full_hydrate_only_doesnt_set_primary_key_field(self): system_id = factory.make_name("system_id") hostname = factory.make_name("hostname") handler = self.make_nodes_handler( fields=["system_id", "hostname"]) node = self.make_mock_node_with_fields( system_id=system_id, hostname=factory.make_name("hostname")) handler.full_hydrate(node, { "system_id": factory.make_name("system_id"), "hostname": hostname, }) self.expectThat(system_id, Equals(node.system_id)) self.expectThat(hostname, Equals(node.hostname)) def test_full_hydrate_only_sets_allowed_fields(self): hostname = factory.make_name("hostname") power_state = "on" handler = self.make_nodes_handler(fields=["hostname", "power_state"]) node = self.make_mock_node_with_fields( hostname=factory.make_name("hostname"), power_state="off", power_type="ipmi") handler.full_hydrate(node, { "hostname": hostname, "power_state": power_state, "power_type": "etherwake", }) self.expectThat(hostname, Equals(node.hostname)) self.expectThat(power_state, Equals(node.power_state)) self.expectThat("ipmi", Equals(node.power_type)) def test_full_hydrate_only_sets_non_excluded_fields(self): hostname = factory.make_name("hostname") handler = self.make_nodes_handler( fields=["hostname", "power_state"], exclude=["power_state"]) node = self.make_mock_node_with_fields( hostname=factory.make_name("hostname"), power_state="off", power_type="ipmi") handler.full_hydrate(node, { "hostname": hostname, "power_state": "on", "power_type": "etherwake", }) self.expectThat(hostname, Equals(node.hostname)) self.expectThat("off", Equals(node.power_state)) self.expectThat("ipmi", Equals(node.power_type)) def test_full_hydrate_only_doesnt_set_fields_not_allowed_to_change(self): hostname = factory.make_name("hostname") handler = self.make_nodes_handler( fields=["hostname", "power_state"], non_changeable=["power_state"]) node = self.make_mock_node_with_fields( hostname=factory.make_name("hostname"), power_state="off", power_type="ipmi") handler.full_hydrate(node, { "hostname": hostname, "power_state": "on", "power_type": "etherwake", }) self.expectThat(hostname, Equals(node.hostname)) self.expectThat("off", Equals(node.power_state)) self.expectThat("ipmi", Equals(node.power_type)) def test_full_hydrate_calls_fields_hydrate_method_if_present(self): call_hostname = factory.make_name("hostname") hostname = factory.make_name("hostname") handler = self.make_nodes_handler(fields=["hostname"]) node = self.make_mock_node_with_fields( 
hostname=factory.make_name("hostname")) mock_hydrate_hostname = self.patch(handler, "hydrate_hostname") mock_hydrate_hostname.return_value = hostname handler.full_hydrate(node, { "hostname": call_hostname, }) self.expectThat(hostname, Equals(node.hostname)) self.expectThat( mock_hydrate_hostname, MockCalledOnceWith(call_hostname)) def test_full_hydrate_calls_final_hydrate_method(self): hostname = factory.make_name("hostname") handler = self.make_nodes_handler(fields=["hostname"]) node = self.make_mock_node_with_fields( hostname=factory.make_name("hostname")) mock_hydrate = self.patch_autospec(handler, "hydrate") mock_hydrate.return_value = sentinel.final_hydrate self.expectThat( sentinel.final_hydrate, Equals( handler.full_hydrate(node, { "hostname": hostname, }))) self.expectThat( mock_hydrate, MockCalledOnceWith( node, {"hostname": hostname})) def test_hydrate_does_nothing(self): handler = self.make_nodes_handler() self.assertEquals( sentinel.obj, handler.hydrate(sentinel.obj, sentinel.nothing)) def test_get_object_raises_HandlerPKError(self): handler = self.make_nodes_handler() self.assertRaises( HandlerPKError, handler.get_object, {"host": "test"}) def test_get_object_raises_HandlerDoesNotExistError(self): handler = self.make_nodes_handler() self.assertRaises( HandlerDoesNotExistError, handler.get_object, {"system_id": factory.make_name("system_id")}) def test_get_object_returns_object(self): handler = self.make_nodes_handler() node = factory.make_Node() self.assertEquals( node.hostname, handler.get_object( {"system_id": node.system_id}).hostname) def test_execute_only_allows_meta_allowed_methods(self): handler = self.make_nodes_handler(allowed_methods=['list']) with ExpectedException(HandlerNoSuchMethodError): handler.execute("get", {}).wait() def test_execute_raises_HandlerNoSuchMethodError(self): handler = self.make_nodes_handler(allowed_methods=['extra_method']) with ExpectedException(HandlerNoSuchMethodError): handler.execute("extra_method", {}).wait() def test_execute_calls_method_with_params(self): # Methods are assumed by default to be synchronous and are called in a # thread that originates from a specific threadpool. handler = self.make_nodes_handler() params = {"system_id": factory.make_name("system_id")} self.patch(base, "deferToDatabase").return_value = sentinel.thing result = handler.execute("get", params).wait() self.assertThat(result, Is(sentinel.thing)) self.assertThat(base.deferToDatabase, MockCalledOnceWith(ANY, params)) [func, _] = base.deferToDatabase.call_args[0] self.assertThat(func.func, Equals(handler.get)) def test_execute_calls_asynchronous_method_with_params(self): # An asynchronous method -- decorated with @asynchronous -- is called # directly, not in a thread. 
handler = self.make_nodes_handler()
        handler.get = asynchronous(lambda params: sentinel.thing)
        params = {"system_id": factory.make_name("system_id")}
        result = handler.execute("get", params).wait()
        self.assertThat(result, Is(sentinel.thing))

    def test_list(self):
        output = [
            {"hostname": factory.make_Node().hostname}
            for _ in range(3)
        ]
        handler = self.make_nodes_handler(fields=['hostname'])
        self.assertItemsEqual(output, handler.list({}))

    def test_list_start(self):
        nodes = [
            factory.make_Node()
            for _ in range(6)
        ]
        nodes = sorted(nodes, key=attrgetter("system_id"))
        output = [
            {"hostname": node.hostname}
            for node in nodes[3:]
        ]
        handler = self.make_nodes_handler(fields=['hostname'])
        self.assertItemsEqual(
            output, handler.list({"start": nodes[2].system_id}))

    def test_list_limit(self):
        output = [
            {"hostname": factory.make_Node().hostname}
            for _ in range(3)
        ]
        for _ in range(3):
            factory.make_Node()
        handler = self.make_nodes_handler(fields=['hostname'])
        self.assertItemsEqual(output, handler.list({"limit": 3}))

    def test_list_start_and_limit(self):
        nodes = [
            factory.make_Node()
            for _ in range(9)
        ]
        nodes = sorted(nodes, key=attrgetter("system_id"))
        output = [
            {"hostname": node.hostname}
            for node in nodes[3:6]
        ]
        handler = self.make_nodes_handler(fields=['hostname'])
        self.assertItemsEqual(
            output, handler.list({"start": nodes[2].system_id, "limit": 3}))

    def test_list_adds_to_loaded_pks(self):
        pks = [
            factory.make_Node().system_id
            for _ in range(3)
        ]
        handler = self.make_nodes_handler(fields=['hostname'])
        handler.list({})
        self.assertItemsEqual(pks, handler.cache['loaded_pks'])

    def test_list_unions_the_loaded_pks(self):
        pks = [
            factory.make_Node().system_id
            for _ in range(3)
        ]
        handler = self.make_nodes_handler(fields=['hostname'])
        # Make two calls to list, making sure loaded_pks contains all of
        # the primary keys listed.
        handler.list({"limit": 1})
        handler.list({"start": pks[0]})
        self.assertItemsEqual(pks, handler.cache['loaded_pks'])

    def test_get(self):
        node = factory.make_Node()
        handler = self.make_nodes_handler(fields=['hostname'])
        self.assertEquals(
            {"hostname": node.hostname},
            handler.get({"system_id": node.system_id}))

    def test_create_without_form(self):
        # Use a zone as it's simple and easy to create without a form,
        # unlike Node, which requires a form.
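        # (Assumed behaviour of the no-form path, based on the surrounding
        # tests: create() hydrates a fresh model instance from the params
        # and saves it, so a Zone needs nothing beyond a unique name.)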
handler = make_handler( "TestZoneHandler", queryset=Zone.objects.all(), fields=['name', 'description']) name = factory.make_name("zone") json_obj = handler.create({"name": name}) self.expectThat({ "name": name, "description": "", }, Equals(json_obj)) self.expectThat(name, Equals(Zone.objects.get(name=name).name)) def test_create_with_form_creates_node(self): hostname = factory.make_name("hostname") arch = make_usable_architecture(self) nodegroup = factory.make_NodeGroup() handler = self.make_nodes_handler( fields=['hostname', 'architecture'], form=AdminNodeWithMACAddressesForm) json_obj = handler.create({ "hostname": hostname, "architecture": arch, "mac_addresses": [factory.make_mac_address()], "nodegroup": nodegroup.uuid, }) self.expectThat({ "hostname": hostname, "architecture": arch, }, Equals(json_obj)) def test_create_with_form_uses_form_from_get_form_class(self): hostname = factory.make_name("hostname") arch = make_usable_architecture(self) nodegroup = factory.make_NodeGroup() handler = self.make_nodes_handler( fields=['hostname', 'architecture']) self.patch( handler, "get_form_class").return_value = AdminNodeWithMACAddressesForm json_obj = handler.create({ "hostname": hostname, "architecture": arch, "mac_addresses": [factory.make_mac_address()], "nodegroup": nodegroup.uuid, }) self.expectThat({ "hostname": hostname, "architecture": arch, }, Equals(json_obj)) def test_create_with_form_passes_request_with_user_set(self): hostname = factory.make_name("hostname") arch = make_usable_architecture(self) mock_form = MagicMock() mock_form.return_value.is_valid.return_value = True mock_form.return_value.save.return_value = factory.make_Node() handler = self.make_nodes_handler(fields=[], form=mock_form) handler.create({ "hostname": hostname, "architecture": arch, }) # Extract the passed request. 
passed_request = mock_form.call_args_list[0][1]['request'] self.assertIs(handler.user, passed_request.user) def test_create_with_form_raises_HandlerValidationError(self): hostname = factory.make_name("hostname") arch = make_usable_architecture(self) handler = self.make_nodes_handler( fields=['hostname', 'architecture'], form=AdminNodeWithMACAddressesForm) self.assertRaises( HandlerValidationError, handler.create, { "hostname": hostname, "architecture": arch, }) def test_update_without_form(self): handler = self.make_nodes_handler(fields=['hostname']) node = factory.make_Node() hostname = factory.make_name("hostname") json_obj = handler.update({ "system_id": node.system_id, "hostname": hostname, }) self.expectThat({ "hostname": hostname, }, Equals(json_obj)) self.expectThat( reload_object(node).hostname, Equals(hostname)) def test_update_with_form_updates_node(self): arch = make_usable_architecture(self) node = factory.make_Node(architecture=arch) hostname = factory.make_name("hostname") handler = self.make_nodes_handler( fields=['hostname'], form=AdminNodeForm) json_obj = handler.update({ "system_id": node.system_id, "hostname": hostname, }) self.expectThat({ "hostname": hostname, }, Equals(json_obj)) self.expectThat( reload_object(node).hostname, Equals(hostname)) def test_update_with_form_uses_form_from_get_form_class(self): arch = make_usable_architecture(self) node = factory.make_Node(architecture=arch) hostname = factory.make_name("hostname") handler = self.make_nodes_handler(fields=['hostname']) self.patch( handler, "get_form_class").return_value = AdminNodeForm json_obj = handler.update({ "system_id": node.system_id, "hostname": hostname, }) self.expectThat({ "hostname": hostname, }, Equals(json_obj)) self.expectThat( reload_object(node).hostname, Equals(hostname)) def test_delete_deletes_object(self): node = factory.make_Node() handler = self.make_nodes_handler() handler.delete({"system_id": node.system_id}) self.assertIsNone(reload_object(node)) def test_set_active_does_nothing_if_no_active_obj_and_missing_pk(self): handler = self.make_nodes_handler() mock_get = self.patch(handler, "get") handler.set_active({}) self.assertThat(mock_get, MockNotCalled()) def test_set_active_clears_active_if_missing_pk(self): handler = self.make_nodes_handler() handler.cache["active_pk"] = factory.make_name("system_id") handler.set_active({}) self.assertFalse("active_pk" in handler.cache) def test_set_active_returns_data_and_sets_active(self): node = factory.make_Node() handler = self.make_nodes_handler(fields=['system_id']) node_data = handler.set_active({"system_id": node.system_id}) self.expectThat(node_data["system_id"], Equals(node.system_id)) self.expectThat(handler.cache["active_pk"], Equals(node.system_id)) def test_on_listen_calls_listen(self): handler = self.make_nodes_handler() pk = factory.make_name("system_id") mock_listen = self.patch(handler, "listen") mock_listen.side_effect = HandlerDoesNotExistError() handler.on_listen(sentinel.channel, sentinel.action, pk) self.assertThat( mock_listen, MockCalledOnceWith( sentinel.channel, sentinel.action, pk)) def test_on_listen_returns_None_if_unknown_action( self): handler = self.make_nodes_handler() mock_listen = self.patch(handler, "listen") mock_listen.side_effect = HandlerDoesNotExistError() self.assertIsNone( handler.on_listen( sentinel.channel, factory.make_name("action"), sentinel.pk)) def test_on_listen_delete_removes_pk_from_loaded(self): handler = self.make_nodes_handler() node = factory.make_Node() 
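        # "loaded_pks" appears to act as the record of which objects this
        # websocket client has already been sent, letting on_listen decide
        # between create/update/delete notifications (an inference from
        # these tests rather than from a documented API).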
handler.cache["loaded_pks"].add(node.system_id) self.assertEquals( (handler._meta.handler_name, "delete", node.system_id), handler.on_listen( sentinel.channel, "delete", node.system_id)) self.assertTrue( node.system_id not in handler.cache["loaded_pks"], "on_listen delete did not remove system_id from loaded_pks") def test_on_listen_delete_returns_None_if_pk_not_in_loaded(self): handler = self.make_nodes_handler() node = factory.make_Node() self.assertEquals( None, handler.on_listen( sentinel.channel, "delete", node.system_id)) def test_on_listen_create_adds_pk_to_loaded(self): handler = self.make_nodes_handler(fields=['hostname']) node = factory.make_Node(owner=handler.user) self.assertEquals( ( handler._meta.handler_name, "create", {"hostname": node.hostname} ), handler.on_listen(sentinel.channel, "create", node.system_id)) self.assertTrue( node.system_id in handler.cache["loaded_pks"], "on_listen create did not add system_id to loaded_pks") def test_on_listen_create_returns_update_if_pk_already_known(self): handler = self.make_nodes_handler(fields=['hostname']) node = factory.make_Node(owner=handler.user) handler.cache["loaded_pks"].add(node.system_id) self.assertEquals( ( handler._meta.handler_name, "update", {"hostname": node.hostname} ), handler.on_listen(sentinel.channel, "create", node.system_id)) def test_on_listen_update_returns_delete_action_if_obj_is_None(self): handler = self.make_nodes_handler() node = factory.make_Node() handler.cache["loaded_pks"].add(node.system_id) self.patch(handler, "listen").return_value = None self.assertEquals( (handler._meta.handler_name, "delete", node.system_id), handler.on_listen( sentinel.channel, "update", node.system_id)) self.assertTrue( node.system_id not in handler.cache["loaded_pks"], "on_listen update did not remove system_id from loaded_pks") def test_on_listen_update_returns_update_action_if_obj_not_None(self): handler = self.make_nodes_handler(fields=['hostname']) node = factory.make_Node() handler.cache["loaded_pks"].add(node.system_id) self.assertEquals( ( handler._meta.handler_name, "update", {"hostname": node.hostname}, ), handler.on_listen( sentinel.channel, "update", node.system_id)) self.assertTrue( node.system_id in handler.cache["loaded_pks"], "on_listen update removed system_id from loaded_pks") def test_on_listen_update_returns_create_action_if_not_in_loaded(self): handler = self.make_nodes_handler(fields=['hostname']) node = factory.make_Node() self.assertEquals( ( handler._meta.handler_name, "create", {"hostname": node.hostname}, ), handler.on_listen( sentinel.channel, "update", node.system_id)) self.assertTrue( node.system_id in handler.cache["loaded_pks"], "on_listen update didnt add system_id to loaded_pks") def test_on_listen_update_call_full_dehydrate_for_list_if_not_active(self): node = factory.make_Node() handler = self.make_nodes_handler() handler.cache["loaded_pks"].add(node.system_id) mock_dehydrate = self.patch(handler, "full_dehydrate") mock_dehydrate.return_value = sentinel.data self.expectThat( handler.on_listen( sentinel.channel, "update", node.system_id), Equals((handler._meta.handler_name, "update", sentinel.data))) self.expectThat( mock_dehydrate, MockCalledOnceWith(node, for_list=True)) def test_on_listen_update_call_full_dehydrate_not_for_list_if_active(self): node = factory.make_Node() handler = self.make_nodes_handler() handler.cache["loaded_pks"].add(node.system_id) handler.cache["active_pk"] = node.system_id mock_dehydrate = self.patch(handler, "full_dehydrate") mock_dehydrate.return_value = 
        self.expectThat(
            handler.on_listen(
                sentinel.channel, "update", node.system_id),
            Equals((handler._meta.handler_name, "update", sentinel.data)))
        self.expectThat(
            mock_dehydrate, MockCalledOnceWith(node, for_list=False))

    def test_listen_calls_get_object_with_pk_on_other_actions(self):
        handler = self.make_nodes_handler()
        mock_get_object = self.patch(handler, "get_object")
        mock_get_object.return_value = sentinel.obj
        self.expectThat(
            handler.listen(sentinel.channel, "update", sentinel.pk),
            Equals(sentinel.obj))
        self.expectThat(
            mock_get_object,
            MockCalledOnceWith({handler._meta.pk: sentinel.pk}))
maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/test_listener.py0000644000000000000000000033737213056115004024376 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for `maasserver.websockets.listener`"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = []

from collections import namedtuple
import errno
import random

from crochet import wait_for_reactor
from django.contrib.auth.models import User
from django.db import connection
from maasserver.enum import (
    INTERFACE_TYPE,
    IPADDRESS_TYPE,
)
from maasserver.models.blockdevice import BlockDevice
from maasserver.models.cacheset import CacheSet
from maasserver.models.event import Event
from maasserver.models.fabric import Fabric
from maasserver.models.filesystem import Filesystem
from maasserver.models.filesystemgroup import FilesystemGroup
from maasserver.models.interface import Interface
from maasserver.models.node import Node
from maasserver.models.nodegroup import NodeGroup
from maasserver.models.nodegroupinterface import NodeGroupInterface
from maasserver.models.partition import Partition
from maasserver.models.partitiontable import PartitionTable
from maasserver.models.physicalblockdevice import PhysicalBlockDevice
from maasserver.models.space import Space
from maasserver.models.sshkey import SSHKey
from maasserver.models.sslkey import SSLKey
from maasserver.models.staticipaddress import StaticIPAddress
from maasserver.models.subnet import Subnet
from maasserver.models.tag import Tag
from maasserver.models.virtualblockdevice import VirtualBlockDevice
from maasserver.models.vlan import VLAN
from maasserver.models.zone import Zone
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.triggers import register_all_triggers
from maasserver.utils.orm import transactional
from maasserver.utils.threads import deferToDatabase
from maasserver.websockets import listener as listener_module
from maasserver.websockets.listener import (
    PostgresListener,
    PostgresListenerNotifyError,
)
from maastesting.djangotestcase import DjangoTransactionTestCase
from maastesting.matchers import (
    MockCalledOnceWith,
    MockCalledWith,
    MockNotCalled,
)
from metadataserver.models import NodeResult
from mock import (
    ANY,
    sentinel,
)
from provisioningserver.utils.twisted import DeferredValue
from psycopg2 import OperationalError
from testtools import ExpectedException
from testtools.matchers import (
    Equals,
    Is,
    IsInstance,
    Not,
)
from twisted.internet import (
    error,
    reactor,
)
from twisted.internet.defer import (
    CancelledError,
    Deferred,
    DeferredList,
    inlineCallbacks,
)
from twisted.python.failure import Failure


FakeNotify = namedtuple("FakeNotify", ["channel", "payload"])


class
TestPostgresListener(MAASServerTestCase): @transactional def send_notification(self, event, obj_id): cursor = connection.cursor() cursor.execute("NOTIFY %s, '%s';" % (event, obj_id)) cursor.close() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_notification(self): listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.send_notification, "node_create", 1) yield dv.get(timeout=2) self.assertEqual(('create', '1'), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__tryConnection_connects_to_database(self): listener = PostgresListener() yield listener.tryConnection() try: self.assertTrue(listener.connected()) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__tryConnection_logs_error(self): listener = PostgresListener() exception_type = factory.make_exception_type() exception_message = factory.make_name("message") startConnection = self.patch(listener, "startConnection") startConnection.side_effect = exception_type(exception_message) mock_logMsg = self.patch(listener, "logMsg") with ExpectedException(exception_type): yield listener.tryConnection() self.assertThat( mock_logMsg, MockCalledOnceWith( format="Unable to connect to database: %(error)s", error=exception_message)) @wait_for_reactor @inlineCallbacks def test__tryConnection_will_retry_in_3_seconds_if_autoReconnect_set(self): listener = PostgresListener() listener.autoReconnect = True startConnection = self.patch(listener, "startConnection") startConnection.side_effect = factory.make_exception() deferLater = self.patch(listener_module, "deferLater") deferLater.return_value = sentinel.retry result = yield listener.tryConnection() self.assertThat(result, Is(sentinel.retry)) self.assertThat(deferLater, MockCalledWith(reactor, 3, ANY)) @wait_for_reactor @inlineCallbacks def test__tryConnection_will_not_retry_if_autoReconnect_not_set(self): listener = PostgresListener() listener.autoReconnect = False exception_type = factory.make_exception_type() exception_message = factory.make_name("message") startConnection = self.patch(listener, "startConnection") startConnection.side_effect = exception_type(exception_message) deferLater = self.patch(listener_module, "deferLater") deferLater.return_value = sentinel.retry with ExpectedException(exception_type): yield listener.tryConnection() self.assertThat(deferLater, MockNotCalled()) @wait_for_reactor @inlineCallbacks def test__stopping_cancels_start(self): listener = PostgresListener() # Start then stop immediately, without waiting for start to complete. starting = listener.start() starting_spy = DeferredValue() starting_spy.observe(starting) stopping = listener.stop() # Both `starting` and `stopping` have callbacks yet to fire. self.assertThat(starting.callbacks, Not(Equals([]))) self.assertThat(stopping.callbacks, Not(Equals([]))) # Wait for the listener to stop. yield stopping # Neither `starting` nor `stopping` have callbacks. This is because # `stopping` chained itself onto the end of `starting`. self.assertThat(starting.callbacks, Equals([])) self.assertThat(stopping.callbacks, Equals([])) # Confirmation that `starting` was cancelled. 
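
The `send_notification` helper above is nothing more than a SQL ``NOTIFY``;
``PostgresListener`` sits on the receiving end of the same mechanism. For
orientation, here is a minimal sketch of that round trip using psycopg2
directly, outside MAAS entirely; the DSN and channel name are illustrative,
not MAAS's development settings::

    import select

    import psycopg2

    conn = psycopg2.connect("dbname=example")  # placeholder DSN
    conn.autocommit = True  # deliver NOTIFY immediately, not at commit
    cursor = conn.cursor()
    cursor.execute("LISTEN node_create;")
    cursor.execute("NOTIFY node_create, '1';")

    # Block until the connection is readable, then drain its notifies.
    select.select([conn], [], [], 2)
    conn.poll()
    for notify in conn.notifies:
        print(notify.channel, notify.payload)  # -> node_create 1
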
with ExpectedException(CancelledError): yield starting_spy.get() @wait_for_reactor def test__multiple_starts_return_same_Deferred(self): listener = PostgresListener() self.assertThat(listener.start(), Is(listener.start())) return listener.stop() @wait_for_reactor def test__multiple_stops_return_same_Deferred(self): listener = PostgresListener() self.assertThat(listener.stop(), Is(listener.stop())) return listener.stop() @wait_for_reactor @inlineCallbacks def test__tryConnection_calls_registerChannels_after_startConnection(self): listener = PostgresListener() exception_type = factory.make_exception_type() self.patch(listener, "startConnection") mock_registerChannels = self.patch(listener, "registerChannels") mock_registerChannels.side_effect = exception_type with ExpectedException(exception_type): yield listener.tryConnection() self.assertThat( mock_registerChannels, MockCalledOnceWith()) @wait_for_reactor @inlineCallbacks def test__tryConnection_adds_self_to_reactor(self): listener = PostgresListener() # Spy on calls to reactor.addReader. self.patch(reactor, "addReader").side_effect = reactor.addReader yield listener.tryConnection() try: self.assertThat( reactor.addReader, MockCalledOnceWith(listener)) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__tryConnection_closes_connection_on_failure(self): listener = PostgresListener() exc_type = factory.make_exception_type() startReading = self.patch(listener, "startReading") startReading.side_effect = exc_type("no reason") with ExpectedException(exc_type): yield listener.tryConnection() self.assertThat(listener.connection, Is(None)) @wait_for_reactor @inlineCallbacks def test__tryConnection_logs_success(self): listener = PostgresListener() mock_logMsg = self.patch(listener, "logMsg") yield listener.tryConnection() try: self.assertThat( mock_logMsg, MockCalledOnceWith("Listening for database notifications.")) finally: yield listener.stop() @wait_for_reactor def test__connectionLost_logs_reason(self): listener = PostgresListener() self.patch(listener, "logErr") failure = Failure(factory.make_exception()) listener.connectionLost(failure) self.assertThat( listener.logErr, MockCalledOnceWith( failure, "Connection lost.")) @wait_for_reactor def test__connectionLost_does_not_log_reason_when_lost_cleanly(self): listener = PostgresListener() self.patch(listener, "logErr") listener.connectionLost(Failure(error.ConnectionDone())) self.assertThat(listener.logErr, MockNotCalled()) def test_register_adds_channel_and_handler(self): listener = PostgresListener() channel = factory.make_name("channel") listener.register(channel, sentinel.handler) self.assertEqual( [sentinel.handler], listener.listeners[channel]) def test__convertChannel_raises_exception_if_not_valid_channel(self): listener = PostgresListener() self.assertRaises( PostgresListenerNotifyError, listener.convertChannel, "node_create") def test__convertChannel_raises_exception_if_not_valid_action(self): listener = PostgresListener() self.assertRaises( PostgresListenerNotifyError, listener.convertChannel, "node_unknown") @wait_for_reactor @inlineCallbacks def test__doRead_removes_self_from_reactor_on_error(self): listener = PostgresListener() connection = self.patch(listener, "connection") connection.connection.poll.side_effect = OperationalError() self.patch(reactor, "removeReader") self.patch(listener, "connectionLost") failure = listener.doRead() # No failure is returned; see the comment in PostgresListener.doRead() # that explains why we don't do that. 
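
The two ``convertChannel`` tests above pin down the channel-name convention
these notifications use: ``<name>_<action>``, where the name must have a
registered listener and the action must be one of create, update, or delete.
A hedged sketch of that parsing rule; the function and names here are
illustrative, not MAAS's actual implementation::

    def convert_channel(channel, registered=("node",)):
        name, _, action = channel.rpartition("_")
        if name not in registered:
            raise ValueError("%s is not a registered channel" % name)
        if action not in ("create", "update", "delete"):
            raise ValueError("%s is not a valid action" % action)
        return name, action

    convert_channel("node_create")   # -> ("node", "create")
    convert_channel("node_unknown")  # raises ValueError
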
self.assertThat(failure, Is(None)) # The listener has begun disconnecting. self.assertThat(listener.disconnecting, IsInstance(Deferred)) # Wait for disconnection to complete. yield listener.disconnecting # The listener has removed itself from the reactor. self.assertThat(reactor.removeReader, MockCalledOnceWith(listener)) # connectionLost() has been called with a simple ConnectionLost. self.assertThat(listener.connectionLost, MockCalledOnceWith(ANY)) [failure] = listener.connectionLost.call_args[0] self.assertThat(failure, IsInstance(Failure)) self.assertThat(failure.value, IsInstance(error.ConnectionLost)) def test__doRead_adds_notifies_to_notifications(self): listener = PostgresListener() notifications = [ FakeNotify( channel=factory.make_name("channel_action"), payload=factory.make_name("payload")) for _ in range(3) ] connection = self.patch(listener, "connection") connection.connection.poll.return_value = None # Add the notifications twice, so it can test that duplicates are # accumulated together. connection.connection.notifies = notifications + notifications self.patch(listener, "handleNotify") listener.doRead() self.assertItemsEqual( listener.notifications, set(notifications)) @wait_for_reactor @inlineCallbacks def test__listener_ignores_ENOENT_when_removing_itself_from_reactor(self): listener = PostgresListener() self.patch(reactor, "addReader") self.patch(reactor, "removeReader") # removeReader() is going to have a nasty accident. enoent = IOError("ENOENT") enoent.errno = errno.ENOENT reactor.removeReader.side_effect = enoent # The listener starts and stops without issue. yield listener.start() yield listener.stop() # addReader() and removeReader() were both called. self.assertThat(reactor.addReader, MockCalledOnceWith(listener)) self.assertThat(reactor.removeReader, MockCalledOnceWith(listener)) @wait_for_reactor @inlineCallbacks def test__listener_waits_for_notifier_to_complete(self): listener = PostgresListener() yield listener.start() try: self.assertTrue(listener.notifier.running) finally: yield listener.stop() self.assertFalse(listener.notifier.running) class TransactionalHelpersMixin: """Helpers performing actions in transactions.""" def make_listener_without_delay(self): listener = PostgresListener() self.patch(listener, "HANDLE_NOTIFY_DELAY", 0) return listener @transactional def get_node(self, system_id): node = Node.objects.get(system_id=system_id) return node @transactional def create_node(self, params=None): if params is None: params = {} params['with_boot_disk'] = False return factory.make_Node(**params) @transactional def update_node(self, system_id, params): node = Node.objects.get(system_id=system_id) for key, value in params.items(): setattr(node, key, value) return node.save() @transactional def delete_node(self, system_id): node = Node.objects.get(system_id=system_id) node.delete() @transactional def create_device_with_parent(self, params=None): if params is None: params = {} parent = factory.make_Node(with_boot_disk=False) params["installable"] = False params["parent"] = parent device = factory.make_Node(**params) return device, parent @transactional def get_node_boot_interface(self, system_id): node = Node.objects.get(system_id=system_id) return node.get_boot_interface() @transactional def create_nodegroup(self, params=None): if params is None: params = {} return factory.make_NodeGroup(**params) @transactional def update_nodegroup(self, id, params): nodegroup = NodeGroup.objects.get(id=id) for key, value in params.items(): setattr(nodegroup, key, value) 
return nodegroup.save() @transactional def delete_nodegroup(self, id): nodegroup = NodeGroup.objects.get(id=id) nodegroup.delete() @transactional def create_nodegroupinterface(self, nodegroup, params=None): if params is None: params = {} return factory.make_NodeGroupInterface(nodegroup, **params) @transactional def update_nodegroupinterface(self, id, params): interface = NodeGroupInterface.objects.get(id=id) for key, value in params.items(): setattr(interface, key, value) return interface.save() @transactional def update_nodegroupinterface_subnet_mask(self, id, subnet_mask): interface = NodeGroupInterface.objects.get(id=id) # This is now a special case, since the subnet_mask no longer # belongs to the NodeGroupInterface. We'll need to explicitly # save *only* the Subnet, without saving the NodeGroupInterface. # Otherwise, Django will ignorantly update the table, and our # test case may pass with a false-positive. interface.subnet_mask = subnet_mask interface.subnet.save() @transactional def delete_nodegroupinterface(self, id): interface = NodeGroupInterface.objects.get(id=id) interface.delete() @transactional def create_fabric(self, params=None): if params is None: params = {} return factory.make_Fabric(**params) @transactional def update_fabric(self, id, params): fabric = Fabric.objects.get(id=id) for key, value in params.items(): setattr(fabric, key, value) return fabric.save() @transactional def delete_fabric(self, id): fabric = Fabric.objects.get(id=id) fabric.delete() @transactional def create_space(self, params=None): if params is None: params = {} return factory.make_Space(**params) @transactional def update_space(self, id, params): space = Space.objects.get(id=id) for key, value in params.items(): setattr(space, key, value) return space.save() @transactional def delete_space(self, id): space = Space.objects.get(id=id) space.delete() @transactional def create_subnet(self, params=None): if params is None: params = {} return factory.make_Subnet(**params) @transactional def update_subnet(self, id, params): subnet = Subnet.objects.get(id=id) for key, value in params.items(): setattr(subnet, key, value) return subnet.save() @transactional def delete_subnet(self, id): subnet = Subnet.objects.get(id=id) subnet.delete() @transactional def create_vlan(self, params=None): if params is None: params = {} return factory.make_VLAN(**params) @transactional def update_vlan(self, id, params): vlan = VLAN.objects.get(id=id) for key, value in params.items(): setattr(vlan, key, value) return vlan.save() @transactional def delete_vlan(self, id): vlan = VLAN.objects.get(id=id) vlan.delete() @transactional def create_zone(self, params=None): if params is None: params = {} return factory.make_Zone(**params) @transactional def update_zone(self, id, params): zone = Zone.objects.get(id=id) for key, value in params.items(): setattr(zone, key, value) return zone.save() @transactional def delete_zone(self, id): zone = Zone.objects.get(id=id) zone.delete() @transactional def create_tag(self, params=None): if params is None: params = {} return factory.make_Tag(**params) @transactional def add_node_to_tag(self, node, tag): node.tags.add(tag) node.save() @transactional def remove_node_from_tag(self, node, tag): node.tags.remove(tag) node.save() @transactional def update_tag(self, id, params): tag = Tag.objects.get(id=id) for key, value in params.items(): setattr(tag, key, value) return tag.save() @transactional def delete_tag(self, id): tag = Tag.objects.get(id=id) tag.delete() @transactional def create_user(self, 
params=None): if params is None: params = {} return factory.make_User(**params) @transactional def update_user(self, id, params): user = User.objects.get(id=id) for key, value in params.items(): setattr(user, key, value) return user.save() @transactional def delete_user(self, id): user = User.objects.get(id=id) user.consumers.all().delete() user.delete() @transactional def create_event(self, params=None): if params is None: params = {} return factory.make_Event(**params) @transactional def update_event(self, id, params): event = Event.objects.get(id=id) for key, value in params.items(): setattr(event, key, value) return event.save() @transactional def delete_event(self, id): event = Event.objects.get(id=id) event.delete() @transactional def create_staticipaddress(self, params=None): if params is None: params = {} return factory.make_StaticIPAddress(**params) @transactional def update_staticipaddress(self, id, params): ip = StaticIPAddress.objects.get(id=id) for key, value in params.items(): setattr(ip, key, value) return ip.save() @transactional def delete_staticipaddress(self, id): sip = StaticIPAddress.objects.get(id=id) sip.delete() @transactional def get_ipaddress_subnet(self, id): ipaddress = StaticIPAddress.objects.get(id=id) return ipaddress.subnet @transactional def get_ipaddress_vlan(self, id): ipaddress = StaticIPAddress.objects.get(id=id) return ipaddress.subnet.vlan @transactional def get_ipaddress_fabric(self, id): ipaddress = StaticIPAddress.objects.get(id=id) return ipaddress.subnet.vlan.fabric @transactional def get_ipaddress_space(self, id): ipaddress = StaticIPAddress.objects.get(id=id) return ipaddress.subnet.space @transactional def create_noderesult(self, params=None): if params is None: params = {} return factory.make_NodeResult_for_commissioning(**params) @transactional def delete_noderesult(self, id): result = NodeResult.objects.get(id=id) result.delete() @transactional def create_interface(self, params=None): if params is None: params = {} return factory.make_Interface(INTERFACE_TYPE.PHYSICAL, **params) @transactional def delete_interface(self, id): interface = Interface.objects.get(id=id) interface.delete() @transactional def update_interface(self, id, params): interface = Interface.objects.get(id=id) for key, value in params.items(): setattr(interface, key, value) return interface.save() @transactional def get_interface_vlan(self, id): interface = Interface.objects.get(id=id) return interface.vlan @transactional def get_interface_fabric(self, id): interface = Interface.objects.get(id=id) return interface.vlan.fabric @transactional def create_blockdevice(self, params=None): if params is None: params = {} return factory.make_BlockDevice(**params) @transactional def create_physicalblockdevice(self, params=None): if params is None: params = {} return factory.make_PhysicalBlockDevice(**params) @transactional def create_virtualblockdevice(self, params=None): if params is None: params = {} return factory.make_VirtualBlockDevice(**params) @transactional def delete_blockdevice(self, id): blockdevice = BlockDevice.objects.get(id=id) blockdevice.delete() @transactional def update_blockdevice(self, id, params): blockdevice = BlockDevice.objects.get(id=id) for key, value in params.items(): setattr(blockdevice, key, value) return blockdevice.save() @transactional def update_physicalblockdevice(self, id, params): blockdevice = PhysicalBlockDevice.objects.get(id=id) for key, value in params.items(): setattr(blockdevice, key, value) return blockdevice.save() @transactional def 
update_virtualblockdevice(self, id, params): blockdevice = VirtualBlockDevice.objects.get(id=id) for key, value in params.items(): setattr(blockdevice, key, value) return blockdevice.save() @transactional def create_partitiontable(self, params=None): if params is None: params = {} return factory.make_PartitionTable(**params) @transactional def delete_partitiontable(self, id): partitiontable = PartitionTable.objects.get(id=id) partitiontable.delete() @transactional def update_partitiontable(self, id, params): partitiontable = PartitionTable.objects.get(id=id) for key, value in params.items(): setattr(partitiontable, key, value) return partitiontable.save() @transactional def create_partition(self, params=None): if params is None: params = {} return factory.make_Partition(**params) @transactional def delete_partition(self, id): partition = Partition.objects.get(id=id) partition.delete() @transactional def update_partition(self, id, params): partition = Partition.objects.get(id=id) for key, value in params.items(): setattr(partition, key, value) return partition.save() @transactional def create_filesystem(self, params=None): if params is None: params = {} return factory.make_Filesystem(**params) @transactional def delete_filesystem(self, id): filesystem = Filesystem.objects.get(id=id) filesystem.delete() @transactional def update_filesystem(self, id, params): filesystem = Filesystem.objects.get(id=id) for key, value in params.items(): setattr(filesystem, key, value) return filesystem.save() @transactional def create_filesystemgroup(self, params=None): if params is None: params = {} return factory.make_FilesystemGroup(**params) @transactional def delete_filesystemgroup(self, id): filesystemgroup = FilesystemGroup.objects.get(id=id) filesystemgroup.delete() @transactional def update_filesystemgroup(self, id, params): filesystemgroup = FilesystemGroup.objects.get(id=id) for key, value in params.items(): setattr(filesystemgroup, key, value) return filesystemgroup.save() @transactional def create_cacheset(self, params=None): if params is None: params = {} return factory.make_CacheSet(**params) @transactional def delete_cacheset(self, id): cacheset = CacheSet.objects.get(id=id) cacheset.delete() @transactional def update_cacheset(self, id, params): cacheset = CacheSet.objects.get(id=id) for key, value in params.items(): setattr(cacheset, key, value) return cacheset.save() @transactional def create_sshkey(self, params=None): if params is None: params = {} return factory.make_SSHKey(**params) @transactional def delete_sshkey(self, id): key = SSHKey.objects.get(id=id) key.delete() @transactional def create_sslkey(self, params=None): if params is None: params = {} return factory.make_SSLKey(**params) @transactional def delete_sslkey(self, id): key = SSLKey.objects.get(id=id) key.delete() class TestNodeListener(DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers code.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: node = yield deferToDatabase(self.create_node, self.params) yield dv.get(timeout=2) self.assertEqual(('create', node.system_id), 
dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) node = yield deferToDatabase(self.create_node, self.params) yield listener.start() try: yield deferToDatabase( self.update_node, node.system_id, {'hostname': factory.make_name('hostname')}) yield dv.get(timeout=2) self.assertEqual(('update', node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) node = yield deferToDatabase(self.create_node, self.params) yield listener.start() try: yield deferToDatabase(self.delete_node, node.system_id) yield dv.get(timeout=2) self.assertEqual(('delete', node.system_id), dv.value) finally: yield listener.stop() class TestDeviceWithParentListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) parent = yield deferToDatabase(self.create_node) yield listener.start() try: yield deferToDatabase( self.create_node, { "installable": False, "parent": parent, }) yield dv.get(timeout=2) self.assertEqual(('update', parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) device, parent = yield deferToDatabase(self.create_device_with_parent) yield listener.start() try: yield deferToDatabase( self.update_node, device.system_id, {'hostname': factory.make_name('hostname')}) yield dv.get(timeout=2) self.assertEqual(('update', parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) device, parent = yield deferToDatabase(self.create_device_with_parent) yield listener.start() try: yield deferToDatabase(self.delete_node, device.system_id) yield dv.get(timeout=2) self.assertEqual(('update', parent.system_id), dv.value) finally: yield listener.stop() class TestClusterListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) yield listener.start() try: nodegroup = yield deferToDatabase(self.create_nodegroup) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() @wait_for_reactor 
@inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) nodegroup = yield deferToDatabase(self.create_nodegroup) yield listener.start() try: yield deferToDatabase( self.update_nodegroup, nodegroup.id, {'cluster_name': factory.make_name('cluster_name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) nodegroup = yield deferToDatabase(self.create_nodegroup) yield listener.start() try: yield deferToDatabase(self.delete_nodegroup, nodegroup.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() class TestClusterInterfaceListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster interface triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_nodegroup_update_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) nodegroup = yield deferToDatabase(self.create_nodegroup) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.create_nodegroupinterface, nodegroup) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_nodegroup_update_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) nodegroup = yield deferToDatabase(self.create_nodegroup) interface = yield deferToDatabase( self.create_nodegroupinterface, nodegroup) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_nodegroupinterface, interface.id, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_nodegroup_update_handler_on_update_subnet_mask(self): yield deferToDatabase(register_all_triggers) nodegroup = yield deferToDatabase(self.create_nodegroup) interface = yield deferToDatabase( self.create_nodegroupinterface, nodegroup, params=dict(ip='10.0.0.1', subnet_mask='255.255.255.0')) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_nodegroupinterface_subnet_mask, interface.id, '255.255.0.0') yield dv.get(timeout=2) self.assertEqual(('update', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_nodegroup_update_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) nodegroup = yield deferToDatabase(self.create_nodegroup) interface = yield deferToDatabase( self.create_nodegroupinterface, nodegroup) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("nodegroup", 
lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_nodegroupinterface, interface.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % nodegroup.id), dv.value) finally: yield listener.stop() class TestZoneListener(DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the zone triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("zone", lambda *args: dv.set(args)) yield listener.start() try: zone = yield deferToDatabase(self.create_zone) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % zone.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("zone", lambda *args: dv.set(args)) zone = yield deferToDatabase(self.create_zone) yield listener.start() try: yield deferToDatabase( self.update_zone, zone.id, {'cluster_name': factory.make_name('cluster_name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % zone.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("zone", lambda *args: dv.set(args)) zone = yield deferToDatabase(self.create_zone) yield listener.start() try: yield deferToDatabase(self.delete_zone, zone.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % zone.id), dv.value) finally: yield listener.stop() class TestTagListener(DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the tag triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("tag", lambda *args: dv.set(args)) yield listener.start() try: tag = yield deferToDatabase(self.create_tag) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % tag.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("tag", lambda *args: dv.set(args)) tag = yield deferToDatabase(self.create_tag) yield listener.start() try: yield deferToDatabase( self.update_tag, tag.id, {'name': factory.make_name('tag')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % tag.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("tag", lambda *args: dv.set(args)) tag = yield deferToDatabase(self.create_tag) yield listener.start() try: yield deferToDatabase(self.delete_tag, tag.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % tag.id), dv.value) finally: yield listener.stop() class TestNodeTagListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end 
test of both the listeners code and the triggers on maasserver_node_tags table.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) tag = yield deferToDatabase(self.create_tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.add_node_to_tag, node, tag) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) tag = yield deferToDatabase(self.create_tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.remove_node_from_tag, node, tag) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_node_handler_with_update_on_tag_rename(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) tag = yield deferToDatabase(self.create_tag) yield deferToDatabase(self.add_node_to_tag, node, tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: tag = yield deferToDatabase( self.update_tag, tag.id, {'name': factory.make_name("tag")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestDeviceWithParentTagListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_node_tags table.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) tag = yield deferToDatabase(self.create_tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.add_node_to_tag, device, tag) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) tag = yield deferToDatabase(self.create_tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.remove_node_from_tag, device, tag) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_node_handler_with_update_on_tag_rename(self): yield deferToDatabase(register_all_triggers) device, parent = yield 
deferToDatabase(self.create_device_with_parent) tag = yield deferToDatabase(self.create_tag) yield deferToDatabase(self.add_node_to_tag, device, tag) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: tag = yield deferToDatabase( self.update_tag, tag.id, {'name': factory.make_name("tag")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() class TestUserListener(DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the user triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) yield listener.start() try: user = yield deferToDatabase(self.create_user) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % user.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) user = yield deferToDatabase(self.create_user) yield listener.start() try: yield deferToDatabase( self.update_user, user.id, {'username': factory.make_name('username')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % user.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) user = yield deferToDatabase(self.create_user) yield listener.start() try: yield deferToDatabase(self.delete_user, user.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % user.id), dv.value) finally: yield listener.stop() class TestEventListener(DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the event triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("event", lambda *args: dv.set(args)) yield listener.start() try: event = yield deferToDatabase(self.create_event) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % event.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("event", lambda *args: dv.set(args)) event = yield deferToDatabase(self.create_event) yield listener.start() try: yield deferToDatabase( self.update_event, event.id, {'description': factory.make_name('description')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % event.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("event", lambda *args: dv.set(args)) event = yield 
deferToDatabase(self.create_event) yield listener.start() try: yield deferToDatabase(self.delete_event, event.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % event.id), dv.value) finally: yield listener.stop() class TestNodeEventListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_event table that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_event, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestDeviceWithParentEventListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_event table that notifies its node.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_event, {"node": device}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() class TestNodeStaticIPAddressListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_interfacestaticipaddresslink table that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True, 'interface': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False, 'interface': True}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) sip = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_staticipaddress, sip.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield 
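
Every end-to-end test in this file synchronises on the same pattern: register
a handler that records the notification's arguments in a ``DeferredValue``,
make a change in the database, then wait on the value with a timeout. A
condensed sketch of that pattern on its own; the helper name and channel are
illustrative::

    from provisioningserver.utils.twisted import DeferredValue
    from twisted.internet.defer import (
        inlineCallbacks,
        returnValue,
        )

    @inlineCallbacks
    def wait_for_one_notification(listener, channel="node", timeout=2):
        dv = DeferredValue()
        # Handlers are called with (action, object_id).
        listener.register(channel, lambda *args: dv.set(args))
        yield listener.start()
        try:
            # Fails with a timeout error if no notification arrives.
            yield dv.get(timeout=timeout)
        finally:
            yield listener.stop()
        returnValue(dv.value)
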
listener.stop() class TestDeviceWithParentStaticIPAddressListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_interfacestaticipaddresslink table that notifies its node.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) sip = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_staticipaddress, sip.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() class TestNodeNodeResultListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on metadataserver_noderesult table that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_noderesult, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) result = yield deferToDatabase(self.create_noderesult, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_noderesult, result.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestDeviceWithParentNodeResultListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on metadataserver_noderesult table that notifies its node.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) listener = PostgresListener() dv = DeferredValue() 
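
The device-with-parent variants all assert that the *parent's* ``system_id``
arrives on the ``node`` channel: the triggers installed by
``register_all_triggers`` resolve a device to its parent before notifying. A
hedged sketch of that kind of trigger function, held in a Python constant;
the DDL names are illustrative and not MAAS's exact triggers::

    # Sketch only; MAAS's real DDL lives in maasserver.triggers.
    PARENT_NOTIFY_DDL = """\
    CREATE OR REPLACE FUNCTION node_update_notify() RETURNS trigger AS $$
    DECLARE
      parent maasserver_node;
    BEGIN
      IF NEW.parent_id IS NOT NULL THEN
        SELECT * INTO parent FROM maasserver_node
          WHERE id = NEW.parent_id;
        PERFORM pg_notify('node_update', parent.system_id);
      ELSE
        PERFORM pg_notify('node_update', NEW.system_id);
      END IF;
      RETURN NEW;
    END;
    $$ LANGUAGE plpgsql;
    """
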
listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_noderesult, {"node": device}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) result = yield deferToDatabase( self.create_noderesult, {"node": device}) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_noderesult, result.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() class TestNodeInterfaceListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_interface table that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_interface, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.create_interface, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_interface, interface.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.create_interface, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.update_interface, interface.id, { "mac_address": factory.make_MAC() }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_old_node_on_update(self): yield deferToDatabase(register_all_triggers) node1 = yield deferToDatabase(self.create_node, self.params) node2 = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.create_interface, {"node": node1}) dvs = [DeferredValue(), DeferredValue()] def set_defer_value(*args): for dv in dvs: if not dv.isSet: dv.set(args) break listener = PostgresListener() listener.register(self.listener, set_defer_value) yield listener.start() try: yield deferToDatabase(self.update_interface, 
interface.id, { "node": node2 }) yield dvs[0].get(timeout=2) yield dvs[1].get(timeout=2) self.assertItemsEqual([ ('update', '%s' % node1.system_id), ('update', '%s' % node2.system_id), ], [dvs[0].value, dvs[1].value]) finally: yield listener.stop() class TestDeviceWithParentInterfaceListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_interface table that notifies its node.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_interface, {"node": device}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) interface = yield deferToDatabase( self.create_interface, {"node": device}) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_interface, interface.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase(self.create_device_with_parent) interface = yield deferToDatabase( self.create_interface, {"node": device}) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.update_interface, interface.id, { "mac_address": factory.make_MAC() }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_old_node_on_update(self): yield deferToDatabase(register_all_triggers) device1, parent1 = yield deferToDatabase( self.create_device_with_parent) device2, parent2 = yield deferToDatabase( self.create_device_with_parent) interface = yield deferToDatabase( self.create_interface, {"node": device1}) dvs = [DeferredValue(), DeferredValue()] def set_defer_value(*args): for dv in dvs: if not dv.isSet: dv.set(args) break listener = PostgresListener() listener.register("node", set_defer_value) yield listener.start() try: yield deferToDatabase(self.update_interface, interface.id, { "node": device2 }) yield dvs[0].get(timeout=2) yield dvs[1].get(timeout=2) self.assertItemsEqual([ ('update', '%s' % parent1.system_id), ('update', '%s' % parent2.system_id), ], [dvs[0].value, dvs[1].value]) finally: yield listener.stop() class TestFabricListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification_with_blank_name(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dvs = [DeferredValue(), DeferredValue()] save_dvs = dvs[:] listener.register("fabric", lambda 
*args: dvs.pop().set(args)) yield listener.start() try: fabric = yield deferToDatabase(self.create_fabric) results = yield DeferredList( (dv.get(timeout=2) for dv in save_dvs)) self.assertItemsEqual( [('create', '%s' % fabric.id), ('update', '%s' % fabric.id)], [res for (suc, res) in results]) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification_with_name(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("fabric", lambda *args: dv.set(args)) yield listener.start() try: fabric = yield deferToDatabase( self.create_fabric, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % fabric.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("fabric", lambda *args: dv.set(args)) fabric = yield deferToDatabase(self.create_fabric) yield listener.start() try: yield deferToDatabase( self.update_fabric, fabric.id, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % fabric.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("fabric", lambda *args: dv.set(args)) fabric = yield deferToDatabase(self.create_fabric) yield listener.start() try: yield deferToDatabase(self.delete_fabric, fabric.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % fabric.id), dv.value) finally: yield listener.stop() class TestVLANListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): fabric = yield deferToDatabase(self.create_fabric) yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("vlan", lambda *args: dv.set(args)) yield listener.start() try: vlan = yield deferToDatabase(self.create_vlan, {'fabric': fabric}) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % vlan.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): fabric = yield deferToDatabase(self.create_fabric) yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("vlan", lambda *args: dv.set(args)) vlan = yield deferToDatabase(self.create_vlan, {'fabric': fabric}) yield listener.start() try: yield deferToDatabase( self.update_vlan, vlan.id, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % vlan.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): fabric = yield deferToDatabase(self.create_fabric) yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("vlan", lambda *args: dv.set(args)) vlan = yield deferToDatabase(self.create_vlan, {'fabric': fabric}) yield listener.start() try: yield 
deferToDatabase(self.delete_vlan, vlan.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % vlan.id), dv.value) finally: yield listener.stop() class TestSubnetListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("subnet", lambda *args: dv.set(args)) yield listener.start() try: subnet = yield deferToDatabase(self.create_subnet) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % subnet.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("subnet", lambda *args: dv.set(args)) subnet = yield deferToDatabase(self.create_subnet) yield listener.start() try: yield deferToDatabase( self.update_subnet, subnet.id, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % subnet.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("subnet", lambda *args: dv.set(args)) subnet = yield deferToDatabase(self.create_subnet) yield listener.start() try: yield deferToDatabase(self.delete_subnet, subnet.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % subnet.id), dv.value) finally: yield listener.stop() class TestSpaceListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the cluster triggers code.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification_with_blank_name(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dvs = [DeferredValue(), DeferredValue()] save_dvs = dvs[:] listener.register("space", lambda *args: dvs.pop().set(args)) yield listener.start() try: space = yield deferToDatabase(self.create_space) results = yield DeferredList( (dv.get(timeout=2) for dv in save_dvs)) self.assertItemsEqual( [('create', '%s' % space.id), ('update', '%s' % space.id)], [res for (suc, res) in results]) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_create_notification_with_name(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("space", lambda *args: dv.set(args)) yield listener.start() try: space = yield deferToDatabase( self.create_space, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('create', '%s' % space.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_update_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("space", lambda *args: dv.set(args)) space = yield deferToDatabase(self.create_space) yield listener.start() try: yield deferToDatabase( self.update_space, space.id, {'name': factory.make_name('name')}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % 
space.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_on_delete_notification(self): yield deferToDatabase(register_all_triggers) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("space", lambda *args: dv.set(args)) space = yield deferToDatabase(self.create_space) yield listener.start() try: yield deferToDatabase(self.delete_space, space.id) yield dv.get(timeout=2) self.assertEqual(('delete', '%s' % space.id), dv.value) finally: yield listener.stop() class TestNodeNetworkListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_fabric, maasserver_space, maasserver_subnet, and maasserver_vlan tables that notifies affected nodes.""" scenarios = ( ('node', { 'params': {'installable': True, 'interface': True}, 'listener': 'node', }), ('device', { 'params': {'installable': False, 'interface': True}, 'listener': 'device', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_iface_with_update_on_fabric_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) fabric = yield deferToDatabase(self.get_interface_fabric, interface.id) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_fabric, fabric.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_iface_with_update_on_vlan_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) vlan = yield deferToDatabase(self.get_interface_vlan, interface.id) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_vlan, vlan.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_subnet_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) ipaddress = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) subnet = yield deferToDatabase(self.get_ipaddress_subnet, ipaddress.id) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_subnet, subnet.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_space_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, 
self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) ipaddress = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) space = yield deferToDatabase(self.get_ipaddress_space, ipaddress.id) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_space, space.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_ip_address_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) interface = yield deferToDatabase( self.get_node_boot_interface, node.system_id) subnet = yield deferToDatabase(self.create_subnet) selected_ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) ipaddress = yield deferToDatabase( self.create_staticipaddress, { "alloc_type": IPADDRESS_TYPE.AUTO, "interface": interface, "subnet": subnet, "ip": "", }) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_staticipaddress, ipaddress.id, {"ip": selected_ip}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestDeviceWithParentNetworkListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_fabric, maasserver_space, maasserver_subnet, and maasserver_vlan tables that notifies affected nodes.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_iface_with_update_on_fabric_update(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) fabric = yield deferToDatabase(self.get_interface_fabric, interface.id) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_fabric, fabric.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_iface_with_update_on_vlan_update(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) vlan = yield deferToDatabase(self.get_interface_vlan, interface.id) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_vlan, vlan.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_subnet_update(self): yield deferToDatabase(register_all_triggers) device, 
parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) ipaddress = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) subnet = yield deferToDatabase(self.get_ipaddress_subnet, ipaddress.id) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_subnet, subnet.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_space_update(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) ipaddress = yield deferToDatabase( self.create_staticipaddress, {"interface": interface}) space = yield deferToDatabase(self.get_ipaddress_space, ipaddress.id) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_space, space.id, {"name": factory.make_name("name")}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_ip_address_update(self): yield deferToDatabase(register_all_triggers) device, parent = yield deferToDatabase( self.create_device_with_parent, {"interface": True}) interface = yield deferToDatabase( self.get_node_boot_interface, device.system_id) subnet = yield deferToDatabase(self.create_subnet) selected_ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) ipaddress = yield deferToDatabase( self.create_staticipaddress, { "alloc_type": IPADDRESS_TYPE.AUTO, "interface": interface, "subnet": subnet, "ip": "", }) listener = PostgresListener() dv = DeferredValue() listener.register("node", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_staticipaddress, ipaddress.id, {"ip": selected_ip}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % parent.system_id), dv.value) finally: yield listener.stop() class TestStaticIPAddressSubnetListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_staticipaddress tables that notifies affected subnets.""" @wait_for_reactor @inlineCallbacks def test__calls_update_on_subnet(self): yield deferToDatabase(register_all_triggers) subnet = yield deferToDatabase(self.create_subnet) selected_ip = factory.pick_ip_in_network(subnet.get_ipnetwork()) ipaddress = yield deferToDatabase( self.create_staticipaddress, { "alloc_type": IPADDRESS_TYPE.AUTO, "subnet": subnet, "ip": "", }) listener = PostgresListener() dv = DeferredValue() listener.register("subnet", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_staticipaddress, ipaddress.id, {"ip": selected_ip}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % subnet.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_update_on_old_and_new_subnet(self): yield deferToDatabase(register_all_triggers) old_subnet = yield deferToDatabase(self.create_subnet) new_subnet = 
yield deferToDatabase(self.create_subnet) selected_ip = factory.pick_ip_in_network(new_subnet.get_ipnetwork()) ipaddress = yield deferToDatabase( self.create_staticipaddress, { "alloc_type": IPADDRESS_TYPE.AUTO, "subnet": old_subnet, "ip": "", }) dvs = [DeferredValue(), DeferredValue()] def set_defer_value(*args): for dv in dvs: if not dv.isSet: dv.set(args) break listener = PostgresListener() listener.register("subnet", set_defer_value) yield listener.start() try: yield deferToDatabase(self.update_staticipaddress, ipaddress.id, { "ip": selected_ip, "subnet": new_subnet, }) yield dvs[0].get(timeout=2) yield dvs[1].get(timeout=2) self.assertItemsEqual([ ('update', '%s' % old_subnet.id), ('update', '%s' % new_subnet.id), ], [dvs[0].value, dvs[1].value]) finally: yield listener.stop() class TestNodeBlockDeviceListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_blockdevice, maasserver_physicalblockdevice, and maasserver_virtualblockdevice tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_blockdevice, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) blockdevice = yield deferToDatabase( self.create_blockdevice, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_blockdevice, blockdevice.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) blockdevice = yield deferToDatabase( self.create_blockdevice, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.update_blockdevice, blockdevice.id, { "size": random.randint(3000 * 1000, 1000 * 1000 * 1000) }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_physicalblockdevice_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) blockdevice = yield deferToDatabase( self.create_physicalblockdevice, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_physicalblockdevice, blockdevice.id, { "model": factory.make_name("model") }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) 
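# A condensed sketch of the listen-and-wait shape the tests in this module
# follow, assuming only the PostgresListener and DeferredValue behaviour
# exercised here; the helper name is hypothetical and returnValue comes
# from twisted.internet.defer:
#
#     @wait_for_reactor
#     @inlineCallbacks
#     def wait_for_notification(channel, make_change):
#         yield deferToDatabase(register_all_triggers)
#         listener = PostgresListener()
#         dv = DeferredValue()
#         listener.register(channel, lambda *args: dv.set(args))
#         yield listener.start()
#         try:
#             yield deferToDatabase(make_change)
#             result = yield dv.get(timeout=2)
#         finally:
#             yield listener.stop()
#         returnValue(result)  # e.g. ('update', '42')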
finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_virtualblockdevice_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) blockdevice = yield deferToDatabase( self.create_virtualblockdevice, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_virtualblockdevice, blockdevice.id, { "uuid": factory.make_UUID() }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestNodePartitionTableListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_partitiontable tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_partitiontable, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partitiontable = yield deferToDatabase( self.create_partitiontable, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.delete_partitiontable, partitiontable.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partitiontable = yield deferToDatabase( self.create_partitiontable, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_partitiontable, partitiontable.id, { "size": random.randint(3000 * 1000, 1000 * 1000 * 1000) }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestNodePartitionListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_partition tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_partition, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) 
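# When one change is expected to produce several notifications (the
# blank-name fabric/space create tests and the old/new subnet test
# earlier), the pattern extends to one DeferredValue per expected event.
# A sketch of that idea, assuming only the behaviour asserted in those
# tests; the helper name is hypothetical:
#
#     def make_collector(count):
#         dvs = [DeferredValue() for _ in range(count)]
#         pending = list(dvs)
#         def callback(*args):
#             # Fill the next unset DeferredValue per notification.
#             if pending:
#                 pending.pop(0).set(args)
#         return dvs, callback
#
#     dvs, callback = make_collector(2)
#     listener.register("subnet", callback)
#     # ...make the change, then wait on each dv.get(timeout=2).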
finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_partition, partition.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: # Only downsize the partition; otherwise the test may fail because # the randomly generated size can exceed the mock's available disk # space yield deferToDatabase(self.update_partition, partition.id, { "size": partition.size - 1, }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestNodeFilesystemListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_filesystem tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.create_filesystem, {"partition": partition}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) filesystem = yield deferToDatabase( self.create_filesystem, {"partition": partition}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_filesystem, filesystem.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) filesystem = yield deferToDatabase( self.create_filesystem, {"partition": partition}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.update_filesystem, filesystem.id, { "size": random.randint(3000 * 1000, 1000 * 1000 * 1000)
}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestNodeFilesystemgroupListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_filesystemgroup tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True, 'with_boot_disk': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) yield deferToDatabase(self.create_partitiontable, {'node': node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_filesystemgroup, {"node": node}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) yield deferToDatabase(self.create_partitiontable, {'node': node}) filesystemgroup = yield deferToDatabase( self.create_filesystemgroup, { "node": node, "group_type": "raid-5"}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.delete_filesystemgroup, filesystemgroup.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) yield deferToDatabase(self.create_partitiontable, {'node': node}) filesystemgroup = yield deferToDatabase( self.create_filesystemgroup, { "node": node, "group_type": "raid-5"}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.update_filesystemgroup, filesystemgroup.id, { "size": random.randint(3000 * 1000, 1000 * 1000 * 1000) }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestNodeCachesetListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the triggers on maasserver_cacheset tables that notifies its node.""" scenarios = ( ('node', { 'params': {'installable': True}, 'listener': 'node', }), ) @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase( self.create_cacheset, {"node": node, "partition": partition}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) node = yield 
deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) cacheset = yield deferToDatabase( self.create_cacheset, {"node": node, "partition": partition}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_cacheset, cacheset.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_update(self): yield deferToDatabase(register_all_triggers) node = yield deferToDatabase(self.create_node, self.params) partition = yield deferToDatabase( self.create_partition, {"node": node}) cacheset = yield deferToDatabase( self.create_cacheset, {"node": node, "partition": partition}) listener = PostgresListener() dv = DeferredValue() listener.register(self.listener, lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.update_cacheset, cacheset.id, { "size": random.randint(3000 * 1000, 1000 * 1000 * 1000) }) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % node.system_id), dv.value) finally: yield listener.stop() class TestUserSSHKeyListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the maasserver_sshkey table that notifies its user.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) user = yield deferToDatabase(self.create_user) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_sshkey, {"user": user}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % user.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) user = yield deferToDatabase(self.create_user) sshkey = yield deferToDatabase(self.create_sshkey, {"user": user}) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_sshkey, sshkey.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % user.id), dv.value) finally: yield listener.stop() class TestUserSSLKeyListener( DjangoTransactionTestCase, TransactionalHelpersMixin): """End-to-end test of both the listeners code and the maasserver_sslkey table that notifies its user.""" @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_create(self): yield deferToDatabase(register_all_triggers) user = yield deferToDatabase(self.create_user) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda *args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.create_sslkey, {"user": user}) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % user.id), dv.value) finally: yield listener.stop() @wait_for_reactor @inlineCallbacks def test__calls_handler_with_update_on_delete(self): yield deferToDatabase(register_all_triggers) user = yield deferToDatabase(self.create_user) sslkey = yield deferToDatabase(self.create_sslkey, {"user": user}) listener = self.make_listener_without_delay() dv = DeferredValue() listener.register("user", lambda 
*args: dv.set(args)) yield listener.start() try: yield deferToDatabase(self.delete_sslkey, sslkey.id) yield dv.get(timeout=2) self.assertEqual(('update', '%s' % user.id), dv.value) finally: yield listener.stop() maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/test_protocol.py0000644000000000000000000007705713056115004024417 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.websockets.protocol`""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import deque import json import random from crochet import wait_for_reactor from django.core.exceptions import ValidationError from maasserver.eventloop import services from maasserver.testing.factory import factory as maas_factory from maasserver.testing.testcase import ( MAASServerTestCase, MAASTransactionServerTestCase, ) from maasserver.utils.orm import transactional from maasserver.utils.threads import deferToDatabase from maasserver.websockets import protocol as protocol_module from maasserver.websockets.base import Handler from maasserver.websockets.handlers import ( DeviceHandler, NodeHandler, ) from maasserver.websockets.protocol import ( MSG_TYPE, RESPONSE_TYPE, WebSocketFactory, WebSocketProtocol, ) from maasserver.websockets.websockets import STATUSES from maastesting.matchers import ( IsFiredDeferred, MockCalledOnceWith, MockCalledWith, ) from maastesting.testcase import MAASTestCase from mock import ( MagicMock, sentinel, ) from provisioningserver.utils.twisted import synchronous from testtools.matchers import ( Equals, Is, ) from twisted.internet import defer from twisted.internet.defer import ( fail, inlineCallbacks, succeed, ) from twisted.web.server import NOT_DONE_YET class TestWebSocketProtocol(MAASServerTestCase): def make_protocol(self, patch_authenticate=True, transport_uri=''): self.patch(protocol_module, "PostgresListener") factory = WebSocketFactory() self.patch(factory, "registerRPCEvents") self.patch(factory, "unregisterRPCEvents") factory.startFactory() self.addCleanup(factory.stopFactory) protocol = factory.buildProtocol(None) protocol.transport = MagicMock() protocol.transport.uri = transport_uri if patch_authenticate: self.patch(protocol, "authenticate") return protocol, factory def make_ws_uri(self, csrftoken=None): """Make a websocket URI. In practice, the URI usually looks like '/MAAS/ws?csrftoken=', but the code only cares about the presence of the CSRF token in the query string.
""" url = "/%s/%s" % ( maas_factory.make_name("path"), maas_factory.make_name("path")) if csrftoken is not None: url += "?csrftoken=%s" % csrftoken return url def get_written_transport_message(self, protocol): call = protocol.transport.write.call_args_list.pop() return json.loads(call[0][0]) def test_connectionMade_sets_user_and_processes_messages(self): protocol, factory = self.make_protocol() self.patch_autospec(protocol, "authenticate") self.patch_autospec(protocol, "processMessages") protocol.authenticate.return_value = defer.succeed(sentinel.user) protocol.connectionMade() self.addCleanup(protocol.connectionLost, "") self.assertThat(protocol.user, Is(sentinel.user)) self.assertThat(protocol.processMessages, MockCalledOnceWith()) def test_connectionMade_adds_self_to_factory_if_auth_succeeds(self): protocol, factory = self.make_protocol() mock_authenticate = self.patch(protocol, "authenticate") user = maas_factory.make_User() mock_authenticate.return_value = defer.succeed(user) protocol.connectionMade() self.addCleanup(lambda: protocol.connectionLost("")) self.assertItemsEqual([protocol], factory.clients) def test_connectionMade_doesnt_add_self_to_factory_if_auth_fails(self): protocol, factory = self.make_protocol() mock_authenticate = self.patch(protocol, "authenticate") fake_error = maas_factory.make_name() mock_authenticate.return_value = defer.fail(Exception(fake_error)) protocol.connectionMade() self.addCleanup(lambda: protocol.connectionLost("")) self.assertNotIn(protocol, factory.clients) def test_connectionMade_extracts_sessionid_and_csrftoken(self): protocol, factory = self.make_protocol(patch_authenticate=False) sessionid = maas_factory.make_name("sessionid") csrftoken = maas_factory.make_name("csrftoken") cookies = { maas_factory.make_name("key"): maas_factory.make_name("value") for _ in range(3) } cookies["sessionid"] = sessionid cookies["csrftoken"] = csrftoken protocol.transport.cookies = "; ".join( "%s=%s" % (key, value) for key, value in cookies.items()) mock_authenticate = self.patch(protocol, "authenticate") protocol.connectionMade() self.addCleanup(lambda: protocol.connectionLost("")) self.assertThat( mock_authenticate, MockCalledOnceWith(sessionid, csrftoken)) def test_connectionLost_removes_self_from_factory(self): protocol, factory = self.make_protocol() mock_authenticate = self.patch(protocol, "authenticate") mock_authenticate.return_value = defer.succeed(None) protocol.connectionMade() protocol.connectionLost("") self.assertItemsEqual([], factory.clients) def test_connectionLost_succeeds_if_client_hasnt_been_recorded(self): protocol, factory = self.make_protocol() self.assertIsNone(protocol.connectionLost("")) self.assertItemsEqual([], factory.clients) def test_loseConnection_writes_to_log(self): protocol, factory = self.make_protocol() mock_log_msg = self.patch_autospec(protocol_module.log, "msg") status = random.randint(1000, 1010) reason = maas_factory.make_name("reason") protocol.loseConnection(status, reason) self.assertThat( mock_log_msg, MockCalledOnceWith( format="Closing connection: %(status)r (%(reason)r)", status=status, reason=reason)) def test_loseConnection_calls_loseConnection_with_status_and_reason(self): protocol, factory = self.make_protocol() status = random.randint(1000, 1010) reason = maas_factory.make_name("reason") protocol.loseConnection(status, reason) self.assertThat( protocol.transport._receiver._transport.loseConnection, MockCalledOnceWith(status, reason.encode("utf-8"))) def test_getMessageField_returns_value_in_message(self): 
protocol, factory = self.make_protocol() key = maas_factory.make_name("key") value = maas_factory.make_name("value") message = {key: value} self.assertEquals(value, protocol.getMessageField(message, key)) def test_getMessageField_calls_loseConnection_if_key_missing(self): protocol, factory = self.make_protocol() key = maas_factory.make_name("key") mock_loseConnection = self.patch_autospec(protocol, "loseConnection") self.expectThat(protocol.getMessageField({}, key), Is(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Missing %s field in the received message." % key)) @synchronous @transactional def get_user_and_session_id(self): self.client_log_in() user = self.logged_in_user session_id = self.client.session._session_key return user, session_id @wait_for_reactor @inlineCallbacks def test_getUserFromSessionId_returns_User(self): user, session_id = yield deferToDatabase(self.get_user_and_session_id) protocol, factory = self.make_protocol() protocol_user = yield deferToDatabase( lambda: protocol.getUserFromSessionId(session_id)) self.assertEquals(user, protocol_user) def test_getUserFromSessionId_returns_None_for_invalid_key(self): self.client_log_in() session_id = maas_factory.make_name("sessionid") protocol, factory = self.make_protocol() self.assertIs( None, protocol.getUserFromSessionId(session_id)) @wait_for_reactor @inlineCallbacks def test_authenticate_calls_loseConnection_if_user_is_None(self): csrftoken = maas_factory.make_name("csrftoken") uri = self.make_ws_uri(csrftoken) protocol, factory = self.make_protocol( patch_authenticate=False, transport_uri=uri) mock_loseConnection = self.patch_autospec(protocol, "loseConnection") mock_getUserFromSessionId = self.patch_autospec( protocol, "getUserFromSessionId") mock_getUserFromSessionId.return_value = None yield protocol.authenticate( maas_factory.make_name("sessionid"), csrftoken) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Failed to authenticate user.")) @wait_for_reactor @inlineCallbacks def test_authenticate_calls_loseConnection_if_error_getting_user(self): csrftoken = maas_factory.make_name("csrftoken") uri = self.make_ws_uri(csrftoken) protocol, factory = self.make_protocol( patch_authenticate=False, transport_uri=uri) mock_loseConnection = self.patch_autospec(protocol, "loseConnection") mock_getUserFromSessionId = self.patch_autospec( protocol, "getUserFromSessionId") mock_getUserFromSessionId.side_effect = maas_factory.make_exception( "unknown reason") yield protocol.authenticate( maas_factory.make_name("sessionid"), csrftoken, ) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Error authenticating user: unknown reason")) @wait_for_reactor @inlineCallbacks def test_authenticate_calls_loseConnection_if_invalid_csrftoken(self): user, session_id = yield deferToDatabase(self.get_user_and_session_id) csrftoken = maas_factory.make_name("csrftoken") uri = self.make_ws_uri(csrftoken) protocol, factory = self.make_protocol( patch_authenticate=False, transport_uri=uri) mock_loseConnection = self.patch_autospec(protocol, "loseConnection") other_csrftoken = maas_factory.make_name("csrftoken") yield protocol.authenticate(session_id, other_csrftoken) self.expectThat(protocol.user, Equals(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Invalid CSRF token.")) @wait_for_reactor @inlineCallbacks def test_authenticate_calls_loseConnection_if_csrftoken_is_missing(self): user, session_id = 
yield deferToDatabase(self.get_user_and_session_id) uri = self.make_ws_uri(csrftoken=None) protocol, factory = self.make_protocol( patch_authenticate=False, transport_uri=uri) mock_loseConnection = self.patch_autospec(protocol, "loseConnection") other_csrftoken = maas_factory.make_name("csrftoken") yield protocol.authenticate(session_id, other_csrftoken) self.expectThat(protocol.user, Equals(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Invalid CSRF token.")) def test_dataReceived_calls_loseConnection_if_json_error(self): protocol, factory = self.make_protocol() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") self.expectThat(protocol.dataReceived("{{{{"), Is("")) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Invalid data expecting JSON object.")) def test_dataReceived_adds_message_to_queue(self): protocol, factory = self.make_protocol() self.patch_autospec(protocol, "processMessages") message = {"type": MSG_TYPE.REQUEST} self.expectThat( protocol.dataReceived(json.dumps(message)), Is(NOT_DONE_YET)) self.expectThat(protocol.messages, Equals(deque([message]))) def test_dataReceived_calls_processMessages(self): protocol, factory = self.make_protocol() mock_processMessages = self.patch_autospec(protocol, "processMessages") message = {"type": MSG_TYPE.REQUEST} self.expectThat( protocol.dataReceived(json.dumps(message)), Is(NOT_DONE_YET)) self.expectThat(mock_processMessages, MockCalledOnceWith()) def test_processMessages_does_nothing_if_no_user(self): protocol = WebSocketProtocol() protocol.messages = deque([ {"type": MSG_TYPE.REQUEST, "request_id": 1}, {"type": MSG_TYPE.REQUEST, "request_id": 2}, ]) self.assertEquals([], protocol.processMessages()) def test_processMessages_process_all_messages_in_the_queue(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() self.patch_autospec( protocol, "handleRequest").return_value = NOT_DONE_YET messages = [ {"type": MSG_TYPE.REQUEST, "request_id": 1}, {"type": MSG_TYPE.REQUEST, "request_id": 2}, ] protocol.messages = deque(messages) self.assertEquals(messages, protocol.processMessages()) def test_processMessages_calls_loseConnection_if_missing_type_field(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") self.patch_autospec( protocol, "handleRequest").return_value = NOT_DONE_YET messages = [ {"request_id": 1}, {"type": MSG_TYPE.REQUEST, "request_id": 2}, ] protocol.messages = deque(messages) self.expectThat([messages[0]], Equals(protocol.processMessages())) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Missing type field in the received message.")) def test_processMessages_calls_loseConnection_if_type_not_request(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") self.patch_autospec( protocol, "handleRequest").return_value = NOT_DONE_YET messages = [ {"type": MSG_TYPE.RESPONSE, "request_id": 1}, {"type": MSG_TYPE.REQUEST, "request_id": 2}, ] protocol.messages = deque(messages) self.expectThat([messages[0]], Equals(protocol.processMessages())) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Invalid message type.")) def test_processMessages_stops_processing_msgs_handleRequest_fails(self): protocol, factory = self.make_protocol() 
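# The request/response envelope these tests drive is plain JSON over the
# websocket. A representative exchange, based on the assertions above and
# below; field values are purely illustrative:
#
#     request = {
#         "type": MSG_TYPE.REQUEST,        # only REQUEST is accepted
#         "request_id": 1,                 # echoed back in the response
#         "method": "node.get",            # "<handler>.<method>"
#         "params": {"system_id": "..."},
#     }
#     # The success reply written back on the transport:
#     response = {
#         "type": MSG_TYPE.RESPONSE,
#         "request_id": 1,
#         "rtype": RESPONSE_TYPE.SUCCESS,  # or RESPONSE_TYPE.ERROR + "error"
#         "result": {"hostname": "..."},   # the handler's return value
#     }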
protocol.user = maas_factory.make_User() self.patch_autospec( protocol, "handleRequest").return_value = None messages = [ {"type": MSG_TYPE.REQUEST, "request_id": 1}, {"type": MSG_TYPE.REQUEST, "request_id": 2}, ] protocol.messages = deque(messages) self.expectThat([messages[0]], Equals(protocol.processMessages())) def test_processMessages_calls_handleRequest_with_message(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_handleRequest = self.patch_autospec( protocol, "handleRequest") mock_handleRequest.return_value = NOT_DONE_YET message = {"type": MSG_TYPE.REQUEST, "request_id": 1} protocol.messages = deque([message]) self.expectThat([message], Equals(protocol.processMessages())) self.expectThat( mock_handleRequest, MockCalledOnceWith(message)) def test_handleRequest_calls_loseConnection_if_missing_request_id(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") message = {"type": MSG_TYPE.REQUEST} self.expectThat( protocol.handleRequest(message), Is(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Missing request_id field in the received message.")) def test_handleRequest_calls_loseConnection_if_missing_method(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") message = { "type": MSG_TYPE.REQUEST, "request_id": 1, } self.expectThat( protocol.handleRequest(message), Is(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Missing method field in the received message.")) def test_handleRequest_calls_loseConnection_if_bad_method(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") message = { "type": MSG_TYPE.REQUEST, "request_id": 1, "method": "nodes", } self.expectThat( protocol.handleRequest(message), Is(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Invalid method formatting.")) def test_handleRequest_calls_loseConnection_if_unknown_handler(self): protocol, factory = self.make_protocol() protocol.user = maas_factory.make_User() mock_loseConnection = self.patch_autospec(protocol, "loseConnection") message = { "type": MSG_TYPE.REQUEST, "request_id": 1, "method": "unknown.list", } self.expectThat( protocol.handleRequest(message), Is(None)) self.expectThat( mock_loseConnection, MockCalledOnceWith( STATUSES.PROTOCOL_ERROR, "Handler unknown does not exist.")) @synchronous @transactional def make_node(self): return maas_factory.make_Node() @wait_for_reactor def clean_node(self, node): @synchronous @transactional def delete_node(): node.delete() return deferToDatabase(delete_node) def test_handleRequest_builds_handler(self): protocol, factory = self.make_protocol() protocol.user = sentinel.user handler_class = MagicMock() handler_name = maas_factory.make_name("handler") handler_class._meta.handler_name = handler_name handler = handler_class.return_value handler.execute.return_value = succeed(None) # Inject mock handler into the factory. 
factory.handlers[handler_name] = handler_class d = protocol.handleRequest({ "type": MSG_TYPE.REQUEST, "request_id": random.randint(1, 999999), "method": "%s.get" % handler_name, }) self.assertThat(d, IsFiredDeferred()) self.assertThat(handler_class, MockCalledOnceWith( protocol.user, protocol.cache[handler_name])) # The cache passed into the handler constructor *is* the one found in # the protocol's cache; they're not merely equal. self.assertIs( protocol.cache[handler_name], handler_class.call_args[0][1]) @wait_for_reactor @inlineCallbacks def test_handleRequest_sends_response(self): node = yield deferToDatabase(self.make_node) # Need to delete the node as the transaction is committed self.addCleanup(self.clean_node, node) protocol, factory = self.make_protocol() protocol.user = MagicMock() message = { "type": MSG_TYPE.REQUEST, "request_id": 1, "method": "node.get", "params": { "system_id": node.system_id, } } yield protocol.handleRequest(message) sent_obj = self.get_written_transport_message(protocol) self.expectThat(sent_obj["type"], Equals(MSG_TYPE.RESPONSE)) self.expectThat(sent_obj["request_id"], Equals(1)) self.expectThat(sent_obj["rtype"], Equals(RESPONSE_TYPE.SUCCESS)) self.expectThat(sent_obj["result"]["hostname"], Equals(node.hostname)) @wait_for_reactor @inlineCallbacks def test_handleRequest_sends_validation_error(self): node = yield deferToDatabase(self.make_node) # Need to delete the node as the transaction is committed self.addCleanup(self.clean_node, node) protocol, factory = self.make_protocol() protocol.user = MagicMock() error_dict = { "error": "bad" } self.patch(Handler, "execute").return_value = fail( ValidationError(error_dict)) message = { "type": MSG_TYPE.REQUEST, "request_id": 1, "method": "node.get", "params": { "system_id": node.system_id, } } yield protocol.handleRequest(message) sent_obj = self.get_written_transport_message(protocol) self.expectThat(sent_obj["type"], Equals(MSG_TYPE.RESPONSE)) self.expectThat(sent_obj["request_id"], Equals(1)) self.expectThat(sent_obj["rtype"], Equals(RESPONSE_TYPE.ERROR)) self.expectThat(sent_obj["error"], Equals(json.dumps(error_dict))) @wait_for_reactor @inlineCallbacks def test_handleRequest_sends_error(self): node = yield deferToDatabase(self.make_node) # Need to delete the node as the transaction is committed self.addCleanup(self.clean_node, node) protocol, factory = self.make_protocol() protocol.user = MagicMock() self.patch(Handler, "execute").return_value = fail( maas_factory.make_exception("error")) message = { "type": MSG_TYPE.REQUEST, "request_id": 1, "method": "node.get", "params": { "system_id": node.system_id, } } yield protocol.handleRequest(message) sent_obj = self.get_written_transport_message(protocol) self.expectThat(sent_obj["type"], Equals(MSG_TYPE.RESPONSE)) self.expectThat(sent_obj["request_id"], Equals(1)) self.expectThat(sent_obj["rtype"], Equals(RESPONSE_TYPE.ERROR)) self.expectThat(sent_obj["error"], Equals("error")) def test_sendNotify_sends_correct_json(self): protocol, factory = self.make_protocol() name = maas_factory.make_name("name") action = maas_factory.make_name("action") data = maas_factory.make_name("data") message = { "type": MSG_TYPE.NOTIFY, "name": name, "action": action, "data": data, } protocol.sendNotify(name, action, data) self.assertEquals( message, self.get_written_transport_message(protocol)) class MakeProtocolFactoryMixin: def make_factory(self, rpc_service=None): factory = WebSocketFactory() if rpc_service is None: rpc_service = MagicMock() self.patch(services, 
"getServiceNamed").return_value = rpc_service return factory def make_protocol_with_factory(self, user=None, rpc_service=None): factory = self.make_factory(rpc_service=rpc_service) factory.startFactory() self.addCleanup(factory.stopFactory) protocol = factory.buildProtocol(None) protocol.transport = MagicMock() if user is None: user = maas_factory.make_User() mock_authenticate = self.patch(protocol, "authenticate") mock_authenticate.return_value = defer.succeed(user) protocol.connectionMade() self.addCleanup(lambda: protocol.connectionLost("")) return protocol, factory ALL_NOTIFIERS = ( "device", "event", "fabric", "node", "nodegroup", "space", "subnet", "tag", "user", "vlan", "zone", ) ALL_HANDLERS = ( "cluster", "device", "event", "fabric", "general", "node", "space", "subnet", "tag", "user", "vlan", "zone", ) class TestWebSocketFactory(MAASTestCase, MakeProtocolFactoryMixin): def test_loads_all_handlers(self): factory = self.make_factory() self.assertItemsEqual(ALL_HANDLERS, factory.handlers.keys()) def test_get_SessionEngine_calls_import_module_with_SESSION_ENGINE(self): mock_import = self.patch_autospec(protocol_module, "import_module") factory = self.make_factory() factory.getSessionEngine() self.assertThat( mock_import, MockCalledOnceWith(protocol_module.settings.SESSION_ENGINE)) def test_getHandler_returns_None_on_missing_handler(self): factory = self.make_factory() self.assertIsNone(factory.getHandler("unknown")) def test_getHandler_returns_NodeHandler(self): factory = self.make_factory() self.assertIs( NodeHandler, factory.getHandler("node")) def test_getHandler_returns_DeviceHandler(self): factory = self.make_factory() self.assertIs( DeviceHandler, factory.getHandler("device")) def test_buildProtocol_returns_WebSocketProtocol(self): factory = self.make_factory() self.assertIsInstance( factory.buildProtocol(sentinel.addr), WebSocketProtocol) @wait_for_reactor @inlineCallbacks def test_startFactory_starts_listener(self): factory = self.make_factory() yield factory.startFactory() try: self.expectThat(factory.listener.connected(), Equals(True)) finally: yield factory.stopFactory() @wait_for_reactor @inlineCallbacks def test_startFactory_registers_rpc_handlers(self): rpc_service = MagicMock() factory = self.make_factory(rpc_service) yield factory.startFactory() try: self.expectThat( rpc_service.events.connected.registerHandler, MockCalledOnceWith(factory.updateCluster)) self.expectThat( rpc_service.events.disconnected.registerHandler, MockCalledOnceWith(factory.updateCluster)) finally: yield factory.stopFactory() @wait_for_reactor @inlineCallbacks def test_stopFactory_stops_listener(self): factory = self.make_factory() yield factory.startFactory() yield factory.stopFactory() self.expectThat(factory.listener.connected(), Equals(False)) @wait_for_reactor @inlineCallbacks def test_stopFactory_unregisters_rpc_handlers(self): rpc_service = MagicMock() factory = self.make_factory(rpc_service) yield factory.startFactory() yield factory.stopFactory() self.expectThat( rpc_service.events.connected.unregisterHandler, MockCalledOnceWith(factory.updateCluster)) self.expectThat( rpc_service.events.disconnected.unregisterHandler, MockCalledOnceWith(factory.updateCluster)) def test_registerNotifiers_registers_all_notifiers(self): factory = self.make_factory() self.assertItemsEqual( ALL_NOTIFIERS, factory.listener.listeners.keys()) @transactional def make_user(self): return maas_factory.make_User() @wait_for_reactor @inlineCallbacks def test_onNotify_creates_handler_class_with_protocol_user(self): 
user = yield deferToDatabase(self.make_user) protocol, factory = self.make_protocol_with_factory(user=user) mock_class = MagicMock() mock_class.return_value.on_listen.return_value = None yield factory.onNotify( mock_class, sentinel.channel, sentinel.action, sentinel.obj_id) self.assertIs( protocol.user, mock_class.call_args[0][0]) @wait_for_reactor @inlineCallbacks def test_onNotify_creates_handler_class_with_protocol_cache(self): user = yield deferToDatabase(self.make_user) protocol, factory = self.make_protocol_with_factory(user=user) handler_class = MagicMock() handler_class.return_value.on_listen.return_value = None handler_class._meta.handler_name = maas_factory.make_name("handler") yield factory.onNotify( handler_class, sentinel.channel, sentinel.action, sentinel.obj_id) self.assertThat( handler_class, MockCalledOnceWith( user, protocol.cache[handler_class._meta.handler_name])) # The cache passed into the handler constructor *is* the one found in # the protocol's cache; they're not merely equal. self.assertIs( protocol.cache[handler_class._meta.handler_name], handler_class.call_args[0][1]) @wait_for_reactor @inlineCallbacks def test_onNotify_calls_handler_class_on_listen(self): user = yield deferToDatabase(self.make_user) protocol, factory = self.make_protocol_with_factory(user=user) mock_class = MagicMock() mock_class.return_value.on_listen.return_value = None yield factory.onNotify( mock_class, sentinel.channel, sentinel.action, sentinel.obj_id) self.assertThat( mock_class.return_value.on_listen, MockCalledWith(sentinel.channel, sentinel.action, sentinel.obj_id)) @wait_for_reactor @inlineCallbacks def test_onNotify_calls_sendNotify_on_protocol(self): user = yield deferToDatabase(self.make_user) protocol, factory = self.make_protocol_with_factory(user=user) name = maas_factory.make_name("name") action = maas_factory.make_name("action") data = maas_factory.make_name("data") mock_class = MagicMock() mock_class.return_value.on_listen.return_value = (name, action, data) mock_sendNotify = self.patch(protocol, "sendNotify") yield factory.onNotify( mock_class, sentinel.channel, action, sentinel.obj_id) self.assertThat( mock_sendNotify, MockCalledWith(name, action, data)) class TestWebSocketFactoryTransactional( MAASTransactionServerTestCase, MakeProtocolFactoryMixin): @wait_for_reactor @inlineCallbacks def test_updateCluster_calls_onNotify_for_cluster_update(self): user = yield deferToDatabase(transactional(maas_factory.make_User)) cluster = yield deferToDatabase( transactional(maas_factory.make_NodeGroup)) protocol, factory = self.make_protocol_with_factory(user=user) mock_onNotify = self.patch(factory, "onNotify") cluster_handler = MagicMock() factory.handlers["cluster"] = cluster_handler yield factory.updateCluster(cluster.uuid) self.assertThat( mock_onNotify, MockCalledOnceWith( cluster_handler, "cluster", "update", cluster.id)) maas-1.9.5+bzr4599.orig/src/maasserver/websockets/tests/test_websockets.py0000644000000000000000000007334113056115004024717 0ustar 00000000000000# # Copyright (c) Twisted Matrix Laboratories. # # http://twistedmatrix.com/trac/ticket/4173 """ The WebSockets Protocol, according to RFC 6455 (http://tools.ietf.org/html/rfc6455). When "RFC" is mentioned, it refers to this RFC. Some tests reference HyBi-10 (http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10) or HyBi-07 (http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-07), which are drafts of RFC 6455. 
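As a worked example of the base framing (RFC 6455, section 5.2) used throughout these tests: the bytes "\x81\x05Hello" form a minimal unmasked TEXT frame, where 0x81 is the FIN bit plus opcode 0x1 (text), 0x05 is a clear mask bit plus a seven-bit payload length of five, and the remaining five bytes are the payload "Hello".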
""" from maasserver.websockets.websockets import ( _makeAccept, _makeFrame, _mask, _parseFrames, _WSException, CONTROLS, IWebSocketsFrameReceiver, lookupProtocolForFactory, STATUSES, WebSocketsProtocol, WebSocketsProtocolWrapper, WebSocketsResource, WebSocketsTransport, ) from maastesting.testcase import MAASTestCase from twisted.internet.protocol import ( Factory, Protocol, ) from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.python import log from twisted.test.proto_helpers import ( AccumulatingProtocol, StringTransportWithDisconnection, ) from twisted.web.http_headers import Headers from twisted.web.resource import ( IResource, Resource, ) from twisted.web.server import ( NOT_DONE_YET, Request, ) from twisted.web.test.test_web import ( DummyChannel, DummyRequest, ) from zope.interface import implementer from zope.interface.verify import verifyObject class TestFrameHelpers(MAASTestCase): """ Test functions helping building and parsing WebSockets frames. """ def test_makeAcceptRFC(self): """ L{_makeAccept} makes responses according to the RFC. """ key = "dGhlIHNhbXBsZSBub25jZQ==" self.assertEqual(_makeAccept(key), "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") def test_maskNoop(self): """ Blank keys perform a no-op mask. """ key = "\x00\x00\x00\x00" self.assertEqual(_mask("Test", key), "Test") def test_maskNoopLong(self): """ Blank keys perform a no-op mask regardless of the length of the input. """ key = "\x00\x00\x00\x00" self.assertEqual(_mask("LongTest", key), "LongTest") def test_maskNoopOdd(self): """ Masking works even when the data to be masked isn't a multiple of four in length. """ key = "\x00\x00\x00\x00" self.assertEqual(_mask("LongestTest", key), "LongestTest") def test_maskHello(self): """ A sample mask for "Hello" according to RFC 6455, 5.7. """ key = "\x37\xfa\x21\x3d" self.assertEqual(_mask("Hello", key), "\x7f\x9f\x4d\x51\x58") def test_parseUnmaskedText(self): """ A sample unmasked frame of "Hello" from HyBi-10, 4.7. """ frame = ["\x81\x05Hello"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.TEXT, "Hello", True)) self.assertEqual(frame, []) def test_parseUnmaskedLargeText(self): """ L{_parseFrames} handles frame with text longer than 125 bytes. """ frame = ["\x81\x7e\x00\xc8", "x" * 200] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.TEXT, "x" * 200, True)) self.assertEqual(frame, []) def test_parseUnmaskedTextWithMaskNeeded(self): """ L{_parseFrames} raises L{_WSException} if the frame is not masked and C{needMask} is set to C{True}. """ frame = ["\x81\x05Hello"] error = self.assertRaises( _WSException, list, _parseFrames(frame, needMask=True)) self.assertEqual("Received data not masked", str(error)) def test_parseUnmaskedHugeText(self): """ L{_parseFrames} handles frame with text longer than 64 kB. """ frame = ["\x81\x7f\x00\x00\x00\x00\x00\x01\x86\xa0", "x" * 100000] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.TEXT, "x" * 100000, True)) self.assertEqual(frame, []) def test_parseMaskedText(self): """ A sample masked frame of "Hello" from HyBi-10, 4.7. 
""" frame = ["\x81\x857\xfa!=\x7f\x9fMQX"] frames = list(_parseFrames(frame)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.TEXT, "Hello", True)) self.assertEqual(frame, []) def test_parseMaskedPartialText(self): """ L{_parseFrames} stops parsing if a masked frame isn't long enough to contain the length of the text. """ frame = ["\x81\x827\xfa"] frames = list(_parseFrames(frame)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81\x827\xfa"]) def test_parseUnmaskedTextFragments(self): """ Fragmented masked packets are handled. From HyBi-10, 4.7. """ frame = ["\x01\x03Hel\x80\x02lo"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 2) self.assertEqual(frames[0], (CONTROLS.TEXT, "Hel", False)) self.assertEqual(frames[1], (CONTROLS.CONTINUE, "lo", True)) self.assertEqual(frame, []) def test_parsePing(self): """ Ping packets are decoded. From HyBi-10, 4.7. """ frame = ["\x89\x05Hello"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.PING, "Hello", True)) self.assertEqual(frame, []) def test_parsePong(self): """ Pong packets are decoded. From HyBi-10, 4.7. """ frame = ["\x8a\x05Hello"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual(frames[0], (CONTROLS.PONG, "Hello", True)) self.assertEqual(frame, []) def test_parseCloseEmpty(self): """ A HyBi-07 close packet may have no body. In that case, it decodes with the generic error code 1000, and has no particular justification or error message. """ frame = ["\x88\x00"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual( frames[0], (CONTROLS.CLOSE, (STATUSES.NONE, ""), True)) self.assertEqual(frame, []) def test_parseCloseReason(self): """ A HyBi-07 close packet must have its first two bytes be a numeric error code, and may optionally include trailing text explaining why the connection was closed. """ frame = ["\x88\x0b\x03\xe8No reason"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 1) self.assertEqual( frames[0], (CONTROLS.CLOSE, (STATUSES.NORMAL, "No reason"), True)) self.assertEqual(frame, []) def test_parsePartialNoLength(self): """ Partial frames are stored for later decoding. """ frame = ["\x81"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81"]) def test_parsePartialTruncatedLengthInt(self): """ Partial frames are stored for later decoding, even if they are cut on length boundaries. """ frame = ["\x81\xfe"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81\xfe"]) def test_parsePartialTruncatedLengthDouble(self): """ Partial frames are stored for later decoding, even if they are marked as being extra-long. """ frame = ["\x81\xff"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81\xff"]) def test_parsePartialNoData(self): """ Partial frames with full headers but no data are stored for later decoding. """ frame = ["\x81\x05"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81\x05"]) def test_parsePartialTruncatedData(self): """ Partial frames with full headers and partial data are stored for later decoding. 
""" frame = ["\x81\x05Hel"] frames = list(_parseFrames(frame, needMask=False)) self.assertEqual(len(frames), 0) self.assertEqual(frame, ["\x81\x05Hel"]) def test_parseReservedFlag(self): """ L{_parseFrames} raises a L{_WSException} error when the header uses a reserved flag. """ frame = ["\x72\x05"] error = self.assertRaises(_WSException, list, _parseFrames(frame)) self.assertEqual("Reserved flag in frame (114)", str(error)) def test_parseUnknownOpcode(self): """ L{_parseFrames} raises a L{_WSException} error when the error uses an unknown opcode. """ frame = ["\x8f\x05"] error = self.assertRaises(_WSException, list, _parseFrames(frame)) self.assertEqual("Unknown opcode 15 in frame", str(error)) def test_makeHello(self): """ L{_makeFrame} makes valid HyBi-07 packets. """ frame = "\x81\x05Hello" buf = _makeFrame("Hello", CONTROLS.TEXT, True) self.assertEqual(frame, buf) def test_makeLargeFrame(self): """ L{_makeFrame} prefixes the payload by the length on 2 bytes if the payload is more than 125 bytes. """ frame = "\x81\x7e\x00\xc8" + "x" * 200 buf = _makeFrame("x" * 200, CONTROLS.TEXT, True) self.assertEqual(frame, buf) def test_makeHugeFrame(self): """ L{_makeFrame} prefixes the payload by the length on 8 bytes if the payload is more than 64 kB. """ frame = "\x81\x7f\x00\x00\x00\x00\x00\x01\x86\xa0" + "x" * 100000 buf = _makeFrame("x" * 100000, CONTROLS.TEXT, True) self.assertEqual(frame, buf) def test_makeNonFinFrame(self): """ L{_makeFrame} can build fragmented frames. """ frame = "\x01\x05Hello" buf = _makeFrame("Hello", CONTROLS.TEXT, False) self.assertEqual(frame, buf) def test_makeMaskedFrame(self): """ L{_makeFrame} can build masked frames. """ frame = "\x81\x857\xfa!=\x7f\x9fMQX" buf = _makeFrame("Hello", CONTROLS.TEXT, True, mask="7\xfa!=") self.assertEqual(frame, buf) @implementer(IWebSocketsFrameReceiver) class SavingEchoReceiver(object): """ A test receiver saving the data received and sending it back. """ def makeConnection(self, transport): self.transport = transport self.received = [] def frameReceived(self, opcode, data, fin): self.received.append((opcode, data, fin)) if opcode == CONTROLS.TEXT: self.transport.sendFrame(opcode, data, fin) class WebSocketsProtocolTest(MAASTestCase): """ Tests for L{WebSocketsProtocol}. """ def setUp(self): super(WebSocketsProtocolTest, self).setUp() self.receiver = SavingEchoReceiver() self.protocol = WebSocketsProtocol(self.receiver) self.factory = Factory.forProtocol(lambda: self.protocol) self.transport = StringTransportWithDisconnection() self.protocol.makeConnection(self.transport) self.transport.protocol = self.protocol def test_frameReceived(self): """ L{WebSocketsProtocol.dataReceived} translates bytes into frames, and then write it back encoded into frames. """ self.protocol.dataReceived( _makeFrame("Hello", CONTROLS.TEXT, True, mask="abcd")) self.assertEqual("\x81\x05Hello", self.transport.value()) self.assertEqual([(CONTROLS.TEXT, "Hello", True)], self.receiver.received) def test_ping(self): """ When a C{PING} frame is received, the frame is resent with a C{PONG}, and the application receiver is notified about it. """ self.protocol.dataReceived( _makeFrame("Hello", CONTROLS.PING, True, mask="abcd")) self.assertEqual("\x8a\x05Hello", self.transport.value()) self.assertEqual([(CONTROLS.PING, "Hello", True)], self.receiver.received) def test_close(self): """ When a C{CLOSE} frame is received, the protocol closes the connection and logs a message. 
""" loggedMessages = [] def logConnectionLostMsg(eventDict): loggedMessages.append(log.textFromEventDict(eventDict)) log.addObserver(logConnectionLostMsg) self.protocol.dataReceived( _makeFrame("", CONTROLS.CLOSE, True, mask="abcd")) self.assertFalse(self.transport.connected) self.assertEqual(["Closing connection: "], loggedMessages) def test_invalidFrame(self): """ If an invalid frame is received, L{WebSocketsProtocol} closes the connection. """ self.protocol.dataReceived("\x72\x05") self.assertFalse(self.transport.connected) class WebSocketsTransportTest(MAASTestCase): """ Tests for L{WebSocketsTransport}. """ def test_loseConnection(self): """ L{WebSocketsTransport.loseConnection} sends a close frame and closes the transport afterwards. """ transport = StringTransportWithDisconnection() transport.protocol = Protocol() webSocketsTranport = WebSocketsTransport(transport) webSocketsTranport.loseConnection() self.assertFalse(transport.connected) self.assertEqual("\x88\x02\x03\xe8", transport.value()) # We can call loseConnection again without side effects webSocketsTranport.loseConnection() def test_loseConnectionCodeAndReason(self): """ L{WebSocketsTransport.loseConnection} accepts a code and a reason which are used to build the closing frame. """ transport = StringTransportWithDisconnection() transport.protocol = Protocol() webSocketsTranport = WebSocketsTransport(transport) webSocketsTranport.loseConnection(STATUSES.GOING_AWAY, "Going away") self.assertEqual("\x88\x0c\x03\xe9Going away", transport.value()) class WebSocketsProtocolWrapperTest(MAASTestCase): """ Tests for L{WebSocketsProtocolWrapper}. """ def setUp(self): super(WebSocketsProtocolWrapperTest, self).setUp() self.accumulatingProtocol = AccumulatingProtocol() self.protocol = WebSocketsProtocolWrapper(self.accumulatingProtocol) self.transport = StringTransportWithDisconnection() self.protocol.makeConnection(self.transport) self.transport.protocol = self.protocol def test_dataReceived(self): """ L{WebSocketsProtocolWrapper.dataReceived} forwards frame content to the underlying protocol. """ self.protocol.dataReceived( _makeFrame("Hello", CONTROLS.TEXT, True, mask="abcd")) self.assertEqual("Hello", self.accumulatingProtocol.data) def test_controlFrames(self): """ L{WebSocketsProtocolWrapper} doesn't forward data from control frames to the underlying protocol. """ self.protocol.dataReceived( _makeFrame("Hello", CONTROLS.PING, True, mask="abcd")) self.protocol.dataReceived( _makeFrame("Hello", CONTROLS.PONG, True, mask="abcd")) self.protocol.dataReceived( _makeFrame("", CONTROLS.CLOSE, True, mask="abcd")) self.assertEqual("", self.accumulatingProtocol.data) def test_loseConnection(self): """ L{WebSocketsProtocolWrapper.loseConnection} sends a close frame and disconnects the transport. """ self.protocol.loseConnection() self.assertFalse(self.transport.connected) self.assertEqual("\x88\x02\x03\xe8", self.transport.value()) def test_write(self): """ L{WebSocketsProtocolWrapper.write} creates and writes a frame from the payload passed. """ self.accumulatingProtocol.transport.write("Hello") self.assertEqual("\x81\x05Hello", self.transport.value()) def test_writeSequence(self): """ L{WebSocketsProtocolWrapper.writeSequence} writes a frame for every chunk passed. """ self.accumulatingProtocol.transport.writeSequence(["Hello", "World"]) self.assertEqual("\x81\x05Hello\x81\x05World", self.transport.value()) def test_getHost(self): """ L{WebSocketsProtocolWrapper.getHost} returns the transport C{getHost}. 
""" self.assertEqual(self.transport.getHost(), self.accumulatingProtocol.transport.getHost()) def test_getPeer(self): """ L{WebSocketsProtocolWrapper.getPeer} returns the transport C{getPeer}. """ self.assertEqual(self.transport.getPeer(), self.accumulatingProtocol.transport.getPeer()) def test_connectionLost(self): """ L{WebSocketsProtocolWrapper.connectionLost} forwards the connection lost call to the underlying protocol. """ self.transport.loseConnection() self.assertTrue(self.accumulatingProtocol.closed) class WebSocketsResourceTest(MAASTestCase): """ Tests for L{WebSocketsResource}. """ def setUp(self): super(WebSocketsResourceTest, self).setUp() class SavingEchoFactory(Factory): def buildProtocol(oself, addr): return self.echoProtocol factory = SavingEchoFactory() self.echoProtocol = WebSocketsProtocol(SavingEchoReceiver()) self.resource = WebSocketsResource(lookupProtocolForFactory(factory)) def assertRequestFail(self, request): """ Helper method checking that the provided C{request} fails with a I{400} request code, without data or headers. @param request: The request to render. @type request: L{DummyRequest} """ result = self.resource.render(request) self.assertEqual("", result) self.assertEqual({}, request.outgoingHeaders) self.assertEqual([], request.written) self.assertEqual(400, request.responseCode) def test_getChildWithDefault(self): """ L{WebSocketsResource.getChildWithDefault} raises a C{RuntimeError} when called. """ self.assertRaises( RuntimeError, self.resource.getChildWithDefault, "foo", DummyRequest("/")) def test_putChild(self): """ L{WebSocketsResource.putChild} raises C{RuntimeError} when called. """ self.assertRaises( RuntimeError, self.resource.putChild, "foo", Resource()) def test_IResource(self): """ L{WebSocketsResource} implements L{IResource}. """ self.assertTrue(verifyObject(IResource, self.resource)) def test_render(self): """ When rendering a request, L{WebSocketsResource} uses the C{Sec-WebSocket-Key} header to generate a C{Sec-WebSocket-Accept} value. It creates a L{WebSocketsProtocol} instance connected to the protocol provided by the user factory. """ request = DummyRequest("/") request.requestHeaders = Headers() transport = StringTransportWithDisconnection() transport.protocol = Protocol() request.transport = transport request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) result = self.resource.render(request) self.assertEqual(NOT_DONE_YET, result) self.assertEqual( {"connection": "Upgrade", "upgrade": "WebSocket", "sec-websocket-accept": "oYBv54i42V5dw6KnZqOFroecUTc="}, request.outgoingHeaders) self.assertEqual([""], request.written) self.assertEqual(101, request.responseCode) self.assertIdentical(None, request.transport) self.assertIsInstance(transport.protocol._receiver, SavingEchoReceiver) self.assertEqual(request.getHeader("cookie"), transport.cookies) self.assertEqual(request.uri, transport.uri) def test_renderProtocol(self): """ If protocols are specified via the C{Sec-WebSocket-Protocol} header, L{WebSocketsResource} passes them to its C{lookupProtocol} argument, which can decide which protocol to return, and which is accepted. 
""" def lookupProtocol(names, otherRequest): self.assertEqual(["foo", "bar"], names) self.assertIdentical(request, otherRequest) return self.echoProtocol, "bar" self.resource = WebSocketsResource(lookupProtocol) request = DummyRequest("/") request.requestHeaders = Headers( {"sec-websocket-protocol": ["foo", "bar"]}) transport = StringTransportWithDisconnection() transport.protocol = Protocol() request.transport = transport request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) result = self.resource.render(request) self.assertEqual(NOT_DONE_YET, result) self.assertEqual( {"connection": "Upgrade", "upgrade": "WebSocket", "sec-websocket-protocol": "bar", "sec-websocket-accept": "oYBv54i42V5dw6KnZqOFroecUTc="}, request.outgoingHeaders) self.assertEqual([""], request.written) self.assertEqual(101, request.responseCode) def test_renderWrongUpgrade(self): """ If the C{Upgrade} header contains an invalid value, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "upgrade": "wrong", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderNoUpgrade(self): """ If the C{Upgrade} header is not set, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderPOST(self): """ If the method is not C{GET}, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.method = "POST" request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderWrongConnection(self): """ If the C{Connection} header contains an invalid value, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "upgrade": "Websocket", "connection": "Wrong", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderNoConnection(self): """ If the C{Connection} header is not set, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "upgrade": "Websocket", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderNoKey(self): """ If the C{Sec-WebSocket-Key} header is not set, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-version": "13"}) self.assertRequestFail(request) def test_renderWrongVersion(self): """ If the value of the C{Sec-WebSocket-Version} is not 13, L{WebSocketsResource} returns a failed request. """ request = DummyRequest("/") request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "11"}) result = self.resource.render(request) self.assertEqual("", result) self.assertEqual({"sec-websocket-version": "13"}, request.outgoingHeaders) self.assertEqual([], request.written) self.assertEqual(400, request.responseCode) def test_renderNoProtocol(self): """ If the underlying factory doesn't return any protocol, L{WebSocketsResource} returns a failed request with a C{502} code. 
""" request = DummyRequest("/") request.requestHeaders = Headers() request.transport = StringTransportWithDisconnection() self.echoProtocol = None request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) result = self.resource.render(request) self.assertEqual("", result) self.assertEqual({}, request.outgoingHeaders) self.assertEqual([], request.written) self.assertEqual(502, request.responseCode) def test_renderSecureRequest(self): """ When the rendered request is over HTTPS, L{WebSocketsResource} wraps the protocol of the C{TLSMemoryBIOProtocol} instance. """ request = DummyRequest("/") request.requestHeaders = Headers() transport = StringTransportWithDisconnection() secureProtocol = TLSMemoryBIOProtocol(Factory(), Protocol()) transport.protocol = secureProtocol request.transport = transport request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) result = self.resource.render(request) self.assertEqual(NOT_DONE_YET, result) self.assertEqual( {"connection": "Upgrade", "upgrade": "WebSocket", "sec-websocket-accept": "oYBv54i42V5dw6KnZqOFroecUTc="}, request.outgoingHeaders) self.assertEqual([""], request.written) self.assertEqual(101, request.responseCode) self.assertIdentical(None, request.transport) self.assertIsInstance( transport.protocol.wrappedProtocol, WebSocketsProtocol) self.assertIsInstance( transport.protocol.wrappedProtocol._receiver, SavingEchoReceiver) def test_renderRealRequest(self): """ The request managed by L{WebSocketsResource.render} doesn't contain unnecessary HTTP headers like I{Content-Type}. """ channel = DummyChannel() channel.transport = StringTransportWithDisconnection() channel.transport.protocol = channel request = Request(channel, False) headers = { "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"} for key, value in headers.items(): request.requestHeaders.setRawHeaders(key, [value]) request.method = "GET" request.clientproto = "HTTP/1.1" result = self.resource.render(request) self.assertEqual(NOT_DONE_YET, result) self.assertEqual( [("Connection", ["Upgrade"]), ("Upgrade", ["WebSocket"]), ("Sec-Websocket-Accept", ["oYBv54i42V5dw6KnZqOFroecUTc="])], list(request.responseHeaders.getAllRawHeaders())) self.assertEqual( "HTTP/1.1 101 Switching Protocols\r\n" "Transfer-Encoding: chunked\r\n" "Connection: Upgrade\r\n" "Upgrade: WebSocket\r\n" "Sec-Websocket-Accept: oYBv54i42V5dw6KnZqOFroecUTc=\r\n\r\n", channel.transport.value()) self.assertEqual(101, request.code) self.assertIdentical(None, request.transport) def test_renderIProtocol(self): """ If the protocol returned by C{lookupProtocol} isn't a C{WebSocketsProtocol}, L{WebSocketsResource} wraps it automatically with L{WebSocketsProtocolWrapper}. 
""" def lookupProtocol(names, otherRequest): return AccumulatingProtocol(), None self.resource = WebSocketsResource(lookupProtocol) request = DummyRequest("/") request.requestHeaders = Headers() transport = StringTransportWithDisconnection() transport.protocol = Protocol() request.transport = transport request.headers.update({ "upgrade": "Websocket", "connection": "Upgrade", "sec-websocket-key": "secure", "sec-websocket-version": "13"}) result = self.resource.render(request) self.assertEqual(NOT_DONE_YET, result) self.assertIsInstance(transport.protocol, WebSocketsProtocolWrapper) self.assertIsInstance(transport.protocol.wrappedProtocol, AccumulatingProtocol) maas-1.9.5+bzr4599.orig/src/maastesting/__init__.py0000644000000000000000000000335013056115004020073 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing infrastructure for MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "bindir", "root", ] import copy from os.path import ( abspath, dirname, join, pardir, realpath, ) import re from sys import executable from warnings import filterwarnings import mock # The root of the source tree. root = abspath(join(dirname(realpath(__file__)), pardir, pardir)) # The directory containing the current interpreter. bindir = abspath(dirname(executable)) # Construct a regular expression that matches all of MAAS's core # packages, and their subpackages. packages = { "apiclient", "maas", "maascli", "maasserver", "maastesting", "metadataserver", "provisioningserver", } packages_expr = r"^(?:%s)\b" % "|".join( re.escape(package) for package in packages) # Enable some warnings that we ought to pay heed to. filterwarnings('error', category=BytesWarning, module=packages_expr) filterwarnings('default', category=DeprecationWarning, module=packages_expr) filterwarnings('default', category=ImportWarning, module=packages_expr) # Ignore noisy deprecation warnings inside Twisted. filterwarnings('ignore', category=DeprecationWarning, module=r"^twisted\b") # Make sure that sentinel objects are not copied. sentinel_type = type(mock.sentinel.foo) copy._copy_dispatch[sentinel_type] = copy._copy_immutable copy._deepcopy_dispatch[sentinel_type] = copy._copy_immutable try: import maasfascist maasfascist # Silence lint. except ImportError: pass maas-1.9.5+bzr4599.orig/src/maastesting/crochet.py0000644000000000000000000001026713056115004017770 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Support for testing with `crochet`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "EventualResultCatchingMixin", ] import crochet from testtools.content import ( Content, UTF8_TEXT, ) from testtools.matchers import Equals class EventualResultCatchingMixin: """A mix-in for tests that checks for unfired/unhandled `EventualResults`. It reports about all py:class:`crochet.EventualResults` that are unfired or whose results have not been retrieved. A test detail is recorded for each, then the test is force-failed at the last moment. """ def setUp(self): super(EventualResultCatchingMixin, self).setUp() # Every EventualResult that crochet creates is registered into this # registry. We'll check it after the test has finished. 
registry = crochet._main._registry # The registry stores EventualResults in a WeakSet, which means that # unfired and unhandled results can be garbage collected before we get # to see them. Here we patch in a regular set so that nothing gets # garbage collected until we've been able to check the results. self.addCleanup(setattr, registry, "_results", registry._results) registry._results = set() # While unravelling clean-ups is a good time to check the results; any # meaningful work represented by an EventualResult should have been # done by now. self.addCleanup(self.__checkResults, registry._results) def __checkResults(self, eventual_results): fail_count = 0 # Go through all the EventualResults created in this test. for eventual_result in eventual_results: # If the result has been retrieved, fine, otherwise look closer. if not eventual_result._result_retrieved: fail_count += 1 try: # Is there a result waiting to be retrieved? result = eventual_result.wait(timeout=0) except crochet.TimeoutError: # No result yet. This could be because the result is wired # up to a Deferred that hasn't fired yet, or because it # hasn't yet been connected. if eventual_result._deferred is None: message = [ "*** EventualResult has not fired:\n", "%r\n" % (eventual_result,), "*** It was not connected to a Deferred.\n", ] else: message = [ "*** EventualResult has not fired:\n", "%r\n" % (eventual_result,), "*** It was connected to a Deferred:\n", "%r\n" % (eventual_result._deferred,), ] else: # A result, but nothing has collected it. This can be # caused by forgetting to call wait(). message = [ "*** EventualResult has fired:\n", "%r\n" % (eventual_result,), "*** It contained the following result:\n", "%r\n" % (result,), "*** but it was not collected.\n", "*** Was result.wait() called?\n", ] # Record the details with a unique name. Bind message as a # default argument so each detail keeps its own message # rather than the loop's last one. message = [block.encode("utf-8") for block in message] self.addDetail( "Unfired/unhandled EventualResult #%d" % fail_count, Content(UTF8_TEXT, lambda message=message: message)) # Use expectThat() so that other clean-up tasks run to completion # before, at the last moment, the test is failed. self.expectThat( fail_count, Equals(0), "Unfired and/or unhandled " "EventualResult(s); see test details.") maas-1.9.5+bzr4599.orig/src/maastesting/djangoclient.py0000644000000000000000000000423413056115004020777 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Django client with sensible handling of data.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'SensibleClient', ] from functools import wraps from django.test import client def transparent_encode_multipart(func): """Wrap an HTTP client method, transparently encoding multipart data. This wraps Django's `Client` HTTP verb methods -- put, get, etc. -- in a way that's both convenient and compatible across Django versions. It augments those methods to accept a dict of data to be sent as part of the request body, in MIME multipart encoding. Since Django 1.5, these HTTP verb methods require data in the form of a byte string. The application (that's us) needs to take care of MIME encoding.
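A sketch of the resulting behaviour, with a hypothetical wrapped ``client`` and ``path``::

    client.put(path, {"key": "value"})  # dict: multipart-encoded
    client.put(path, b"raw payload")  # bytes: application/octet-stream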
""" @wraps(func) def maybe_encode_multipart( self, path, data=b"", content_type=None, **extra): if isinstance(data, bytes): if content_type is None: content_type = 'application/octet-stream' elif content_type is None: content_type = client.MULTIPART_CONTENT data = client.encode_multipart(client.BOUNDARY, data) else: raise TypeError( "Cannot combine data (%r) with content-type (%r)." % (data, content_type)) return func(self, path, data, content_type, **extra) return maybe_encode_multipart class SensibleClient(client.Client): """A Django test client that transparently encodes multipart data.""" # get(), post(), and head() handle their own payload-encoding and accept # dicts as `data`, so they're not wrapped. The following all accept # byte-strings as `data` so they are transparently wrapped. delete = transparent_encode_multipart(client.Client.delete) options = transparent_encode_multipart(client.Client.options) patch = transparent_encode_multipart(client.Client.patch) put = transparent_encode_multipart(client.Client.put) maas-1.9.5+bzr4599.orig/src/maastesting/djangoloader.py0000644000000000000000000001214313056115004020765 0ustar 00000000000000#!/usr/bin/env python # Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test loader for the Django parts of MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) __metaclass__ = type __all__ = [ "MAASDjangoTestRunner", "MAASDjangoTestSuite", "MAASDjangoTestLoader", ] import threading import unittest from django.conf import settings from django.test.simple import DjangoTestSuiteRunner from django_nose import NoseTestSuiteRunner from maastesting.loader import MAASTestLoader from postgresfixture import ClusterFixture import south.management.commands from testtools import try_imports reorder_suite = try_imports(( "django.test.simple.reorder_suite", "django.test.runner.reorder_suite", )) class MAASDjangoTestRunner(NoseTestSuiteRunner): """Custom test runner; ensures that the test database cluster is up.""" def setup_databases(self, *args, **kwargs): """Fire up the db cluster, then punt to original implementation.""" self.cluster = ClusterFixture("db", preserve=True) self.cluster.setUp() try: # Create a database in the PostgreSQL cluster for each database # connection configured in Django's settings that points to the # same datadir. for database in settings.DATABASES.values(): if database["HOST"] == self.cluster.datadir: self.cluster.createdb(database["NAME"]) # Call-up to super-classes. up = super(MAASDjangoTestRunner, self) return up.setup_databases(*args, **kwargs) except: # Clean-up the cluster now, or it'll be left running; django-nose # does not know to clean it up itself, and lacks a fixture-like # mechanism to aid with reverting a half-configured environment. self.cluster.cleanUp() # Now we can let the original error wreak havoc. raise def teardown_databases(self, *args, **kwargs): """Tear-down the test database cluster. This is *not* called if there's a failure during bring-up of any of the test databases, hence there is also tear-down code embedded in `setup_databases`. """ super(MAASDjangoTestRunner, self).teardown_databases(*args, **kwargs) self.cluster.cleanUp() class MAASDjangoTestSuite(unittest.TestSuite): """A MAAS and Django-specific test suite. This ensures that PostgreSQL clusters are up and running, and calls into Django's test framework to ensure that fixtures and so forth are all in place. 
""" # This lock guards against concurrent invocations of run_outer(); # only the outermost suite should call run_outer(), all the others # should call up to the superclass's run() method. outer_lock = threading.Lock() def run_outer(self, result, debug=False): # This is how South ensures that migrations are run during test # setup. For real. This is not a joke. south.management.commands.patch_for_test_db_setup() # We create one of Django's runners for set-up and tear-down # methods; it's not used to run the tests. runner = DjangoTestSuiteRunner(verbosity=2, interactive=False) runner.setup_test_environment() try: with ClusterFixture("db", preserve=True) as cluster: # Create a database in the PostgreSQL cluster for each # database connection configured in Django's settings that # points to the same datadir. for database in settings.DATABASES.values(): if database["HOST"] == cluster.datadir: cluster.createdb(database["NAME"]) old_config = runner.setup_databases() try: return super(MAASDjangoTestSuite, self).run(result, debug) finally: runner.teardown_databases(old_config) finally: runner.teardown_test_environment() def run(self, result, debug=False): # `False` means don't block when acquiring the lock, and implies # that run_outer() is already being invoked. We must call up to # the superclass's run() instead. if self.outer_lock.acquire(False): try: self.run_outer(result, debug) finally: self.outer_lock.release() else: super(MAASDjangoTestSuite, self).run(result, debug) class MAASDjangoTestLoader(MAASTestLoader): """A MAAS and Django-specific test loader. See `maastesting.loader.MAASTestLoader`. This also reorders the test suite, which is something that Django's test framework does. The purpose of this behaviour is not understood, but we reproduce it here anyway. """ suiteClass = MAASDjangoTestSuite def loadTestsFromName(self, name, module=None): suite = super(MAASDjangoTestLoader, self).loadTestsFromName(name) return reorder_suite(suite, (unittest.TestCase,)) maas-1.9.5+bzr4599.orig/src/maastesting/djangotestcase.py0000644000000000000000000002216513056115004021337 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Django-enabled test cases.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'count_queries', 'DjangoTestCase', 'DjangoTransactionTestCase', 'TestModelMixin', ] from contextlib import closing from itertools import izip from time import ( sleep, time, ) from django.conf import settings from django.core.management.commands import syncdb from django.core.signals import request_started from django.db import ( connection, connections, DEFAULT_DB_ALIAS, reset_queries, ) from django.db.models import loading from django.db.utils import DatabaseError import django.test from maastesting.djangoclient import SensibleClient from maastesting.testcase import MAASTestCase class CountQueries: """Context manager: count number of database queries issued in context. :ivar num_queries: The number of database queries that were performed while this context was active. 
""" def __init__(self): self.connection = connections[DEFAULT_DB_ALIAS] self.num_queries = 0 def __enter__(self): self.old_debug_cursor = self.connection.use_debug_cursor self.connection.use_debug_cursor = True self.starting_count = len(self.connection.queries) request_started.disconnect(reset_queries) def __exit__(self, exc_type, exc_value, traceback): self.connection.use_debug_cursor = self.old_debug_cursor request_started.connect(reset_queries) if exc_type is not None: return final_count = len(self.connection.queries) self.num_queries = final_count - self.starting_count def count_queries(func, *args, **kwargs): """Execute `func`, and count the number of database queries performed. :param func: Callable to be executed. :param *args: Positional arguments to `func`. :param **kwargs: Keyword arguments to `func`. :return: A tuple of: the number of queries performed while `func` was executing, and the value it returned. """ counter = CountQueries() with counter: result = func(*args, **kwargs) return counter.num_queries, result def get_rogue_database_activity(): """Return details of rogue database activity. This excludes itself, naturally, and also auto-vacuum activity which is governed by PostgreSQL and not something to be concerned about. :return: A list of dicts, where each dict represents a complete row from the ``pg_stat_activity`` table, mapping column names to values. """ with connection.temporary_connection() as cursor: cursor.execute("""\ SELECT * FROM pg_stat_activity WHERE pid != pg_backend_pid() AND query NOT LIKE 'autovacuum:%' """) names = tuple(column.name for column in cursor.description) return [dict(izip(names, row)) for row in cursor] def terminate_rogue_database_activity(): """Terminate rogue database activity. This excludes itself, naturally, and also auto-vacuum activity which is governed by PostgreSQL and not something to be concerned about. :return: A set of PIDs that could not be terminated, presumably because they're running under a different role and we're not a superuser. """ with connection.temporary_connection() as cursor: cursor.execute("""\ SELECT pid, pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid != pg_backend_pid() AND query NOT LIKE 'autovacuum:%' """) return {pid for pid, success in cursor if not success} def check_for_rogue_database_activity(test): """Check for rogue database activity and fail the test if found. All database activity outside of this thread should have terminated by the time this is called, but in practice it won't have. We have unconsciously lived with this situation for a long time, so we give it a few seconds to finish up before failing. This also attempts to terminate rogue activity, and reports on its success or failure. """ cutoff = time() + 5.0 # Give it 5 seconds. while time() < cutoff: database_activity = get_rogue_database_activity() if len(database_activity) == 0: break # All quiet on the database front. else: pause = max(0.0, min(0.2, cutoff - time())) sleep(pause) # Somat's still wriggling. else: not_terminated = terminate_rogue_database_activity() if len(not_terminated) == 0: not_terminated_message = ( "Rogue activity successfully terminated.") else: not_terminated_message = ( "Rogue activity NOT all terminated (pids: %s)." 
% " ".join( unicode(pid) for pid in sorted(not_terminated))) test.fail( "Rogue database activity:\n--\n" + "\n--\n".join( "\n".join( "%s=%s" % (name, activity[name]) for name in sorted(activity) ) for activity in database_activity ) + "\n--\n" + not_terminated_message + "\n" ) class DjangoTestCase( MAASTestCase, django.test.TestCase): """A Django `TestCase` for MAAS. Supports test resources and (non-Django) fixtures. """ client_class = SensibleClient # The database may be used in tests. See `MAASTestCase` for details. database_use_permitted = True def __get_connection_txid(self): """Get PostgreSQL's current transaction ID.""" with closing(connection.cursor()) as cursor: cursor.execute("SELECT txid_current()") return cursor.fetchone()[0] def _fixture_setup(self): """Record the transaction ID before the test is run.""" super(DjangoTestCase, self)._fixture_setup() self.__txid_before = self.__get_connection_txid() def _fixture_teardown(self): """Compare the transaction ID now to before the test ran. If they differ, do a full database flush because the new transaction could have been the result of a commit, and we don't want to leave stale test state around. """ try: self.__txid_after = self.__get_connection_txid() except DatabaseError: # We don't know if a transaction was committed to disk or if the # transaction simply broke, so assume the worse and flush all # databases. super(DjangoTestCase, self)._fixture_teardown() django.test.TransactionTestCase._fixture_teardown(self) else: super(DjangoTestCase, self)._fixture_teardown() if self.__txid_after != self.__txid_before: # We're in a different transaction now to the one we started # in, so force a flush of all databases to ensure all's well. django.test.TransactionTestCase._fixture_teardown(self) # Don't let unfinished database activity get away with it. check_for_rogue_database_activity(self) class DjangoTransactionTestCase( MAASTestCase, django.test.TransactionTestCase): """A Django `TransactionTestCase` for MAAS. A version of `MAASTestCase` that supports transactions. The basic Django TestCase class uses transactions to speed up tests so this class should only be used when tests involve transactions. """ client_class = SensibleClient # The database may be used in tests. See `MAASTestCase` for details. database_use_permitted = True def _fixture_teardown(self): super(DjangoTransactionTestCase, self)._fixture_teardown() # Don't let unfinished database activity get away with it. check_for_rogue_database_activity(self) class TestModelMixin: """Mix-in for test cases that create their own models. Use this as a mix-in base class for test cases that need to create model classes that exist only in the scope of the tests. The `TestModelMixin` base class must come before the base `TestCase` class in the test case's list of base classes. :cvar app: The Django application that the test models should belong to. Typically either "maasserver.tests" or "metadataserver.tests". """ app = None def _pre_setup(self): # Add the models to the db. self._original_installed_apps = settings.INSTALLED_APPS assert self.app is not None, "TestCase.app must be defined!" settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.append(self.app) loading.cache.loaded = False # Use Django's 'syncdb' rather than South's. syncdb.Command().handle_noargs( verbosity=0, interactive=False, database=DEFAULT_DB_ALIAS) super(TestModelMixin, self)._pre_setup() def _post_teardown(self): super(TestModelMixin, self)._post_teardown() # Restore the settings. 
settings.INSTALLED_APPS = self._original_installed_apps loading.cache.loaded = False maas-1.9.5+bzr4599.orig/src/maastesting/doubles.py0000644000000000000000000000307313056115004017773 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Miscellaneous test doubles. See http://www.martinfowler.com/bliki/TestDouble.html for the nomenclature used. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "StubContext", ] class StubContext: """A stub context manager. :ivar entered: A boolean indicating if the context has been entered. :ivar exited: A boolean indicating if the context has been exited. :ivar unused: A boolean indicating if the context has yet to be used (i.e. it has been neither entered nor exited). :ivar active: A boolean indicating if the context is currently active (i.e. it has been entered but not exited). :ivar used: A boolean indicating if the context has been used (i.e. it has been both entered and exited). :ivar exc_info: The ``exc_info`` tuple passed into ``__exit__``. """ def __init__(self): super(StubContext, self).__init__() self.entered = False self.exited = False @property def unused(self): return not self.entered and not self.exited @property def active(self): return self.entered and not self.exited @property def used(self): return self.entered and self.exited def __enter__(self): self.entered = True def __exit__(self, *exc_info): self.exc_info = exc_info self.exited = True maas-1.9.5+bzr4599.orig/src/maastesting/factory.py0000644000000000000000000005274113056115004020013 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test object factories.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "factory", "NO_VALUE", "TooManyRandomRetries", ] import datetime from functools import partial import httplib import io from itertools import ( count, imap, islice, repeat, ) import os import os.path import random import string import subprocess import time import urllib2 import urlparse from uuid import uuid1 from maastesting.fixtures import TempDirectory import mock from netaddr import ( IPAddress, IPNetwork, ) # Occasionally a parameter needs separate values for None and "no value # given, make one up." In that case, use NO_VALUE as the default and # accept None as a normal value. NO_VALUE = object() class TooManyRandomRetries(Exception): """Something that relies on luck did not get lucky. Some factory methods need to generate random items until they find one that meets certain requirements. This exception indicates that it took too many retries, which may mean that no matching item is possible. """ def network_clashes(network, other_networks): """Does the IP range for `network` clash with any in `other_networks`? :param network: An `IPNetwork`. :param other_networks: An iterable of `IPNetwork` items. :return: Whether the IP range for `network` overlaps with any of those for the networks in `other_networks`.
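For example, 10.0.0.0/24 clashes with 10.0.0.128/25, which it wholly contains, but not with 10.0.1.0/24.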
""" for other_network in other_networks: if network in other_network or other_network in network: return True return False class Factory: random_letters = imap( random.choice, repeat(string.letters + string.digits)) random_letters_with_spaces = imap( random.choice, repeat(string.letters + string.digits + ' ')) # See django.contrib.auth.forms.UserCreationForm.username. random_letters_for_usernames = imap( random.choice, repeat(string.letters + '.@+-')) random_http_responses = imap( random.choice, repeat(tuple(httplib.responses))) random_octet = partial(random.randint, 0, 255) random_octets = iter(random_octet, None) def make_string(self, size=10, spaces=False, prefix=""): if spaces: return prefix + "".join( islice(self.random_letters_with_spaces, size)) else: return prefix + "".join(islice(self.random_letters, size)) def make_bytes(self, size=10): """Return a `bytes` filled with random data.""" return os.urandom(size) def make_username(self, size=10): """Create an arbitrary user name (but not the actual user).""" return "".join(islice(self.random_letters_for_usernames, size)) def make_email_address(self, login_size=10): """Generate an arbitrary email address.""" return "%s@example.com" % self.make_string(size=login_size) def make_status_code(self): """Return an arbitrary HTTP status code.""" return next(self.random_http_responses) exception_type_names = (b"TestException#%d" % i for i in count(1)) def make_exception_type(self, bases=(Exception,), **namespace): return type(next(self.exception_type_names), bases, namespace) def make_exception(self, message=None, bases=(Exception,), **namespace): exc_type = self.make_exception_type(bases, **namespace) return exc_type() if message is None else exc_type(message) def make_absolute_path( self, directories=3, directory_length=10, path_seperator='/'): return path_seperator + path_seperator.join( self.make_string(size=directory_length) for _ in range(directories) ) def pick_bool(self): """Return an arbitrary Boolean value (`True` or `False`).""" return random.choice((True, False)) def pick_port(self, port_min=1024, port_max=65535): assert port_min >= 0 and port_max <= 65535 return random.randint(port_min, port_max) def make_vlan_tag(self, allow_none=False, but_not=None): """Create a random VLAN tag. :param allow_none: Whether `None` ("no VLAN") can be allowed as an outcome. If `True`, `None` will be included in the possible results with a deliberately over-represented probability, in order to help trip up bugs that might only show up once in about 4094 calls otherwise. :param but_not: A list of tags that should not be returned. Any zero or `None` entries will be ignored. """ if but_not is None: but_not = [] if allow_none and self.pick_bool(): return None else: for _ in range(100): vlan_tag = random.randint(1, 0xffe) if vlan_tag not in but_not: return vlan_tag raise TooManyRandomRetries("Could not find an available VLAN tag.") def make_ipv4_address(self): octets = islice(self.random_octets, 4) return '%d.%d.%d.%d' % tuple(octets) def make_ipv6_address(self): # We return from the fc00::/7 space because that's a private # space and shouldn't cause problems of addressing the outside # world. network = IPNetwork('fc00::/7') # We can't use random.choice() because there are too many # elements in network. 
random_address_index = random.randint(0, network.size - 1) return unicode(IPAddress(network[random_address_index])) def make_ip_address(self): if random.randint(0, 1): return self.make_ipv6_address() else: return self.make_ipv4_address() def make_UUID(self): return unicode(uuid1()) def _make_random_network( self, slash=None, but_not=None, disjoint_from=None, random_address_factory=None): """Generate a random IP network. :param slash: Netmask or bit width of the network, e.g. 24 or '255.255.255.0' for what used to be known as a class-C network. :param but_not: Optional iterable of `IPNetwork` objects whose values should not be returned. Use this when you need a different network from any returned previously. The new network may overlap any of these, but it won't be identical. :param disjoint_from: Optional iterable of `IPNetwork` objects whose IP ranges the new network must not overlap. :param random_address_factory: A callable that returns a random IP address. If not provided, will default to Factory.make_ipv4_address(). :return: A network spanning at least 8 IP addresses (at most 29 bits). :rtype: :class:`IPNetwork` """ if but_not is None: but_not = [] but_not = frozenset(but_not) if disjoint_from is None: disjoint_from = [] if slash is None: slash = random.randint(16, 29) if random_address_factory is None: random_address_factory = self.make_ipv4_address # Look randomly for a network that matches our criteria. for _ in range(100): network = IPNetwork( '%s/%s' % (random_address_factory(), slash)).cidr forbidden = (network in but_not) clashes = network_clashes(network, disjoint_from) if not forbidden and not clashes: return network raise TooManyRandomRetries("Could not find available network") def make_ipv4_network(self, slash=None, but_not=None, disjoint_from=None): """Generate a random IPv4 network. :param slash: Netmask or bit width of the network, e.g. 24 or '255.255.255.0' for what used to be known as a class-C network. :param but_not: Optional iterable of `IPNetwork` objects whose values should not be returned. Use this when you need a different network from any returned previously. The new network may overlap any of these, but it won't be identical. :param disjoint_from: Optional iterable of `IPNetwork` objects whose IP ranges the new network must not overlap. :return: A network spanning at least 8 IP addresses (at most 29 bits). :rtype: :class:`IPNetwork` """ if slash is None: slash = random.randint(16, 29) return self._make_random_network( slash=slash, but_not=but_not, disjoint_from=disjoint_from, random_address_factory=self.make_ipv4_address) def make_ipv6_network(self, slash=None, but_not=None, disjoint_from=None): """Generate a random IPv6 network. :param slash: Netmask or bit width of the network. If not specified, will default to a bit width of between 112 (65536 addresses) and 125 (8 addresses); :param but_not: Optional iterable of `IPNetwork` objects whose values should not be returned. Use this when you need a different network from any returned previously. The new network may overlap any of these, but it won't be identical. :param disjoint_from: Optional iterable of `IPNetwork` objects whose IP ranges the new network must not overlap. :return: A network spanning at least 8 IP addresses. 
:rtype: :class:`IPNetwork` """ if slash is None: slash = random.randint(112, 125) return self._make_random_network( slash=slash, but_not=but_not, disjoint_from=disjoint_from, random_address_factory=self.make_ipv6_address) def make_ip4_or_6_network(self, host_bits=None): """Generate a random IPv4 or IPv6 network.""" slash = None if random.randint(0, 1) == 0: if host_bits is not None: slash = 32 - host_bits return self.make_ipv4_network(slash=slash) else: if host_bits is not None: slash = 128 - host_bits return self.make_ipv6_network(slash=slash) def pick_ip_in_dynamic_range(self, ngi, but_not=None): if but_not is None: but_not = [] first = ngi.get_dynamic_ip_range().first last = ngi.get_dynamic_ip_range().last but_not = [IPAddress(but) for but in but_not if but is not None] for _ in range(100): address = IPAddress(random.randint(first, last)) if address not in but_not: return bytes(address) raise TooManyRandomRetries( "Could not find available IP in dynamic range") def pick_ip_in_static_range(self, ngi, but_not=None): if but_not is None: but_not = [] first = ngi.get_static_ip_range().first last = ngi.get_static_ip_range().last but_not = [IPAddress(but) for but in but_not if but is not None] for _ in range(100): address = IPAddress(random.randint(first, last)) if address not in but_not: return bytes(address) raise TooManyRandomRetries( "Could not find available IP in static range") def pick_ip_in_network(self, network, but_not=None): if but_not is None: but_not = [] but_not = [IPAddress(but) for but in but_not if but is not None] for _ in range(100): address = IPAddress(random.randint(network.first, network.last)) if address not in but_not: return bytes(address) raise TooManyRandomRetries("Could not find available IP in network") def make_ipv4_range(self, network=None, but_not=None): """Return a pair of IPv4 addresses. :param network: Return IP addresses within this network. :param but_not: A pair of addresses that should not be returned. :return: A pair of `IPAddress`. """ if network is None: network = self.make_ipv4_network() if but_not is not None: low, high = but_not but_not = (IPAddress(low), IPAddress(high)) for _ in range(100): ip_range = tuple(sorted( IPAddress(factory.pick_ip_in_network(network)) for _ in range(2) )) if ip_range[0] < ip_range[1] and ip_range != but_not: return ip_range raise TooManyRandomRetries("Could not find available IP range") make_ip_range = make_ipv4_range # DEPRECATED. def make_ipv6_range(self, network=None, but_not=None): """Return a pair of IPv6 addresses. :param network: Return IP addresses within this network. :param but_not: A pair of addresses that should not be returned. :return: A pair of `IPAddress`. """ if network is None: network = self.make_ipv6_network() return self.make_ip_range(network=network, but_not=but_not) def make_mac_address(self, delimiter=":"): assert isinstance(delimiter, unicode) octets = islice(self.random_octets, 6) return delimiter.join(format(octet, "02x") for octet in octets) def make_random_leases(self, num_leases=1): """Create a dict of arbitrary ip-to-mac address mappings.""" # This could be a dict comprehension, but the current loop # guards against shortfalls as random IP addresses collide.
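# e.g. if the same random IP comes up twice, the dict is left one # entry short, so we loop until the requested count is reached.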
leases = {} while len(leases) < num_leases: leases[self.make_ipv4_address()] = self.make_mac_address() return leases def make_date(self, year=2011): start = time.mktime(datetime.datetime(year, 1, 1).timetuple()) end = time.mktime(datetime.datetime(year + 1, 1, 1).timetuple()) stamp = random.randrange(start, end) return datetime.datetime.fromtimestamp(stamp) def make_timedelta(self): return datetime.timedelta( days=random.randint(0, 3 * 365), seconds=random.randint(0, 24 * 60 * 60 - 1), microseconds=random.randint(0, 999999)) def make_file(self, location, name=None, contents=None): """Create a file, and write data to it. Prefer the eponymous convenience wrapper in :class:`maastesting.testcase.MAASTestCase`. It creates a temporary directory and arranges for its eventual cleanup. :param location: Directory. Use a temporary directory for this, and make sure it gets cleaned up after the test! :param name: Optional name for the file. If none is given, one will be made up. :param contents: Optional contents for the file. If omitted, some arbitrary ASCII text will be written. :type contents: unicode, but containing only ASCII characters. :return: Path to the file. """ if name is None: name = self.make_string() if contents is None: contents = self.make_string().encode('ascii') path = os.path.join(location, name) with open(path, 'w') as f: f.write(contents) return path def make_name(self, prefix=None, sep='-', size=6): """Generate a random name. :param prefix: Optional prefix. Pass one to help make test failures and tracebacks easier to read! If you don't, you might as well use `make_string`. :param sep: Separator that will go between the prefix and the random portion of the name. Defaults to a dash. :param size: Length of the random portion of the name. Don't get hung up on this; you may need more if uniqueness is really important, or less if it isn't but legibility is, but generally, use the default. :return: A randomized unicode string. """ return sep.join( filter(None, [prefix, self.make_string(size=size)])) def make_hostname(self, prefix='host', *args, **kwargs): """Generate a random hostname. The returned hostname is lowercase because python's urlparse implicitly lowercases hostnames.""" return self.make_name(prefix=prefix, *args, **kwargs).lower() # Always select from a scheme that allows parameters in the URL so # that we can round-trip a URL with params successfully (otherwise # the params don't get parsed out of the path). _make_parsed_url_schemes = tuple( scheme for scheme in urlparse.uses_params if scheme != "") def make_parsed_url( self, scheme=None, netloc=None, path=None, port=None, params=None, query=None, fragment=None): """Generate a random parsed URL object. Contains randomly generated values for all parts of a URL: scheme, location, path, parameters, query, and fragment. However, each part can be overridden individually. If port=None or port=True, pick_port() will be used to select a random port, while port=False will create a netloc for the URL that does not specify a port. To specify a port in netloc, the port parameter must be False. :return: Instance of :py:class:`urlparse.ParseResult`. """ if port is not False and netloc is not None and netloc.count(':') == 1: raise AssertionError( 'A port number has been requested, however the given netloc ' 'spec %r already contains a port number.' % (netloc,)) if scheme is None: # Select a scheme that allows parameters; see above.
scheme = random.choice(self._make_parsed_url_schemes) if port is None or port is True: port = self.pick_port() if netloc is None: netloc = "%s.example.com" % self.make_name("netloc").lower() if isinstance(port, (int, long)) and not isinstance(port, bool): netloc += ":%d" % port if path is None: # A leading forward-slash will be added in geturl() if we # don't, so ensure it's here now so tests can compare URLs # without worrying about it. path = self.make_name("/path") else: # Same here with the forward-slash prefix. if not path.startswith("/"): path = "/" + path if params is None: params = self.make_name("params") if query is None: query = self.make_name("query") if fragment is None: fragment = self.make_name("fragment") return urlparse.ParseResult( scheme, netloc, path, params, query, fragment) def make_url( self, scheme=None, netloc=None, path=None, params=None, query=None, fragment=None): """Generate a random URL. Contains randomly generated values for all parts of a URL: scheme, location, path, parameters, query, and fragment. However, each part can be overridden individually. :return: string """ return self.make_parsed_url( scheme, netloc, path, params, query, fragment).geturl() def make_simple_http_url(self, netloc=None, path=None, port=None): """Create an arbitrary HTTP URL with only a location and path.""" return self.make_parsed_url( scheme="http", netloc=netloc, path=path, port=port, params="", query="", fragment="").geturl() def make_names(self, *prefixes): """Generate random names. Yields a name for each prefix specified. :param prefixes: Zero or more prefixes. See `make_name`. """ for prefix in prefixes: yield self.make_name(prefix) def make_tarball(self, location, contents): """Create a tarball containing the given files. :param location: Path to a directory where the tarball can be stored. :param contents: A dict mapping file names to file contents. Where the value is `None`, the file will contain arbitrary data. :return: Path to a gzip-compressed tarball. """ tarball = os.path.join(location, '%s.tar.gz' % self.make_name()) with TempDirectory() as working_dir: source = working_dir.path for name, content in contents.items(): self.make_file(source, name, content) subprocess.check_call(['tar', '-C', source, '-czf', tarball, '.']) return tarball def make_response(self, status_code, content, content_type=None): """Return a similar response to that which `urllib2` returns.""" if content_type is None: headers_raw = b"" else: if isinstance(content_type, unicode): content_type = content_type.encode("ascii") headers_raw = b"Content-Type: %s" % content_type headers = httplib.HTTPMessage(io.BytesIO(headers_raw)) return urllib2.addinfourl( fp=io.BytesIO(content), headers=headers, url=None, code=status_code) def make_streams(self, stdin=None, stdout=None, stderr=None): """Make a fake return value for a SSHClient.exec_command.""" # stdout.read() is called so stdout can't be None. if stdout is None: stdout = mock.Mock() return (stdin, stdout, stderr) def make_CalledProcessError(self): """Make a fake :py:class:`subprocess.CalledProcessError`.""" return subprocess.CalledProcessError( returncode=random.randint(1, 10), cmd=[self.make_name("command")], output=factory.make_bytes()) # Create factory singleton. factory = Factory() maas-1.9.5+bzr4599.orig/src/maastesting/fakemethod.py0000644000000000000000000000625213056115004020447 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
# pylint: disable-msg=E0702 """Test helper, copied from the Launchpad source tree.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'FakeMethod', 'MultiFakeMethod', ] class FakeMethod: """Catch any function or method call, and record the fact. Use this for easy stubbing. The call operator can return a fixed value, or raise a fixed exception object. This is useful when unit-testing code that does things you don't want to integration-test, e.g. because it wants to talk to remote systems. """ def __init__(self, result=None, failure=None): """Set up a fake function or method. :param result: Value to return. :param failure: Exception to raise. """ self.result = result self.failure = failure # A log of arguments for each call to this method. self.calls = [] def __call__(self, *args, **kwargs): """Catch an invocation to the method. Increments `call_count` and adds the arguments to `calls`. Accepts any and all parameters. Raises the failure passed to the constructor, if any; otherwise, returns the result value passed to the constructor. """ self.calls.append((args, kwargs)) if self.failure is None: return self.result else: # pylint thinks this raises None, which is clearly not # possible. That's why this test disables pylint message # E0702. raise self.failure @property def call_count(self): return len(self.calls) def extract_args(self): """Return just the calls' positional-arguments tuples.""" return [args for args, kwargs in self.calls] def extract_kwargs(self): """Return just the calls' keyword-arguments dicts.""" return [kwargs for args, kwargs in self.calls] class MultiFakeMethod: """Return a method whose behavior is derived from a list of methods. When called repeatedly, this method will call all the methods used to build it in turn, one after the other. This can be used, for instance, to simulate a temporary failure:: >>> raised_exception = Exception("...") >>> number_of_failures = 3 # ...or whatever >>> simulate_failures = MultiFakeMethod( ... [FakeMethod(failure=raised_exception)] * number_of_failures + ... [FakeMethod()]) """ def __init__(self, methods): self.methods = methods self._call_count = -1 def __call__(self, *args, **kwargs): self._call_count = self._call_count + 1 if self._call_count < len(self.methods): return self.methods[self._call_count](*args, **kwargs) else: raise ValueError( "No more methods to call. This MultiFakeMethod has been " "called %d times and it only contains %d method(s)." % ( self._call_count + 1, len(self.methods))) maas-1.9.5+bzr4599.orig/src/maastesting/fixtures.py0000644000000000000000000003220613056115004020207 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE).
"""Miscellaneous fixtures, here until they find a better home.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "CaptureStandardIO", "DetectLeakedFileDescriptors", "DisplayFixture", "LoggerSilencerFixture", "ProxiesDisabledFixture", "SeleniumFixture", "TempDirectory", ] import __builtin__ import codecs from errno import ENOENT from io import BytesIO import logging import os from subprocess import ( CalledProcessError, PIPE, Popen, ) import sys import fixtures from fixtures import ( EnvironmentVariableFixture, Fixture, ) from testtools.monkey import MonkeyPatcher from twisted.python.reflect import namedObject class ImportErrorFixture(Fixture): """Fixture to generate an artificial ImportError when the interpreter would otherwise successfully import the given module. While this fixture is within context, any import of the form: from import will raise an ImportError. :param module_name: name of the module to import from :param sub_name: submodule to import from the module named """ def __init__(self, module_name, sub_name): super(ImportErrorFixture, self).__init__() self.module_name = module_name self.sub_name = sub_name def setUp(self): super(ImportErrorFixture, self).setUp() def mock_import(name, *import_args, **kwargs): if name == self.module_name: module_list = import_args[2] if self.sub_name in module_list: raise ImportError("ImportErrorFixture raising ImportError " "exception on targeted import: %s.%s" % ( self.module_name, self.sub_name)) return self.__real_import(name, *import_args, **kwargs) self.__real_import = __builtin__.__import__ __builtin__.__import__ = mock_import self.addCleanup(setattr, __builtin__, "__import__", self.__real_import) class LoggerSilencerFixture(Fixture): """Fixture to change the log level of loggers. All the loggers with names self.logger_names will have their log level changed to self.level (logging.ERROR by default). """ def __init__(self, names, level=logging.ERROR): super(LoggerSilencerFixture, self).__init__() self.names = names self.level = level def setUp(self): super(LoggerSilencerFixture, self).setUp() for name in self.names: logger = logging.getLogger(name) self.addCleanup(logger.setLevel, logger.level) logger.setLevel(self.level) class DisplayFixture(Fixture): """Fixture to create a virtual display with `xvfb-run`. This will set the ``DISPLAY`` environment variable once it's up and running (and reset it when it shuts down). """ def __init__(self, size=(1280, 1024), depth=24): super(DisplayFixture, self).__init__() self.width, self.height = size self.depth = depth @property def command(self): """The command this fixture will start. ``xvfb-run`` is the executable used, to which the following arguments are passed: ``--server-args=`` ``-ac`` disables host-based access control mechanisms. See Xserver(1). ``-screen`` forces a screen configuration. At the time of writing there is some disagreement between xvfb-run(1) and Xvfb(1) about what the default is. ``--auto-servernum`` Try to get a free server number, starting at 99. See xvfb-run(1). ``xvfb-run`` is asked to chain to ``bash``, which echos the ``DISPLAY`` environment variable and execs ``cat``. This lets us shut down the framebuffer simply by closing the process's stdin. 
""" spec = "{self.width}x{self.height}x{self.depth}".format(self=self) args = "-ac -screen 0 %s" % spec return ( "xvfb-run", "--server-args", args, "--auto-servernum", "--", "bash", "-c", "echo $DISPLAY && exec cat", ) def setUp(self): super(DisplayFixture, self).setUp() self.process = Popen(self.command, stdin=PIPE, stdout=PIPE) self.display = self.process.stdout.readline().strip() if not self.display or self.process.poll() is not None: raise CalledProcessError(self.process.returncode, self.command) self.useFixture(EnvironmentVariableFixture("DISPLAY", self.display)) self.addCleanup(self.shutdown) def shutdown(self): self.process.stdin.close() if self.process.wait() != 0: raise CalledProcessError(self.process.returncode, self.command) class SeleniumFixture(Fixture): """Set-up a JavaScript-enabled testing browser instance.""" # browser-name -> (driver-name, driver-args) browsers = { "Chrome": ( b"selenium.webdriver.Chrome", ("/usr/lib/chromium-browser/chromedriver",), ), "Firefox": ( b"selenium.webdriver.Firefox", (), ), "PhantomJS": ( b"selenium.webdriver.PhantomJS", (), ), } logger_names = ['selenium.webdriver.remote.remote_connection'] def __init__(self, browser_name): super(SeleniumFixture, self).__init__() if browser_name in self.browsers: driver, driver_args = self.browsers[browser_name] self.driver = namedObject(driver) self.driver_args = driver_args else: raise ValueError("Unrecognised browser: %s" % (browser_name,)) def setUp(self): super(SeleniumFixture, self).setUp() self.browser = self.driver(*self.driver_args) self.useFixture(LoggerSilencerFixture(self.logger_names)) self.addCleanup(self.browser.quit) class ProxiesDisabledFixture(Fixture): """Disables all HTTP/HTTPS proxies set in the environment.""" def setUp(self): super(ProxiesDisabledFixture, self).setUp() self.useFixture(EnvironmentVariableFixture("http_proxy")) self.useFixture(EnvironmentVariableFixture("https_proxy")) class TempDirectory(fixtures.TempDir): """Create a temporary directory, ensuring Unicode paths.""" def setUp(self): super(TempDirectory, self).setUp() if isinstance(self.path, bytes): encoding = sys.getfilesystemencoding() self.path = self.path.decode(encoding) class TempWDFixture(TempDirectory): """Change the current working directory into a temp dir. This will restore the original WD and delete the temp directory on cleanup. """ def setUp(self): cwd = os.getcwd() super(TempWDFixture, self).setUp() self.addCleanup(os.chdir, cwd) os.chdir(self.path) class ChromiumWebDriverFixture(Fixture): """Starts and starts the selenium Chromium webdriver.""" def setUp(self): super(ChromiumWebDriverFixture, self).setUp() # Import late to avoid hard dependency. from selenium.webdriver.chrome.service import Service as ChromeService service = ChromeService( "/usr/lib/chromium-browser/chromedriver", 4444) # Set the LD_LIBRARY_PATH so the chrome driver can find the required # libraries. self.useFixture(EnvironmentVariableFixture( "LD_LIBRARY_PATH", "/usr/lib/chromium-browser/libs")) service.start() # Stop service on cleanup. self.addCleanup(service.stop) class CaptureStandardIO(Fixture): """Capture stdin, stdout, and stderr. Reading from `sys.stdin` will yield *unicode* strings, much like the default in Python 3. This differs from the usual behaviour in Python 2, so beware. Writing unicode strings to `sys.stdout` or `sys.stderr` will work; they'll be encoded with the `encoding` chosen when creating this fixture. `addInput(...)` should be used to prepare more input to be read. 
The `getOutput()` and `getError()` methods can be used to obtain what's been written to stdout and stderr. The buffers used internally have the same lifetime as the fixture *instance* itself, so `getOutput()` and `getError()` remain useful even after the fixture has been cleaned up, and there's no need to capture their results before exiting. However, `clearInput()`, `clearOutput()`, `clearError()`, and `clearAll()` can be used to truncate the buffers during this fixture's lifetime. """ stdin = None stdout = None stderr = None def __init__(self, encoding="utf-8"): super(CaptureStandardIO, self).__init__() self.codec = codecs.lookup(encoding) # Create new buffers. self._buf_in = BytesIO() self._buf_out = BytesIO() self._buf_err = BytesIO() def setUp(self): super(CaptureStandardIO, self).setUp() self.patcher = MonkeyPatcher() self.addCleanup(self.patcher.restore) # Convenience. reader = self.codec.streamreader writer = self.codec.streamwriter # Patch sys.std* and self.std*. self._addStream("stdin", reader(self._buf_in)) self._addStream("stdout", writer(self._buf_out)) self._addStream("stderr", writer(self._buf_err)) self.patcher.patch() def _addStream(self, name, stream): self.patcher.add_patch(self, name, stream) self.patcher.add_patch(sys, name, stream) def addInput(self, data): """Add input to be read later, as a unicode string.""" position = self._buf_in.tell() stream = self.codec.streamwriter(self._buf_in) try: self._buf_in.seek(0, 2) stream.write(data) finally: self._buf_in.seek(position) def getInput(self): """The input remaining to be read, as a unicode string.""" position = self._buf_in.tell() if self.stdin is None: stream = self.codec.streamreader(self._buf_in) else: stream = self.stdin try: return stream.read() finally: self._buf_in.seek(position) def getOutput(self): """The output written thus far, as a unicode string.""" if self.stdout is not None: self.stdout.flush() output_bytes = self._buf_out.getvalue() output_string, _ = self.codec.decode(output_bytes) return output_string def getError(self): """The error written thus far, as a unicode string.""" if self.stderr is not None: self.stderr.flush() error_bytes = self._buf_err.getvalue() error_string, _ = self.codec.decode(error_bytes) return error_string def clearInput(self): """Truncate the input buffer.""" self._buf_in.seek(0, 0) self._buf_in.truncate() if self.stdin is not None: self.stdin.seek(0, 0) def clearOutput(self): """Truncate the output buffer.""" self._buf_out.seek(0, 0) self._buf_out.truncate() if self.stdout is not None: self.stdout.seek(0, 0) def clearError(self): """Truncate the error buffer.""" self._buf_err.seek(0, 0) self._buf_err.truncate() if self.stderr is not None: self.stderr.seek(0, 0) def clearAll(self): """Truncate all buffers.""" self.clearInput() self.clearOutput() self.clearError() class DetectLeakedFileDescriptors(fixtures.Fixture): """Detect FDs that have leaked during the lifetime of this fixture. Raises `AssertionError` with details if anything has leaked. It does this by referring to the listing of ``/proc/self/fd``, a "magic" directory that the kernel populates with details of open file-descriptors. It captures this list during fixture set-up and compares against it at fixture tear-down.
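Each leaked descriptor is reported together with the target of its
``/proc`` symlink, along the lines of (illustrative)::

    File descriptor(s) leaked:
    * 7 --> /tmp/example.log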
""" def setUp(self): super(DetectLeakedFileDescriptors, self).setUp() self.fdpath = "/proc/%d/fd" % os.getpid() self.addCleanup(self.check, os.listdir(self.fdpath)) def check(self, fds_ref): fds_now = os.listdir(self.fdpath) fds_new = {} for fd in set(fds_now) - set(fds_ref): try: fds_new[fd] = os.readlink(os.path.join(self.fdpath, fd)) except OSError as error: if error.errno == ENOENT: # The FD has been closed since listing the directory, # presumably by another thread in this process. Twisted's # reactor is likely. In any case, this is not a leak, # though it may indicate a somewhat racy test. pass else: raise if len(fds_new) != 0: message = ["File descriptor(s) leaked:"] message.extend( "* %s --> %s" % (fd, desc) for (fd, desc) in fds_new.viewitems()) raise AssertionError("\n".join(message)) maas-1.9.5+bzr4599.orig/src/maastesting/httpd.py0000644000000000000000000001213713056115004017462 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """HTTP server fixture.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "HTTPServerFixture", ] from BaseHTTPServer import HTTPServer import gzip from io import BytesIO import os from SimpleHTTPServer import SimpleHTTPRequestHandler from SocketServer import ThreadingMixIn import threading from fixtures import Fixture class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): """A simple HTTP server that will run in its own thread.""" class SilentHTTPRequestHandler(SimpleHTTPRequestHandler): # SimpleHTTPRequestHandler logs to stdout: silence it. log_request = lambda *args, **kwargs: None log_error = lambda *args, **kwargs: None def _gzip_compress(self, f): gz_out = BytesIO() gz = gzip.GzipFile(mode='wb', fileobj=gz_out) gz.write(f.read()) gz.flush() gz_out.getvalue() return gz_out def is_gzip_accepted(self): accepted = set() for header in self.headers.getallmatchingheaders('Accept-Encoding'): # getallmatchingheaders returns the whole line, so first we have to # split off the header definition _, content = header.split(':', 1) content = content.strip() # Then, you are allowed to specify a comma separated list of # acceptable encodings. You are also allowed to specify # 'encoding;q=XXX' to specify what encodings you would prefer. # We'll allow it to be set, but just ignore it. # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html encodings = [encoding.strip().split(';', )[0] for encoding in content.split(',')] accepted.update(encodings) if 'gzip' in accepted: return True return False # This is a copy & paste and minor modification of # SimpleHTTPRequestHandler's send_head code. Because to support # Content-Encoding gzip, we have to change what headers get returned (as it # affects Content-Length headers. def send_head(self): """Common code for GET and HEAD commands. This sends the response code and MIME headers. Return value is either a file object (which has to be copied to the outputfile by the caller unless the command was HEAD, and must be closed by the caller under all circumstances), or None, in which case the caller has nothing further to do. 
""" path = self.translate_path(self.path) f = None if os.path.isdir(path): if not self.path.endswith('/'): # redirect browser - doing basically what apache does self.send_response(301) self.send_header("Location", self.path + "/") self.end_headers() return None for index in "index.html", "index.htm": index = os.path.join(path, index) if os.path.exists(index): path = index break else: return self.list_directory(path) ctype = self.guess_type(path) try: # Always read in binary mode. Opening files in text mode may cause # newline translations, making the actual size of the content # transmitted *less* than the content-length! f = open(path, 'rb') except IOError: self.send_error(404, "File not found") return None if self.is_gzip_accepted(): return self.start_gz_response(ctype, f) else: return self.start_response(ctype, f) def start_gz_response(self, ctype, f): self.send_response(200) self.send_header("Content-type", ctype) self.send_header("Content-Encoding", 'gzip') gz_out = self._gzip_compress(f) self.send_header("Content-Length", unicode(gz_out.tell())) gz_out.seek(0) self.end_headers() return gz_out def start_response(self, ctype, f): self.send_response(200) self.send_header("Content-type", ctype) fs = os.fstat(f.fileno()) self.send_header("Content-Length", unicode(fs[6])) self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) self.end_headers() return f class HTTPServerFixture(Fixture): """Bring up a very simple, threaded, web server. Files are served from the current working directory and below. """ def __init__(self, host="localhost", port=0): super(HTTPServerFixture, self).__init__() self.server = ThreadingHTTPServer( (host, port), SilentHTTPRequestHandler) @property def url(self): return "http://%s:%d/" % self.server.server_address def setUp(self): super(HTTPServerFixture, self).setUp() threading.Thread(target=self.server.serve_forever).start() self.addCleanup(self.server.shutdown) maas-1.9.5+bzr4599.orig/src/maastesting/karma.conf.js0000644000000000000000000000437713056115004020351 0ustar 00000000000000// Karma configuration // Generated on Fri Jan 09 2015 20:41:58 GMT-0500 (EST) module.exports = function(config) { config.set({ // base path that will be used to resolve all patterns (eg. 
files, exclude) basePath: '', // frameworks to use // available frameworks: https://npmjs.org/browse/keyword/karma-adapter frameworks: ['jasmine'], // list of files / patterns to load in the browser files: [ '/usr/share/javascript/jquery/jquery.js', '/usr/share/javascript/angular.js/angular.js', '/usr/share/javascript/angular.js/angular-route.js', '/usr/share/javascript/angular.js/angular-mocks.js', '/usr/share/javascript/angular.js/angular-cookies.js', '../../src/maasserver/static/js/angular/maas.js', '../../src/maasserver/static/js/angular/testing/*.js', '../../src/maasserver/static/js/angular/*/*.js', '../../src/maasserver/static/js/angular/*/tests/test_*.js' ], // list of files to exclude exclude: [ ], // preprocess matching files before serving them to the browser // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor preprocessors: { }, // test results reporter to use // possible values: 'dots', 'progress' // available reporters: https://npmjs.org/browse/keyword/karma-reporter reporters: ['failed'], // web server port port: 9876, // enable / disable colors in the output (reporters and logs) colors: true, // level of logging // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG logLevel: config.LOG_INFO, // enable / disable watching file and executing tests whenever any file changes autoWatch: false, // start these browsers // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher browsers: ['PhantomJS'], // Continuous Integration mode // if true, Karma captures browsers, runs the tests and exits singleRun: true, // List of plugins to enable plugins: [ 'karma-jasmine', 'karma-chrome-launcher', 'karma-firefox-launcher', 'karma-opera-launcher', 'karma-phantomjs-launcher', 'karma-failed-reporter' ] }); }; maas-1.9.5+bzr4599.orig/src/maastesting/karma.py0000644000000000000000000000414613056115004017433 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helper to start the Karma environment to run MAAS JS unit-tests.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ ] import os from subprocess import ( CalledProcessError, check_output, Popen, ) from maastesting.fixtures import DisplayFixture def is_browser_available(browser): """Return True if browser is available.""" try: check_output(('which', browser)) except CalledProcessError: return False else: return True def gen_available_browsers(): """Find available browsers for the current system. Yields ``(name, environ)`` tuples, where ``name`` is passed to the runner, and ``environ`` is a dict of additional environment variables needed to run the given browser. """ # PhantomJS is always enabled. yield "PhantomJS", {} # XXX: allenap bug=1427492 2015-03-03: Firefox has been very unreliable # both with Karma and with Selenium. Disabling it. # if is_browser_available("firefox"): # yield "Firefox", {} # Prefer Chrome, but fall-back to Chromium. if is_browser_available("google-chrome"): yield "Chrome", {"CHROME_BIN": "google-chrome"} elif is_browser_available("chromium-browser"): yield "Chrome", {"CHROME_BIN": "chromium-browser"} if is_browser_available("opera"): yield "Opera", {} def run_karma(): """Start Karma with the MAAS JS testing configuration.""" browsers = set() # Names passed to bin/karma. extra = {} # Additional environment variables. 
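# Note that the browser-specific environment variables are merged into
# a single dict, so if two launchers ever needed conflicting values
# for the same variable, whichever was yielded last would win. With
# the current set (only CHROME_BIN) no conflict is possible.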
for name, env in gen_available_browsers(): browsers.add(name) extra.update(env) command = ( 'bin/karma', 'start', '--single-run', '--no-colors', '--browsers', ','.join(browsers), 'src/maastesting/karma.conf.js') with DisplayFixture(): karma = Popen(command, env=dict(os.environ, **extra)) raise SystemExit(karma.wait()) maas-1.9.5+bzr4599.orig/src/maastesting/loader.py0000644000000000000000000000231713056115004017604 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test loader for MAAS and its applications.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "MAASTestLoader", ] import unittest class MAASTestLoader(unittest.TestLoader): """Scan modules for tests by default. This discovers tests using `unittest.TestLoader.discover` when `loadTestsFromName` is called. This is not standard behaviour, but it's here to help hook into setuptools' testing support. Refer to as ``maastesting.loader:MAASTestLoader`` in ``setup.py``. """ def loadTestsFromName(self, name, module=None): assert module is None, ( "Module %r is confusing. This method expects the name passed " "in to actually be a filesystem path from which to start test " "discovery. It doesn't know what to do when a module object is " "passed in too. Sorry, either this is not the class you're " "looking for, or you're doing it wrong." % (module,)) return self.discover(name) maas-1.9.5+bzr4599.orig/src/maastesting/management/0000755000000000000000000000000013056115004020075 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/matchers.py0000644000000000000000000002055713056115004020152 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
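# A sketch of how the matchers below are typically combined with
# testtools' assertThat (illustrative; the patched function is
# hypothetical):
#
#   fake = self.patch(some_module, "some_function")
#   code_under_test()
#   self.assertThat(fake, MockCalledOnceWith(mock.ANY))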
"""testtools custom matchers""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'DocTestMatches', 'GreaterThanOrEqual', 'HasAttribute', 'IsCallable', 'IsCallableMock', 'IsFiredDeferred', 'IsUnfiredDeferred', 'LessThanOrEqual', 'MockAnyCall', 'MockCalledOnce', 'MockCalledOnceWith', 'MockCalledWith', 'MockCallsMatch', 'MockNotCalled', 'Provides', ] import doctest from functools import partial from testtools import matchers from testtools.matchers import ( AfterPreprocessing, Annotate, Equals, GreaterThan, HasLength, IsInstance, LessThan, Matcher, MatchesAll, MatchesAny, MatchesPredicate, MatchesStructure, Mismatch, ) from twisted.internet import defer class IsCallable(Matcher): """Matches if the matchee is callable.""" def match(self, something): if not callable(something): return Mismatch("%r is not callable" % (something,)) def __str__(self): return self.__class__.__name__ class Provides(MatchesPredicate): """Match if the given interface is provided.""" def __init__(self, iface): super(Provides, self).__init__( iface.providedBy, "%%r does not provide %s" % iface.getName()) class HasAttribute(Matcher): """Match if the given attribute is available.""" def __init__(self, attribute): super(HasAttribute, self).__init__() self.attribute = attribute def match(self, something): try: getattr(something, self.attribute) except AttributeError: return Mismatch( "%r does not have a %r attribute" % ( something, self.attribute)) def __str__(self): return "%s(%r)" % (self.__class__.__name__, self.attribute) class IsCallableMock(Matcher): """Match if the subject looks like a mock that's callable. `mock.create_autospec` can return objects like functions and modules that are also callable mocks, but we can't use a simple ``isinstance`` test to ascertain that. Here we assume the presence of ``return_value`` and ``side_effect`` attributes means that we've found a callable mock. These attributes are defined in `mock.CallableMixin`. """ def match(self, something): return MatchesAll( HasAttribute("return_value"), HasAttribute("side_effect"), IsCallable(), ).match(something) def __str__(self): return self.__class__.__name__ def get_mock_calls(mock): """Return a list of all calls made to the given `mock`. :type mock: :class:`Mock` """ return mock.call_args_list class MockCalledWith(Matcher): """Matches if the matchee Mock was called with the provided args. Use of Mock.assert_called_with is discouraged as it passes if you typo the function name. """ def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs def __str__(self): return "%s(args=%r, kwargs=%r)" % ( self.__class__.__name__, self.args, self.kwargs) def match(self, mock): try: mock.assert_called_with(*self.args, **self.kwargs) except AssertionError as e: return Mismatch(*e.args) return None class MockCalledOnceWith(MockCalledWith): """Matches if the matchee `Mock` was called once, with the provided args. To pass the match, the mock must have been called exactly once, and with the given arguments. Use `mock.ANY` for any parameters whose values don't matter for the match. Use this instead of `Mock.assert_called_once_with`, which just always passes blindly if you mis-spell the name. """ def match(self, mock): try: mock.assert_called_once_with(*self.args, **self.kwargs) except AssertionError as e: return Mismatch(*e.args) return None class MockCalledOnce(Matcher): """Matches if the matchee `Mock` was called once, with any arguments. 
The mock library does not have an equivalent. """ def __str__(self): return self.__class__.__name__ def match(self, mock): mismatch = IsCallableMock().match(mock) if mismatch is not None: return mismatch elif mock.call_count == 1: return None else: return Mismatch( "Expected to be called once. Called %d times." % mock.call_count) class MockAnyCall(MockCalledWith): """Matches if the matchee Mock was called at any time with the provided args. Use of Mock.assert_any_call is discouraged as it passes if you typo the function name. """ def match(self, mock): try: mock.assert_any_call(*self.args, **self.kwargs) except AssertionError as e: return Mismatch(*e.args) return None class MockCallsMatch(Matcher): """Matches if the matchee Mock was called with exactly the given sequence of calls. :param calls: A sequence of :class:`mock.call`s that the matchee is expected to have been called with. The mock library does not have an equivalent. """ def __init__(self, *calls): super(Matcher, self).__init__() self.calls = list(calls) def __str__(self): return "%s(%r)" % ( self.__class__.__name__, self.calls) def match(self, mock): matcher = MatchesAll( IsCallableMock(), Annotate( "calls do not match", AfterPreprocessing( get_mock_calls, Equals(self.calls)), ), first_only=True, ) return matcher.match(mock) class MockNotCalled(Matcher): """Matches if the matchee Mock was not called. The mock library does not have an equivalent. """ def __str__(self): return self.__class__.__name__ def match(self, mock): matcher = MatchesAll( IsCallableMock(), Annotate( "mock has been called", AfterPreprocessing( get_mock_calls, HasLength(0)), ), first_only=True, ) return matcher.match(mock) class IsFiredDeferred(Matcher): """Matches if the subject is a fired `Deferred`.""" def __str__(self): return self.__class__.__name__ def match(self, thing): if not isinstance(thing, defer.Deferred): return Mismatch("%r is not a Deferred" % (thing,)) if not thing.called: return Mismatch("%r has not been called" % (thing,)) return None class IsUnfiredDeferred(Matcher): """Matches if the subject is an unfired `Deferred`.""" def __str__(self): return self.__class__.__name__ def match(self, thing): if not isinstance(thing, defer.Deferred): return Mismatch("%r is not a Deferred" % (thing,)) if thing.called: return Mismatch( "%r has been called (result=%r)" % (thing, thing.result)) return None class MatchesPartialCall(Matcher): def __init__(self, func, *args, **keywords): super(MatchesPartialCall, self).__init__() self.expected = partial(func, *args, **keywords) def match(self, observed): matcher = MatchesAll( IsInstance(partial), MatchesStructure.fromExample( self.expected, "func", "args", "keywords"), first_only=True, ) return matcher.match(observed) def GreaterThanOrEqual(value): return MatchesAny(GreaterThan(value), Equals(value)) def LessThanOrEqual(value): return MatchesAny(LessThan(value), Equals(value)) class DocTestMatches(matchers.DocTestMatches): """See if a string matches a doctest example. This differs from testtools' matcher in that it, by default, normalises white-space and allows the use of ellipsis. See `doctest` for details. 
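For example (illustrative)::

    self.assertThat(
        "Failure #42: out of disk",
        DocTestMatches("Failure #...: out of disk"))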
""" DEFAULT_FLAGS = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE def __init__(self, example, flags=DEFAULT_FLAGS): super(DocTestMatches, self).__init__(example, flags) maas-1.9.5+bzr4599.orig/src/maastesting/models.py0000644000000000000000000000000013056115004017604 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/noseplug.py0000644000000000000000000000765313056115004020202 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Nose plugins for MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "Crochet", "main", "Select", ] import inspect import logging from nose.core import TestProgram from nose.plugins.base import Plugin from twisted.python.filepath import FilePath class Crochet(Plugin): """Start the Twisted reactor via Crochet.""" name = "crochet" log = logging.getLogger('nose.plugins.%s' % name) def configure(self, options, conf): """Configure, based on the parsed options. :attention: This is part of the Nose plugin contract. """ super(Crochet, self).configure(options, conf) if self.enabled: import crochet crochet.setup() def help(self): """Used in the --help text. :attention: This is part of the Nose plugin contract. """ return inspect.getdoc(self) class Select(Plugin): """Another way to limit which tests are chosen.""" name = "select" option_dirs = "%s_dirs" % name log = logging.getLogger('nose.plugins.%s' % name) def __init__(self): super(Select, self).__init__() self.dirs = frozenset() def options(self, parser, env): """Add options to Nose's parser. :attention: This is part of the Nose plugin contract. """ super(Select, self).options(parser, env) parser.add_option( "--%s-dir" % self.name, "--%s-directory" % self.name, dest=self.option_dirs, action="append", default=[], help=( "Allow test discovery in this directory. Explicitly named " "tests outside of this directory may still be loaded. This " "option can be given multiple times to allow discovery in " "multiple directories." ), metavar="DIR", ) def configure(self, options, conf): """Configure, based on the parsed options. :attention: This is part of the Nose plugin contract. """ super(Select, self).configure(options, conf) if self.enabled: # Process --${name}-dir. for path in getattr(options, self.option_dirs): self.addDirectory(path) if self.log.isEnabledFor(logging.DEBUG): self.log.debug( "Limiting to the following directories " "(exact matches only):") for path in sorted(self.dirs): self.log.debug("- %s", path) def addDirectory(self, path): """Include `path` in test discovery. This scans all child directories of `path` and also all `parents`; `wantDirectory()` can then do an exact match. """ start = FilePath(path) self.dirs = self.dirs.union( (fp.path for fp in start.parents()), (fp.path for fp in start.walk() if fp.isdir()), ) def wantDirectory(self, path): """Rejects directories outside of the chosen few. :attention: This is part of the Nose plugin contract. """ if path in self.dirs: self.log.debug("Selecting %s", path) return True else: self.log.debug("Rejecting %s", path) return False def help(self): """Used in the --help text. :attention: This is part of the Nose plugin contract. """ return inspect.getdoc(self) def main(): """Invoke Nose's `TestProgram` with extra plugins. Specifically the `Crochet` and `Select` plugins. 
At the command-line it's still necessary to enable these with the flags ``--with-crochet`` and/or ``--with-select``. """ return TestProgram(addplugins=[Crochet(), Select()]) maas-1.9.5+bzr4599.orig/src/maastesting/protractor/0000755000000000000000000000000013056115004020160 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/runtest.py0000644000000000000000000000612213056115004020040 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test executors for MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'MAASRunTest', 'MAASTwistedRunTest', ] import sys import types from testtools import ( deferredruntest, runtest, ) from twisted.internet import defer class InvalidTest(Exception): """Signifies that the test is invalid; it's not a good test.""" def check_for_generator(result): if isinstance(result, types.GeneratorType): raise InvalidTest( "Test returned a generator. Should it be " "decorated with inlineCallbacks?") else: return result class MAASRunTest(runtest.RunTest): """A specialisation of testtools' `RunTest`. It catches a common problem when writing tests for Twisted: forgetting to decorate a test with `inlineCallbacks` that needs it. Tests in `maas`, `maasserver`, and `metadataserver` run with a Twisted reactor managed by `crochet`. It can be easy to decorate a test that contains a ``yield`` with ``@wait_for_reactor`` or ``@asynchronous``, forget the crucial ``@inlineCallbacks``, but see that it passes... because it's not actually running. This is another reason why you should see your test fail before you make it pass, but why not have the computer check too? """ def _run_user(self, function, *args, **kwargs): """Override testtools' `_run_user`. `_run_user` is used in testtools for running functions in the test case that should not normally return a generator, so we check that here, as it's a good sign that a test case (or `setUp`, or `tearDown`) is yielding without `inlineCallbacks` to support it. """ try: result = function(*args, **kwargs) return check_for_generator(result) except KeyboardInterrupt: raise except: return self._got_user_exception(sys.exc_info()) class MAASTwistedRunTest(deferredruntest.AsynchronousDeferredRunTest): """A specialisation of testtools' `AsynchronousDeferredRunTest`. It catches a common problem when writing tests for Twisted: forgetting to decorate a test with `inlineCallbacks` that needs it. Tests in `maas`, `maasserver`, and `metadataserver` run with a Twisted reactor managed by `crochet`, so don't use this; it will result in a deadlock. """ def _run_user(self, function, *args): """Override testtools' `_run_user`. `_run_user` is used in testtools for running functions in the test case that may or may not return a `Deferred`. Here we also check for generators, a good sign that a test case (or `setUp`, or `tearDown`) is yielding without `inlineCallbacks` to support it. """ d = defer.maybeDeferred(function, *args) d.addCallback(check_for_generator) d.addErrback(self._got_user_failure) return d maas-1.9.5+bzr4599.orig/src/maastesting/scenarios.py0000644000000000000000000000205113056115004020317 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Adapting `testscenarios` to work with MAAS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "WithScenarios", ] import testscenarios class WithScenarios(testscenarios.WithScenarios): """Variant of testscenarios_' that provides ``__call__``. Some sadistic `TestCase` implementations (cough, Django, cough) treat ``__call__`` as something other than a synonym for ``run``. This means that testscenarios_' ``WithScenarios``, which customises ``run`` only, does not work correctly. .. testscenarios_: https://launchpad.net/testscenarios """ def __call__(self, result=None): if self._get_scenarios(): for test in testscenarios.generate_scenarios(self): test.__call__(result) else: super(WithScenarios, self).__call__(result) maas-1.9.5+bzr4599.orig/src/maastesting/testcase.py0000644000000000000000000002301013056115004020142 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test related classes and functions for MAAS and its applications.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'MAASRunTest', 'MAASTestCase', 'MAASTwistedRunTest', ] import abc from collections import Sequence from contextlib import contextmanager import doctest from importlib import import_module import os import unittest from maastesting.crochet import EventualResultCatchingMixin from maastesting.factory import factory from maastesting.fixtures import TempDirectory from maastesting.matchers import DocTestMatches from maastesting.runtest import ( MAASRunTest, MAASTwistedRunTest, ) from maastesting.scenarios import WithScenarios import mock from nose.proxy import ResultProxy from nose.tools import nottest import testresources import testtools import testtools.matchers @nottest @contextmanager def active_test(result, test): """Force nose to report for the test that's running. Nose presents a proxy result and passes on results using only the top-level test, rather than the actual running test. This attempts to undo this dubious choice. If the result is not a nose proxy then this is a no-op. """ if isinstance(result, ResultProxy): orig = result.test.test result.test.test = test try: yield finally: result.test.test = orig else: yield class MAASTestType(abc.ABCMeta): """Base type for MAAS's test cases. Its only task at present is to ensure that `scenarios` is defined as a sequence. If not, for example it might be defined using a generator, it is coerced into a sequence. No attempt is made to suppress exceptions arising from this coercion, so failures are early and loud. Coercing generators is valuable because the use of these for scenarios can result in strange behaviour that doesn't obviously point to the cause. An alternative might be to reject non-sequences, but it seems we can safely handle them here just as well. Now that the issue is known, and a mechanism is in place to deal with it, we can easily change the policy later on. """ def __new__(cls, name, bases, attrs): scenarios = attrs.get("scenarios") if scenarios is not None: if not isinstance(scenarios, Sequence): scenarios = attrs["scenarios"] = tuple(scenarios) if len(scenarios) == 0: scenarios = attrs["scenarios"] = None return super(MAASTestType, cls).__new__( cls, name, bases, attrs) class MAASTestCase( WithScenarios, EventualResultCatchingMixin, testtools.TestCase): """Base `TestCase` for MAAS. 
Supports `test resources`_, `test scenarios`_, and `fixtures`_. .. _test resources: https://launchpad.net/testresources .. _test scenarios: https://launchpad.net/testscenarios .. _fixtures: https://launchpad.net/python-fixtures """ __metaclass__ = MAASTestType # Use a customised executor. run_tests_with = MAASRunTest # Allow testtools to generate longer diffs when tests fail. maxDiff = testtools.TestCase.maxDiff * 3 # testresources.ResourcedTestCase does something similar to this class # (with respect to setUpResources and tearDownResources) but it explicitly # up-calls to unittest.TestCase instead of using super() even though it is # not guaranteed that the next class in the inheritance chain is # unittest.TestCase. resources = ( # (resource-name, resource), ) scenarios = ( # (scenario-name, {instance-attribute-name: value, ...}), ) # The database may NOT be used in tests. See `checkDatabaseUse`. Use a # subclass like `MAASServerTestCase` or `MAASTransactionalServerTestCase` # instead, which will manage the database and transactions correctly. database_use_possible = "DJANGO_SETTINGS_MODULE" in os.environ database_use_permitted = False def setUp(self): self.maybeCloseDatabaseConnections() super(MAASTestCase, self).setUp() self.setUpResources() def setUpResources(self): testresources.setUpResources( self, self.resources, testresources._get_result()) def tearDown(self): self.tearDownResources() super(MAASTestCase, self).tearDown() self.checkDatabaseUse() def maybeCloseDatabaseConnections(self): """Close database connections if their use is not permitted.""" if self.database_use_possible and not self.database_use_permitted: from django.db import connection connection.close() def checkDatabaseUse(self): """Enforce `database_use_permitted`.""" if self.database_use_possible and not self.database_use_permitted: from django.db import connection self.expectThat( connection.connection, testtools.matchers.Is(None), "Test policy forbids use of the database.") connection.close() def tearDownResources(self): testresources.tearDownResources( self, self.resources, testresources._get_result()) def make_dir(self): """Create a temporary directory. This is a convenience wrapper around a fixture incantation. That's the only reason why it's on the test case and not in a factory. """ return self.useFixture(TempDirectory()).path def make_file(self, name=None, contents=None): """Create, and write to, a file. This is a convenience wrapper around `make_dir` and a factory call. It ensures that the file is in a directory that will be cleaned up at the end of the test. """ return factory.make_file(self.make_dir(), name, contents) # Django's implementation for this seems to be broken and was # probably only added to support compatibility with python 2.6. assertItemsEqual = unittest.TestCase.assertItemsEqual doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE def assertAttributes(self, tested_object, attributes): """Check multiple attributes of `tested_object` against a dict. :param tested_object: Any object whose attributes should be checked. :param attributes: A dict of attributes to test, and their expected values. Only these attributes will be checked. """ matcher = testtools.matchers.MatchesStructure.byEquality(**attributes) self.assertThat(tested_object, matcher) def assertDocTestMatches( self, expected, observed, flags=DocTestMatches.DEFAULT_FLAGS): """See if `observed` matches `expected`, a doctest sample. By default uses the doctest flags `NORMALIZE_WHITESPACE` and `ELLIPSIS`. 
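For example (illustrative)::

    self.assertDocTestMatches(
        "Uploading ... to server.", "Uploading 12 files to server.")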
""" self.assertThat(observed, DocTestMatches(expected, flags)) def assertIdentical(self, expected, observed, msg=None): """Check if `expected` is `observed`. This is an obect-identity-equality test, not an object equality (i.e. __eq__) test. """ if expected is not observed: raise self.failureException( msg or '%r is not %r' % (expected, observed)) def assertNotIdentical(self, expected, observed, msg=None): """Check if `expected` is not `observed`. This is an obect-identity-equality test, not an object equality (i.e. __eq__) test. """ if expected is observed: raise self.failureException( msg or '%r is %r' % (expected, observed)) def run(self, result=None): with active_test(result, self): super(MAASTestCase, self).run(result) def __call__(self, result=None): with active_test(result, self): super(MAASTestCase, self).__call__(result) def patch(self, obj, attribute=None, value=mock.sentinel.unset): """Patch `obj.attribute` with `value`. If `value` is unspecified, a new `MagicMock` will be created and patched-in instead. Its ``__name__`` attribute will be set to `attribute` or the ``__name__`` of the replaced object if `attribute` is not given. This is a thin customisation of `testtools.TestCase.patch`, so refer to that in case of doubt. :return: The patched-in object. """ # If 'attribute' is None, assume 'obj' is a 'fully-qualified' object, # and assume that its __module__ is what we want to patch. For more # complex use cases, the two-parameter 'patch' will still need to # be used. if attribute is None: attribute = obj.__name__ obj = import_module(obj.__module__) if isinstance(attribute, unicode): attribute = attribute.encode("ascii") if value is mock.sentinel.unset: value = mock.MagicMock(__name__=attribute) super(MAASTestCase, self).patch(obj, attribute, value) return value def patch_autospec(self, obj, attribute, spec_set=False, instance=False): """Patch `obj.attribute` with an auto-spec of itself. See `mock.create_autospec` and `patch`. :return: The patched-in object. """ spec = getattr(obj, attribute) value = mock.create_autospec(spec, spec_set, instance) super(MAASTestCase, self).patch(obj, attribute, value) return value maas-1.9.5+bzr4599.orig/src/maastesting/tests/0000755000000000000000000000000013056115004017123 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/twisted.py0000644000000000000000000000507013056115004020020 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing helpers for Twisted code.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "always_fail_with", "always_succeed_with", "TwistedLoggerFixture", ] from copy import copy import operator from fixtures import Fixture from twisted.internet import defer from twisted.python import log class TwistedLoggerFixture(Fixture): """Capture all Twisted logging. Temporarily replaces all log observers. """ def __init__(self): super(TwistedLoggerFixture, self).__init__() self.logs = [] # Check if new logger from Twisted 15+ is being used. self.isNewLogger = hasattr(log.theLogPublisher, "_publishPublisher") def dump(self): """Return all logs as a string.""" return "\n---\n".join( log.textFromEventDict(event) for event in self.logs) # For compatibility with fixtures.FakeLogger. 
output = property(dump) def containsError(self): return any(log["isError"] for log in self.logs) def newSetUp(self): """Twisted 15+ uses a new logger.""" self._publishPublisher = log.theLogPublisher._publishPublisher log.theLogPublisher._publishPublisher = self.logs.append def newCleanUp(self): """Twisted 15+ uses a new logger.""" log.theLogPublisher._publishPublisher = self._publishPublisher def setUp(self): super(TwistedLoggerFixture, self).setUp() if self.isNewLogger: self.addCleanup(self.newCleanUp) self.newSetUp() else: self.addCleanup( operator.setitem, log.theLogPublisher.observers, slice(None), log.theLogPublisher.observers[:]) log.theLogPublisher.observers[:] = [self.logs.append] def always_succeed_with(result): """Return a callable that always returns a successful Deferred. The callable allows (and ignores) all arguments, and returns a shallow `copy` of `result`. """ def always_succeed(*args, **kwargs): return defer.succeed(copy(result)) return always_succeed def always_fail_with(result): """Return a callable that always returns a failed Deferred. The callable allows (and ignores) all arguments, and returns a shallow `copy` of `result`. """ def always_fail(*args, **kwargs): return defer.fail(copy(result)) return always_fail maas-1.9.5+bzr4599.orig/src/maastesting/utils.py0000644000000000000000000001057413056115004017502 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing utilities.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "age_file", "content_from_file", "extract_word_list", "get_write_time", "FakeRandInt", "preexec_fn", "run_isolated", "sample_binary_data", ] import codecs import os import re import signal from sys import ( stderr, stdout, ) from traceback import print_exc import subunit from testtools.content import Content from testtools.content_type import UTF8_TEXT def age_file(path, seconds): """Backdate a file's modification time so that it looks older.""" stat_result = os.stat(path) atime = stat_result.st_atime mtime = stat_result.st_mtime os.utime(path, (atime, mtime - seconds)) def get_write_time(path): """Return last modification time of file at `path`.""" return os.stat(path).st_mtime def content_from_file(path): """Alternative to testtools' version. This keeps an open file-handle, so it can obtain the log even when the file has been unlinked. """ fd = open(path, "rb") def iterate(): fd.seek(0) return iter(fd) return Content(UTF8_TEXT, iterate) def extract_word_list(string): """Return a list of words from a string. Words are any string of 1 or more characters, not including commas, semi-colons, or whitespace. """ return re.findall("[^,;\s]+", string) def preexec_fn(): # Revert Python's handling of SIGPIPE. See # http://bugs.python.org/issue1652 for more info. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def run_isolated(cls, self, result): """Run a test suite or case in a subprocess. This is derived from ``subunit.run_isolated``. Subunit's version clobbers stdout by dup'ing the subunit's stream over the top, which prevents effective debugging at the terminal. This variant does not suffer from the same issue. """ c2pread, c2pwrite = os.pipe() pid = os.fork() if pid == 0: # Child: runs test and writes subunit to c2pwrite. 
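# Note that the child exits via os._exit() in all cases and never
# returns up the caller's stack: returning would let the forked copy
# run the parent's cleanups and flush duplicated output buffers.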
try: os.close(c2pread) stream = os.fdopen(c2pwrite, 'wb') sender = subunit.TestProtocolClient(stream) cls.run(self, sender) stream.flush() stdout.flush() stderr.flush() except: # Print error and exit hard. try: print_exc(file=stderr) stderr.flush() finally: os._exit(2) finally: # Exit hard. os._exit(0) else: # Parent: receives subunit from c2pread. os.close(c2pwrite) stream = os.fdopen(c2pread, 'rb') receiver = subunit.TestProtocolServer(result) receiver.readFrom(stream) os.waitpid(pid, 0) # Some horrible binary data that could never, ever, under any encoding # known to man(1) survive mis-interpretation as text. # # The data contains a nul byte to trip up accidental string termination. # Switch the bytes of the byte-order mark around and by design you get # an invalid codepoint; put a byte with the high bit set between bytes # that have it cleared, and you have a guaranteed non-UTF-8 sequence. # # (1) Provided, of course, that man know only about ASCII and # UTF. sample_binary_data = codecs.BOM64_LE + codecs.BOM64_BE + b'\x00\xff\x00' class FakeRandInt: """Fake `randint` with forced limitations on its range. This lets you set a forced minimum, and/or a forced maximum, on the range of any call. For example, if you pass `forced_maximum=3`, then a call will never return more than 3. If you don't set a maximum, or if the call's maximum argument is less than the forced maximum, then the call's maximum will be respected. """ def __init__(self, real_randint, forced_minimum=None, forced_maximum=None): self.real_randint = real_randint self.minimum = forced_minimum self.maximum = forced_maximum def __call__(self, minimum, maximum): if self.minimum is not None: minimum = max(minimum, self.minimum) if self.maximum is not None: maximum = min(maximum, self.maximum) return self.real_randint(minimum, maximum) maas-1.9.5+bzr4599.orig/src/maastesting/yui3.py0000644000000000000000000000322013056115004017221 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helpers for dealing with YUI3.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "extract_tests", "gen_failed_test_messages", "get_failed_tests_message", ] def extract_tests(results): """Extract tests from a YUI3 test result object. See `TestSuite-Level Events`_ for details of the test result object form. .. _TestSuite-Level Events: http://yuilibrary.com/yui/docs/test/#testsuite-level-events """ accumulator = {} _extract_tests(results, accumulator) return accumulator def _extract_tests(results, accumulator, *stack): """Helper for `extract_tests`.""" if isinstance(results, dict): if results["type"] == "test": name = ".".join(reversed(stack)) accumulator[name] = results else: for name, value in results.items(): _extract_tests(value, accumulator, name, *stack) def gen_failed_test_messages(results): """Yield test failure messages from the given results. @param results: See `extract_tests`. """ for name, test in extract_tests(results).items(): if test["result"] != "pass": yield "%s: %s" % (name, test["message"]) def get_failed_tests_message(results): """Return a complete error message for the given results. @param results: See `extract_tests`. 
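The individual failure messages are sorted and joined with blank
lines, yielding something along the lines of (illustrative)::

    suite.case.test_a: expected 1, got 2

    suite.case.test_b: boom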
""" messages = gen_failed_test_messages(results) return "\n\n".join(sorted(messages)) maas-1.9.5+bzr4599.orig/src/maastesting/management/__init__.py0000644000000000000000000000000013056115004022174 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/management/commands/0000755000000000000000000000000013056115004021676 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/management/commands/__init__.py0000644000000000000000000000000013056115004023775 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/management/commands/tests/0000755000000000000000000000000013056115004023040 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/management/commands/tests/__init__.py0000644000000000000000000000000013056115004025137 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/protractor/__init__.py0000644000000000000000000000000013056115004022257 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/protractor/clusterd.yaml0000644000000000000000000000014713056115004022673 0ustar 00000000000000## TFTP configuration. # tftp: port: 5254 generator: http://localhost:5253/MAAS/api/1.0/pxeconfig/ maas-1.9.5+bzr4599.orig/src/maastesting/protractor/fixture.yaml0000644000000000000000000000436713056115004022544 0ustar 00000000000000- fields: { callback: null, callback_confirmed: false, consumer: 7, is_approved: true, key: 3trwZ3uDXYACfY36wD, secret: T2fkbcnbWsS3k2YtMVvLCCKfb7F7mQSE, timestamp: 1370247090, token_type: 2, user: 1, verifier: ''} model: piston.token pk: 7 - fields: { description: '', key: y7FgR8SgJS6X9BAUek, name: MAAS consumer, secret: '', status: accepted, user: 1} model: piston.consumer pk: 7 - fields: { created: 2012-01-24, cluster_name: master, uuid: master, status: 1, api_token: 7, updated: !!timestamp '2012-01-24 10:52:38.735954'} model: maasserver.nodegroup pk: 1 - fields: { created: 2012-01-24, hostname: sun, architecture: 'i386/generic', owner: null, status: 0, nodegroup: 1, tags: [], cpu_count: 16, memory: 16384, system_id: node-2666dd64-4671-11e1-93b8-00225f89f211, updated: !!timestamp '2012-01-24 10:52:38.735954'} model: maasserver.node pk: 15 - fields: { created: 2012-01-24, hostname: moon, architecture: 'i386/generic', owner: null, status: 2, nodegroup: 1, tags: [], cpu_count: 1, memory: 512, system_id: node-29d7ad70-4671-11e1-93b8-00225f89f211, updated: !!timestamp '2012-01-24 10:52:44.507777'} model: maasserver.node pk: 16 - fields: {created: 2012-01-24, mac_address: '08:34:2a:b5:8a:45', node: 15, updated: !!timestamp '2012-01-24 11:00:59.951873'} model: maasserver.macaddress pk: 1 - fields: {created: 2012-01-24, mac_address: '08:05:44:c7:bb:45', node: 16, updated: !!timestamp '2012-01-22 11:01:23.998278'} model: maasserver.macaddress pk: 2 - fields: date_joined: 2012-01-23 11:52:14 email: admin@example.com first_name: '' groups: [] is_active: true is_staff: true is_superuser: true last_login: 2012-01-24 09:45:50 last_name: '' password: md5$uZic1a8frlfp$5eb190821d99ca2a68ec2f5bd46b4ae7 user_permissions: [] username: admin model: auth.user pk: 101 - fields: date_joined: 2012-01-24 10:49:41 email: test@example.com first_name: '' groups: [] is_active: true is_staff: false is_superuser: false last_login: 2012-01-24 10:49:41 last_name: '' password: md5$uZic1a8frlfp$5eb190821d99ca2a68ec2f5bd46b4ae7 user_permissions: [] username: user model: auth.user pk: 102 maas-1.9.5+bzr4599.orig/src/maastesting/protractor/protractor.conf.js0000644000000000000000000000040713056115004023642 0ustar 00000000000000// Protractor configuration exports.config = 
{ // use the jasmine framework for tests framework: 'jasmine', // connect to webdriver default address seleniumAddress: 'http://localhost:4444', // list of files to run specs: [ 'tests/*.js', ] } maas-1.9.5+bzr4599.orig/src/maastesting/protractor/runner.py0000644000000000000000000001663613056115004022057 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Helper to start the Protractor environment to run MAAS JS E2E tests.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ ] import os import signal from subprocess import Popen import sys import time import traceback from django.conf import settings from django.core.management import call_command from django.db import DEFAULT_DB_ALIAS from django.test.runner import setup_databases from fixtures import Fixture from maastesting.fixtures import ( ChromiumWebDriverFixture, DisplayFixture, ) from postgresfixture import ClusterFixture from provisioningserver.testing.config import ClusterConfigurationFixture from testtools.monkey import patch from twisted.scripts import twistd class ServiceError(Exception): """Raised when failure to start a service.""" def redirect_to_devnull(): """Redirect all input and output to /dev/null.""" os.setsid() null = os.open(os.devnull, os.O_RDWR) os.dup2(null, 0) os.dup2(null, 1) os.dup2(null, 2) os.close(null) class MAASRegionServiceFixture(Fixture): """Starts and stops the MAAS region service. This process is forked to spawn regiond where it will run a different port and connect to the testing database instead of the development database. This will isolate this test from the development environment, allowing them to run at the same time. """ FIXTURE = "src/maastesting/protractor/fixture.yaml" def __init__(self): self.verbosity = 1 self.interactive = False def setUp(self): """Start the regiond service.""" super(MAASRegionServiceFixture, self).setUp() # Force django DEBUG false. self.addCleanup(patch(settings, "DEBUG", False)) # Create a database in the PostgreSQL cluster for each database # connection configured in Django"s settings that points to the same # datadir. cluster = ClusterFixture("db", preserve=True) self.useFixture(cluster) for database in settings.DATABASES.values(): if database["HOST"] == cluster.datadir: cluster.createdb(database["NAME"]) # Setup the database for testing. This is so the database is isolated # only for this testing. self.setup_databases() self.addCleanup(self.teardown_databases) # Fork the process to have regiond run in its own process. twistd_pid = os.fork() if twistd_pid == 0: # Redirect all output to /dev/null redirect_to_devnull() # Add command line options to start twistd. sys.argv[1:] = [ "--nodaemon", "--pidfile", "", "maas-regiond", ] # Change the DEFAULT_PORT so it can run along side of the # development regiond. from maasserver import eventloop patch(eventloop, "DEFAULT_PORT", 5253) # Start twistd. try: twistd.run() except: traceback.print_exc() os._exit(2) finally: os._exit(0) else: # Add cleanup to stop the twistd service. self.addCleanup(self.stop_twistd, twistd_pid) # Check that the child process is still running after a few # seconds. This makes sure that everything started okay and it # is still running. time.sleep(2) try: os.kill(twistd_pid, 0) except OSError: # Not running. raise ServiceError( "Failed to start regiond. 
Check that another test is " "not running at the same time.") def stop_twistd(self, twistd_pid): """Stop the regiond service.""" try: os.kill(twistd_pid, signal.SIGINT) _, return_code = os.waitpid(twistd_pid, 0) if return_code != 0: print("WARN: regiond didn't stop cleanly (%d)" % return_code) except OSError: print("WARN: regiond already died.") def setup_databases(self): """Setup the test databases.""" self._old_config = setup_databases(self.verbosity, self.interactive) # Load the fixture into the database. call_command( "loaddata", self.FIXTURE, verbosity=self.verbosity, database=DEFAULT_DB_ALIAS, skip_validation=True) def teardown_databases(self): """Teardown the test databases.""" old_names, mirrors = self._old_config for connection, old_name, destroy in old_names: if destroy: connection.creation.destroy_test_db(old_name, self.verbosity) class MAASClusterServiceFixture(Fixture): """Starts and stops the MAAS cluster service.""" def setUp(self): """Start the clusterd service.""" super(MAASClusterServiceFixture, self).setUp() self.useFixture(ClusterConfigurationFixture( cluster_uuid="adfd3977-f251-4f2c-8d61-745dbd690bf2", maas_url="http://0.0.0.0:5253/MAAS/")) # Fork the process to have clusterd run in its own process. twistd_pid = os.fork() if twistd_pid == 0: # Redirect all output to /dev/null redirect_to_devnull() # Add command line options to start twistd. sys.argv[1:] = ["--nodaemon", "--pidfile", "", "maas-clusterd"] # Start twistd. try: twistd.run() except: traceback.print_exc() os._exit(2) finally: os._exit(0) else: # Add cleanup to stop the twistd service. self.addCleanup(self.stop_twistd, twistd_pid) # Check that the child process is still running after a few # seconds. This makes sure that everything started okay and it # is still running. time.sleep(2) try: os.kill(twistd_pid, 0) except OSError: # Not running. raise ServiceError( "Failed to start clusterd. Check that another test is " "not running at the same time.") def stop_twistd(self, twistd_pid): """Stop the clusterd service.""" try: os.kill(twistd_pid, signal.SIGINT) _, return_code = os.waitpid(twistd_pid, 0) if return_code != 0: print("WARN: clusterd didn't stop cleanly (%d)" % return_code) except OSError: print("WARN: clusterd already died.") def run_protractor(): """Start Protractor with the MAAS JS E2E testing configuration. 1. Start regiond. 2. Start clusterd. 3. Start xvfb. 4. Start chromium webdriver. 5. Run protractor. 6. Stop chromium webdriver. 7. Stop xvfb. 8. Stop clusterd. 9. Stop regiond. """ with MAASRegionServiceFixture(), MAASClusterServiceFixture(): with DisplayFixture(), ChromiumWebDriverFixture(): protractor = Popen(( "bin/protractor", "src/maastesting/protractor/protractor.conf.js")) protractor_exit = protractor.wait() sys.exit(protractor_exit) maas-1.9.5+bzr4599.orig/src/maastesting/protractor/tests/0000755000000000000000000000000013056115004021322 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/protractor/tests/test_login.js0000644000000000000000000000416513056115004024035 0ustar 00000000000000/* Copyright 2015 Canonical Ltd. This software is licensed under the * GNU Affero General Public License version 3 (see the file LICENSE). * * E2E tests for the login page. */ describe("login", function() { // Clear all cookies and load the MAAS page before each test. This page // does not use angular so we tell protractor to ignore waiting for // angular to load. 
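// -------------------------------------------------------------------------
// Illustrative aside -- not part of the original suite. Because this page
// is plain Django rather than Angular, every spec below first disables
// synchronization. A shared helper (hypothetical name) would also remove
// the repeated login steps, using only locators already present here:
//
//     function login(username, password) {
//         element(by.id("id_username")).sendKeys(username);
//         element(by.id("id_password")).sendKeys(password);
//         element(by.css('.login input[type="submit"]')).click();
//     }
//
// A spec could then call login("admin", "test") before its assertions.
// -------------------------------------------------------------------------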
beforeEach(function() { browser.manage().deleteAllCookies(); browser.ignoreSynchronization = true; browser.get("http://localhost:5253/MAAS/") }); it("has login in title", function() { expect(browser.getTitle()).toContain("Login"); }); it("has username field", function() { expect(element(by.id("id_username")).isPresent()).toBe(true); }); it("has password field", function() { expect(element(by.id("id_password")).isPresent()).toBe(true); }); it("has login button", function() { expect( element( by.css('.login input[type="submit"]')).isPresent()).toBe(true); }); it("can login as admin", function() { element(by.id("id_username")).sendKeys("admin"); element(by.id("id_password")).sendKeys("test"); element(by.css('.login input[type="submit"]')).click(); expect( element.all( by.css('#user-link a')).get(0).getText()).toBe("admin"); }); it("can login as user", function() { element(by.id("id_username")).sendKeys("user"); element(by.id("id_password")).sendKeys("test"); element(by.css('.login input[type="submit"]')).click(); expect( element.all( by.css('#user-link a')).get(0).getText()).toBe("user"); }); it("shows mismatch username and password", function() { element(by.id("id_username")).sendKeys("badusername"); element(by.id("id_password")).sendKeys("badpassword"); element(by.css('.login input[type="submit"]')).click(); expect(element(by.css('p.form-errors')).getText()).toBe( "Your username and password didn't match. Please try again."); }); }); maas-1.9.5+bzr4599.orig/src/maastesting/tests/__init__.py0000644000000000000000000000000013056115004021222 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_conflict_markers.py0000644000000000000000000000265513056115004024071 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Check there's no conflict markers in the code.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from pipes import quote from subprocess import ( PIPE, Popen, STDOUT, ) from maastesting import root from maastesting.testcase import MAASTestCase from testtools.content import ( Content, UTF8_TEXT, ) # Do not use '=======' as a conflict marker since it's # used in docstrings. # Express the conflict markers so that this very file won't contain # them. CONFLICT_MARKERS = "<" * 7, ">" * 7 class TestConflictMarkers(MAASTestCase): def execute(self, *command): process = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=root) output, _ = process.communicate() if len(output) != 0: name = "stdout/err from `%s`" % " ".join(map(quote, command)) self.addDetail(name, Content(UTF8_TEXT, lambda: [output])) self.assertEqual('', output, "Conflict markers present!") self.assertEqual(1, process.wait(), "(return code is not one)") def test_no_conflict_markers(self): command = ["egrep", "-rI", "--exclude=*~", "--exclude-dir=include"] command.append("|".join(CONFLICT_MARKERS)) self.execute(*command) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_factory.py0000644000000000000000000003345513056115004022215 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test the factory where appropriate. 
Don't overdo this.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import datetime from itertools import count import os.path import random from random import randint import subprocess from maastesting.factory import ( factory, TooManyRandomRetries, ) from maastesting.testcase import MAASTestCase from maastesting.utils import FakeRandInt from netaddr import ( IPAddress, IPNetwork, ) from testtools.matchers import ( Contains, EndsWith, FileContains, FileExists, MatchesAll, Not, StartsWith, ) from testtools.testcase import ExpectedException class TestFactory(MAASTestCase): def test_make_string_respects_size(self): sizes = [1, 10, 100] random_strings = [factory.make_string(size) for size in sizes] self.assertEqual(sizes, [len(string) for string in random_strings]) def test_pick_bool_returns_bool(self): self.assertIsInstance(factory.pick_bool(), bool) def test_pick_port_returns_int(self): self.assertIsInstance(factory.pick_port(), int) def test_make_vlan_tag_excludes_None_by_default(self): # Artificially limit randint to a very narrow range, to guarantee # some repetition in its output, and virtually guarantee that we test # both outcomes of the flip-a-coin call in make_vlan_tag. self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) outcomes = {factory.make_vlan_tag() for _ in range(1000)} self.assertEqual({1}, outcomes) def test_make_vlan_tag_includes_None_if_allow_none(self): self.patch(random, 'randint', FakeRandInt(random.randint, 0, 1)) self.assertEqual( {None, 1}, { factory.make_vlan_tag(allow_none=True) for _ in range(1000) }) def test_make_ipv4_address(self): ip_address = factory.make_ipv4_address() self.assertIsInstance(ip_address, unicode) octets = ip_address.split('.') self.assertEqual(4, len(octets)) for octet in octets: self.assertTrue(0 <= int(octet) <= 255) def test_make_ipv4_address_but_not(self): # We want to look for clashes between identical IPs and/or netmasks. # Narrow down the range of randomness so we have a decent chance of # triggering a clash, but not so far that we'll loop for very long # trying to find a network we haven't seen already. 
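# Illustrative aside -- not part of the original test. The clash-forcing
# trick used below (shrinking the random range so repeats become likely)
# can be sketched with mock alone; the constant-value lambda is an
# assumption chosen to guarantee collisions:
#
#     import random
#     from mock import patch
#     with patch('random.randint', lambda low, high: min(high, 3)):
#         values = {random.randint(1, 200) for _ in range(100)}
#     # values == {3}: every draw collides, which is what the test needs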
self.patch( factory, 'make_ipv4_address', lambda: '10.%d.0.0' % randint(1, 200)) networks = [] for _ in range(100): networks.append(factory.make_ipv4_network(but_not=networks)) self.assertEquals(len(networks), len(set(networks))) def test_make_UUID(self): uuid = factory.make_UUID() self.assertIsInstance(uuid, unicode) self.assertEqual(36, len(uuid)) def test_make_ipv4_network(self): network = factory.make_ipv4_network() self.assertIsInstance(network, IPNetwork) def test_make_ipv4_network_respects_but_not(self): self.patch(factory, 'make_ipv4_address').return_value = IPAddress( '10.1.1.0') self.assertRaises( TooManyRandomRetries, factory.make_ipv4_network, slash=24, but_not=[IPNetwork('10.1.1.0/24')]) def test_make_ipv4_network_returns_network_not_in_but_not(self): self.patch(factory, 'make_ipv4_address').return_value = IPAddress( '10.1.1.0') self.assertEqual( IPNetwork('10.1.1.0/24'), factory.make_ipv4_network( slash=24, but_not=[IPNetwork('10.9.9.0/24')])) def test_make_ipv4_network_may_overlap_but_not(self): self.patch(factory, 'make_ipv4_address').return_value = IPAddress( '10.1.1.0') self.assertEqual( IPNetwork('10.1.1.0/24'), factory.make_ipv4_network( slash=24, but_not=[IPNetwork('10.1.0.0/16')])) def test_make_ipv4_network_avoids_network_in_disjoint_from(self): self.patch(factory, 'make_ipv4_address').return_value = IPAddress( '10.1.1.0') self.assertRaises( TooManyRandomRetries, factory.make_ipv4_network, slash=24, disjoint_from=[IPNetwork('10.1.1.0/24')]) def test_make_ipv4_network_avoids_network_overlapping_disjoint_from(self): self.patch(factory, 'make_ipv4_address').return_value = IPAddress( '10.1.1.0') self.assertRaises( TooManyRandomRetries, factory.make_ipv4_network, slash=24, disjoint_from=[IPNetwork('10.1.0.0/16')]) def test_make_ipv4_network_returns_network_disjoint_from(self): existing_network = factory.make_ipv4_network() new_network = factory.make_ipv4_network( disjoint_from=[existing_network]) self.assertNotEqual(existing_network, new_network) self.assertNotIn(new_network, existing_network) self.assertNotIn(existing_network, new_network) def test_pick_ip_in_network(self): network = factory.make_ipv4_network() ip = factory.pick_ip_in_network(network) self.assertTrue( network.first <= IPAddress(ip).value <= network.last) def test_make_ip_range_returns_IPs(self): low, high = factory.make_ip_range() self.assertIsInstance(low, IPAddress) self.assertIsInstance(high, IPAddress) self.assertLess(low, high) def test_make_ip_range_obeys_network(self): network = factory.make_ipv4_network() low, high = factory.make_ip_range(network) self.assertIn(low, network) self.assertIn(high, network) def test_make_ip_range_returns_low_and_high(self): # Make a very very small network, to maximise the chances of exposure # if the method gets this wrong e.g. by returning identical addresses. low, high = factory.make_ip_range(factory.make_ipv4_network(slash=31)) self.assertLess(low, high) def test_make_ip_range_obeys_but_not(self): # Make a very very small network, to maximise the chances of exposure # if the method gets this wrong. 
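# Illustrative aside -- not part of the original test. A /31 network holds
# exactly two addresses, so any low/high mix-up surfaces immediately;
# netaddr (already imported in this module) exposes the bounds directly:
#
#     from netaddr import IPAddress, IPNetwork
#     net = IPNetwork('10.0.0.0/31')
#     print(IPAddress(net.first), IPAddress(net.last))  # 10.0.0.0 10.0.0.1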
network = factory.make_ipv4_network(slash=30) first_low, first_high = factory.make_ip_range(network) second_low, second_high = factory.make_ip_range( network, but_not=(first_low, first_high)) self.assertNotEqual((first_low, first_high), (second_low, second_high)) def test_make_date_returns_datetime(self): self.assertIsInstance(factory.make_date(), datetime) def test_make_mac_address(self): mac_address = factory.make_mac_address() self.assertIsInstance(mac_address, unicode) self.assertEqual(17, len(mac_address)) for hex_octet in mac_address.split(":"): self.assertTrue(0 <= int(hex_octet, 16) <= 255) def test_make_mac_address_alternative_delimiter(self): self.patch(factory, "random_octets", count(0x3a)) mac_address = factory.make_mac_address(delimiter="-") self.assertEqual("3a-3b-3c-3d-3e-3f", mac_address) def test_make_random_leases_maps_ips_to_macs(self): [(ip, mac)] = factory.make_random_leases().items() self.assertEqual( 4, len(ip.split('.')), "IP address does not look like an IP address: '%s'" % ip) self.assertEqual( 6, len(mac.split(':')), "MAC address does not look like a MAC address: '%s'" % mac) def test_make_random_leases_randomizes_ips(self): self.assertNotEqual( factory.make_random_leases().keys(), factory.make_random_leases().keys()) def test_make_random_leases_randomizes_macs(self): self.assertNotEqual( factory.make_random_leases().values(), factory.make_random_leases().values()) def test_make_random_leases_returns_requested_number_of_leases(self): num_leases = randint(0, 3) self.assertEqual( num_leases, len(factory.make_random_leases(num_leases))) def test_make_file_creates_file(self): self.assertThat(factory.make_file(self.make_dir()), FileExists()) def test_make_file_writes_contents(self): contents = factory.make_string().encode('ascii') self.assertThat( factory.make_file(self.make_dir(), contents=contents), FileContains(contents)) def test_make_file_makes_up_contents_if_none_given(self): with open(factory.make_file(self.make_dir())) as temp_file: contents = temp_file.read() self.assertNotEqual('', contents) def test_make_file_uses_given_name(self): name = factory.make_string() self.assertEqual( name, os.path.basename(factory.make_file(self.make_dir(), name=name))) def test_make_file_uses_given_dir(self): directory = self.make_dir() name = factory.make_string() self.assertEqual( (directory, name), os.path.split(factory.make_file(directory, name=name))) def test_make_name_returns_unicode(self): self.assertIsInstance(factory.make_name(), unicode) def test_make_name_includes_prefix_and_separator(self): self.assertThat(factory.make_name('abc'), StartsWith('abc-')) def test_make_name_includes_random_text_of_requested_length(self): size = randint(1, 99) self.assertEqual( len('prefix') + len('-') + size, len(factory.make_name('prefix', size=size))) def test_make_name_includes_random_text(self): self.assertNotEqual( factory.make_name(size=100), factory.make_name(size=100)) def test_make_name_uses_configurable_separator(self): sep = 'SEPARATOR' prefix = factory.make_string(3) self.assertThat( factory.make_name(prefix, sep=sep), StartsWith(prefix + sep)) def test_make_name_does_not_require_prefix(self): size = randint(1, 99) unprefixed_name = factory.make_name(sep='-', size=size) self.assertEqual(size, len(unprefixed_name)) self.assertThat(unprefixed_name, Not(StartsWith('-'))) def test_make_name_does_not_include_weird_characters(self): self.assertThat( factory.make_name(size=100), MatchesAll(*[Not(Contains(char)) for char in '/ \t\n\r\\'])) def 
test_make_names_calls_make_name_with_each_prefix(self): self.patch(factory, "make_name", lambda prefix: prefix + "-xxx") self.assertSequenceEqual( ["abc-xxx", "def-xxx", "ghi-xxx"], list(factory.make_names("abc", "def", "ghi"))) def test_make_tarball_writes_tarball(self): filename = factory.make_name() contents = {filename: factory.make_string()} tarball = factory.make_tarball(self.make_dir(), contents) dest = self.make_dir() subprocess.check_call(['tar', '-xzf', tarball, '-C', dest]) self.assertThat( os.path.join(dest, filename), FileContains(contents[filename])) def test_make_tarball_makes_up_content_if_None(self): filename = factory.make_name() tarball = factory.make_tarball(self.make_dir(), {filename: None}) dest = self.make_dir() subprocess.check_call(['tar', '-xzf', tarball, '-C', dest]) self.assertThat(os.path.join(dest, filename), FileExists()) with open(os.path.join(dest, filename), 'rb') as unpacked_file: contents = unpacked_file.read() self.assertGreater(len(contents), 0) def test_make_parsed_url_accepts_explicit_port(self): port = factory.pick_port() url = factory.make_parsed_url(port=port) self.assertThat(url.netloc, EndsWith(':%d' % port), 'The generated URL does not contain ' 'a port specification for port %d' % port) def test_make_parsed_url_can_omit_port(self): url = factory.make_parsed_url(port=False) self.assertThat(url.netloc, Not(Contains(':')), 'Generated url: %s contains a port number ' 'in netloc segment' % url.geturl()) def test_make_parsed_url_picks_random_port(self): url = factory.make_parsed_url() self.assertThat(url.netloc, Contains(':'), 'Generated url: %s does not contain ' 'a port number in netloc segment' % url.geturl()) self.assertTrue(url.netloc.split(':')[1].isdigit(), 'Generated url: %s does not contain a valid ' 'port number in netloc segment' % url.geturl()) url = factory.make_parsed_url(port=True) self.assertThat(url.netloc, Contains(':'), ('Generated url: %s does not contain ' 'a port number in netloc segment') % url.geturl()) self.assertTrue(url.netloc.split(':')[1].isdigit(), 'Generated url: %s does not contain a valid ' 'port number in netloc segment' % url.geturl()) def test_make_parsed_url_asserts_with_conflicting_port_numbers(self): with ExpectedException(AssertionError): netloc = "%s:%d" % (factory.make_hostname(), factory.pick_port()) factory.make_parsed_url(netloc=netloc, port=factory.pick_port()) with ExpectedException(AssertionError): netloc = "%s:%d" % (factory.make_hostname(), factory.pick_port()) factory.make_parsed_url(netloc=netloc, port=True) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_fakemethod.py0000644000000000000000000000521713056115004022650 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
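# Illustrative aside -- not part of the original module. The core of the
# FakeMethod stub exercised below can be summarised in a few lines (a
# sketch under assumed names, not the real implementation):
#
#     class FakeMethodSketch:
#         """Callable that records (args, kwargs) and returns a result."""
#         def __init__(self, result=None):
#             self.result = result
#             self.calls = []
#         def __call__(self, *args, **kwargs):
#             self.calls.append((args, kwargs))
#             return self.result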
"""Tests for :class:`FakeMethod`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.fakemethod import ( FakeMethod, MultiFakeMethod, ) from maastesting.testcase import MAASTestCase class TestFakeMethod(MAASTestCase): def test_fakemethod_returns_None_by_default(self): self.assertEqual(None, FakeMethod()()) def test_fakemethod_returns_given_value(self): self.assertEqual("Input value", FakeMethod("Input value")()) def test_fakemethod_raises_given_failure(self): class ExpectedException(Exception): pass self.assertRaises( ExpectedException, FakeMethod(failure=ExpectedException())) def test_fakemethod_has_no_calls_initially(self): self.assertSequenceEqual([], FakeMethod().calls) def test_fakemethod_records_call(self): stub = FakeMethod() stub() self.assertSequenceEqual([((), {})], stub.calls) def test_fakemethod_records_args(self): stub = FakeMethod() stub(1, 2) self.assertSequenceEqual([((1, 2), {})], stub.calls) def test_fakemethod_records_kwargs(self): stub = FakeMethod() stub(x=10) self.assertSequenceEqual([((), {'x': 10})], stub.calls) def test_call_count_is_zero_initially(self): self.assertEqual(0, FakeMethod().call_count) def test_call_count_counts_calls(self): stub = FakeMethod() stub() self.assertEqual(1, stub.call_count) def test_extract_args_returns_just_call_args(self): stub = FakeMethod() stub(1, 2, 3, x=12) self.assertItemsEqual([(1, 2, 3)], stub.extract_args()) def test_extract_kwargs_returns_just_call_kwargs(self): stub = FakeMethod() stub(1, 2, 3, x=12) self.assertItemsEqual([{'x': 12}], stub.extract_kwargs()) class TestMultiFakeMethod(MAASTestCase): def test_call_calls_all_given_methods(self): methods = FakeMethod(), FakeMethod() method = MultiFakeMethod(methods) call1_args = "input 1" call2_args = "input 2" method(call1_args) method(call2_args) self.assertEqual( [[('input 1',)], [('input 2',)]], [methods[0].extract_args(), methods[1].extract_args()]) def test_raises_if_called_one_time_too_many(self): method = MultiFakeMethod([FakeMethod()]) method() self.assertRaises(ValueError, method) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_fixtures.py0000644000000000000000000002131513056115004022407 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the GNU # Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maastesting.fixtures`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import __builtin__ import os import sys from fixtures import EnvironmentVariableFixture from maastesting import fixtures from maastesting.factory import factory from maastesting.fixtures import ( CaptureStandardIO, ImportErrorFixture, ProxiesDisabledFixture, TempDirectory, TempWDFixture, ) from maastesting.matchers import MockCallsMatch from maastesting.testcase import MAASTestCase from maastesting.utils import sample_binary_data from mock import call from testtools.matchers import ( DirExists, Equals, Is, Not, ) from testtools.testcase import ExpectedException class TestImportErrorFixture(MAASTestCase): """Tests for :class:`TestImportErrorFixture`.""" def test_import_non_targeted_module_successfull(self): self.useFixture(ImportErrorFixture('maastesting', 'root')) from maastesting import bindir # noqa def test_import_targeted_module_unsuccessfull(self): self.useFixture(ImportErrorFixture('maastesting', 'root')) with ExpectedException(ImportError): from maastesting import root # noqa def test_import_restores_original__import__(self): __real_import = __builtin__.__import__ with ImportErrorFixture('maastesting', 'root'): self.assertNotEqual( __real_import, __builtin__.__import__, 'ImportErrorFixture did not properly ' 'patch __builtin__.__import__') self.assertEqual( __real_import, __builtin__.__import__, 'ImportErrorFixture did not properly restore ' 'the original __builtin__.__import__ upon cleanup') class TestProxiedDisabledFixture(MAASTestCase): """Tests for :class:`ProxiesDisabledFixture`.""" def test_removes_http_proxy_from_environment(self): http_proxy = factory.make_name("http-proxy") initial = EnvironmentVariableFixture("http_proxy", http_proxy) self.useFixture(initial) # On entry, http_proxy is removed from the environment. with ProxiesDisabledFixture(): self.assertNotIn("http_proxy", os.environ) # On exit, http_proxy is restored. self.assertEqual(http_proxy, os.environ.get("http_proxy")) def test_removes_https_proxy_from_environment(self): https_proxy = factory.make_name("https-proxy") initial = EnvironmentVariableFixture("https_proxy", https_proxy) self.useFixture(initial) # On entry, https_proxy is removed from the environment. with ProxiesDisabledFixture(): self.assertNotIn("https_proxy", os.environ) # On exit, http_proxy is restored. 
self.assertEqual(https_proxy, os.environ.get("https_proxy")) class TestTempDirectory(MAASTestCase): def test_path_is_unicode(self): with TempDirectory() as fixture: self.assertIsInstance(fixture.path, unicode) def test_path_is_decoded_using_filesystem_encoding(self): sys = self.patch(fixtures, "sys") sys.getfilesystemencoding.return_value = "rot13" with TempDirectory() as fixture: self.assertIsInstance(fixture.path, unicode) self.assertThat(fixture.path, Not(DirExists())) self.assertThat(fixture.path.decode("rot13"), DirExists()) class TestTempWDFixture(MAASTestCase): def test_changes_dir_and_cleans_up(self): orig_cwd = os.getcwd() with TempWDFixture() as temp_wd: new_cwd = os.getcwd() self.assertTrue(os.path.isdir(temp_wd.path)) self.assertNotEqual(orig_cwd, new_cwd) self.assertEqual(new_cwd, temp_wd.path) final_cwd = os.getcwd() self.assertEqual(orig_cwd, final_cwd) self.assertFalse(os.path.isdir(new_cwd)) class TestCaptureStandardIO(MAASTestCase): """Test `CaptureStandardIO`.""" def test__captures_stdin(self): stdin_before = sys.stdin with CaptureStandardIO(): stdin_during = sys.stdin stdin_after = sys.stdin self.expectThat(stdin_during, Not(Is(stdin_before))) self.expectThat(stdin_during, Not(Is(stdin_after))) self.expectThat(stdin_after, Is(stdin_before)) def test__captures_stdout(self): stdout_before = sys.stdout with CaptureStandardIO(): stdout_during = sys.stdout stdout_after = sys.stdout self.expectThat(stdout_during, Not(Is(stdout_before))) self.expectThat(stdout_during, Not(Is(stdout_after))) self.expectThat(stdout_after, Is(stdout_before)) def test__captures_stderr(self): stderr_before = sys.stderr with CaptureStandardIO(): stderr_during = sys.stderr stderr_after = sys.stderr self.expectThat(stderr_during, Not(Is(stderr_before))) self.expectThat(stderr_during, Not(Is(stderr_after))) self.expectThat(stderr_after, Is(stderr_before)) def test__addInput_feeds_stdin(self): text = factory.make_name("text") with CaptureStandardIO() as stdio: stdio.addInput(text + "111") self.expectThat(sys.stdin.read(2), Equals(text[:2])) stdio.addInput(text + "222") self.expectThat(sys.stdin.read(), Equals( text[2:] + "111" + text + "222")) def test__getInput_returns_data_waiting_to_be_read(self): stdio = CaptureStandardIO() stdio.addInput("one\ntwo\n") with stdio: self.expectThat(sys.stdin.readline(), Equals("one\n")) self.expectThat(stdio.getInput(), Equals("two\n")) def test__getOutput_returns_data_written_to_stdout(self): self.assert_getter_returns_data_written_to_stream( CaptureStandardIO.getOutput, "stdout") def test__getError_returns_data_written_to_stderr(self): self.assert_getter_returns_data_written_to_stream( CaptureStandardIO.getError, "stderr") def assert_getter_returns_data_written_to_stream(self, getter, name): stream = self.patch(sys, name) before = factory.make_name("before") during = factory.make_name("during") after = factory.make_name("after") end = factory.make_name("end") print(before, file=getattr(sys, name), end=end) with CaptureStandardIO() as stdio: print(during, file=getattr(sys, name), end=end) print(after, file=getattr(sys, name), end=end) self.expectThat(getter(stdio), Equals(during + end)) self.expectThat(stream.write, MockCallsMatch( call(before), call(end), call(after), call(end))) def test__clearInput_clears_input(self): text = factory.make_name("text") with CaptureStandardIO() as stdio: stdio.addInput(text + "111") sys.stdin.read(2) stdio.clearInput() self.expectThat(sys.stdin.read(2), Equals("")) def test__clearOutput_clears_output(self): text = 
factory.make_name("text") with CaptureStandardIO() as stdio: sys.stdout.write(text) self.expectThat(stdio.getOutput(), Equals(text)) stdio.clearOutput() self.expectThat(stdio.getOutput(), Equals("")) def test__clearError_clears_error(self): text = factory.make_name("text") with CaptureStandardIO() as stdio: sys.stderr.write(text) self.expectThat(stdio.getError(), Equals(text)) stdio.clearError() self.expectThat(stdio.getError(), Equals("")) def test__clearAll_clears_input_output_and_error(self): text = factory.make_name("text") with CaptureStandardIO() as stdio: stdio.addInput(text) sys.stdout.write(text) sys.stderr.write(text) stdio.clearAll() self.expectThat(stdio.getInput(), Equals("")) self.expectThat(stdio.getOutput(), Equals("")) self.expectThat(stdio.getError(), Equals("")) def test__non_text_strings_are_rejected_on_stdout(self): with CaptureStandardIO(): error = self.assertRaises( UnicodeError, sys.stdout.write, sample_binary_data) self.assertDocTestMatches("... codec can't decode ...", unicode(error)) def test__non_text_strings_are_rejected_on_stderr(self): with CaptureStandardIO(): error = self.assertRaises( UnicodeError, sys.stderr.write, sample_binary_data) self.assertDocTestMatches("... codec can't decode ...", unicode(error)) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_httpd.py0000644000000000000000000000536513056115004021670 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maastesting.httpd`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from contextlib import closing import gzip from io import BytesIO from os.path import relpath from socket import ( gethostbyname, gethostname, ) from unittest import skip from urllib2 import ( Request, urlopen, ) from urlparse import urljoin from maastesting.fixtures import ProxiesDisabledFixture from maastesting.httpd import ( HTTPServerFixture, ThreadingHTTPServer, ) from maastesting.testcase import MAASTestCase from testtools.matchers import FileExists class TestHTTPServerFixture(MAASTestCase): def setUp(self): super(TestHTTPServerFixture, self).setUp() self.useFixture(ProxiesDisabledFixture()) @skip( "XXX: bigjools 2013-09-13 bug=1224837: Causes intermittent failures") def test_init(self): host = gethostname() fixture = HTTPServerFixture(host=host) self.assertIsInstance(fixture.server, ThreadingHTTPServer) expected_url = "http://%s:%d/" % ( gethostbyname(host), fixture.server.server_port) self.assertEqual(expected_url, fixture.url) def test_use(self): filename = relpath(__file__) self.assertThat(filename, FileExists()) with HTTPServerFixture() as httpd: url = urljoin(httpd.url, filename) with closing(urlopen(url)) as http_in: http_data_in = http_in.read() with open(filename, "rb") as file_in: file_data_in = file_in.read() self.assertEqual( file_data_in, http_data_in, "The content of %s differs from %s." 
% (url, filename)) def ungzip(self, content): gz = gzip.GzipFile(fileobj=BytesIO(content)) return gz.read() def test_supports_gzip(self): filename = relpath(__file__) with HTTPServerFixture() as httpd: url = urljoin(httpd.url, filename) headers = {'Accept-Encoding': 'gzip, deflate'} request = Request(url, None, headers=headers) with closing(urlopen(request)) as http_in: http_headers = http_in.info() http_data_in = http_in.read() self.assertEqual('gzip', http_headers['Content-Encoding']) with open(filename, "rb") as file_in: file_data_in = file_in.read() http_data_decompressed = self.ungzip(http_data_in) self.assertEqual( file_data_in, http_data_decompressed, "The content of %s differs from %s." % (url, filename)) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_lint.py0000644000000000000000000000770113056115004021507 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Check there's no lint in the tree.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from difflib import unified_diff from itertools import ifilter from os import ( mkdir, walk, ) from os.path import ( join, relpath, ) from pipes import quote from shutil import ( copy2, rmtree, ) from subprocess import ( PIPE, Popen, STDOUT, ) from tempfile import mkdtemp from maastesting import root from maastesting.testcase import MAASTestCase from testtools.content import ( Content, UTF8_TEXT, ) class TestLint(MAASTestCase): def execute(self, *command): process = Popen(command, stdout=PIPE, stderr=STDOUT) output, _ = process.communicate() if len(output) != 0: name = "stdout/err from `%s`" % " ".join(map(quote, command)) self.addDetail(name, Content(UTF8_TEXT, lambda: [output])) self.assertEqual(0, process.wait(), "(return code is not zero)") def test_that_there_is_no_lint_in_the_tree(self): self.execute("make", "--quiet", "-C", root, "lint") def test_that_imports_are_formatted(self): # We're going to export all Python source code to a new, freshly # created, tree, then run `make format` in it. root_export = mkdtemp(prefix=".export.", dir=root) self.addCleanup(rmtree, root_export, ignore_errors=True) # Useful predicates. p_visible = lambda name: not name.startswith(".") p_is_python = lambda name: name.endswith(".py") # Copy all visible Python source files over. for dirpath, dirnames, filenames in walk(root): dirnames[:] = ifilter(p_visible, dirnames) dirpath_export = join(root_export, relpath(dirpath, start=root)) for dirname in dirnames: mkdir(join(dirpath_export, dirname)) for filename in ifilter(p_visible, filenames): if p_is_python(filename): src = join(dirpath, filename) dst = join(dirpath_export, filename) copy2(src, dst) # We'll need the Makefile and format-imports too. copy2(join(root, "Makefile"), root_export) copy2( join(root, "utilities", "format-imports"), join(root_export, "utilities", "format-imports")) # Format imports in the exported tree. self.execute("make", "--quiet", "-C", root_export, "format") # This will record a unified diff between the original source code and # the reformatted source code, should there be any. diff = [] # For each file in the export, compare it to its counterpart in the # original tree. 
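# Illustrative aside -- not part of the original test. difflib's
# unified_diff yields diff lines lazily from two lists of lines, which is
# why the loop below can simply extend one accumulator per file pair:
#
#     from difflib import unified_diff
#     a = ["import os\n"]
#     b = ["import sys\n"]
#     print("".join(unified_diff(a, b, "before.py", "after.py")))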
for dirpath, dirnames, filenames in walk(root_export): dirpath_relative = relpath(dirpath, start=root_export) dirpath_original = join(root, dirpath_relative) for filename in ifilter(p_is_python, filenames): filepath_original = join(dirpath_original, filename) with open(filepath_original, "rb") as file_original: file_lines_original = file_original.readlines() filepath_formatted = join(dirpath, filename) with open(filepath_formatted, "rb") as file_formatted: file_lines_formatted = file_formatted.readlines() diff.extend(unified_diff( file_lines_original, file_lines_formatted, filepath_original, filepath_formatted)) if len(diff) != 0: self.addDetail("diff", Content(UTF8_TEXT, lambda: diff)) self.fail( "Some imports are not formatted; see the diff for the " "missing changes. Use `make format` to address them.") maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_matchers.py0000644000000000000000000002720213056115004022345 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test matchers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting import matchers from maastesting.factory import factory from maastesting.matchers import ( GreaterThanOrEqual, HasAttribute, IsCallable, IsCallableMock, IsFiredDeferred, IsUnfiredDeferred, LessThanOrEqual, MockAnyCall, MockCalledOnce, MockCalledOnceWith, MockCalledWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import ( call, create_autospec, Mock, NonCallableMock, sentinel, ) from testtools.matchers import ( MatchesStructure, Mismatch, ) from twisted.internet import defer class TestIsCallable(MAASTestCase): def test_returns_none_when_matchee_is_callable(self): result = IsCallable().match(lambda: None) self.assertIs(None, result) def test_returns_mismatch_when_matchee_is_callable(self): result = IsCallable().match(1234) self.assertIsInstance(result, Mismatch) self.assertEqual( "1234 is not callable", result.describe()) def test_match_passes_through_to_callable_builtin(self): self.patch(matchers, "callable").return_value = True result = IsCallable().match(sentinel.function) matchers.callable.assert_called_once_with(sentinel.function) self.assertIs(None, result) def test_mismatch_passes_through_to_callable_builtin(self): self.patch(matchers, "callable").return_value = False result = IsCallable().match(sentinel.function) matchers.callable.assert_called_once_with(sentinel.function) self.assertIsInstance(result, Mismatch) self.assertEqual( "%r is not callable" % sentinel.function, result.describe()) class MockTestMixin: # Some matchers return a private MismatchDecorator object, which # does not descend from Mismatch, so we check the contract instead. 
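# Illustrative aside -- not part of the original module. The contract
# checked by MockTestMixin is the standard testtools one: match() returns
# None on success, or a mismatch exposing describe()/get_details(). A
# minimal conforming matcher looks like this:
#
#     from testtools.matchers import Matcher, Mismatch
#
#     class IsEven(Matcher):
#         def match(self, value):
#             if value % 2:
#                 return Mismatch('%r is not even' % value)
#             return None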
is_mismatch = MatchesStructure( describe=IsCallable(), get_details=IsCallable(), ) def assertMismatch(self, result, message): self.assertThat(result, self.is_mismatch) self.assertIn(message, result.describe()) class TestMockCalledWith(MAASTestCase, MockTestMixin): def test_returns_none_when_matches(self): mock = Mock() mock(1, 2, frob=5, nob=6) matcher = MockCalledWith(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_does_not_match(self): mock = Mock() mock(1, 2, a=5) matcher = MockCalledWith(9, 2, a=5) result = matcher.match(mock) self.assertMismatch(result, "Expected call:") def test_str(self): matcher = MockCalledWith(1, a=2) self.assertEqual( "MockCalledWith(args=(1,), kwargs={'a': 2})", matcher.__str__()) class TestMockCalledOnceWith(MAASTestCase, MockTestMixin): def test_returns_none_when_matches(self): mock = Mock() mock(1, 2, frob=5, nob=6) matcher = MockCalledOnceWith(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_multiple_calls(self): mock = Mock() mock(1, 2, frob=5, nob=6) mock(1, 2, frob=5, nob=6) matcher = MockCalledOnceWith(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertMismatch(result, "Expected to be called once") def test_returns_mismatch_when_single_call_does_not_match(self): mock = Mock() mock(1, 2, a=5) matcher = MockCalledOnceWith(9, 2, a=5) result = matcher.match(mock) self.assertMismatch(result, "Expected call:") def test_str(self): matcher = MockCalledOnceWith(1, a=2) self.assertEqual( "MockCalledOnceWith(args=(1,), kwargs={'a': 2})", matcher.__str__()) class TestMockCalledOnce(MAASTestCase, MockTestMixin): def test_returns_none_when_matches(self): mock = Mock() mock(1, 2, frob=5, nob=6) matcher = MockCalledOnce() result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_multiple_calls(self): mock = Mock() mock(1, 2, frob=5, nob=6) mock(1, 2, frob=5, nob=6) matcher = MockCalledOnce() result = matcher.match(mock) self.assertMismatch( result, "Expected to be called once. Called 2 times.") def test_returns_mismatch_when_zero_calls(self): mock = Mock() matcher = MockCalledOnce() result = matcher.match(mock) self.assertMismatch( result, "Expected to be called once. 
Called 0 times.") def test_str(self): matcher = MockCalledOnce() self.assertEqual( "MockCalledOnce", matcher.__str__()) class TestMockAnyCall(MAASTestCase, MockTestMixin): def test_returns_none_when_matches(self): mock = Mock() mock(1, 2, frob=5, nob=6) matcher = MockAnyCall(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertIsNone(result) def test_returns_none_when_multiple_calls(self): mock = Mock() mock(1, 2, frob=5, nob=6) mock(1, 2, frob=5, nob=6) matcher = MockAnyCall(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_call_does_not_match(self): mock = Mock() mock(1, 2, a=5) matcher = MockAnyCall(1, 2, frob=5, nob=6) result = matcher.match(mock) self.assertMismatch(result, "call not found") class TestMockCallsMatch(MAASTestCase, MockTestMixin): def test_returns_none_when_matches(self): mock = Mock() mock(1, 2, frob=5, nob=6) matcher = MockCallsMatch(call(1, 2, frob=5, nob=6)) result = matcher.match(mock) self.assertIsNone(result) def test_returns_none_when_multiple_calls(self): mock = Mock() mock(1, 2, frob=5, nob=6) mock(1, 2, frob=5, nob=6) matcher = MockCallsMatch( call(1, 2, frob=5, nob=6), call(1, 2, frob=5, nob=6)) result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_calls_do_not_match(self): mock = Mock() mock(1, 2, a=5) mock(3, 4, a=5) matcher = MockCallsMatch( call(1, 2, a=5), call(3, 4, a="bogus")) result = matcher.match(mock) self.assertMismatch(result, "calls do not match") def test_has_useful_string_representation(self): matcher = MockCallsMatch( call(1, 2, a=3), call(4, 5, a=6)) self.assertEqual( "MockCallsMatch([call(1, 2, a=3), call(4, 5, a=6)])", matcher.__str__()) class TestMockNotCalled(MAASTestCase, MockTestMixin): def test_returns_none_mock_has_not_been_called(self): mock = Mock() matcher = MockNotCalled() result = matcher.match(mock) self.assertIsNone(result) def test_returns_mismatch_when_mock_has_been_called(self): mock = Mock() mock(1, 2, a=5) matcher = MockNotCalled() result = matcher.match(mock) self.assertMismatch(result, "mock has been called") def test_has_useful_string_representation(self): matcher = MockNotCalled() self.assertEqual("MockNotCalled", matcher.__str__()) class TestHasAttribute(MAASTestCase, MockTestMixin): def test__returns_none_if_attribute_exists(self): attribute = factory.make_string(3, prefix="attr") setattr(self, attribute, factory.make_name("value")) matcher = HasAttribute(attribute) result = matcher.match(self) self.assertIsNone(result) def test__returns_mismatch_if_attribute_does_not_exist(self): attribute = factory.make_string(3, prefix="attr") matcher = HasAttribute(attribute) result = matcher.match(self) self.assertMismatch( result, " does not have a %r attribute" % attribute) class TestIsCallableMock(MAASTestCase, MockTestMixin): def test__returns_none_when_its_a_callable_mock(self): mock = Mock() matcher = IsCallableMock() result = matcher.match(mock) self.assertIsNone(result) def test__returns_none_when_its_a_callable_autospec(self): mock = create_autospec(lambda: None) matcher = IsCallableMock() result = matcher.match(mock) self.assertIsNone(result) def test__returns_mismatch_when_its_a_non_callable_mock(self): mock = NonCallableMock() matcher = IsCallableMock() result = matcher.match(mock) self.assertMismatch( result, " is not callable") def test__returns_mismatch_when_its_a_non_callable_autospec(self): mock = create_autospec(None) matcher = IsCallableMock() result = matcher.match(mock) self.assertMismatch( result, " is not 
callable") def test__returns_mismatch_when_its_a_non_callable_object(self): matcher = IsCallableMock() result = matcher.match(object()) self.assertMismatch( result, " is not callable") class TestIsFiredDeferred(MAASTestCase, MockTestMixin): def test__matches_fired_deferred(self): d = defer.Deferred() d.callback(None) self.assertThat(d, IsFiredDeferred()) def test__does_not_match_unfired_deferred(self): d = defer.Deferred() self.assertMismatch( IsFiredDeferred().match(d), " has not been called") def test__does_not_match_non_deferred(self): self.assertMismatch( IsFiredDeferred().match(object()), " is not a Deferred") class TestIsUnfiredDeferred(MAASTestCase, MockTestMixin): def test__matches_unfired_deferred(self): d = defer.Deferred() self.assertThat(d, IsUnfiredDeferred()) def test__does_not_match_fired_deferred(self): d = defer.Deferred() d.callback(None) self.assertMismatch( IsUnfiredDeferred().match(d), " has been called (result=None)") def test__does_not_match_non_deferred(self): self.assertMismatch( IsUnfiredDeferred().match(object()), " is not a Deferred") class TestGreaterThanOrEqual(MAASTestCase, MockTestMixin): def test__matches_greater_than(self): self.assertThat(5, GreaterThanOrEqual(4)) self.assertThat("bbb", GreaterThanOrEqual("aaa")) def test__matches_equal_to(self): self.assertThat(5, GreaterThanOrEqual(5)) self.assertThat("bbb", GreaterThanOrEqual("bbb")) def test__does_not_match_less_than(self): self.assertMismatch( GreaterThanOrEqual(6).match(5), "Differences:") self.assertMismatch( GreaterThanOrEqual("ccc").match("bbb"), "Differences:") class TestLessThanOrEqual(MAASTestCase, MockTestMixin): def test__matches_less_than(self): self.assertThat(5, LessThanOrEqual(6)) self.assertThat("bbb", LessThanOrEqual("ccc")) def test__matches_equal_to(self): self.assertThat(5, LessThanOrEqual(5)) self.assertThat("bbb", LessThanOrEqual("bbb")) def test__does_not_match_greater_than(self): self.assertMismatch( LessThanOrEqual(4).match(5), "Differences:") self.assertMismatch( LessThanOrEqual("aaa").match("bbb"), "Differences:") maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_noseplug.py0000644000000000000000000001162713056115004022377 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maastesting.noseplug`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from optparse import OptionParser from os import makedirs from os.path import ( dirname, join, ) import crochet as crochet_module from maastesting import noseplug from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.noseplug import ( Crochet, Select, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, sentinel, ) from testtools.matchers import ( Equals, IsInstance, MatchesListwise, MatchesSetwise, MatchesStructure, ) from twisted.python.filepath import FilePath class TestCrochet(MAASTestCase): def test__options_adds_options(self): crochet = Crochet() parser = OptionParser() crochet.options(parser=parser, env={}) self.assertThat( parser.option_list[-1:], MatchesListwise([ # The --with-crochet option. 
MatchesStructure.byEquality( action="store_true", default=None, dest="enable_plugin_crochet", ), ])) def test__configure_sets_up_crochet_if_enabled(self): self.patch_autospec(crochet_module, "setup") crochet = Crochet() parser = OptionParser() crochet.add_options(parser=parser, env={}) options, rest = parser.parse_args(["--with-crochet"]) crochet.configure(options, sentinel.conf) self.assertThat(crochet_module.setup, MockCalledOnceWith()) def test__configure_does_not_set_up_crochet_if_not_enabled(self): self.patch_autospec(crochet_module, "setup") crochet = Crochet() parser = OptionParser() crochet.add_options(parser=parser, env={}) options, rest = parser.parse_args([]) crochet.configure(options, sentinel.conf) self.assertThat(crochet_module.setup, MockNotCalled()) class TestSelect(MAASTestCase): def test__create_has_dirs(self): select = Select() self.assertThat( select, MatchesStructure.byEquality(dirs=frozenset())) def test__options_adds_options(self): select = Select() parser = OptionParser() select.options(parser=parser, env={}) self.assertThat( parser.option_list[-2:], MatchesListwise([ # The --with-select option. MatchesStructure.byEquality( action="store_true", default=None, dest="enable_plugin_select", ), # The --select-dir/--select-directory option. MatchesStructure.byEquality( action="append", default=[], dest="select_dirs", metavar="DIR", type="string", _short_opts=[], _long_opts=["--select-dir", "--select-directory"], ) ])) def test__configure_scans_directories(self): directory = self.make_dir() segments = factory.make_name("child"), factory.make_name("grandchild") makedirs(join(directory, *segments)) select = Select() parser = OptionParser() select.add_options(parser=parser, env={}) options, rest = parser.parse_args( ["--with-select", "--select-dir", directory]) select.configure(options, sentinel.conf) leaf = FilePath(directory).descendant(segments) expected_dirs = {leaf} expected_dirs.update(leaf.parents()) self.assertThat(select.dirs, Equals( set(fp.path for fp in expected_dirs))) def test__wantDirectory_checks_dirs_and_thats_it(self): directory = self.make_dir() segments = factory.make_name("child"), factory.make_name("grandchild") makedirs(join(directory, *segments)) select = Select() self.assertFalse(select.wantDirectory(directory)) select.addDirectory(directory) self.assertTrue(select.wantDirectory(directory)) self.assertTrue(select.wantDirectory(join(directory, *segments))) self.assertTrue(select.wantDirectory(dirname(directory))) self.assertFalse(select.wantDirectory( join(directory, factory.make_name("other-child")))) class TestMain(MAASTestCase): def test__sets_addplugins(self): self.patch_autospec(noseplug, "TestProgram") noseplug.main() self.assertThat( noseplug.TestProgram, MockCalledOnceWith(addplugins=[ANY, ANY])) plugins = noseplug.TestProgram.call_args[1]["addplugins"] self.assertThat(plugins, MatchesSetwise( IsInstance(Select), IsInstance(Crochet))) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_package.py0000644000000000000000000000424213056115004022131 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
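# Illustrative aside -- not part of the original module. warn_explicit
# lets a test attribute a warning to an arbitrary module name, which is
# how these scenarios exercise the per-package warning filters:
#
#     from warnings import catch_warnings, simplefilter, warn_explicit
#     with catch_warnings(record=True) as log:
#         simplefilter("always")
#         warn_explicit("boom", DeprecationWarning, "file.py", 1,
#                       module="maastesting")
#     assert log[0].category is DeprecationWarning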
"""Tests for the `maastesting` package.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from warnings import ( catch_warnings, warn_explicit, ) import maastesting from maastesting.factory import factory from maastesting.testcase import MAASTestCase from testtools.matchers import ( Equals, IsInstance, MatchesAll, MatchesListwise, MatchesStructure, ) class TestWarnings(MAASTestCase): scenarios = sorted( (package_name, dict(package_name=package_name)) for package_name in maastesting.packages ) def test_pattern_matches_package(self): self.assertRegexpMatches( self.package_name, maastesting.packages_expr) def test_pattern_matches_subpackage(self): self.assertRegexpMatches( self.package_name + ".foo", maastesting.packages_expr) def warn(self, message, category): warn_explicit( message, category=category, filename=factory.make_name("file"), lineno=randint(1, 1000), module=self.package_name) def assertWarningsEnabled(self, category): message = "%s from %s" % (category.__name__, self.package_name) with catch_warnings(record=True) as log: self.warn(message, category=category) self.assertThat(log, MatchesListwise([ MatchesStructure( message=MatchesAll( IsInstance(category), MatchesStructure.byEquality(args=(message,)), ), category=Equals(category), ), ])) def test_BytesWarning_enabled(self): self.assertRaises( BytesWarning, self.warn, factory.make_name("message"), category=BytesWarning) def test_DeprecationWarning_enabled(self): self.assertWarningsEnabled(DeprecationWarning) def test_ImportWarning_enabled(self): self.assertWarningsEnabled(ImportWarning) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_runtest.py0000644000000000000000000000267113056115004022246 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maastesting.runtest`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.matchers import DocTestMatches from maastesting.runtest import ( MAASRunTest, MAASTwistedRunTest, ) from maastesting.testcase import MAASTestCase from testtools import TestCase from testtools.matchers import ( HasLength, Is, MatchesListwise, ) class TestExecutors(MAASTestCase): """Tests for `MAASRunTest` and `MAASTwistedRunTest`.""" scenarios = ( ("MAASRunTest", {"executor": MAASRunTest}), ("MAASTwistedRunTest", {"executor": MAASTwistedRunTest}), ) def test_catches_generator_tests(self): class BrokenTests(TestCase): run_tests_with = self.executor def test(self): yield None test = BrokenTests("test") result = test.run() self.assertThat(result.errors, HasLength(1)) self.assertThat(result.errors[0], MatchesListwise(( Is(test), DocTestMatches( """... InvalidTest: Test returned a generator. Should it be decorated with inlineCallbacks? """ ), ))) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_scenarios.py0000644000000000000000000000337413056115004022531 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `maastesting.scenarios`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import unittest from maastesting.scenarios import WithScenarios from maastesting.testcase import MAASTestCase class TestWithScenarios(MAASTestCase): def test_scenarios_applied(self): # Scenarios are applied correctly when a test is called via __call__() # instead of run(). events = [] class Test(WithScenarios, unittest.TestCase): scenarios = [ ("one", dict(token="one")), ("two", dict(token="two")), ] def test(self): events.append(self.token) test = Test("test") test.__call__() self.assertEqual(["one", "two"], events) def test_scenarios_applied_by_call(self): # Scenarios are applied by __call__() when it is called first, and not # by run(). events = [] class Test(WithScenarios, unittest.TestCase): scenarios = [ ("one", dict(token="one")), ("two", dict(token="two")), ] def test(self): events.append(self.token) def run(self, result=None): # Call-up right past WithScenarios.run() to show that it is # not responsible for applying scenarios, and __call__() is. super(WithScenarios, self).run(result) test = Test("test") test.__call__() self.assertEqual(["one", "two"], events) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_scss.py0000644000000000000000000000422313056115004021510 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Check the current generated css matches generated css.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from pipes import quote from subprocess import ( PIPE, Popen, STDOUT, ) from maastesting import root from maastesting.testcase import MAASTestCase from testtools.content import ( Content, UTF8_TEXT, ) class TestCompiledSCSS(MAASTestCase): def execute(self, *command): process = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=root) output, _ = process.communicate() if len(output) != 0: name = "stdout/err from `%s`" % " ".join(map(quote, command)) self.addDetail(name, Content(UTF8_TEXT, lambda: [output])) self.assertEqual(0, process.wait(), "failed to compile css.") def read_content(self, filename): with open(filename, "rb") as stream: return stream.read() def test_css_up_to_date(self): """ In-tree compiled CSS must match SCSS compilation. """ in_tree_css_path = os.path.join( root, "src", "maasserver", "static", "css", "maas-styles.css") self.assertIs( os.path.exists(in_tree_css_path), True, "maas-styles.css is missing.") # Compile the scss into css into a temp directory. output_dir = self.make_dir() self.execute( "bin/sass", "--include-path=src/maasserver/static/scss", "--output-style", "compressed", "src/maasserver/static/scss/maas-styles.scss", "-o", output_dir) # Content should be equal. Doesn't use assertEquals so the error # output doesn't contain the contents. in_tree_css = self.read_content(in_tree_css_path) tmp_css = self.read_content( os.path.join(output_dir, "maas-styles.css")) if in_tree_css != tmp_css: self.fail("maas-styles.css is out-of-date. (run 'make styles')") maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_testcase.py0000644000000000000000000001014213056115004022345 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `MAASTestCase`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os.path from shutil import rmtree from tempfile import mkdtemp from maastesting.factory import factory from maastesting.matchers import ( IsCallableMock, MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import ( call, MagicMock, sentinel, ) import mock as mock_module from testtools.matchers import ( DirExists, FileExists, ) class TestTestCase(MAASTestCase): """Tests the base `MAASTestCase` facilities.""" def test_make_dir_creates_directory(self): self.assertThat(self.make_dir(), DirExists()) def test_make_dir_creates_temporary_directory(self): other_temp_dir = mkdtemp() self.addCleanup(rmtree, other_temp_dir) other_temp_root, other_subdir = os.path.split(other_temp_dir) temp_root, subdir = os.path.split(self.make_dir()) self.assertEqual(other_temp_root, temp_root) self.assertNotIn(subdir, [b'', u'', None]) def test_make_dir_creates_one_directory_per_call(self): self.assertNotEqual(self.make_dir(), self.make_dir()) def test_make_file_creates_file(self): self.assertThat(self.make_file(), FileExists()) def test_make_file_uses_temporary_directory(self): directory = self.make_dir() self.patch(self, 'make_dir', lambda: directory) dir_part, file_part = os.path.split(self.make_file()) self.assertEqual(directory, dir_part) def test_patch_can_mock(self): # The patch method patches-in and returns a new MagicMock() instance # if no attribute value is given. attribute_name = factory.make_name("attribute") self.assertRaises(AttributeError, getattr, self, attribute_name) attribute = self.patch(self, attribute_name) self.assertIs(getattr(self, attribute_name), attribute) self.assertIsInstance(attribute, MagicMock) def method_to_be_patched(self, a, b): return sentinel.method_to_be_patched def test_patch_autospec_creates_autospec_from_target(self): # Grab a reference to this now. method_to_be_patched = self.method_to_be_patched # It's simpler to test that create_autospec has been called than it is # to test the result of calling it; mock does some clever things to do # what it does that make comparisons hard. create_autospec = self.patch(mock_module, "create_autospec") create_autospec.return_value = sentinel.autospec method_to_be_patched_autospec = self.patch_autospec( self, "method_to_be_patched", spec_set=sentinel.spec_set, instance=sentinel.instance) self.assertIs(sentinel.autospec, method_to_be_patched_autospec) self.assertIs(sentinel.autospec, self.method_to_be_patched) self.assertThat( create_autospec, MockCalledOnceWith( method_to_be_patched, sentinel.spec_set, sentinel.instance)) def test_patch_autospec_really_leaves_an_autospec_behind(self): self.patch_autospec(self, "method_to_be_patched") # The patched method is now a callable mock. self.assertThat(self.method_to_be_patched, IsCallableMock()) # The patched method can be called with positional or keyword # arguments. self.method_to_be_patched(1, 2) self.method_to_be_patched(3, b=4) self.method_to_be_patched(a=5, b=6) self.assertThat(self.method_to_be_patched, MockCallsMatch( call(1, 2), call(3, b=4), call(a=5, b=6))) # Calling the patched method with unrecognised arguments or not # enough arguments results in an exception. 
self.assertRaises(TypeError, self.method_to_be_patched, c=7) self.assertRaises(TypeError, self.method_to_be_patched, 8) self.assertRaises(TypeError, self.method_to_be_patched, b=9) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_utils.py0000644000000000000000000000162713056115004021702 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for testing helpers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.testcase import MAASTestCase from maastesting.utils import extract_word_list class TestFunctions(MAASTestCase): def test_extract_word_list(self): expected = { "one 2": ["one", "2"], ", one ; 2": ["one", "2"], "one,2": ["one", "2"], "one;2": ["one", "2"], "\none\t 2;": ["one", "2"], "\none-two\t 3;": ["one-two", "3"], } observed = { string: extract_word_list(string) for string in expected } self.assertEqual(expected, observed) maas-1.9.5+bzr4599.orig/src/maastesting/tests/test_yui3.py0000644000000000000000000001050313056115004021424 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `maasserver.testing.yui3`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.testcase import MAASTestCase from maastesting.yui3 import ( extract_tests, gen_failed_test_messages, get_failed_tests_message, ) from nose.tools import nottest # Nose is over-zealous. nottest(extract_tests) nottest(gen_failed_test_messages) nottest(get_failed_tests_message) # From http://yuilibrary.com/yui/docs/test/#testsuite-level-events example_results = { 'failed': 3, 'ignored': 0, 'name': 'Test Suite 0', 'passed': 3, 'testCase0': { 'failed': 1, 'ignored': 0, 'name': 'testCase0', 'passed': 1, 'test0': { 'message': 'Test passed.', 'name': 'test0', 'result': 'pass', 'type': 'test', }, 'test1': { 'message': 'Assertion failed.', 'name': 'test1', 'result': 'fail', 'type': 'test', }, 'total': 2, 'type': 'testcase', }, 'testCase1': { 'failed': 1, 'ignored': 0, 'name': 'testCase1', 'passed': 1, 'test0': { 'message': 'Test passed.', 'name': 'test0', 'result': 'pass', 'type': 'test', }, 'test1': { 'message': 'Assertion failed.', 'name': 'test1', 'result': 'fail', 'type': 'test', }, 'total': 2, 'type': 'testcase', }, 'testSuite0': { 'failed': 1, 'ignored': 0, 'name': 'testSuite0', 'passed': 1, 'testCase2': { 'failed': 1, 'ignored': 0, 'name': 'testCase2', 'passed': 1, 'test0': { 'message': 'Test passed.', 'name': 'test0', 'result': 'pass', 'type': 'test', }, 'test1': { 'message': 'Assertion failed.', 'name': 'test1', 'result': 'fail', 'type': 'test', }, 'total': 2, 'type': 'testcase'}, 'total': 2, 'type': 'testsuite'}, 'total': 6, 'type': 'testsuite', } class TestFunctions(MAASTestCase): def test_extract_tests_names(self): expected_names = { "testCase0.test0", "testCase0.test1", "testCase1.test0", "testCase1.test1", "testSuite0.testCase2.test0", "testSuite0.testCase2.test1", } observed_tests = extract_tests(example_results) observed_test_names = set(observed_tests) self.assertSetEqual(expected_names, observed_test_names) def test_extract_tests(self): expected_results = { "testCase0.test0": "pass", "testCase0.test1": "fail", "testCase1.test0": "pass", "testCase1.test1": "fail", "testSuite0.testCase2.test0": "pass", 
"testSuite0.testCase2.test1": "fail", } observed_results = { name: test["result"] for name, test in extract_tests(example_results).items() } self.assertDictEqual(expected_results, observed_results) def test_gen_failed_test_messages(self): expected_messages = { "testCase0.test1: Assertion failed.", "testCase1.test1: Assertion failed.", "testSuite0.testCase2.test1: Assertion failed.", } observed_messages = gen_failed_test_messages(example_results) self.assertSetEqual(expected_messages, set(observed_messages)) def test_get_failed_tests_message(self): expected_message = ( "testCase0.test1: Assertion failed." "\n\n" "testCase1.test1: Assertion failed." "\n\n" "testSuite0.testCase2.test1: Assertion failed." ) observed_message = get_failed_tests_message(example_results) self.assertEqual(expected_message, observed_message) maas-1.9.5+bzr4599.orig/src/metadataserver/__init__.py0000644000000000000000000000154313056115004020565 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Metadata service application.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'DefaultMeta', 'logger', ] import logging logger = logging.getLogger("metadataserver") class DefaultMeta: """Base class for model `Meta` classes in the metadataserver app. Each model in the models package outside of __init__.py needs a nested `Meta` class that defines `app_label`. Otherwise, South won't recognize the model and will fail to generate schema migrations for it. """ app_label = 'metadataserver' try: import maasfascist maasfascist # Silence lint. except ImportError: pass maas-1.9.5+bzr4599.orig/src/metadataserver/address.py0000644000000000000000000000657313056115004020463 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Figure out server address for the maas_url setting.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'guess_server_host', ] from os import environ import re import socket from subprocess import check_output from metadataserver import logger from provisioningserver.utils.network import get_all_addresses_for_interface # fcntl operation as defined in . This is GNU/Linux-specific! SIOCGIFADDR = 0x8915 def get_command_output(*command_line): """Execute a command line, and return its output. Raises an exception if return value is nonzero. :param *command_line: Words for the command line. No shell expansions are performed. :type *command_line: Sequence of unicode. :return: Output from the command. :rtype: List of unicode, one per line. """ env = { variable: value for variable, value in environ.items() if not variable.startswith('LC_') } env.update({ 'LC_ALL': 'C', 'LANG': 'en_US.UTF-8', }) return check_output(command_line, env=env).splitlines() def find_default_interface(ip_route_output): """Find the network interface used for the system's default route. If no default is found, makes a guess. :param ip_route_output: Output lines from "ip route show" output. :type ip_route_output: Sequence of unicode. :return: unicode, or None. """ route_lines = list(ip_route_output) for line in route_lines: match = re.match('default\s+.*\sdev\s+([^\s]+)', line) if match is not None: return match.groups()[0] # Still nothing? 
Try the first recognizable interface in the list. for line in route_lines: match = re.match('\s*(?:\S+\s+)*dev\s+([^\s]+)', line) if match is not None: return match.groups()[0] return None def get_ip_address(interface): """Get the first IP address for a given network interface. :return: An `IPAddress` instance for the first IP address on the interface. If the interface has both IPv4 and IPv6 addresses, the v4 address will be preferred. Otherwise the returned address will be the first result of a sort on the set of addresses on the interface. """ try: # get_all_addresses_for_interface yields IPAddress instances. # When sorted, IPAddress guarantees that IPv4 addresses will # sort before IPv6, so we just return the first address that # we've found. all_addresses = sorted(get_all_addresses_for_interface(interface)) return all_addresses[0] except Exception as e: logger.warn( "Could not determine address for apparent default interface " "%s (%s)" % (interface, e)) return None def guess_server_host(): """Make a guess as to this server's IP address or hostname. :return: IP address or hostname. :rtype: unicode """ ip_route_output = get_command_output( '/bin/ip', '-oneline', 'route', 'show') interface = find_default_interface(ip_route_output) if interface is None: return socket.gethostname() else: return get_ip_address(interface) maas-1.9.5+bzr4599.orig/src/metadataserver/admin.py0000644000000000000000000000167713056115004020126 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Register models with Django. We used to do this directly from `models/__init__.py`, as a side effect of importing from that package, but it led to `AlreadyRegistered` errors when running some tests in isolation (even when the import only happened once). Django automatically discovers the `admin` module and ensures that models are only registered once. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib import admin from django.db.models import ( get_app, get_models, ) # Register models in the admin site. When the DEBUG setting is enabled, the # webapp will serve an administrator UI at /admin. for model in get_models(get_app('metadataserver')): admin.site.register(model) maas-1.9.5+bzr4599.orig/src/metadataserver/api.py0000644000000000000000000007115013056115004017600 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
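# --- Editorial example (not part of the MAAS tree) --------------------------
# A self-contained re-run of the default-route parsing that address.py
# implements above. The regexes mirror find_default_interface(); the canned
# `ip route show` output below is invented for illustration.
import re

SAMPLE_IP_ROUTE = [
    "10.0.0.0/24 dev eth1  proto kernel  scope link  src 10.0.0.7",
    "default via 192.168.1.1 dev eth0  proto static",
]


def demo_find_default_interface(ip_route_output):
    # First preference: the interface named on the default route.
    for line in ip_route_output:
        match = re.match(r'default\s+.*\sdev\s+([^\s]+)', line)
        if match is not None:
            return match.groups()[0]
    # Fallback: the first line that names any device at all.
    for line in ip_route_output:
        match = re.match(r'\s*(?:\S+\s+)*dev\s+([^\s]+)', line)
        if match is not None:
            return match.groups()[0]
    return None


assert demo_find_default_interface(SAMPLE_IP_ROUTE) == "eth0"
# -----------------------------------------------------------------------------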
"""Metadata API.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'AnonMetaDataHandler', 'CommissioningScriptsHandler', 'CurtinUserDataHandler', 'IndexHandler', 'MetaDataHandler', 'UserDataHandler', 'VersionIndexHandler', ] import base64 import bz2 import httplib import json from django.conf import settings from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.shortcuts import get_object_or_404 from maasserver.api.nodes import store_node_power_parameters from maasserver.api.support import ( operation, OperationsHandler, ) from maasserver.api.utils import ( extract_oauth_key, get_mandatory_param, ) from maasserver.enum import ( NODE_STATUS, NODE_STATUS_CHOICES_DICT, ) from maasserver.exceptions import ( MAASAPIBadRequest, MAASAPINotFound, NodeStateViolation, ) from maasserver.models import ( Interface, Node, SSHKey, SSLKey, ) from maasserver.models.event import Event from maasserver.models.tag import Tag from maasserver.populate_tags import populate_tags_for_single_node from maasserver.preseed import ( get_curtin_userdata, get_enlist_preseed, get_enlist_userdata, get_preseed, ) from maasserver.utils import find_nodegroup from maasserver.utils.orm import get_one from metadataserver import logger from metadataserver.enum import ( RESULT_TYPE, SIGNAL_STATUS, ) from metadataserver.fields import Bin from metadataserver.models import ( CommissioningScript, NodeKey, NodeResult, NodeUserData, ) from metadataserver.models.commissioningscript import ( BUILTIN_COMMISSIONING_SCRIPTS, ) from metadataserver.user_data import poweroff from piston.utils import rc from provisioningserver.events import ( EVENT_DETAILS, EVENT_TYPES, ) class UnknownMetadataVersion(MAASAPINotFound): """Not a known metadata version.""" class UnknownNode(MAASAPINotFound): """Not a known node.""" def get_node_for_request(request): """Return the `Node` that `request` queries metadata for. For this form of access, a node can only query its own metadata. Thus the oauth key used to authenticate the request must belong to the same node that is being queried. Any request that is not made by an authenticated node will be denied. """ key = extract_oauth_key(request) try: return NodeKey.objects.get_node_for_key(key) except NodeKey.DoesNotExist: raise PermissionDenied("Not authenticated as a known node.") def get_node_for_mac(mac): """Identify node being queried based on its MAC address. This form of access is a security hazard, and thus it is permitted only on development systems where ALLOW_UNSAFE_METADATA_ACCESS is enabled. """ if not settings.ALLOW_UNSAFE_METADATA_ACCESS: raise PermissionDenied( "Unauthenticated metadata access is not allowed on this MAAS.") match = get_one(Interface.objects.filter(mac_address=mac)) if match is None: raise MAASAPINotFound() return match.node def get_queried_node(request, for_mac=None): """Identify and authorize the node whose metadata is being queried. :param request: HTTP request. In normal usage, this is authenticated with an oauth key; the key maps to the querying node, and the querying node always queries itself. :param for_mac: Optional MAC address for the node being queried. If this is given, and anonymous metadata access is enabled (do in development environments only!) then the node is looked up by its MAC address. :return: The :class:`Node` whose metadata is being queried. """ if for_mac is None: # Identify node, and authorize access, by oauth key. 
return get_node_for_request(request) else: # Access keyed by MAC address. return get_node_for_mac(for_mac) def make_text_response(contents): """Create a response containing `contents` as plain text.""" return HttpResponse(contents, mimetype='text/plain') def make_list_response(items): """Create an `HttpResponse` listing `items`, one per line.""" return make_text_response('\n'.join(items)) def check_version(version): """Check that `version` is a supported metadata version.""" if version not in ('latest', '2012-03-01'): raise UnknownMetadataVersion("Unknown metadata version: %s" % version) def add_event_to_node_event_log(node, origin, action, description): """Add an entry to the node's event log.""" if node.status == NODE_STATUS.COMMISSIONING: type_name = EVENT_TYPES.NODE_COMMISSIONING_EVENT elif node.status == NODE_STATUS.DEPLOYING: type_name = EVENT_TYPES.NODE_INSTALL_EVENT else: type_name = EVENT_TYPES.NODE_STATUS_EVENT event_details = EVENT_DETAILS[type_name] return Event.objects.register_event_and_event_type( node.system_id, type_name, type_level=event_details.level, type_description=event_details.description, event_action=action, event_description="'%s' %s" % (origin, description)) class MetadataViewHandler(OperationsHandler): create = update = delete = None def read(self, request, mac=None): return make_list_response(sorted(self.fields)) class IndexHandler(MetadataViewHandler): """Top-level metadata listing.""" fields = ('latest', '2012-03-01') class StatusHandler(MetadataViewHandler): read = update = delete = None def create(self, request, system_id): """Receive and process a status message from a node. A node can call this to report progress of its commissioning/installation process to the metadata server. Calling this from a node that is not Allocated, Commissioning, Ready, or Failed Tests will update the substatus_message node attribute. Signaling completion more than once is not an error; all but the first successful call are ignored. This method accepts a single JSON-encoded object payload, described as follows. { "event_type": "finish", "origin": "curtin", "description": "Finished XYZ", "name": "cmd-install", "result": "SUCCESS", "files": [ { "name": "logs.tgz", "encoding": "base64", "content": "QXVnIDI1IDA3OjE3OjAxIG1hYXMtZGV2... }, { "name": "results.log", "compression": "bzip2" "encoding": "base64", "content": "AAAAAAAAAAAAAAAAAAAAAAA... } ] } `event_type` can be "start", "progress" or "finish". `origin` tells us the program that originated the call. `description` is a human-readable, operator-friendly string that conveys what is being done to the node and that can be presented on the web UI. `name` is the name of the activity that's being executed. It's meaningful to the calling program and is a slash-separated path. We are mainly concerned with top-level events (no slashes), which are used to change the status of the node. `result` can be "SUCCESS" or "FAILURE" indicating whether the activity was successful or not. `files`, when present, contains one or more files. The attribute `path` tells us the name of the file, `compression` tells the compression we used before applying the `encoding` and content is the encoded data from the file. If the file being sent is the result of the execution of a script, the `result` key will hold its value. If `result` is not sent, it is interpreted as zero. """ def _retrieve_content(compression, encoding, content): """Extract the content of the sent file.""" # Select the appropriate decompressor. 
if compression is None: decompress = lambda s: s elif compression == 'bzip2': decompress = bz2.decompress else: raise MAASAPIBadRequest( 'Invalid compression: %s' % sent_file['compression']) # Select the appropriate decoder. if encoding == 'base64': decode = base64.decodestring else: raise MAASAPIBadRequest( 'Invalid encoding: %s' % sent_file['encoding']) return decompress(decode(sent_file['content'])) def _save_commissioning_result(node, path, exit_status, content): # Depending on the name of the file received, we need to invoke a # function to process it. if sent_file['path'] in BUILTIN_COMMISSIONING_SCRIPTS: postprocess_hook = BUILTIN_COMMISSIONING_SCRIPTS[path]['hook'] postprocess_hook( node=node, output=content, exit_status=exit_status) return NodeResult.objects.store_data( node, path, script_result=exit_status, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(content)) def _save_installation_result(node, path, content): return NodeResult.objects.store_data( node, path, script_result=0, result_type=RESULT_TYPE.INSTALLATION, data=Bin(content)) def _is_top_level(activity_name): """Top-level events do not have slashes in their names.""" return '/' not in activity_name node = get_queried_node(request) payload = request.read() try: message = json.loads(payload) except ValueError: message = "Status payload is not valid JSON:\n%s\n\n" % payload logger.error(message) raise MAASAPIBadRequest(message) # Mandatory attributes. try: event_type = message['event_type'] origin = message['origin'] activity_name = message['name'] description = message['description'] except KeyError: message = 'Missing parameter in status message %s' % payload logger.error(message) raise MAASAPIBadRequest(message) # Optional attributes. result = message.get('result') # Add this event to the node event log. add_event_to_node_event_log(node, origin, activity_name, description) # Save attached files, if any. for sent_file in message.get('files', []): content = _retrieve_content( compression=sent_file.get('compression'), encoding=sent_file['encoding'], content=sent_file['content']) # Set the result type according to the node's status. if node.status == NODE_STATUS.COMMISSIONING: _save_commissioning_result( node, sent_file['path'], sent_file.get('result', 0), content) elif node.status == NODE_STATUS.DEPLOYING: _save_installation_result(node, sent_file['path'], content) else: raise MAASAPIBadRequest( "Invalid status for saving files: %d" % node.status) # At the end of a top-level event, we change the node status. if _is_top_level(activity_name) and event_type == 'finish': if node.status == NODE_STATUS.COMMISSIONING: # Ensure that any IP leases are forcefully released in case # the host didn't bother doing that. node.release_leases() node.stop_transition_monitor() if result == 'SUCCESS': # Recalculate tags. populate_tags_for_single_node(Tag.objects.all(), node) # Set up the default storage layout and the initial # networking configuration for the node. node.set_default_storage_layout() node.set_initial_networking_configuration() elif result in ['FAIL', 'FAILURE']: node.status = NODE_STATUS.FAILED_COMMISSIONING elif node.status == NODE_STATUS.DEPLOYING: if result in ['FAIL', 'FAILURE']: node.mark_failed( None, "Installation failed (refer to the " "installation log for more information).") elif node.status == NODE_STATUS.DISK_ERASING: if result == 'SUCCESS': # disk erasing complete, release node.
node.release() elif result in ['FAIL', 'FAILURE']: node.mark_failed(None, "Failed to erase disks.") # Deallocate the node if we enter any terminal state. if node.status in [ NODE_STATUS.READY, NODE_STATUS.FAILED_COMMISSIONING, NODE_STATUS.FAILED_DISK_ERASING]: node.owner = None node.error = 'failed: %s' % description node.save() return rc.ALL_OK class VersionIndexHandler(MetadataViewHandler): """Listing for a given metadata version.""" create = update = delete = None fields = ('maas-commissioning-scripts', 'meta-data', 'user-data') # States in which a node is allowed to signal # commissioning/installing status. # (Only in Commissioning/Deploying state, however, # will it have any effect.) signalable_states = [ NODE_STATUS.BROKEN, NODE_STATUS.COMMISSIONING, NODE_STATUS.FAILED_COMMISSIONING, NODE_STATUS.DEPLOYING, NODE_STATUS.FAILED_DEPLOYMENT, NODE_STATUS.READY, NODE_STATUS.DISK_ERASING, ] effective_signalable_states = [ NODE_STATUS.COMMISSIONING, NODE_STATUS.DEPLOYING, NODE_STATUS.DISK_ERASING, ] # Statuses that a commissioning node may signal, and the respective # state transitions that they trigger on the node. signaling_statuses = { SIGNAL_STATUS.OK: NODE_STATUS.READY, SIGNAL_STATUS.FAILED: NODE_STATUS.FAILED_COMMISSIONING, SIGNAL_STATUS.WORKING: None, } def read(self, request, version, mac=None): """Read the metadata index for this version.""" check_version(version) node = get_queried_node(request, for_mac=mac) if NodeUserData.objects.has_user_data(node): shown_fields = self.fields else: shown_fields = list(self.fields) shown_fields.remove('user-data') return make_list_response(sorted(shown_fields)) def _store_installation_results(self, node, request): """Store installation result file for `node`.""" for name, uploaded_file in request.FILES.items(): raw_content = uploaded_file.read() NodeResult.objects.store_data( node, name, script_result=0, result_type=RESULT_TYPE.INSTALLATION, data=Bin(raw_content)) def _store_commissioning_results(self, node, request): """Store commissioning result files for `node`.""" script_result = int(request.POST.get('script_result', 0)) for name, uploaded_file in request.FILES.items(): raw_content = uploaded_file.read() if name in BUILTIN_COMMISSIONING_SCRIPTS: postprocess_hook = BUILTIN_COMMISSIONING_SCRIPTS[name]['hook'] postprocess_hook( node=node, output=raw_content, exit_status=script_result) NodeResult.objects.store_data( node, name, script_result, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(raw_content)) @operation(idempotent=False) def signal(self, request, version=None, mac=None): """Signal commissioning/installation status. A commissioning/installing node can call this to report progress of the commissioning/installation process to the metadata server. Calling this from a node that is not Allocated, Commissioning, Ready, or Failed Tests is an error. Signaling completion more than once is not an error; all but the first successful call are ignored. :param status: A commissioning/installation status code. This can be "OK" (to signal that commissioning/installation has completed successfully), or "FAILED" (to signal failure), or "WORKING" (for progress reports). :param script_result: If this call uploads files, this parameter must be provided and will be stored as the return value for the script which produced these files. :param error: An optional error string. If given, this will be stored (overwriting any previous error string), and displayed in the MAAS UI. If not given, any previous error string will be cleared. 
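For illustration: assuming MAAS's usual ``op`` dispatch for `OperationsHandler` methods, a node that has just finished commissioning successfully would POST ``op=signal`` with ``status=OK``; a periodic progress report would send ``status=WORKING``; and a failure would send ``status=FAILED`` together with an ``error`` string.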
""" node = get_queried_node(request, for_mac=mac) status = get_mandatory_param(request.POST, 'status') if node.status not in self.signalable_states: raise NodeStateViolation( "Node wasn't commissioning/installing (status is %s)" % NODE_STATUS_CHOICES_DICT[node.status]) # These statuses are acceptable for commissioning, disk erasing, # and deploying. if status not in self.signaling_statuses: raise MAASAPIBadRequest( "Unknown commissioning/installation status: '%s'" % status) if node.status not in self.effective_signalable_states: # If commissioning, it is already registered. Nothing to be done. # If it is installing, should be in deploying state. return rc.ALL_OK if node.status == NODE_STATUS.COMMISSIONING: # Ensure that any IP lease are forcefully released in case # the host didn't bother doing that. if status != SIGNAL_STATUS.WORKING: node.release_leases() # Store the commissioning results. self._store_commissioning_results(node, request) # Commissioning was successful setup the default storage layout # and the initial networking configuration for the node. if status == SIGNAL_STATUS.OK: node.set_default_storage_layout() node.set_initial_networking_configuration() # XXX 2014-10-21 newell, bug=1382075 # Auto detection for IPMI tries to save power parameters # for Moonshot. This causes issues if the node's power type # is already MSCM as it uses SSH instead of IPMI. This fix # is temporary as power parameters should not be overwritten # during commissioning because MAAS already has knowledge to # boot the node. # See MP discussion bug=1389808, for further details on why # we are using bug fix 1382075 here. if node.power_type != "mscm": store_node_power_parameters(node, request) node.stop_transition_monitor() target_status = self.signaling_statuses.get(status) # Recalculate tags when commissioning ends. if target_status == NODE_STATUS.READY: populate_tags_for_single_node(Tag.objects.all(), node) elif node.status == NODE_STATUS.DEPLOYING: self._store_installation_results(node, request) if status == SIGNAL_STATUS.FAILED: node.mark_failed( None, "Installation failed (refer to the " "installation log for more information).") target_status = None elif node.status == NODE_STATUS.DISK_ERASING: if status == SIGNAL_STATUS.OK: # disk erasing complete, release node node.release() elif status == SIGNAL_STATUS.FAILED: node.mark_failed(None, "Failed to erase disks.") target_status = None if target_status in (None, node.status): # No status change. Nothing to be done. return rc.ALL_OK node.status = target_status # When moving to a terminal state, remove the allocation. node.owner = None node.error = request.POST.get('error', '') # Done. node.save() return rc.ALL_OK @operation(idempotent=False) def netboot_off(self, request, version=None, mac=None): """Turn off netboot on the node. A deploying node can call this to turn off netbooting when it finishes installing itself. """ node = get_queried_node(request, for_mac=mac) node.set_netboot(False) return rc.ALL_OK @operation(idempotent=False) def netboot_on(self, request, version=None, mac=None): """Turn on netboot on the node.""" node = get_queried_node(request, for_mac=mac) node.set_netboot(True) return rc.ALL_OK class MetaDataHandler(VersionIndexHandler): """Meta-data listing for a given version.""" fields = ('instance-id', 'local-hostname', 'public-keys', 'x509') def get_attribute_producer(self, item): """Return a callable to deliver a given metadata item. :param item: Sub-path for the attribute, e.g. 
"local-hostname" to get a handler that returns the logged-in node's hostname. :type item: unicode :return: A callable that accepts as arguments the logged-in node; the requested metadata version (e.g. "latest"); and `item`. It returns an HttpResponse. :rtype: Callable """ field = item.split('/')[0] if field not in self.fields: raise MAASAPINotFound("Unknown metadata attribute: %s" % field) producers = { 'local-hostname': self.local_hostname, 'instance-id': self.instance_id, 'public-keys': self.public_keys, 'x509': self.ssl_certs, } return producers[field] def read(self, request, version, mac=None, item=None): check_version(version) node = get_queried_node(request, for_mac=mac) # Requesting the list of attributes, not any particular # attribute. if item is None or len(item) == 0: fields = list(self.fields) commissioning_without_ssh = ( node.status == NODE_STATUS.COMMISSIONING and not node.enable_ssh) # Add public-keys to the list of attributes, if the # node has registered SSH keys. keys = SSHKey.objects.get_keys_for_user(user=node.owner) if not keys or commissioning_without_ssh: fields.remove('public-keys') return make_list_response(sorted(fields)) producer = self.get_attribute_producer(item) return producer(node, version, item) def local_hostname(self, node, version, item): """Produce local-hostname attribute.""" return make_text_response(node.fqdn) def instance_id(self, node, version, item): """Produce instance-id attribute.""" return make_text_response(node.system_id) def public_keys(self, node, version, item): """ Produce public-keys attribute.""" return make_list_response( SSHKey.objects.get_keys_for_user(user=node.owner)) def ssl_certs(self, node, version, item): """ Produce x509 certs attribute. """ return make_list_response( SSLKey.objects.get_keys_for_user(user=node.owner)) class UserDataHandler(MetadataViewHandler): """User-data blob for a given version.""" def read(self, request, version, mac=None): check_version(version) node = get_queried_node(request, for_mac=mac) try: # When a node is deploying, cloud-init's request # for user-data is when MAAS hands the node # off to a user. if node.status == NODE_STATUS.DEPLOYING: node.end_deployment() # If this node is supposed to be powered off, serve the # 'poweroff' userdata. if node.get_boot_purpose() == 'poweroff': user_data = poweroff.generate_user_data(node=node) else: user_data = NodeUserData.objects.get_user_data(node) return HttpResponse( user_data, mimetype='application/octet-stream') except NodeUserData.DoesNotExist: logger.info( "No user data registered for node named %s" % node.hostname) return HttpResponse(status=httplib.NOT_FOUND) class CurtinUserDataHandler(MetadataViewHandler): """Curtin user-data blob for a given version.""" def read(self, request, version, mac=None): check_version(version) node = get_queried_node(request, for_mac=mac) user_data = get_curtin_userdata(node) return HttpResponse( user_data, mimetype='application/octet-stream') class CommissioningScriptsHandler(MetadataViewHandler): """Return a tar archive containing the commissioning scripts.""" def read(self, request, version, mac=None): check_version(version) return HttpResponse( CommissioningScript.objects.get_archive(), mimetype='application/tar') class EnlistMetaDataHandler(OperationsHandler): """this has to handle the 'meta-data' portion of the meta-data api for enlistment only. 
It should mimic the read-only portion of /VersionIndexHandler""" create = update = delete = None data = { 'instance-id': 'i-maas-enlistment', 'local-hostname': "maas-enlisting-node", 'public-keys': "", } def read(self, request, version, item=None): check_version(version) # Requesting the list of attributes, not any particular attribute. if item is None or len(item) == 0: keys = sorted(self.data.keys()) # There's nothing in public-keys, so we don't advertise it. # But cloud-init does ask for it and it's not worth logging # a traceback for. keys.remove('public-keys') return make_list_response(keys) if item not in self.data: raise MAASAPINotFound("Unknown metadata attribute: %s" % item) return make_text_response(self.data[item]) class EnlistUserDataHandler(OperationsHandler): """User-data for the enlistment environment""" def read(self, request, version): check_version(version) nodegroup = find_nodegroup(request) return HttpResponse( get_enlist_userdata(nodegroup=nodegroup), mimetype="text/plain") class EnlistVersionIndexHandler(OperationsHandler): create = update = delete = None fields = ('meta-data', 'user-data') def read(self, request, version): return make_list_response(sorted(self.fields)) class AnonMetaDataHandler(VersionIndexHandler): """Anonymous metadata.""" @operation(idempotent=True) def get_enlist_preseed(self, request, version=None): """Render and return a preseed script for enlistment.""" nodegroup = find_nodegroup(request) return HttpResponse( get_enlist_preseed(nodegroup=nodegroup), mimetype="text/plain") @operation(idempotent=True) def get_preseed(self, request, version=None, system_id=None): """Render and return a preseed script for the given node.""" node = get_object_or_404(Node, system_id=system_id) return HttpResponse(get_preseed(node), mimetype="text/plain") @operation(idempotent=False) def netboot_off(self, request, version=None, system_id=None): """Turn off netboot on the node. A commissioning node can call this to turn off netbooting when it finishes installing itself. """ node = get_object_or_404(Node, system_id=system_id) node.set_netboot(False) # Build and register an event for "node installation finished". # This is a best-guess. At the moment, netboot_off() only gets # called when the node has finished installing, so it's an # accurate predictor of the end of the install process. type_name = EVENT_TYPES.NODE_INSTALLATION_FINISHED event_details = EVENT_DETAILS[type_name] Event.objects.register_event_and_event_type( node.system_id, type_name, type_level=event_details.level, type_description=event_details.description, event_description="Node disabled netboot") return rc.ALL_OK maas-1.9.5+bzr4599.orig/src/metadataserver/enum.py0000644000000000000000000000156513056115004017776 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
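# --- Editorial example (not part of the MAAS tree) --------------------------
# The signalling flow that VersionIndexHandler.signal() implements above,
# reduced to a table-driven sketch. The state names are stand-ins for the
# NODE_STATUS values; "WORKING" mapping to None is the "progress report,
# no transition" case, mirroring signaling_statuses.
COMMISSIONING = "COMMISSIONING"
READY = "READY"
FAILED_COMMISSIONING = "FAILED_COMMISSIONING"

SIGNAL_TO_TARGET = {
    "OK": READY,
    "FAILED": FAILED_COMMISSIONING,
    "WORKING": None,  # heartbeat only; the node stays where it is
}


def demo_apply_signal(current_status, signalled):
    if signalled not in SIGNAL_TO_TARGET:
        raise ValueError("Unknown status: '%s'" % signalled)
    target = SIGNAL_TO_TARGET[signalled]
    if target in (None, current_status):
        return current_status  # no status change; nothing to be done
    return target


assert demo_apply_signal(COMMISSIONING, "WORKING") == COMMISSIONING
assert demo_apply_signal(COMMISSIONING, "OK") == READY
assert demo_apply_signal(COMMISSIONING, "FAILED") == FAILED_COMMISSIONING
# -----------------------------------------------------------------------------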
"""Enumerations meaningful to the metadataserver application.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'SIGNAL_STATUS', 'SIGNAL_STATUS_CHOICES', 'RESULT_TYPE', 'RESULT_TYPE_CHOICES', ] class SIGNAL_STATUS: DEFAULT = "OK" OK = "OK" FAILED = "FAILED" WORKING = "WORKING" SIGNAL_STATUS_CHOICES = ( (SIGNAL_STATUS.OK, "OK"), (SIGNAL_STATUS.FAILED, "FAILED"), (SIGNAL_STATUS.WORKING, "WORKING"), ) class RESULT_TYPE: COMMISSIONING = 0 INSTALLATION = 1 RESULT_TYPE_CHOICES = ( (RESULT_TYPE.COMMISSIONING, "Commissioning"), (RESULT_TYPE.INSTALLATION, "Installation"), ) maas-1.9.5+bzr4599.orig/src/metadataserver/fields.py0000644000000000000000000001270013056115004020271 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Custom field types for the metadata server.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'BinaryField', ] from base64 import ( b64decode, b64encode, ) from django.db import connection from django.db.models import ( Field, SubfieldBase, ) from south.modelsinspector import add_introspection_rules class Bin(bytes): """Wrapper class to convince django that a string is really binary. This is really just a "bytes," but gets around an idiosyncracy of Django custom field conversions: they must be able to tell on the fly whether a value was retrieved from the database (and needs to be converted to a python-side value), or whether it's already a python-side object (which can stay as it is). The line between bytes and unicode is dangerously thin. So, to store a value in a BinaryField, wrap it in a Bin: my_model_object.binary_data = Bin(b"\x01\x02\x03") """ def __new__(cls, initializer): """Wrap a bytes. :param initializer: Binary string of data for this Bin. This must be a bytes. Anything else is almost certainly a mistake, so e.g. this constructor will refuse to render None as b'None'. :type initializer: bytes """ # We can't do this in __init__, because it passes its argument into # the upcall. It ends up in object.__init__, which sometimes issues # a DeprecationWarning because it doesn't want any arguments. # Those warnings would sometimes make their way into logs, breaking # tests that checked those logs. if not isinstance(initializer, bytes): raise AssertionError( "Not a binary string: '%s'" % repr(initializer)) return super(Bin, cls).__new__(cls, initializer) def __emittable__(self): """Emit base-64 encoded bytes. Exists as a hook for Piston's JSON encoder. """ return b64encode(self) # The BinaryField does not introduce any new parameters compared to its # parent's constructor so South will handle it just fine. # See http://south.aeracode.org/docs/customfields.html#extending-introspection # for details. add_introspection_rules([], ["^metadataserver\.fields\.BinaryField"]) class BinaryField(Field): """A field that stores binary data. The data is base64-encoded internally, so this is not very efficient. Do not use this for large blobs. We do not have direct support for binary data in django at the moment. It's possible to create a django model Field based by a postgres BYTEA, but: 1. Any data you save gets mis-interpreted as encoded text. This won't be obvious until you test with data that can't be decoded. 2. Any data you retrieve gets truncated at the first zero byte. 
""" __metaclass__ = SubfieldBase def to_python(self, value): """Django overridable: convert database value to python-side value.""" if isinstance(value, unicode): # Encoded binary data from the database. Convert. return Bin(b64decode(value)) elif value is None or isinstance(value, Bin): # Already in python-side form. return value else: raise AssertionError( "Invalid BinaryField value (expected unicode): '%s'" % repr(value)) def get_db_prep_value(self, value, connection=None, prepared=False): """Django overridable: convert python-side value to database value.""" if value is None: # Equivalent of a NULL. return None elif isinstance(value, Bin): # Python-side form. Convert to database form. return b64encode(value) elif isinstance(value, bytes): # Binary string. Require a Bin to make intent explicit. raise AssertionError( "Converting a binary string to BinaryField: " "either conversion is going the wrong way, or the value " "needs to be wrapped in a Bin.") elif isinstance(value, unicode): # Unicode here is almost certainly a sign of a mistake. raise AssertionError( "A unicode string is being mistaken for binary data.") else: raise AssertionError( "Invalid BinaryField value (expected Bin): '%s'" % repr(value)) def get_internal_type(self): return 'TextField' def _get_default(self): """Cargo-cult of Django's `Field.get_default`. Django is totally smoking crack on this one. It forces a unicode string out of the default which is demonstrably not unicode. This corrects that behaviour. """ if self.has_default(): if callable(self.default): return self.default() return self.default if not self.empty_strings_allowed: return None if self.null: if not connection.features.interprets_empty_strings_as_nulls: return None return b"" def get_default(self): """Override Django's crack-smoking ``Field.get_default``.""" default = self._get_default() return None if default is None else Bin(default) maas-1.9.5+bzr4599.orig/src/metadataserver/fixtures/0000755000000000000000000000000013056115004020322 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/middleware.py0000644000000000000000000000117013056115004021137 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Django "middlewares" for the metadata API.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'MetadataErrorsMiddleware', ] from django.conf import settings from maasserver.middleware import ExceptionMiddleware class MetadataErrorsMiddleware(ExceptionMiddleware): """Report exceptions from the metadata app as HTTP responses.""" path_regex = settings.METADATA_URL_REGEXP maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0000755000000000000000000000000013056115004020625 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/models/0000755000000000000000000000000013056115004017734 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/nodeinituser.py0000644000000000000000000000136313056115004021536 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""User management for nodes' access to the metadata service.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_node_init_user', 'user_name', ] from django.contrib.auth.models import User user_name = 'maas-init-node' # Cached, shared reference to this special user. Keep internal to this # module. node_init_user = None def get_node_init_user(): global node_init_user if node_init_user is None: node_init_user = User.objects.get(username=user_name) return node_init_user maas-1.9.5+bzr4599.orig/src/metadataserver/tests/0000755000000000000000000000000013056115004017613 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/urls.py0000644000000000000000000001477513056115004020026 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Metadata API URLs.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'urlpatterns', ] from django.conf.urls import ( patterns, url, ) from maasserver.api.auth import api_auth from maasserver.api.support import OperationsResource from metadataserver.api import ( AnonMetaDataHandler, CommissioningScriptsHandler, CurtinUserDataHandler, EnlistMetaDataHandler, EnlistUserDataHandler, EnlistVersionIndexHandler, IndexHandler, MetaDataHandler, StatusHandler, UserDataHandler, VersionIndexHandler, ) # Handlers for nodes requesting their own metadata. meta_data_handler = OperationsResource( MetaDataHandler, authentication=api_auth) user_data_handler = OperationsResource( UserDataHandler, authentication=api_auth) curtin_user_data_handler = OperationsResource( CurtinUserDataHandler, authentication=api_auth) version_index_handler = OperationsResource( VersionIndexHandler, authentication=api_auth) index_handler = OperationsResource( IndexHandler, authentication=api_auth) commissioning_scripts_handler = OperationsResource( CommissioningScriptsHandler, authentication=api_auth) # Handlers for status reporting status_handler = OperationsResource(StatusHandler, authentication=api_auth) # Handlers for anonymous metadata operations. meta_data_anon_handler = OperationsResource(AnonMetaDataHandler) # Handlers for UNSAFE anonymous random metadata access. meta_data_by_mac_handler = OperationsResource(MetaDataHandler) user_data_by_mac_handler = OperationsResource(UserDataHandler) version_index_by_mac_handler = OperationsResource(VersionIndexHandler) # Handlers for the anonymous enlistment metadata service enlist_meta_data_handler = OperationsResource(EnlistMetaDataHandler) enlist_user_data_handler = OperationsResource(EnlistUserDataHandler) enlist_index_handler = OperationsResource(IndexHandler) enlist_version_index_handler = OperationsResource(EnlistVersionIndexHandler) # Normal metadata access, available to a node querying its own metadata. # # The URL patterns must tolerate redundant leading slashes, because # cloud-init tends to add these. node_patterns = patterns( '', # The webhook-style status reporting handler. url( r'^status/(?P[\w\-]+)$', status_handler, name='metadata-status'), url( r'^/*(?P[^/]+)/meta-data/(?P.*)$', meta_data_handler, name='metadata-meta-data'), url( r'^/*(?P[^/]+)/user-data$', user_data_handler, name='metadata-user-data'), # Commissioning scripts. This is a blatant MAAS extension to the # metadata API, hence the "maas-" prefix. 
# Scripts are returned as a tar archive, but the format is not # reflected in the http filename. The response's MIME type is # definitive. We may yet choose to compress the file, without # changing its name on the API. url( r'^/*(?P<version>[^/]+)/maas-commissioning-scripts', commissioning_scripts_handler, name='commissioning-scripts'), url( r'^/*(?P<version>[^/]+)/', version_index_handler, name='metadata-version'), url( r'^/*', index_handler, name='metadata'), ) # The curtin-specific metadata API. Only the user-data end-point is # really curtin-specific, all the other end-points are similar to the # normal metadata API. curtin_patterns = patterns( '', url( r'^/*curtin/(?P<version>[^/]+)/meta-data/(?P<item>.*)$', meta_data_handler, name='curtin-metadata-meta-data'), url( r'^/*curtin/(?P<version>[^/]+)/user-data$', curtin_user_data_handler, name='curtin-metadata-user-data'), url( r'^/*curtin/(?P<version>[^/]+)/', version_index_handler, name='curtin-metadata-version'), url( r'^/*curtin[/]*$', index_handler, name='curtin-metadata'), ) # Anonymous random metadata access, keyed by system ID. These serve requests # from the nodes which happen when the environment is so minimal that proper # authenticated calls are not possible. by_id_patterns = patterns( '', # XXX: rvb 2012-06-20 bug=1015559: This method is accessible # without authentication. This is a security threat. url( # cloud-init adds additional slashes in front of urls. r'^/*(?P<version>[^/]+)/by-id/(?P<system_id>[\w\-]+)/$', meta_data_anon_handler, name='metadata-node-by-id'), url( # cloud-init adds additional slashes in front of urls. r'^/*(?P<version>[^/]+)/enlist-preseed/$', meta_data_anon_handler, name='metadata-enlist-preseed'), ) # UNSAFE anonymous random metadata access, keyed by MAC address. These won't # work unless ALLOW_UNSAFE_METADATA_ACCESS is enabled, which you should never # do on a production MAAS. by_mac_patterns = patterns( '', url( # cloud-init adds additional slashes in front of urls. r'^/*(?P<version>[^/]+)/by-mac/(?P<mac>[^/]+)/meta-data/(?P<item>.*)$', meta_data_by_mac_handler, name='metadata-meta-data-by-mac'), url( # cloud-init adds additional slashes in front of urls. r'^/*(?P<version>[^/]+)/by-mac/(?P<mac>[^/]+)/user-data$', user_data_by_mac_handler, name='metadata-user-data-by-mac'), url( # cloud-init adds additional slashes in front of urls. r'^/*(?P<version>[^/]+)/by-mac/(?P<mac>[^/]+)/', version_index_by_mac_handler, name='metadata-version-by-mac'), ) # Anonymous enlistment entry point enlist_metadata_patterns = patterns( '', url( r'^/*enlist/(?P<version>[^/]+)/meta-data/(?P<item>.*)$', enlist_meta_data_handler, name='enlist-metadata-meta-data'), url( r'^/*enlist/(?P<version>[^/]+)/user-data$', enlist_user_data_handler, name='enlist-metadata-user-data'), url( r'^/*enlist/(?P<version>[^/]+)[/]*$', enlist_version_index_handler, name='enlist-version'), url(r'^/*enlist[/]*$', enlist_index_handler, name='enlist'), ) # URL patterns. The anonymous patterns are listed first because they're # so recognizable: there's no chance of a regular metadata access being # mistaken for one of these based on URL pattern match. urlpatterns = ( enlist_metadata_patterns + by_id_patterns + by_mac_patterns + curtin_patterns + node_patterns) maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/0000755000000000000000000000000013056115004020420 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/views.py0000644000000000000000000000003213056115004020155 0ustar 00000000000000# Create your views here.
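# --- Editorial example (not part of the MAAS tree) --------------------------
# Why every pattern in urls.py above begins with "^/*": cloud-init is known
# to prepend redundant slashes, so each URL must match with or without them.
# A quick stdlib-only check against one of the patterns:
import re

user_data_pattern = re.compile(r'^/*(?P<version>[^/]+)/user-data$')

for path in ('latest/user-data', '/latest/user-data', '///latest/user-data'):
    match = user_data_pattern.match(path)
    assert match is not None
    assert match.group('version') == 'latest'
# -----------------------------------------------------------------------------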
maas-1.9.5+bzr4599.orig/src/metadataserver/fixtures/initial_data.yaml0000644000000000000000000000043513056115004023632 0ustar 00000000000000- model: auth.user pk: 1 fields: username: 'maas-init-node' first_name: 'Node initializer' last_name: 'Special user' email: '' password: '!' is_staff: false is_active: false is_superuser: false last_login: 2012-02-16 date_joined: 2012-02-16 maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0001_initial.py0000644000000000000000000002177613056115004023305 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Initial metadataserver migration.""" from __future__ import ( absolute_import, print_function, # This breaks South. #unicode_literals, ) str = None __metaclass__ = type __all__ = [] # flake8: noqa # SKIP this file when reformatting. # The rest of this file was generated by South. # encoding: utf-8 import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): depends_on = ( ("maasserver", "0059_dhcp_detection_model"), ) def forwards(self, orm): # Adding model 'NodeKey' db.create_table('metadataserver_nodekey', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('node', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['maasserver.Node'], unique=True)), ('token', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['piston.Token'], unique=True)), ('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=18)), )) db.send_create_signal('metadataserver', ['NodeKey']) # Adding model 'NodeUserData' db.create_table('metadataserver_nodeuserdata', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('node', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['maasserver.Node'], unique=True)), ('data', self.gf('metadataserver.fields.BinaryField')()), )) db.send_create_signal('metadataserver', ['NodeUserData']) def backwards(self, orm): # Deleting model 'NodeKey' db.delete_table('metadataserver_nodekey') # Deleting model 'NodeUserData' db.delete_table('metadataserver_nodeuserdata') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386'", 'max_length': '10'}), 'created': ('django.db.models.fields.DateField', [], {}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-b5811680-6f3a-11e1-baa5-00219bd0a2de'", 'unique': 'True', 'max_length': '41'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, 'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, 'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': 
'255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1331883374L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0002_add_nodecommissionresult.py0000644000000000000000000002226113056115004026740 0ustar 00000000000000# flake8: noqa # SKIP this file when reformatting. # The rest of this file was generated by South. # encoding: utf-8 import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'NodeCommissionResult' db.create_table('metadataserver_nodecommissionresult', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('node', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['maasserver.Node'])), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('data', self.gf('django.db.models.fields.CharField')(max_length=1048576)), )) db.send_create_signal('metadataserver', ['NodeCommissionResult']) # Adding unique constraint on 'NodeCommissionResult', fields ['node', 'name'] db.create_unique('metadataserver_nodecommissionresult', ['node_id', 'name']) def backwards(self, orm): # Removing unique constraint on 'NodeCommissionResult', fields ['node', 'name'] db.delete_unique('metadataserver_nodecommissionresult', ['node_id', 'name']) # Deleting model 'NodeCommissionResult' db.delete_table('metadataserver_nodecommissionresult') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386'", 'max_length': '10'}), 'created': ('django.db.models.fields.DateField', [], {}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-b9edb888-839c-11e1-965e-002215205ce8'", 'unique': 'True', 'max_length': '41'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, 'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': 
'100'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maasserver.Node']"}) }, 'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, 'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1334124495L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']
maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0003_populate_hardware_details.py0000644000000000000000000003324113056115004027057 0ustar 00000000000000"""Migration for populating new Node fields related to constraints.

A shared helper function is used that works with both the real Node model
in maasserver and the stubs used by South.  This saves writing the logic
to populate cpu_count and memory in two places.

On migration it is likely that tags do not exist yet.

If for whatever reason previously stored lshw output is not well-formed
XML, the fields in Node are not populated.
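For illustration, the XPath expressions defined below can be exercised
against a hand-made lshw-like document.  This is a minimal sketch, not
part of the migration itself, and the element names in the sample XML
are assumptions modelled on typical lshw output::

    from lxml import etree

    lshw = etree.XML(
        "<list>"
        "<node id='core'>"
        "<node class='processor'/>"
        "<node class='processor' disabled='true'/>"
        "</node>"
        "<node id='memory'>"
        "<size units='bytes'>4294967296</size>"
        "</node>"
        "</list>")
    evaluator = etree.XPathEvaluator(lshw)
    evaluator(_xpath_processor_count)  # 1.0 -- the disabled CPU is excluded
    evaluator(_xpath_memory_bytes)     # 4096.0, i.e. 4 GiB expressed in MiB
    # Tag definitions are evaluated against the same document; a
    # non-empty node-set means the tag applies:
    bool(evaluator("//node[@class='processor']"))  # True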
""" import math from django.core.exceptions import ValidationError from lxml import etree from south.v2 import DataMigration _xpath_processor_count = """\ count(//node[@id='core']/ node[@class='processor'][not(@disabled)]) """ # Some machines have a element in their memory with the total # amount of memory, and other machines declare the size of the memory in # individual memory banks. This expression is mean to cope with both. _xpath_memory_bytes = """\ sum(//node[@id='memory']/size[@units='bytes'] | //node[starts-with(@id, 'memory:')] /node[starts-with(@id, 'bank:')]/size[@units='bytes']) div 1024 div 1024 """ _xpath_storage_bytes = """\ sum(//node[starts-with(@id, 'volume:')]/size[@units='bytes']) div 1024 div 1024 """ def update_hardware_details(node, xmlbytes, tag_manager): """Set node hardware_details from lshw output and update related fields. This previously resided in `maasserver.models.node`, but has been copied into this migration so that it can be modified in its original location without breaking this migration. """ try: doc = etree.XML(xmlbytes) except etree.XMLSyntaxError as e: raise ValidationError( {'hardware_details': ['Invalid XML: %s' % (e,)]}) node.hardware_details = xmlbytes # Same document, many queries: use XPathEvaluator. evaluator = etree.XPathEvaluator(doc) cpu_count = evaluator(_xpath_processor_count) memory = evaluator(_xpath_memory_bytes) if not memory or math.isnan(memory): memory = 0 storage = evaluator(_xpath_storage_bytes) if not storage or math.isnan(storage): storage = 0 node.cpu_count = cpu_count or 0 node.memory = memory node.storage = storage for tag in tag_manager.all(): if not tag.definition: continue has_tag = evaluator(tag.definition) if has_tag: node.tags.add(tag) else: node.tags.remove(tag) node.save() class Migration(DataMigration): _lshw_name = "01-lshw.out" def forwards(self, orm): """Move lshw text blob in metadataserver to xml field in Node""" Tag = orm['maasserver.tag'] for commission_result in orm.NodeCommissionResult.objects.filter( name=self._lshw_name): node = commission_result.node data = commission_result.data try: update_hardware_details(node, data, Tag.objects) except ValidationError: pass else: node.save() def backwards(self, orm): """Move xml field in Node to lshw text blob in metadataserver""" Node = orm['maasserver.Node'] for node in Node.objects.all(): lshw_output = node.hardware_details if lshw_output: orm.NodeCommissionResult.objects.store_data( node, self._lshw_name, lshw_output) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': 
('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-dc867442-0ca0-11e2-9ff0-fa163e46ecd0'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], 
{'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1349189580L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] symmetrical = True maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0004_add_commissioningscript.py0000644000000000000000000002727013056115004026565 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CommissioningScript' db.create_table(u'metadataserver_commissioningscript', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)), ('content', self.gf('metadataserver.fields.BinaryField')()), )) db.send_create_signal(u'metadataserver', ['CommissioningScript']) def backwards(self, orm): # Deleting model 'CommissioningScript' db.delete_table(u'metadataserver_commissioningscript') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-9b26bf42-3249-11e2-a760-fa163e25f662'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': 
('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1353330248L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0005_nodecommissionresult_add_timestamp.py0000644000000000000000000003034413056115004031027 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'NodeCommissionResult.created' db.add_column(u'metadataserver_nodecommissionresult', 'created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 12, 12, 0, 0)), keep_default=False) # Adding field 'NodeCommissionResult.updated' db.add_column(u'metadataserver_nodecommissionresult', 'updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 12, 12, 0, 0)), keep_default=False) def backwards(self, orm): # Deleting field 'NodeCommissionResult.created' db.delete_column(u'metadataserver_nodecommissionresult', 'created') # Deleting field 'NodeCommissionResult.updated' db.delete_column(u'metadataserver_nodecommissionresult', 'updated') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-3cd81978-444f-11e2-a51d-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 
'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': 
"'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355311776L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0006_nodecommissionresult_add_status.py0000644000000000000000000002762213056115004030355 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'NodeCommissionResult.status' db.add_column(u'metadataserver_nodecommissionresult', 'status', self.gf('django.db.models.fields.CharField')(default=u'OK', max_length=100), keep_default=False) def backwards(self, orm): # Deleting field 'NodeCommissionResult.status' db.delete_column(u'metadataserver_nodecommissionresult', 'status') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-4d3880a0-444f-11e2-a706-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 
'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "u'OK'", 'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], 
{'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355311816L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0007_nodecommissionresult_change_name_size.py0000644000000000000000000002757413056115004031510 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'NodeCommissionResult.name' db.alter_column(u'metadataserver_nodecommissionresult', 'name', self.gf('django.db.models.fields.CharField')(max_length=255)) def backwards(self, orm): # Changing field 'NodeCommissionResult.name' db.alter_column(u'metadataserver_nodecommissionresult', 'name', self.gf('django.db.models.fields.CharField')(max_length=100)) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-5b8f2212-444f-11e2-a886-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "u'OK'", 'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355311840L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0008_rename_lshw_commissioning_output.py0000644000000000000000000003041413056115004030532 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import DataMigration def rename_commissioning_results(orm, old_name, new_name): """Rename any `NodeCommissionResult` called `old_name` to `new_name`.""" ncrs = orm['metadataserver.NodeCommissionResult'].objects ncrs.filter(name=old_name).update(name=new_name) class Migration(DataMigration): """Rename lshw output in accordance with new naming convention. The commissioning results for "lshw" were written as 01-lshw.out: the output of 01-lshw. But our naming convention reserves such names for user-provided commissioning scripts. MAAS-internal commissioning scripts have names starting with 00-maas-*. 
""" def forwards(self, orm): rename_commissioning_results(orm, '01-lshw.out', '00-maas-01-lshw.out') def backwards(self, orm): rename_commissioning_results(orm, '00-maas-01-lshw.out', '01-lshw.out') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 
'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-5b8f2212-444f-11e2-a886-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', 
u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "u'OK'", 'max_length': '100'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355311840L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] symmetrical = True maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0009_delete_status.py0000644000000000000000000002744613056115004024531 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'NodeCommissionResult.status' 
db.delete_column(u'metadataserver_nodecommissionresult', 'status') def backwards(self, orm): # Adding field 'NodeCommissionResult.status' db.add_column(u'metadataserver_nodecommissionresult', 'status', self.gf('django.db.models.fields.CharField')(default=u'OK', max_length=100), keep_default=False) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}), 
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-73e09bf8-49d0-11e2-8c9f-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 
'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355917041L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0010_add_script_result.py0000644000000000000000000002760013056115004025356 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'NodeCommissionResult.script_result' 
db.add_column(u'metadataserver_nodecommissionresult', 'script_result', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'NodeCommissionResult.script_result' db.delete_column(u'metadataserver_nodecommissionresult', 'script_result') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 
'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-5065b92a-49d4-11e2-8786-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 
'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'max_length': '1048576'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, 'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1355918694L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0011_commission_result_binary_data_col.py0000644000000000000000000003046413056115004030617 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding field 
'NodeCommissionResult.data_bin' db.add_column(u'metadataserver_nodecommissionresult', 'data_bin', self.gf('metadataserver.fields.BinaryField')(default='', max_length=1048576, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'NodeCommissionResult.data_bin' db.delete_column(u'metadataserver_nodecommissionresult', 'data_bin') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], 
{'default': "u''", 'max_length': '20', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-c2d58220-1cc3-11e3-9b8f-000c29baa6bf'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, 
u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1048576', 'blank': 'True'}), 'data_bin': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, u'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) }, u'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1379111286L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), 'verifier': 
('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0012_commission_result_binary_data_recode.py0000644000000000000000000003052513056115004031302 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import DataMigration class Migration(DataMigration): def forwards(self, orm): "Copy each result's text data into the new binary data_bin column, encoded as UTF-8." from metadataserver.fields import Bin for result in orm.NodeCommissionResult.objects.all(): result.data_bin = Bin(result.data.encode("utf-8")) result.save() def backwards(self, orm): "Decode each result's binary data_bin back into the text data column." for result in orm.NodeCommissionResult.objects.all(): result.data = result.data_bin.decode("utf-8") result.save() models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': 
'100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-e664d6e6-1cc3-11e3-92b1-000c29baa6bf'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 
'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1048576', 'blank': 'True'}), 'data_bin': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, u'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) }, u'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': 
('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1379111345L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] symmetrical = True maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0013_commission_result_drop_old_data_col.py0000644000000000000000000003025013056115004031130 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'NodeCommissionResult.data' db.delete_column(u'metadataserver_nodecommissionresult', 'data') def backwards(self, orm): # Adding field 'NodeCommissionResult.data' db.add_column(u'metadataserver_nodecommissionresult', 'data', self.gf('django.db.models.fields.CharField')(default=u'', max_length=1048576, blank=True), keep_default=False) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 
u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-5ccffa3e-1cc6-11e3-af0e-000c29baa6bf'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': 
'255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data_bin': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, u'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) }, u'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1379112403L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver']maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0014_commission_result_rename_data_bin_col.py0000644000000000000000000002767513056115004031447 0ustar 00000000000000# -*- coding: utf-8 -*- import datetime from django.db import models from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): db.rename_column(u'metadataserver_nodecommissionresult', 'data_bin', 'data') def backwards(self, orm): db.rename_column(u'metadataserver_nodecommissionresult', 'data', 'data_bin') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'null': 'True', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-ac1667a4-1cc6-11e3-930d-000c29baa6bf'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 
'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodecommissionresult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeCommissionResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, u'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) }, 
u'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1379112536L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_type.pymaas-1.9.5+bzr4599.orig/src/metadataserver/migrations/0015_rename_nodecommissionresult_add_result_ty0000644000000000000000000003326513056115004031764 0ustar 00000000000000from django.db import models from metadataserver.enum import RESULT_TYPE from south.db import db # -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'NodeCommissionResult.status' db.add_column(u'metadataserver_nodecommissionresult', 'result_type', self.gf('django.db.models.fields.IntegerField')(default=RESULT_TYPE.COMMISSIONING), keep_default=False) db.rename_table(u'metadataserver_nodecommissionresult', u'metadataserver_noderesult') def backwards(self, orm): db.rename_table(u'metadataserver_noderesult', u'metadataserver_nodecommissionresult') # Deleting field 'NodeCommissionResult.status' db.delete_column(u'metadataserver_nodecommissionresult', 'result_type') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': 
('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'maasserver.node': { 'Meta': {'object_name': 'Node'}, 'agent_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'architecture': ('django.db.models.fields.CharField', [], {'max_length': '31'}), 'boot_type': ('django.db.models.fields.CharField', [], {'default': "u'fastpath'", 'max_length': '20'}), 'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'blank': 'True'}), 'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'error_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license_key': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}), 'osystem': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}), 'power_state': ('django.db.models.fields.CharField', [], {'default': "u'unknown'", 'max_length': '10'}), 'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}), 'routers': 
('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}), 'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-8fa4490a-20c2-11e4-97b9-e82aea220bd2'", 'unique': 'True', 'max_length': '41'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Zone']", 'on_delete': 'models.SET_DEFAULT'}) }, u'maasserver.nodegroup': { 'Meta': {'object_name': 'NodeGroup'}, 'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}), 'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}) }, u'maasserver.tag': { 'Meta': {'object_name': 'Tag'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'maasserver.zone': { 'Meta': {'ordering': "[u'name']", 'object_name': 'Zone'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.commissioningscript': { 'Meta': {'object_name': 'CommissioningScript'}, 'content': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'metadataserver.nodekey': { 'Meta': {'object_name': 'NodeKey'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}), 'node': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}), 'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}) }, u'metadataserver.noderesult': { 'Meta': {'unique_together': "((u'node', u'name'),)", 'object_name': 'NodeResult'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'data': ('metadataserver.fields.BinaryField', [], {'default': "''", 'max_length': '1048576', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}), 'result_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'script_result': ('django.db.models.fields.IntegerField', [], {}), 'updated': ('django.db.models.fields.DateTimeField', [], {}) }, u'metadataserver.nodeuserdata': { 'Meta': {'object_name': 'NodeUserData'}, 'data': ('metadataserver.fields.BinaryField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']", 'unique': 'True'}) }, u'piston.consumer': { 'Meta': {'object_name': 'Consumer'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"}) }, u'piston.token': { 'Meta': {'object_name': 'Token'}, 'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1407698073L'}), 'token_type': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}), 'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'}) } } complete_apps = ['metadataserver'] maas-1.9.5+bzr4599.orig/src/metadataserver/migrations/__init__.py0000644000000000000000000000000013056115004022724 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/models/__init__.py0000644000000000000000000000141413056115004022045 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Model export and helpers for metadataserver. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'CommissioningScript', 'NodeResult', 'NodeKey', 'NodeUserData', ] from maasserver.utils import ignore_unused from metadataserver.models.commissioningscript import CommissioningScript from metadataserver.models.nodekey import NodeKey from metadataserver.models.noderesult import NodeResult from metadataserver.models.nodeuserdata import NodeUserData ignore_unused(CommissioningScript, NodeResult, NodeKey, NodeUserData) maas-1.9.5+bzr4599.orig/src/metadataserver/models/commissioningscript.py0000644000000000000000000007047613056115004024427 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Custom commissioning scripts, and their database backing.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'BUILTIN_COMMISSIONING_SCRIPTS', 'CommissioningScript', 'inject_lldp_result', 'inject_lshw_result', 'inject_result', 'LIST_MODALIASES_OUTPUT_NAME', 'LLDP_OUTPUT_NAME', 'LSHW_OUTPUT_NAME', ] from functools import partial from inspect import getsource from io import BytesIO from itertools import chain import json import logging import math import os.path import tarfile from textwrap import dedent from time import time as now from django.db.models import ( CharField, Manager, Model, ) from lxml import etree from maasserver.fields import MAC from maasserver.models import Fabric from maasserver.models.interface import PhysicalInterface from maasserver.models.physicalblockdevice import PhysicalBlockDevice from maasserver.models.tag import Tag from metadataserver import DefaultMeta from metadataserver.enum import RESULT_TYPE from metadataserver.fields import ( Bin, BinaryField, ) from metadataserver.models.noderesult import NodeResult from provisioningserver.utils.ipaddr import parse_ip_addr logger = logging.getLogger(__name__) # Path prefix for commissioning scripts. Commissioning scripts will be # extracted into this directory. ARCHIVE_PREFIX = "commissioning.d" # Name of the file where the commissioning scripts store lshw output. LSHW_OUTPUT_NAME = '00-maas-01-lshw.out' # Name of the file where the commissioning scripts store LLDP output. LLDP_OUTPUT_NAME = '99-maas-02-capture-lldp.out' def make_function_call_script(function, *args, **kwargs): """Compose a Python script that calls the given function. The function's source will be obtained by inspection. Ensure that the function is fully self-contained; don't rely on variables or imports from the module in which it is defined. The given arguments will be used when calling the function in the composed script. They are serialised into JSON with the limitations on types that that implies. 
:return: `bytes` """ template = dedent("""\ #!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import ( absolute_import, print_function, unicode_literals, ) import json __metaclass__ = type __all__ = [{function_name!r}] {function_source} if __name__ == '__main__': args = json.loads({function_args!r}) kwargs = json.loads({function_kwargs!r}) {function_name}(*args, **kwargs) """) script = template.format( function_name=function.__name__.decode('ascii'), function_source=dedent(getsource(function).decode('utf-8')).strip(), function_args=json.dumps(args).decode('utf-8'), function_kwargs=json.dumps(kwargs).decode('utf-8'), ) return script.encode("utf-8") # Built-in script to run lshw. LSHW_SCRIPT = dedent("""\ #!/bin/sh lshw -xml """) # Built-in script to run `ip addr` IPADDR_SCRIPT = dedent("""\ #!/bin/sh ip addr """) # Count the processors which do not declare their number of 'threads' # as 1 processor. _xpath_processor_count = """\ sum(//node[@id='core']/ node[@class='processor'] [not(@disabled)]//setting[@id='threads']/@value) + count(//node[@id='core']/node[@class='processor'] [not(@disabled)][not(configuration/setting[@id='threads'])])""" # Some machines have a <size> element in their memory with the total # amount of memory, and other machines declare the size of the memory in # individual memory banks. This expression is meant to cope with both. _xpath_memory_bytes = """\ sum(//node[@id='memory']/size[@units='bytes'] | //node[starts-with(@id, 'memory:')] /node[starts-with(@id, 'bank:')]/size[@units='bytes']) div 1024 div 1024 """ def _create_default_physical_interface(node, ifname, mac): """Assigns the specified interface to the specified Node. Creates or updates a PhysicalInterface that corresponds to the given MAC. :param node: Node model object :param ifname: the interface name (for example, 'eth0') :param mac: the Interface to update and associate """ # We don't yet have enough information to put this newly-created Interface # into the proper Fabric/VLAN. (We'll do this on a "best effort" basis # later, if we are able to determine that the interface is on a particular # subnet due to a DHCP reply during commissioning.) fabric = Fabric.objects.get_default_fabric() vlan = fabric.get_default_vlan() interface = PhysicalInterface.objects.create( mac_address=mac, name=ifname, node=node, vlan=vlan) return interface def update_node_network_information(node, output, exit_status): """Updates the network interfaces from the results of `IPADDR_SCRIPT`. Creates and deletes an Interface according to what we currently know about this node's hardware. If `exit_status` is non-zero, this function returns without doing anything. """ assert isinstance(output, bytes) if exit_status != 0: return # Skip network configuration if set by the user. if node.skip_networking: return # Get the MAC addresses of all connected interfaces. ip_addr_info = parse_ip_addr(output) current_interfaces = set() for link in ip_addr_info.values(): link_mac = link.get('mac') # Ignore loopback interfaces. if link_mac is None: continue else: ifname = link['name'] try: interface = PhysicalInterface.objects.get( mac_address=link_mac) if interface.node is not None and interface.node != node: logger.warning( "Interface with MAC %s moved from node %s to %s. " "(The existing interface will be deleted.)" % (interface.mac_address, interface.node.fqdn, node.fqdn)) interface.delete() interface = _create_default_physical_interface( node, ifname, link_mac) else: # Interface already exists on this Node, so just update # the name.
interface.name = ifname interface.save() except PhysicalInterface.DoesNotExist: interface = _create_default_physical_interface( node, ifname, link_mac) current_interfaces.add(interface) ips = link.get('inet', []) + link.get('inet6', []) interface.update_ip_addresses(ips) for iface in PhysicalInterface.objects.filter(node=node): if iface not in current_interfaces: iface.delete() def update_hardware_details(node, output, exit_status): """Process the results of `LSHW_SCRIPT`. Updates `node.cpu_count`, `node.memory`, and `node.storage` fields, and also evaluates all tag expressions against the given ``lshw`` XML. If `exit_status` is non-zero, this function returns without doing anything. """ assert isinstance(output, bytes) if exit_status != 0: return try: doc = etree.XML(output) except etree.XMLSyntaxError: logger.exception("Invalid lshw data.") else: # Same document, many queries: use XPathEvaluator. evaluator = etree.XPathEvaluator(doc) cpu_count = evaluator(_xpath_processor_count) memory = evaluator(_xpath_memory_bytes) if not memory or math.isnan(memory): memory = 0 node.cpu_count = cpu_count or 0 node.memory = memory node.save() # Built-in script to detect virtual instances. It will only detect QEMU # for now and may need expanding/generalising at some point. VIRTUALITY_SCRIPT = dedent("""\ #!/bin/sh grep '^model name.*QEMU.*' /proc/cpuinfo >/dev/null 2>&1 if [ $? -eq 0 ]; then echo "virtual" else echo "notvirtual" fi """) def set_virtual_tag(node, output, exit_status): """Process the results of `VIRTUALITY_SCRIPT`. This adds or removes the *virtual* tag from the node, depending on the presence of the terms "notvirtual" or "virtual" in `output`. If `exit_status` is non-zero, this function returns without doing anything. """ assert isinstance(output, bytes) if exit_status != 0: return tag, _ = Tag.objects.get_or_create(name='virtual') if b'notvirtual' in output: node.tags.remove(tag) elif b'virtual' in output: node.tags.add(tag) else: logger.warn( "Neither 'virtual' nor 'notvirtual' appeared in the " "captured VIRTUALITY_SCRIPT output for node %s.", node.system_id) # Run `dhclient` on all the unconfigured interfaces. # This is done to create records in the leases file for the # NICs attached to unconfigured interfaces. This way the leases # parser will be able to connect these NICs to the networks # MAAS knows about. def dhcp_explore(): def get_iface_list(ifconfig_output): return [ line.split()[0] for line in ifconfig_output.splitlines()[1:]] from subprocess import check_output, call all_ifaces = get_iface_list(check_output(("ifconfig", "-s", "-a"))) configured_ifaces = get_iface_list(check_output(("ifconfig", "-s"))) unconfigured_ifaces = set(all_ifaces) - set(configured_ifaces) for iface in sorted(unconfigured_ifaces): # Run dhclient in the background to avoid blocking the commissioning. call(["dhclient", "-nw", iface]) # Ignore return value and continue running dhclient on the # other interfaces. # This function must be entirely self-contained. It must not use # variables or imports from the surrounding scope. def lldpd_install(config_file): """Installs and configures `lldpd` for passive capture. `config_file` refers to a shell script that is sourced by `lldpd`'s init script, i.e. its Upstart config on Ubuntu. It selects the following options for the `lldpd` daemon: -c Enable the support of CDP protocol to deal with Cisco routers that do not speak LLDP. If repeated, CDPv1 packets will be sent even when there is no CDP peer detected.
-f Enable the support of FDP protocol to deal with Foundry routers that do not speak LLDP. If repeated, FDP packets will be sent even when there is no FDP peer detected. -s Enable the support of SONMP protocol to deal with Nortel routers and switches that do not speak LLDP. If repeated, SONMP packets will be sent even when there is no SONMP peer detected. -e Enable the support of EDP protocol to deal with Extreme routers and switches that do not speak LLDP. If repeated, EDP packets will be sent even when there is no EDP peer detected. -r Receive-only mode. With this switch, lldpd will not send any frame. It will only listen to neighbors. These flags are chosen so that we're able to capture information from a broad spectrum of equipment, but without advertising the node's temporary presence. """ from subprocess import check_call check_call(("apt-get", "install", "--yes", "lldpd")) from codecs import open with open(config_file, "a", "ascii") as fd: fd.write('\n') # Ensure there's a newline. fd.write('# Configured by MAAS:\n') fd.write('DAEMON_ARGS="-c -f -s -e -r"\n') # Reload initctl configuration in order to make sure that the # lldpd init script is available before restart, otherwise # it might cause commissioning to fail. This is due to bug # (LP: #882147) in the kernel. check_call(("initctl", "reload-configuration")) check_call(("service", "lldpd", "restart")) # This function must be entirely self-contained. It must not use # variables or imports from the surrounding scope. def lldpd_wait(reference_file, time_delay): """Wait until `lldpd` has been running for `time_delay` seconds. On an Ubuntu system, `reference_file` is typically `lldpd`'s UNIX socket in `/var/run`. """ from os.path import getmtime time_ref = getmtime(reference_file) from time import sleep, time time_remaining = time_ref + time_delay - time() if time_remaining > 0: sleep(time_remaining) # This function must be entirely self-contained. It must not use # variables or imports from the surrounding scope. def lldpd_capture(): """Capture LLDP information from `lldpd` in XML form.""" from subprocess import check_call check_call(("lldpctl", "-f", "xml")) _xpath_routers = "/lldp//id[@type='mac']/text()" def extract_router_mac_addresses(raw_content): """Extract the routers' MAC Addresses from raw LLDP information.""" if not raw_content: return None assert isinstance(raw_content, bytes) parser = etree.XMLParser() doc = etree.XML(raw_content.strip(), parser) return doc.xpath(_xpath_routers) def set_node_routers(node, output, exit_status): """Process recently captured raw LLDP information. The list of the routers' MAC Addresses is extracted from the raw LLDP information and stored on the given node. If `exit_status` is non-zero, this function returns without doing anything. """ assert isinstance(output, bytes) if exit_status != 0: return routers = extract_router_mac_addresses(output) if routers is None: node.routers = None else: node.routers = [MAC(router) for router in routers] node.save() LIST_MODALIASES_OUTPUT_NAME = '00-maas-04-list-modaliases.out' LIST_MODALIASES_SCRIPT = \ 'find /sys -name modalias -print0 | xargs -0 cat | sort -u' def gather_physical_block_devices(dev_disk_byid='/dev/disk/by-id/'): """Gathers information about a node's physical block devices. The following commands are run in order to gather the required information. lsblk Gathers the initial block devices not including slaves or holders. Gets the name, read-only, removable, model, and whether it is rotary.
udevadm Grabs the device path, serial number, whether it is connected over SATA, and the rotational speed. blockdev Grabs the block size and size of the disk in bytes. """ import json import os import shlex from subprocess import check_output def _path_to_idpath(path): """Searches dev_disk_byid for a device symlinked to /dev/[path]""" if os.path.exists(dev_disk_byid): for link in os.listdir(dev_disk_byid): if os.path.exists(path) and os.path.samefile( os.path.join(dev_disk_byid, link), path): return os.path.join(dev_disk_byid, link) return None # Grab the block devices from lsblk. Excludes RAM devices # (default for lsblk), floppy disks, and loopback devices. blockdevs = [] block_list = check_output(( "lsblk", "--exclude", "1,2,7", "-d", "-P", "-o", "NAME,RO,RM,MODEL,ROTA")) for blockdev in block_list.splitlines(): tokens = shlex.split(blockdev) current_block = {} for token in tokens: k, v = token.split("=", 1) current_block[k] = v.strip() blockdevs.append(current_block) # Grab the device path, serial number, and sata connection. UDEV_MAPPINGS = { "DEVNAME": "PATH", "ID_SERIAL_SHORT": "SERIAL", "ID_ATA_SATA": "SATA", "ID_ATA_ROTATION_RATE_RPM": "RPM" } del_blocks = set() seen_devices = set() for block_info in blockdevs: # Some RAID devices return the name of the device separated with "!", # but udevadm expects it to be a "/". block_name = block_info["NAME"].replace("!", "/") udev_info = check_output( ("udevadm", "info", "-q", "all", "-n", block_name)) for info_line in udev_info.splitlines(): info_line = info_line.strip() if info_line == "": continue _, info = info_line.split(" ", 1) if "=" not in info: continue k, v = info.split("=", 1) if k in UDEV_MAPPINGS: block_info[UDEV_MAPPINGS[k]] = v.strip() if k == "ID_CDROM" and v == "1": # Remove any type of CDROM from the blockdevs, as we # cannot use this device for installation. del_blocks.add(block_name) break if block_name in del_blocks: continue # Skip duplicate (serial, model) for multipath. serial = block_info.get("SERIAL") if serial: model = block_info.get("MODEL", "").strip() if (serial, model) in seen_devices: del_blocks.add(block_name) continue seen_devices.add((serial, model)) # Remove any devices that need to be removed. blockdevs = [ block_info for block_info in blockdevs if block_info["NAME"] not in del_blocks ] # Grab the size of the device, block size and id-path. for block_info in blockdevs: block_path = block_info["PATH"] id_path = _path_to_idpath(block_path) if id_path is not None: block_info["ID_PATH"] = id_path device_size = check_output( ("blockdev", "--getsize64", block_path)) device_block_size = check_output( ("blockdev", "--getbsz", block_path)) block_info["SIZE"] = device_size.strip() block_info["BLOCK_SIZE"] = device_block_size.strip() # Output block device information in JSON. json_output = json.dumps(blockdevs, indent=True) print(json_output) def get_tags_from_block_info(block_info): """Return array of tags that will populate the `PhysicalBlockDevice`. Tags block devices for: rotary: Storage device with a spinning disk. ssd: Storage device with flash storage. removable: Storage device that can be easily removed like a USB flash drive. sata: Storage device that is connected over SATA. A rotation-rate tag such as '7200rpm' is also added when the RPM is known; see the example below.
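For instance, with a made-up `block_info` (values invented for illustration)::

    >>> get_tags_from_block_info(
    ...     {"ROTA": "1", "RM": "0", "SATA": "1", "RPM": "7200"})
    ['rotary', 'sata', '7200rpm']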
""" tags = [] if block_info["ROTA"] == "1": tags.append("rotary") else: tags.append("ssd") if block_info["RM"] == "1": tags.append("removable") if "SATA" in block_info and block_info["SATA"] == "1": tags.append("sata") if "RPM" in block_info and block_info["RPM"] != "0": tags.append("%srpm" % block_info["RPM"]) return tags def get_matching_block_device(block_devices, serial=None, id_path=None): """Return the matching block device based on `serial` or `id_path` from the provided list of `block_devices`.""" if serial: for block_device in block_devices: if block_device.serial == serial: return block_device elif id_path: for block_device in block_devices: if block_device.id_path == id_path: return block_device return None def update_node_physical_block_devices(node, output, exit_status): """Process the results of `gather_physical_block_devices`. This updates the physical block devices that are attached to a node. If `exit_status` is non-zero, this function returns without doing anything. """ assert isinstance(output, bytes) if exit_status != 0: return # Skip storage configuration if set by the user. if node.skip_storage: return try: blockdevs = json.loads(output) except ValueError as e: raise ValueError(e.message + ': ' + output) previous_block_devices = list( PhysicalBlockDevice.objects.filter(node=node).all()) for block_info in blockdevs: # Skip the read-only devices. We keep them in the output for # the user to view but they do not get an entry in the database. if block_info["RO"] == "1": continue name = block_info["NAME"] model = block_info.get("MODEL", "") serial = block_info.get("SERIAL", "") id_path = block_info.get("ID_PATH", "") if not id_path: # Fallback to the dev path if id_path missing. id_path = block_info["PATH"] size = long(block_info["SIZE"]) block_size = int(block_info["BLOCK_SIZE"]) tags = get_tags_from_block_info(block_info) block_device = get_matching_block_device( previous_block_devices, serial, id_path) if block_device is not None: # Already exists for the node. Keep the original object so the # ID doesn't change and if its set to the boot_disk that FK will # not need to be updated. previous_block_devices.remove(block_device) block_device.name = name block_device.model = model block_device.serial = serial block_device.id_path = id_path block_device.size = size block_device.block_size = block_size block_device.tags = tags block_device.save() else: # First check if there is an existing device with the same name. # If so, we need to rename it. Its name will be changed back later, # when we loop around to it. existing = PhysicalBlockDevice.objects.filter( node=node, name=name).all() for device in existing: # Use the device ID to ensure a unique temporary name. device.name = "%s.%d" % (device.name, device.id) device.save() # New block device. Create it on the node. PhysicalBlockDevice.objects.create( node=node, name=name, id_path=id_path, size=size, block_size=block_size, tags=tags, model=model, serial=serial, ) # Clear boot_disk if it is being removed. boot_disk = node.boot_disk if boot_disk is not None and boot_disk in previous_block_devices: boot_disk = None if node.boot_disk != boot_disk: node.boot_disk = boot_disk node.save() # Delete all the previous block devices that are no longer present # on the commissioned node. delete_block_device_ids = [ bd.id for bd in previous_block_devices ] if len(delete_block_device_ids) > 0: PhysicalBlockDevice.objects.filter( id__in=delete_block_device_ids).delete() def null_hook(node, output, exit_status): """Intentionally do nothing. 
Use this to explicitly ignore the response from a built-in commissioning script. """ # Built-in commissioning scripts. These go into the commissioning # tarball together with user-provided commissioning scripts. # To keep namespaces separated, names of the built-in scripts must be # prefixed with "00-maas-" or "99-maas-". # # The dictionary is keyed on the output filename that the script # produces. This is so it can be looked up later in the post-processing # hook. # # The contents of each dictionary entry are another dictionary with # keys: # "name" -> the script's name # "content" -> the actual script # "hook" -> a post-processing hook. # # The post-processing hook is a function that will be passed the node # and the raw content of the script's output, e.g. "hook(node, raw_content)" BUILTIN_COMMISSIONING_SCRIPTS = { LSHW_OUTPUT_NAME: { 'content': LSHW_SCRIPT.encode('ascii'), 'hook': update_hardware_details, }, '00-maas-02-virtuality.out': { 'content': VIRTUALITY_SCRIPT.encode('ascii'), 'hook': set_virtual_tag, }, '00-maas-03-install-lldpd.out': { 'content': make_function_call_script( lldpd_install, config_file="/etc/default/lldpd"), 'hook': null_hook, }, LIST_MODALIASES_OUTPUT_NAME: { 'content': LIST_MODALIASES_SCRIPT.encode('ascii'), 'hook': null_hook, }, '00-maas-06-dhcp-unconfigured-ifaces': { 'content': make_function_call_script(dhcp_explore), 'hook': null_hook, }, '00-maas-07-block-devices.out': { 'content': make_function_call_script(gather_physical_block_devices), 'hook': update_node_physical_block_devices, }, '99-maas-01-wait-for-lldpd.out': { 'content': make_function_call_script( lldpd_wait, "/var/run/lldpd.socket", time_delay=60), 'hook': null_hook, }, LLDP_OUTPUT_NAME: { 'content': make_function_call_script(lldpd_capture), 'hook': set_node_routers, }, '99-maas-03-network-interfaces.out': { 'content': IPADDR_SCRIPT.encode('ascii'), 'hook': update_node_network_information, }, } def add_names_to_scripts(scripts): """Derive script names from the script output filename. Designed for working with the `BUILTIN_COMMISSIONING_SCRIPTS` structure. """ for output_name, config in scripts.items(): if "name" not in config: script_name = os.path.basename(output_name) script_name, _ = os.path.splitext(script_name) config["name"] = script_name add_names_to_scripts(BUILTIN_COMMISSIONING_SCRIPTS) def add_script_to_archive(tarball, name, content, mtime): """Add a commissioning script to an archive of commissioning scripts.""" assert isinstance(content, bytes), "Script content must be binary." tarinfo = tarfile.TarInfo(name=os.path.join(ARCHIVE_PREFIX, name)) tarinfo.size = len(content) # Mode 0755 means: u=rwx,go=rx tarinfo.mode = 0755 # Modification time defaults to Epoch, which elicits annoying # warnings when decompressing. tarinfo.mtime = mtime tarball.addfile(tarinfo, BytesIO(content)) class CommissioningScriptManager(Manager): """Utility for the collection of `CommissioningScript`s.""" def _iter_builtin_scripts(self): for script in BUILTIN_COMMISSIONING_SCRIPTS.itervalues(): yield script['name'], script['content'] def _iter_user_scripts(self): for script in self.all(): yield script.name, script.content def _iter_scripts(self): return chain( self._iter_builtin_scripts(), self._iter_user_scripts()) def get_archive(self): """Produce a tar archive of all commissioning scripts. Each of the scripts will be in the `ARCHIVE_PREFIX` directory. 
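The archive can be unpacked with the standard library; a minimal sketch::

    import io
    import tarfile

    data = CommissioningScript.objects.get_archive()
    with tarfile.open(fileobj=io.BytesIO(data)) as tarball:
        # Each member is named 'commissioning.d/<script name>'.
        print(tarball.getnames())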
""" binary = BytesIO() scripts = sorted(self._iter_scripts()) with tarfile.open(mode='w', fileobj=binary) as tarball: add_script = partial(add_script_to_archive, tarball, mtime=now()) for name, content in scripts: add_script(name, content) return binary.getvalue() class CommissioningScript(Model): """User-provided commissioning script. Actually a commissioning "script" could be a binary, e.g. because a hardware vendor supplied an update in the form of a binary executable. """ class Meta(DefaultMeta): """Needed for South to recognize this model.""" objects = CommissioningScriptManager() name = CharField(max_length=255, null=False, editable=True, unique=True) content = BinaryField(null=False) def inject_result(node, name, output, exit_status=0): """Inject a `name` result and trigger related hooks, if any. `output` and `exit_status` are recorded as `NodeResult` instances with the `name` given. A built-in hook is then searched for; if found, it is invoked. """ assert isinstance(output, bytes) NodeResult.objects.store_data( node, name, script_result=exit_status, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(output)) if name in BUILTIN_COMMISSIONING_SCRIPTS: postprocess_hook = BUILTIN_COMMISSIONING_SCRIPTS[name]['hook'] postprocess_hook(node=node, output=output, exit_status=exit_status) def inject_lshw_result(node, output, exit_status=0): """Convenience to call `inject_result(name=LSHW_OUTPUT_NAME, ...)`.""" return inject_result(node, LSHW_OUTPUT_NAME, output, exit_status) def inject_lldp_result(node, output, exit_status=0): """Convenience to call `inject_result(name=LLDP_OUTPUT_NAME, ...)`.""" return inject_result(node, LLDP_OUTPUT_NAME, output, exit_status) maas-1.9.5+bzr4599.orig/src/metadataserver/models/nodekey.py0000644000000000000000000001055713056115004021754 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """:class:`NodeKey` model.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'NodeKey', ] from django.db.models import ( CharField, ForeignKey, Manager, Model, ) from maasserver.models.cleansave import CleanSave from maasserver.models.user import create_auth_token from maasserver.utils.orm import get_one from metadataserver import DefaultMeta from metadataserver.nodeinituser import get_node_init_user from piston.models import KEY_SIZE class NodeKeyManager(Manager): """Utility for the collection of NodeKeys. Each Node that needs to access the metadata service will have its own OAuth token, tied to the dedicated "node-init" user. Each node will see just its own meta-data when it accesses the service. NodeKeyManager is what connects those nodes to their respective tokens. There's two parts to using NodeKey and NodeKeyManager: 1. get_token_for_node(node) gives you a token that the node can then access the metadata service with. From the "token" that this returns, the node will need to know token.key, token.secret, and token.consumer.key for its credentials. 2. get_node_for_key(key) takes the token.key (which will be in the http Authorization header of a metadata request as "oauth_token") and looks up the associated Node. """ def _create_token(self, node): """Create an OAuth token for a given node. :param node: The system that is to be allowed access to the metadata service. :type node: Node :return: Token for the node to use. 
:rtype: piston.models.Token """ token = create_auth_token(get_node_init_user()) self.create(node=node, token=token, key=token.key) return token def get_token_for_node(self, node): """Find node's OAuth token, or if it doesn't have one, create it. This implicitly grants cloud-init (running on the node) access to the metadata service. Barring exceptions, this will always hold: get_node_for_key(get_token_for_node(node).key) == node :param node: The node that needs an oauth token for access to the metadata service. :type node: Node :return: An OAuth token, belonging to the node-init user, but uniquely associated with this node. :rtype: piston.models.Token """ nodekey = get_one(self.filter(node=node)) if nodekey is None: return self._create_token(node) else: return nodekey.token def clear_token_for_node(self, node): """Find node's OAuth token; if there is one, delete it. :param node: The node that needs an oauth token for access to the metadata service. :type node: Node """ nodekey = get_one(self.filter(node=node)) if nodekey is not None: # Django emulates ON DELETE CASCADE by default which is about as # sensible as eating live wasps. It means that deleting the token # will delete `nodekey` implicitly. nodekey.token.delete() def get_node_for_key(self, key): """Find the Node that `key` was created for. Barring exceptions, this will always hold: get_token_for_node(get_node_for_key(key)).key == key :param key: The key part of a node's OAuth token. :type key: unicode :raise NodeKey.DoesNotExist: if `key` is not associated with any node. """ return self.get(key=key).node class NodeKey(CleanSave, Model): """Associate a Node with its OAuth (token) key. :ivar node: A Node. :ivar key: A key, to be used by `node` for logging in. The key belongs to the maas-init-node user. """ class Meta(DefaultMeta): """Needed for South to recognize this model.""" objects = NodeKeyManager() node = ForeignKey( 'maasserver.Node', null=False, editable=False, unique=True) token = ForeignKey( 'piston.Token', null=False, editable=False, unique=True) key = CharField( max_length=KEY_SIZE, null=False, editable=False, unique=True) maas-1.9.5+bzr4599.orig/src/metadataserver/models/noderesult.py0000644000000000000000000001052213056115004022472 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """:class:`NodeResult` model.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'NodeResult', ] from django.db.models import ( CharField, ForeignKey, IntegerField, Manager, ) from django.shortcuts import get_object_or_404 from django.utils.html import escape from maasserver.models.cleansave import CleanSave from maasserver.models.timestampedmodel import TimestampedModel from maasserver.utils.converters import XMLToYAML from metadataserver import DefaultMeta from metadataserver.enum import ( RESULT_TYPE, RESULT_TYPE_CHOICES, ) from metadataserver.fields import BinaryField class NodeResultManager(Manager): """Utility to manage a collection of :class:`NodeResult`s.""" def clear_results(self, node): """Remove all existing results for a node.""" self.filter(node=node).delete() def store_data(self, node, name, script_result, result_type, data): """Store data about a node. :param node: The node that this result pertains to. 
:type node: :class:`maasserver.models.Node`
        :param name: The name of this result, typically the name of the
            commissioning script that generated it.
        :type name: string
        :param script_result: The exit code of the commissioning script.
        :type script_result: int
        :param result_type: The enum value for either commissioning (0) or
            installing (1).
        :type result_type: int
        :param data: The raw binary output of the commissioning script.
        :type data: :class:`metadataserver.fields.Bin`
        """
        existing, created = self.get_or_create(
            node=node, name=name,
            defaults=dict(
                script_result=script_result,
                result_type=result_type,
                data=data))
        if not created:
            existing.script_result = script_result
            existing.result_type = result_type
            existing.data = data
            existing.save()
        return existing

    def get_data(self, node, name):
        """Get data about a node."""
        ncr = get_object_or_404(NodeResult, node=node, name=name)
        return ncr.data


class NodeResult(CleanSave, TimestampedModel):
    """Storage for data returned from node commissioning/installation.

    Commissioning/Installing a node results in various bits of data that
    need to be stored, such as lshw output.  This model allows storing of
    this data, under an arbitrary name, for later retrieval.

    :ivar node: The context :class:`Node`.
    :ivar script_result: If this data results from the execution of a
        script, this is the status of this execution.  This can be "OK",
        "FAILED" or "WORKING" for progress reports.
    :ivar result_type: This can be either commissioning or installation.
    :ivar name: A unique name to use for the data being stored.
    :ivar data: The file's actual data, stored as raw bytes.
    """

    class Meta(DefaultMeta):
        unique_together = ('node', 'name')

    objects = NodeResultManager()

    node = ForeignKey(
        'maasserver.Node', null=False, editable=False, unique=False)
    script_result = IntegerField(editable=False)
    result_type = IntegerField(
        choices=RESULT_TYPE_CHOICES, editable=False,
        default=RESULT_TYPE.COMMISSIONING)
    name = CharField(max_length=255, unique=False, editable=False)
    data = BinaryField(
        max_length=1024 * 1024, editable=True, blank=True, default=b'',
        null=False)

    def __unicode__(self):
        return "%s/%s" % (self.node.system_id, self.name)

    def get_data_as_html(self):
        """More-or-less human-readable HTML representation of the output."""
        return escape(self.data.decode('utf-8', 'replace'))

    def get_data_as_yaml_html(self):
        """More-or-less human-readable YAML HTML representation of the
        output.
        """
        from metadataserver.models.commissioningscript import (
            LLDP_OUTPUT_NAME,
            LSHW_OUTPUT_NAME,
        )
        if self.name in (LLDP_OUTPUT_NAME, LSHW_OUTPUT_NAME):
            return escape(XMLToYAML(self.data).convert())
maas-1.9.5+bzr4599.orig/src/metadataserver/models/nodeuserdata.py0000644000000000000000000000520113056115004022762 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Node user-data for cloud-init's use."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'NodeUserData',
    ]

from django.db.models import (
    ForeignKey,
    Manager,
    Model,
    )
from maasserver.models.cleansave import CleanSave
from metadataserver import DefaultMeta
from metadataserver.fields import (
    Bin,
    BinaryField,
    )


class NodeUserDataManager(Manager):
    """Utility for the collection of NodeUserData items."""

    def set_user_data(self, node, data):
        """Set user data for the given node.

        If `data` is None, remove user data for the node.
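
        A call sketch (the payload is illustrative)::

            NodeUserData.objects.set_user_data(node, b"#cloud-config\n...")
            NodeUserData.objects.set_user_data(node, None)  # removes it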
""" if data is None: self._remove(node) else: self._set(node, data) def get_user_data(self, node): """Retrieve user data for the given node.""" return self.get(node=node).data def has_user_data(self, node): """Do we have user data registered for node?""" return self.filter(node=node).exists() def _set(self, node, data): """Set actual user data for a node. Not usable if data is None.""" wrapped_data = Bin(data) (existing_entry, created) = self.get_or_create( node=node, defaults={'data': wrapped_data}) if not created: existing_entry.data = wrapped_data existing_entry.save() def _remove(self, node): """Remove metadata from node, if it has any any.""" self.filter(node=node).delete() def bulk_set_user_data(self, nodes, data): """Set the user data for the given nodes in bulk. This is more efficient than calling `set_user_data` on each node. """ self.filter(node__in=nodes).delete() if data is not None: self.bulk_create(( self.model(node=node, data=Bin(data)) for node in nodes )) class NodeUserData(CleanSave, Model): """User-data portion of a node's metadata. When cloud-init sets up a node, it retrieves specific data for that node from the metadata service. One portion of that is the "user-data" binary blob. :ivar node: Node that this is for. :ivar data: base64-encoded data. """ class Meta(DefaultMeta): """Needed for South to recognize this model.""" objects = NodeUserDataManager() node = ForeignKey( 'maasserver.Node', null=False, editable=False, unique=True) data = BinaryField(null=False) maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/0000755000000000000000000000000013056115004021076 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/__init__.py0000644000000000000000000000000013056115004023175 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/ip_addr_results.txt0000644000000000000000000000241113056115004025020 0ustar 000000000000001: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: eth0: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 link/ether 00:00:00:00:00:01 brd ff:ff:ff:ff:ff:ff inet 192.168.0.3/24 brd 192.168.0.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::3e97:efe:fe0e:56dc/64 scope link valid_lft forever preferred_lft forever inet6 2001:db8:a::123/64 scope link valid_lft forever preferred_lft forever 3: eth1: mtu 1500 qdisc mq state DOWN mode DORMANT group default qlen 1000 link/ether 00:00:00:00:00:02 brd ff:ff:ff:ff:ff:ff inet 172.17.42.1/16 scope global eth1 valid_lft forever preferred_lft forever 3: eth2: mtu 1500 qdisc mq state DOWN mode DORMANT group default qlen 1000 link/ether 00:00:00:00:00:03 brd ff:ff:ff:ff:ff:ff inet 172.17.12.1/16 scope global eth2 valid_lft forever preferred_lft forever maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/test_commissioningscript.py0000644000000000000000000020613013056115004026614 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test custom commissioning scripts.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import doctest from functools import partial from inspect import getsource from io import BytesIO import json from math import ( ceil, floor, ) import os.path import random from random import randint import subprocess from subprocess import ( CalledProcessError, check_output, STDOUT, ) import tarfile from textwrap import dedent import time from fixtures import FakeLogger from maasserver.enum import ( INTERFACE_TYPE, IPADDRESS_TYPE, ) from maasserver.fields import MAC from maasserver.models.blockdevice import MIN_BLOCK_DEVICE_SIZE from maasserver.models.interface import Interface from maasserver.models.physicalblockdevice import PhysicalBlockDevice from maasserver.models.tag import Tag from maasserver.models.vlan import VLAN from maasserver.testing.factory import factory from maasserver.testing.orm import reload_object from maasserver.testing.testcase import ( MAASServerTestCase, TestWithoutCrochetMixin, ) from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.utils import sample_binary_data from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import ( CommissioningScript, commissioningscript as cs_module, ) from metadataserver.models.commissioningscript import ( ARCHIVE_PREFIX, extract_router_mac_addresses, inject_lldp_result, inject_lshw_result, inject_result, LLDP_OUTPUT_NAME, LSHW_OUTPUT_NAME, make_function_call_script, set_node_routers, set_virtual_tag, update_hardware_details, update_node_network_information, update_node_physical_block_devices, ) from metadataserver.models.noderesult import NodeResult from mock import ( call, create_autospec, Mock, sentinel, ) from netaddr import IPNetwork from testtools.content import text_content from testtools.matchers import ( Contains, ContainsAll, DocTestMatches, Equals, MatchesStructure, Not, ) def open_tarfile(content): """Open tar file from raw binary data.""" return tarfile.open(fileobj=BytesIO(content)) def make_script_name(base_name=None, number=None): """Make up a name for a commissioning script.""" if base_name is None: base_name = 'script' if number is None: number = randint(0, 99) return factory.make_name( '%0.2d-%s' % (number, factory.make_name(base_name))) class TestCommissioningScriptManager(MAASServerTestCase): def test_get_archive_wraps_scripts_in_tar(self): script = factory.make_CommissioningScript() path = os.path.join(ARCHIVE_PREFIX, script.name) archive = open_tarfile(CommissioningScript.objects.get_archive()) self.assertTrue(archive.getmember(path).isfile()) self.assertEqual(script.content, archive.extractfile(path).read()) def test_get_archive_wraps_all_scripts(self): scripts = {factory.make_CommissioningScript() for counter in range(3)} archive = open_tarfile(CommissioningScript.objects.get_archive()) self.assertThat( archive.getnames(), ContainsAll({ os.path.join(ARCHIVE_PREFIX, script.name) for script in scripts })) def test_get_archive_supports_binary_scripts(self): script = factory.make_CommissioningScript(content=sample_binary_data) path = os.path.join(ARCHIVE_PREFIX, script.name) archive = open_tarfile(CommissioningScript.objects.get_archive()) self.assertEqual(script.content, archive.extractfile(path).read()) def test_get_archive_includes_builtin_scripts(self): name = factory.make_name('00-maas') path = os.path.join(ARCHIVE_PREFIX, name) content = 
factory.make_string().encode('ascii') data = dict(name=name, content=content, hook='hook') self.patch(cs_module, 'BUILTIN_COMMISSIONING_SCRIPTS', {name: data}) archive = open_tarfile(CommissioningScript.objects.get_archive()) self.assertIn(path, archive.getnames()) self.assertEqual(content, archive.extractfile(path).read()) def test_get_archive_sets_sensible_mode(self): for counter in range(3): factory.make_CommissioningScript() archive = open_tarfile(CommissioningScript.objects.get_archive()) self.assertEqual({0755}, {info.mode for info in archive.getmembers()}) def test_get_archive_initializes_file_timestamps(self): # The mtime on a file inside the tarball is reasonable. # It would otherwise default to the Epoch, and GNU tar warns # annoyingly about improbably old files. start_time = floor(time.time()) script = factory.make_CommissioningScript() path = os.path.join(ARCHIVE_PREFIX, script.name) archive = open_tarfile(CommissioningScript.objects.get_archive()) timestamp = archive.getmember(path).mtime end_time = ceil(time.time()) self.assertGreaterEqual(timestamp, start_time) self.assertLessEqual(timestamp, end_time) class TestCommissioningScript(MAASServerTestCase): def test_scripts_may_be_binary(self): name = make_script_name() CommissioningScript.objects.create( name=name, content=Bin(sample_binary_data)) stored_script = CommissioningScript.objects.get(name=name) self.assertEqual(sample_binary_data, stored_script.content) class TestMakeFunctionCallScript(MAASServerTestCase): def run_script(self, script): script_filename = self.make_file("test.py", script) os.chmod(script_filename, 0700) try: return check_output((script_filename,), stderr=STDOUT) except CalledProcessError as error: self.addDetail("output", text_content(error.output)) raise def test_basic(self): def example_function(): print("Hello, World!", end="") script = make_function_call_script(example_function) self.assertEqual(b"Hello, World!", self.run_script(script)) def test_positional_args_get_passed_through(self): def example_function(a, b): print("a=%s, b=%d" % (a, b), end="") script = make_function_call_script(example_function, "foo", 12345) self.assertEqual(b"a=foo, b=12345", self.run_script(script)) def test_keyword_args_get_passed_through(self): def example_function(a, b): print("a=%s, b=%d" % (a, b), end="") script = make_function_call_script(example_function, a="foo", b=12345) self.assertEqual(b"a=foo, b=12345", self.run_script(script)) def test_positional_and_keyword_args_get_passed_through(self): def example_function(a, b): print("a=%s, b=%d" % (a, b), end="") script = make_function_call_script(example_function, "foo", b=12345) self.assertEqual(b"a=foo, b=12345", self.run_script(script)) def test_non_ascii_positional_args_are_passed_without_corruption(self): def example_function(text): print(repr(text), end="") script = make_function_call_script(example_function, "abc\u1234") self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) def test_non_ascii_keyword_args_are_passed_without_corruption(self): def example_function(text): print(repr(text), end="") script = make_function_call_script(example_function, text="abc\u1234") self.assertEqual(b"u'abc\\u1234'", self.run_script(script)) def test_structured_arguments_are_passed_though_too(self): # Anything that can be JSON serialized can be passed. 
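        # For instance (illustrative; f and depth are hypothetical), both
        # of these would round-trip intact:
        #     make_function_call_script(f, [1, 2, 3])
        #     make_function_call_script(f, {"nested": {"ok": True}}, depth=2)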
def example_function(arg): if arg == {"123": "foo", "bar": [4, 5, 6]}: print("Equal") else: print("Unequal, got %s" % repr(arg)) script = make_function_call_script( example_function, {"123": "foo", "bar": [4, 5, 6]}) self.assertEqual(b"Equal\n", self.run_script(script)) def isolate_function(function, namespace=None): """Recompile the given function in the given namespace. :param namespace: A dict to use as the namespace. If not provided, and empty namespace will be used. """ source = dedent(getsource(function)) modcode = compile(source, "isolated.py", "exec") namespace = {} if namespace is None else namespace exec(modcode, namespace) return namespace[function.__name__] class TestLLDPScripts(TestWithoutCrochetMixin, MAASServerTestCase): def test_install_script_installs_configures_and_restarts(self): config_file = self.make_file("config", "# ...") check_call = self.patch(subprocess, "check_call") lldpd_install = isolate_function(cs_module.lldpd_install) lldpd_install(config_file) # lldpd is installed and restarted. self.assertEqual( check_call.call_args_list, [ call(("apt-get", "install", "--yes", "lldpd")), call(("initctl", "reload-configuration")), call(("service", "lldpd", "restart")) ]) # lldpd's config was updated to include an updated DAEMON_ARGS # setting. Note that the new comment is on a new line, and # does not interfere with existing config. config_expected = dedent("""\ # ... # Configured by MAAS: DAEMON_ARGS="-c -f -s -e -r" """).encode("ascii") with open(config_file, "rb") as fd: config_observed = fd.read() self.assertEqual(config_expected, config_observed) def test_wait_script_waits_for_lldpd(self): reference_file = self.make_file("reference") time_delay = 8.98 # seconds lldpd_wait = isolate_function(cs_module.lldpd_wait) # Do the patching as late as possible, because the setup may call # one of the patched functions somewhere in the plumbing. We've had # spurious test failures over this: bug 1283918. self.patch(os.path, "getmtime").return_value = 10.65 self.patch(time, "time").return_value = 14.12 self.patch(time, "sleep") lldpd_wait(reference_file, time_delay) # lldpd_wait checks the mtime of the reference file, self.assertThat(os.path.getmtime, MockCalledOnceWith(reference_file)) # and gets the current time, self.assertThat(time.time, MockCalledOnceWith()) # then sleeps until time_delay seconds has passed since the # mtime of the reference file. self.assertThat(time.sleep, MockCalledOnceWith( os.path.getmtime.return_value + time_delay - time.time.return_value)) def test_capture_calls_lldpdctl(self): check_call = self.patch(subprocess, "check_call") lldpd_capture = isolate_function(cs_module.lldpd_capture) lldpd_capture() self.assertEqual( check_call.call_args_list, [call(("lldpctl", "-f", "xml"))]) lldp_output_template = """ %s """ lldp_output_interface_template = """ %s switch-name HDFD5BG7J 192.168.9.9 """ def make_lldp_output(macs): """Return an example raw lldp output containing the given MACs.""" interfaces = '\n'.join( lldp_output_interface_template % mac for mac in macs ) script = (lldp_output_template % interfaces).encode('utf8') return bytes(script) # The two following example outputs differ because eth2 and eth1 are not # configured and thus 'ifconfig -s -a' returns a list with both 'eth1' # and 'eth2' while 'ifconfig -s' does not contain them. 
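#
# dhcp_explore effectively diffs the two listings and asks dhclient to
# configure whatever is missing; a rough sketch (parse() is a hypothetical
# helper that pulls interface names out of the ifconfig listing):
#
#     all_ifaces = parse(check_output(("ifconfig", "-s", "-a")))
#     configured = parse(check_output(("ifconfig", "-s")))
#     for iface in set(all_ifaces) - set(configured):
#         subprocess.call(["dhclient", "-nw", iface])
#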
# Example output of 'ifconfig -s -a': ifconfig_all = """ Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP eth2 1500 0 0 0 0 0 0 0 eth1 1500 0 0 0 0 0 0 0 eth0 1500 0 1366127 0 0 0 831110 0 lo 65536 0 38075 0 0 0 38075 0 virbr0 1500 0 0 0 0 0 0 0 wlan0 1500 0 2304695 0 0 0 1436049 0 """ # Example output of 'ifconfig -s': ifconfig_config = """ Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP eth0 1500 0 1366127 0 0 0 831110 0 lo 65536 0 38115 0 0 0 38115 0 virbr0 1500 0 0 0 0 0 0 0 wlan0 1500 0 2304961 0 0 0 1436319 0 """ class TestDHCPExplore(MAASServerTestCase): def test_calls_dhclient_on_unconfigured_interfaces(self): check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ifconfig_all, ifconfig_config] mock_call = self.patch(subprocess, "call") dhcp_explore = isolate_function(cs_module.dhcp_explore) dhcp_explore() self.assertThat( mock_call, MockCallsMatch( call(["dhclient", "-nw", 'eth1']), call(["dhclient", "-nw", 'eth2']))) class TestExtractRouters(MAASServerTestCase): def test_extract_router_mac_addresses_returns_None_when_empty_input(self): self.assertIsNone(extract_router_mac_addresses('')) def test_extract_router_mac_addresses_returns_empty_list(self): lldp_output = make_lldp_output([]) self.assertItemsEqual([], extract_router_mac_addresses(lldp_output)) def test_extract_router_mac_addresses_returns_routers_list(self): macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] lldp_output = make_lldp_output(macs) routers = extract_router_mac_addresses(lldp_output) self.assertItemsEqual(macs, routers) class TestSetNodeRouters(MAASServerTestCase): def test_set_node_routers_updates_node(self): node = factory.make_Node(routers=None) macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] lldp_output = make_lldp_output(macs) set_node_routers(node, lldp_output, 0) self.assertItemsEqual( [MAC(mac) for mac in macs], reload_object(node).routers) def test_set_node_routers_updates_node_if_no_routers(self): node = factory.make_Node() lldp_output = make_lldp_output([]) set_node_routers(node, lldp_output, 0) self.assertItemsEqual([], reload_object(node).routers) def test_set_node_routers_does_nothing_if_script_failed(self): node = factory.make_Node() routers_before = node.routers macs = ["11:22:33:44:55:66", "aa:bb:cc:dd:ee:ff"] lldp_output = make_lldp_output(macs) set_node_routers(node, lldp_output, exit_status=1) routers_after = reload_object(node).routers self.assertItemsEqual(routers_before, routers_after) class TestInjectResult(MAASServerTestCase): def test_inject_result_stores_data(self): node = factory.make_Node() name = factory.make_name("result") output = factory.make_bytes() exit_status = next(factory.random_octets) inject_result(node, name, output, exit_status) self.assertThat( NodeResult.objects.get(node=node, name=name), MatchesStructure.byEquality( node=node, name=name, script_result=exit_status, result_type=RESULT_TYPE.COMMISSIONING, data=output)) def test_inject_result_calls_hook(self): node = factory.make_Node() name = factory.make_name("result") output = factory.make_bytes() exit_status = next(factory.random_octets) hook = Mock() self.patch( cs_module, "BUILTIN_COMMISSIONING_SCRIPTS", {name: {"hook": hook}}) inject_result(node, name, output, exit_status) self.assertThat(hook, MockCalledOnceWith( node=node, output=output, exit_status=exit_status)) def inject_lshw_result(self): # inject_lshw_result() just calls through to inject_result(). 
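        # (create_autospec keeps the real inject_result() signature, so a
        # passthrough call with the wrong arguments would fail here rather
        # than pass silently.)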
inject_result = self.patch( cs_module, "inject_result", create_autospec(cs_module.inject_result)) inject_lshw_result(sentinel.node, sentinel.output, sentinel.status) self.assertThat(inject_result, MockCalledOnceWith( sentinel.node, LSHW_OUTPUT_NAME, sentinel.output, sentinel.status)) def inject_lldp_result(self): # inject_lldp_result() just calls through to inject_result(). inject_result = self.patch( cs_module, "inject_result", create_autospec(cs_module.inject_result)) inject_lldp_result(sentinel.node, sentinel.output, sentinel.status) self.assertThat(inject_result, MockCalledOnceWith( sentinel.node, LLDP_OUTPUT_NAME, sentinel.output, sentinel.status)) class TestSetVirtualTag(MAASServerTestCase): def getVirtualTag(self): virtual_tag, _ = Tag.objects.get_or_create(name='virtual') return virtual_tag def assertTagsEqual(self, node, tags): self.assertItemsEqual( tags, [tag.name for tag in node.tags.all()]) def test_sets_virtual_tag(self): node = factory.make_Node() self.assertTagsEqual(node, []) set_virtual_tag(node, b"virtual", 0) self.assertTagsEqual(node, ["virtual"]) def test_removes_virtual_tag(self): node = factory.make_Node() node.tags.add(self.getVirtualTag()) self.assertTagsEqual(node, ["virtual"]) set_virtual_tag(node, b"notvirtual", 0) self.assertTagsEqual(node, []) def test_output_not_containing_virtual_does_not_set_tag(self): logger = self.useFixture(FakeLogger()) node = factory.make_Node() self.assertTagsEqual(node, []) set_virtual_tag(node, b"wibble", 0) self.assertTagsEqual(node, []) self.assertIn( "Neither 'virtual' nor 'notvirtual' appeared in the captured " "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, logger.output) def test_output_not_containing_virtual_does_not_remove_tag(self): logger = self.useFixture(FakeLogger()) node = factory.make_Node() node.tags.add(self.getVirtualTag()) self.assertTagsEqual(node, ["virtual"]) set_virtual_tag(node, b"wibble", 0) self.assertTagsEqual(node, ["virtual"]) self.assertIn( "Neither 'virtual' nor 'notvirtual' appeared in the captured " "VIRTUALITY_SCRIPT output for node %s.\n" % node.system_id, logger.output) class TestUpdateHardwareDetails(MAASServerTestCase): doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE def test_hardware_updates_cpu_count(self): node = factory.make_Node() xmlbytes = dedent("""\ """).encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) self.assertEqual(2, node.cpu_count) def test_cpu_count_counts_multi_cores(self): node = factory.make_Node() xmlbytes = dedent("""\ """).encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) self.assertEqual(5, node.cpu_count) def test_cpu_count_skips_disabled_cpus(self): node = factory.make_Node() xmlbytes = dedent("""\ """).encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) self.assertEqual(1, node.cpu_count) def test_hardware_updates_memory(self): node = factory.make_Node() xmlbytes = dedent("""\ 4294967296 """).encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) self.assertEqual(4096, node.memory) def test_hardware_updates_memory_lenovo(self): node = factory.make_Node() xmlbytes = dedent("""\ 4294967296 3221225472 536870912 """).encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) mega = 2 ** 20 expected = (4294967296 + 3221225472 + 536879812) / mega self.assertEqual(expected, node.memory) def test_hardware_updates_ignores_empty_tags(self): # Tags with empty definitions are ignored when # 
update_hardware_details gets called. factory.make_Tag(definition='') node = factory.make_Node() node.save() xmlbytes = ''.encode("utf-8") update_hardware_details(node, xmlbytes, 0) node = reload_object(node) # The real test is that update_hardware_details does not blow # up, see bug 1131418. self.assertEqual([], list(node.tags.all())) def test_hardware_updates_logs_invalid_xml(self): logger = self.useFixture(FakeLogger()) update_hardware_details(factory.make_Node(), b"garbage", 0) expected_log = dedent("""\ Invalid lshw data. Traceback (most recent call last): ... XMLSyntaxError: Start tag expected, '<' not found, line 1, column 1 """) self.assertThat( logger.output, DocTestMatches( expected_log, self.doctest_flags)) def test_hardware_updates_does_nothing_when_exit_status_is_not_zero(self): logger = self.useFixture(FakeLogger(name='commissioningscript')) update_hardware_details(factory.make_Node(), b"garbage", exit_status=1) self.assertEqual("", logger.output) class TestGatherPhysicalBlockDevices(MAASServerTestCase): def make_lsblk_output( self, name=None, read_only=False, removable=False, model=None, rotary=True): if name is None: name = factory.make_name('name') if model is None: model = factory.make_name('model') read_only = "1" if read_only else "0" removable = "1" if removable else "0" rotary = "1" if rotary else "0" return 'NAME="%s" RO="%s" RM="%s" MODEL="%s" ROTA="%s"' % ( name, read_only, removable, model, rotary) def make_udevadm_output( self, name, serial=None, sata=True, cdrom=False, dev='/dev'): if serial is None: serial = factory.make_name('serial') sata = "1" if sata else "0" output = dedent("""\ P: /devices/pci0000:00/ata3/host2/target2:0:0/2:0:0:0/block/{name} N: {name} E: DEVNAME={dev}/{name} E: DEVTYPE=disk E: ID_ATA_SATA={sata} E: ID_SERIAL_SHORT={serial} """).format(dev=os.path.abspath(dev), name=name, serial=serial, sata=sata) if cdrom: output += "E: ID_CDROM=1" else: output += "E: ID_ATA_ROTATION_RATE_RPM=5400" return output def call_gather_physical_block_devices( self, dev_disk_byid='/dev/disk/by-id/'): output = BytesIO() namespace = {"print": partial(print, file=output)} gather_physical_block_devices = isolate_function( cs_module.gather_physical_block_devices, namespace) gather_physical_block_devices(dev_disk_byid=dev_disk_byid) return json.loads(output.getvalue()) def test__calls_lsblk(self): check_output = self.patch(subprocess, "check_output") check_output.return_value = "" self.call_gather_physical_block_devices() self.assertThat(check_output, MockCalledOnceWith(( "lsblk", "--exclude", "1,2,7", "-d", "-P", "-o", "NAME,RO,RM,MODEL,ROTA"))) def test__returns_empty_list_when_no_disks(self): check_output = self.patch(subprocess, "check_output") check_output.return_value = "" self.assertEquals([], self.call_gather_physical_block_devices()) def test__calls_lsblk_then_udevadm(self): name = factory.make_name('name') check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output( name=name), self.make_udevadm_output( name, cdrom=True), ] self.call_gather_physical_block_devices() self.assertThat(check_output, MockCallsMatch( call(( "lsblk", "--exclude", "1,2,7", "-d", "-P", "-o", "NAME,RO,RM,MODEL,ROTA")), call(("udevadm", "info", "-q", "all", "-n", name)))) def test__returns_empty_list_when_cdrom_only(self): name = factory.make_name('name') check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output( name=name), self.make_udevadm_output( name, cdrom=True), ] self.assertEquals([], 
self.call_gather_physical_block_devices()) def test__calls_lsblk_udevadm_then_blockdev(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output(name=name, model=model), self.make_udevadm_output(name, serial=serial), '%s' % size, '%s' % block_size, ] self.call_gather_physical_block_devices() self.assertThat(check_output, MockCallsMatch( call(( "lsblk", "--exclude", "1,2,7", "-d", "-P", "-o", "NAME,RO,RM,MODEL,ROTA")), call(("udevadm", "info", "-q", "all", "-n", name)), call(("blockdev", "--getsize64", "/dev/%s" % name)), call(("blockdev", "--getbsz", "/dev/%s" % name)))) def test__returns_block_device(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") # Create simulated /dev tree devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) os.symlink(os.path.join(devroot, name), os.path.join(byidroot, 'deviceid')) check_output.side_effect = [ self.make_lsblk_output(name=name, model=model), self.make_udevadm_output(name, serial=serial, dev=devroot), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": os.path.join(devroot, name), "ID_PATH": os.path.join(byidroot, 'deviceid'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__removes_duplicate_block_device_same_serial_and_model(self): """Multipath disks get multiple IDs, but same serial/model is same device and should only be enumerated once.""" name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") name2 = factory.make_name('name') # Create simulated /dev tree. 
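        # Both device nodes get by-id links below, but they share SERIAL
        # and MODEL, so the gatherer should fold them into a single entry:
        # one physical disk reachable over two paths.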
devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) os.symlink(os.path.join(devroot, name), os.path.join(byidroot, 'deviceid')) os.mknod(os.path.join(devroot, name2)) os.symlink(os.path.join(devroot, name2), os.path.join(byidroot, 'deviceid2')) check_output.side_effect = [ b"\n".join([ self.make_lsblk_output(name=name, model=model), self.make_lsblk_output(name=name2, model=model)]), self.make_udevadm_output(name, serial=serial, dev=devroot), self.make_udevadm_output(name2, serial=serial, dev=devroot), b'%d' % size, b'%d' % block_size, b'%d' % size, b'%d' % block_size, ] self.assertEqual([{ "NAME": name, "PATH": os.path.join(devroot, name), "ID_PATH": os.path.join(byidroot, 'deviceid'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__removes_duplicate_block_device_same_serial_blank_model(self): """Multipath disks get multiple IDs, but same serial is same device.""" name = factory.make_name('name') model = "" serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") name2 = factory.make_name('name') # Create simulated /dev tree. devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) os.symlink(os.path.join(devroot, name), os.path.join(byidroot, 'deviceid')) os.mknod(os.path.join(devroot, name2)) os.symlink(os.path.join(devroot, name2), os.path.join(byidroot, 'deviceid2')) check_output.side_effect = [ b"\n".join([ self.make_lsblk_output(name=name, model=model), self.make_lsblk_output(name=name2, model=model)]), self.make_udevadm_output(name, serial=serial, dev=devroot), self.make_udevadm_output(name2, serial=serial, dev=devroot), b'%d' % size, b'%d' % block_size, b'%d' % size, b'%d' % block_size, ] self.assertEqual([{ "NAME": name, "PATH": os.path.join(devroot, name), "ID_PATH": os.path.join(byidroot, 'deviceid'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__keeps_block_device_same_serial_different_model(self): """Multipath disks get multiple IDs, but same serial is same device.""" name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") name2 = factory.make_name('name') model2 = factory.make_name('model') # Create simulated /dev tree. 
devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) os.symlink(os.path.join(devroot, name), os.path.join(byidroot, 'deviceid')) os.mknod(os.path.join(devroot, name2)) os.symlink(os.path.join(devroot, name2), os.path.join(byidroot, 'deviceid2')) check_output.side_effect = [ b"\n".join([ self.make_lsblk_output(name=name, model=model), self.make_lsblk_output(name=name2, model=model2)]), self.make_udevadm_output(name, serial=serial, dev=devroot), self.make_udevadm_output(name2, serial=serial, dev=devroot), b'%d' % size, b'%d' % block_size, b'%d' % size, b'%d' % block_size, ] self.assertEqual([{ "NAME": name, "PATH": os.path.join(devroot, name), "ID_PATH": os.path.join(byidroot, 'deviceid'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }, { "NAME": name2, "PATH": os.path.join(devroot, name2), "ID_PATH": os.path.join(byidroot, 'deviceid2'), "RO": "0", "RM": "0", "MODEL": model2, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__keeps_block_device_blank_serial_same_model(self): """Multipath disks get multiple IDs, but same serial is same device.""" name = factory.make_name('name') model = factory.make_name('model') serial = '' size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") name2 = factory.make_name('name') # Create simulated /dev tree. devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) os.symlink(os.path.join(devroot, name), os.path.join(byidroot, 'deviceid')) os.mknod(os.path.join(devroot, name2)) os.symlink(os.path.join(devroot, name2), os.path.join(byidroot, 'deviceid2')) check_output.side_effect = [ b"\n".join([ self.make_lsblk_output(name=name, model=model), self.make_lsblk_output(name=name2, model=model)]), self.make_udevadm_output(name, serial=serial, dev=devroot), self.make_udevadm_output(name2, serial=serial, dev=devroot), b'%d' % size, b'%d' % block_size, b'%d' % size, b'%d' % block_size, ] self.assertEqual([{ "NAME": name, "PATH": os.path.join(devroot, name), "ID_PATH": os.path.join(byidroot, 'deviceid'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }, { "NAME": name2, "PATH": os.path.join(devroot, name2), "ID_PATH": os.path.join(byidroot, 'deviceid2'), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__returns_block_device_without_id_path(self): """Block devices without by-id links should not have ID_PATH key""" name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") # Create simulated /dev tree without by-id link devroot = self.make_dir() os.mkdir(os.path.join(devroot, 'disk')) byidroot = os.path.join(devroot, 'disk', 'by_id') 
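        # Mirror the /dev/disk/by_id layout that udev would normally
        # populate with one symlink per device identity.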
os.mkdir(byidroot) os.mknod(os.path.join(devroot, name)) check_output.side_effect = [ self.make_lsblk_output(name=name, model=model), self.make_udevadm_output(name, serial=serial, dev=devroot), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": os.path.join(devroot, name), "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices(byidroot)) def test__returns_block_device_readonly(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output(name=name, model=model, read_only=True), self.make_udevadm_output(name, serial=serial), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": "/dev/%s" % name, "RO": "1", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices()) def test__returns_block_device_ssd(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output(name=name, model=model, rotary=False), self.make_udevadm_output(name, serial=serial), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": "/dev/%s" % name, "RO": "0", "RM": "0", "MODEL": model, "ROTA": "0", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices()) def test__returns_block_device_not_sata(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output(name=name, model=model), self.make_udevadm_output(name, serial=serial, sata=False), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": "/dev/%s" % name, "RO": "0", "RM": "0", "MODEL": model, "ROTA": "1", "SATA": "0", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices()) def test__returns_block_device_removable(self): name = factory.make_name('name') model = factory.make_name('model') serial = factory.make_name('serial') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) check_output = self.patch(subprocess, "check_output") check_output.side_effect = [ self.make_lsblk_output(name=name, model=model, removable=True), self.make_udevadm_output(name, serial=serial), '%s' % size, '%s' % block_size, ] self.assertEquals([{ "NAME": name, "PATH": "/dev/%s" % name, "RO": "0", "RM": "1", "MODEL": model, "ROTA": "1", "SATA": "1", "SERIAL": serial, "SIZE": "%s" % size, "BLOCK_SIZE": "%s" % block_size, "RPM": "5400", }], self.call_gather_physical_block_devices()) def test__returns_multiple_block_devices_in_order(self): names = [factory.make_name('name') for _ 
in range(3)] lsblk = [ self.make_lsblk_output(name=name) for name in names ] call_outputs = [] call_outputs.append("\n".join(lsblk)) for name in names: call_outputs.append(self.make_udevadm_output(name)) for name in names: call_outputs.append( "%s" % random.randint(1000 * 1000, 1000 * 1000 * 1000)) call_outputs.append( "%s" % random.choice([512, 1024, 4096])) check_output = self.patch(subprocess, "check_output") check_output.side_effect = call_outputs device_names = [ block_info['NAME'] for block_info in self.call_gather_physical_block_devices() ] self.assertEquals(names, device_names) class TestUpdateNodePhysicalBlockDevices(MAASServerTestCase): def make_block_device( self, name=None, path=None, id_path=None, size=None, block_size=None, model=None, serial=None, rotary=True, rpm=None, removable=False, sata=False): if name is None: name = factory.make_name('name') if path is None: path = '/dev/%s' % name if id_path is None: id_path = '/dev/disk/by-id/deviceid' if size is None: size = random.randint( MIN_BLOCK_DEVICE_SIZE * 2, MIN_BLOCK_DEVICE_SIZE * 4) if block_size is None: block_size = random.choice([512, 1024, 4096]) if model is None: model = factory.make_name('model') if serial is None: serial = factory.make_name('serial') if rpm is None: rpm = random.choice(('4800', '5400', '10000', '15000')) return { "NAME": name, "PATH": path, "ID_PATH": id_path, "SIZE": '%s' % size, "BLOCK_SIZE": '%s' % block_size, "MODEL": model, "SERIAL": serial, "RO": "0", "RM": "1" if removable else "0", "ROTA": "1" if rotary else "0", "SATA": "1" if sata else "0", "RPM": "0" if not rotary else rpm } def test__does_nothing_when_exit_status_is_not_zero(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice(node=node) update_node_physical_block_devices(node, b"garbage", exit_status=1) self.assertIsNotNone(reload_object(block_device)) def test__does_nothing_if_skip_storage(self): node = factory.make_Node(skip_storage=True) block_device = factory.make_PhysicalBlockDevice(node=node) update_node_physical_block_devices(node, b"garbage", exit_status=0) self.assertIsNotNone(reload_object(block_device)) def test__removes_previous_physical_block_devices(self): node = factory.make_Node() block_device = factory.make_PhysicalBlockDevice(node=node) update_node_physical_block_devices(node, b"[]", 0) self.assertIsNone(reload_object(block_device)) def test__creates_physical_block_devices(self): devices = [self.make_block_device() for _ in range(3)] device_names = [device['NAME'] for device in devices] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) created_names = [ device.name for device in PhysicalBlockDevice.objects.filter(node=node) ] self.assertItemsEqual(device_names, created_names) def test__handles_renamed_block_device(self): devices = [self.make_block_device(name='sda', serial='first')] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) devices = [ self.make_block_device(name='sda', serial='second'), self.make_block_device(name='sdb', serial='first'), ] json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) device_names = [device['NAME'] for device in devices] created_names = [ device.name for device in PhysicalBlockDevice.objects.filter(node=node) ] self.assertItemsEqual(device_names, created_names) def test__only_updates_physical_block_devices(self): devices = 
[self.make_block_device() for _ in range(3)] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) created_ids_one = [ device.id for device in PhysicalBlockDevice.objects.filter(node=node) ] update_node_physical_block_devices(node, json_output, 0) created_ids_two = [ device.id for device in PhysicalBlockDevice.objects.filter(node=node) ] self.assertItemsEqual(created_ids_two, created_ids_one) def test__doesnt_reset_boot_disk(self): devices = [self.make_block_device() for _ in range(3)] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) boot_disk = PhysicalBlockDevice.objects.filter(node=node).first() node.boot_disk = boot_disk node.save() update_node_physical_block_devices(node, json_output, 0) self.assertEquals(boot_disk, reload_object(node).boot_disk) def test__clears_boot_disk(self): devices = [self.make_block_device() for _ in range(3)] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) update_node_physical_block_devices( node, json.dumps([]).encode('utf-8'), 0) self.assertIsNone(reload_object(node).boot_disk) def test__creates_physical_block_devices_in_order(self): devices = [self.make_block_device() for _ in range(3)] device_names = [device['NAME'] for device in devices] node = factory.make_Node() json_output = json.dumps(devices).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) created_names = [ device.name for device in ( PhysicalBlockDevice.objects.filter(node=node).order_by('id')) ] self.assertEquals(device_names, created_names) def test__creates_physical_block_device(self): name = factory.make_name('name') id_path = '/dev/disk/by-id/deviceid' size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) model = factory.make_name('model') serial = factory.make_name('serial') device = self.make_block_device( name=name, size=size, block_size=block_size, model=model, serial=serial) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first(), MatchesStructure.byEquality( name=name, id_path=id_path, size=size, block_size=block_size, model=model, serial=serial)) def test__creates_physical_block_device_with_path(self): name = factory.make_name('name') size = random.randint(3000 * 1000, 1000 * 1000 * 1000) block_size = random.choice([512, 1024, 4096]) model = factory.make_name('model') serial = factory.make_name('serial') device = self.make_block_device( name=name, size=size, block_size=block_size, model=model, serial=serial, id_path='') node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first(), MatchesStructure.byEquality( name=name, id_path='/dev/%s' % name, size=size, block_size=block_size, model=model, serial=serial)) def test__creates_physical_block_device_only_for_node(self): device = self.make_block_device() node = factory.make_Node(with_boot_disk=False) other_node = factory.make_Node(with_boot_disk=False) json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertEquals( 0, 
PhysicalBlockDevice.objects.filter(node=other_node).count(), "Created physical block device for the incorrect node.") def test__creates_physical_block_device_with_rotary_tag(self): device = self.make_block_device(rotary=True) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Contains('rotary')) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Not(Contains('ssd'))) def test__creates_physical_block_device_with_rotary_and_rpm_tags(self): device = self.make_block_device(rotary=True, rpm=5400) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Contains('rotary')) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Contains('5400rpm')) def test__creates_physical_block_device_with_ssd_tag(self): device = self.make_block_device(rotary=False) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, ContainsAll(['ssd'])) self.expectThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Not(Contains('rotary'))) def test__creates_physical_block_device_without_removable_tag(self): device = self.make_block_device(removable=False) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Not(Contains('removable'))) def test__creates_physical_block_device_with_removable_tag(self): device = self.make_block_device(removable=True) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Contains('removable')) def test__creates_physical_block_device_without_sata_tag(self): device = self.make_block_device(sata=False) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Not(Contains('sata'))) def test__creates_physical_block_device_with_sata_tag(self): device = self.make_block_device(sata=True) node = factory.make_Node() json_output = json.dumps([device]).encode('utf-8') update_node_physical_block_devices(node, json_output, 0) self.assertThat( PhysicalBlockDevice.objects.filter(node=node).first().tags, Contains('sata')) class TestUpdateNodeNetworkInformation(MAASServerTestCase): """Tests the update_node_network_information function using data from the ip_addr_results.txt file to simulate `ip addr`'s output. The EXPECTED_MACS dictionary below must match the contents of the file, which should specify a list of physical interfaces (such as what would be expected to be found during commissioning). 
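
    (In the code below that dictionary is named EXPECTED_INTERFACES; each
    entry maps an interface name to the MAC address that
    ip_addr_results.txt reports for it.)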
""" EXPECTED_INTERFACES = { 'eth0': MAC("00:00:00:00:00:01"), 'eth1': MAC("00:00:00:00:00:02"), 'eth2': MAC("00:00:00:00:00:03"), } IP_ADDR_OUTPUT = None def setUp(self): self.IP_ADDR_OUTPUT = open( os.path.dirname(__file__) + '/ip_addr_results.txt').read() super(TestUpdateNodeNetworkInformation, self).setUp() def assert_expected_interfaces_and_macs_exist( self, node_interfaces, additional_interfaces={}): """Asserts to ensure that the type, name, and MAC address are appropriate, given Node's interfaces. (and an optional list of additional interfaces which must exist) """ expected_interfaces = self.EXPECTED_INTERFACES.copy() expected_interfaces.update(additional_interfaces) self.assertThat(len(node_interfaces), Equals(len(expected_interfaces))) for interface in node_interfaces: if interface.name.startswith('eth'): parts = interface.name.split('.') if len(parts) == 2 and parts[1].isdigit(): iftype = INTERFACE_TYPE.VLAN else: iftype = INTERFACE_TYPE.PHYSICAL self.assertThat( interface.type, Equals(iftype)) self.assertIn(interface.name, expected_interfaces) self.assertThat(interface.mac_address, Equals( expected_interfaces[interface.name])) def test__does_nothing_if_skip_networking(self): node = factory.make_Node(interface=True, skip_networking=True) boot_interface = node.get_boot_interface() update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) self.assertIsNotNone(reload_object(boot_interface)) def test__add_all_interfaces(self): """Test a node that has no previously known interfaces on which we need to add a series of interfaces. """ node = factory.make_Node() # Delete all Interfaces created by factory attached to this node. Interface.objects.filter(node_id=node.id).delete() update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) # Makes sure all the test dataset MAC addresses were added to the node. node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist(node_interfaces) def test__one_mac_missing(self): """Test whether we correctly detach a NIC that no longer appears to be connected to the node. """ node = factory.make_Node() # Create a MAC address that we know is not in the test dataset. factory.make_Interface( node=node, mac_address="01:23:45:67:89:ab") update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) # These should have been added to the node. node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist(node_interfaces) # This one should have been removed because it no longer shows on the # `ip addr` output. db_macaddresses = [ iface.mac_address for iface in node.interface_set.all() ] self.assertNotIn(MAC('01:23:45:67:89:ab'), db_macaddresses) def test__reassign_mac(self): """Test whether we can assign a MAC address previously connected to a different node to the current one""" node1 = factory.make_Node() # Create a MAC address that we know IS in the test dataset. interface_to_be_reassigned = factory.make_Interface(node=node1) interface_to_be_reassigned.mac_address = MAC('00:00:00:00:00:01') interface_to_be_reassigned.save() node2 = factory.make_Node() update_node_network_information(node2, self.IP_ADDR_OUTPUT, 0) node2_interfaces = Interface.objects.filter(node=node2) self.assert_expected_interfaces_and_macs_exist(node2_interfaces) # Ensure the MAC object moved over to node2. 
self.assertItemsEqual([], Interface.objects.filter(node=node1)) def test__reassign_interfaces(self): """Test whether we can assign interfaces previously connected to a different node to the current one.""" node1 = factory.make_Node() update_node_network_information(node1, self.IP_ADDR_OUTPUT, 0) # First make sure the first node has all the expected interfaces. node1_interfaces = Interface.objects.filter(node=node1) self.assert_expected_interfaces_and_macs_exist(node1_interfaces) # Grab the id from one of the created interfaces. interface_id = Interface.objects.filter(node=node1).first().id # Now make sure the second node has them all. node2 = factory.make_Node() update_node_network_information(node2, self.IP_ADDR_OUTPUT, 0) node2_interfaces = Interface.objects.filter(node=node2) self.assert_expected_interfaces_and_macs_exist(node2_interfaces) # Now make sure all the objects moved to the second node. self.assertItemsEqual([], Interface.objects.filter(node=node1)) # ... and ensure that the interface was deleted. self.assertItemsEqual([], Interface.objects.filter(id=interface_id)) def test__does_not_delete_virtual_interfaces_with_shared_mac(self): # Note: since this VLANInterface will be linked to the default VLAN # ("vid 0", which is actually invalid) the VLANInterface will # automatically get the name "vlan0". ETH0_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() ETH1_MAC = self.EXPECTED_INTERFACES['eth1'].get_raw() BOND_NAME = 'bond0' node = factory.make_Node() eth0 = factory.make_Interface( name="eth0", mac_address=ETH0_MAC, node=node) eth1 = factory.make_Interface( name="eth1", mac_address=ETH1_MAC, node=node) vlanif = factory.make_Interface( INTERFACE_TYPE.VLAN, mac_address=ETH0_MAC, parents=[eth0], node=node) factory.make_Interface( INTERFACE_TYPE.BOND, mac_address=ETH1_MAC, parents=[eth1], node=node, name=BOND_NAME) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist( node_interfaces, {vlanif.name: ETH0_MAC, BOND_NAME: ETH1_MAC}) def test__interface_names_changed(self): # Note: the MACs here are swapped compared to their expected values. ETH0_MAC = self.EXPECTED_INTERFACES['eth1'].get_raw() ETH1_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() node = factory.make_Node() factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name="eth0", mac_address=ETH0_MAC, node=node) factory.make_Interface( INTERFACE_TYPE.PHYSICAL, name="eth1", mac_address=ETH1_MAC, node=node) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) node_interfaces = Interface.objects.filter(node=node) # This will ensure that the interfaces were renamed appropriately.
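# Concretely: the fixture maps eth0 to 00:00:00:00:00:01 and eth1 to
# 00:00:00:00:00:02 (see EXPECTED_INTERFACES above), while the interfaces
# were created with those MACs swapped, so after the update each interface
# must carry the name that its MAC has in the fixture.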
self.assert_expected_interfaces_and_macs_exist(node_interfaces) def test__mac_id_is_preserved(self): """Test whether MAC address entities are preserved and not recreated""" ETH0_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() node = factory.make_Node() iface_to_be_preserved = factory.make_Interface( mac_address=ETH0_MAC, node=node) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) self.assertIsNotNone(reload_object(iface_to_be_preserved)) def test__legacy_model_upgrade_preserves_interfaces(self): ETH0_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() ETH1_MAC = self.EXPECTED_INTERFACES['eth1'].get_raw() node = factory.make_Node() eth0 = factory.make_Interface(mac_address=ETH0_MAC, node=node) eth1 = factory.make_Interface(mac_address=ETH1_MAC, node=node) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) self.assertEqual(eth0, Interface.objects.get(id=eth0.id)) self.assertEqual(eth1, Interface.objects.get(id=eth1.id)) node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist(node_interfaces) def test__legacy_model_with_extra_mac(self): ETH0_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() ETH1_MAC = self.EXPECTED_INTERFACES['eth1'].get_raw() ETH2_MAC = self.EXPECTED_INTERFACES['eth2'].get_raw() ETH3_MAC = '00:00:00:00:01:04' node = factory.make_Node() eth0 = factory.make_Interface(mac_address=ETH0_MAC, node=node) eth1 = factory.make_Interface(mac_address=ETH1_MAC, node=node) eth2 = factory.make_Interface(mac_address=ETH2_MAC, node=node) eth3 = factory.make_Interface(mac_address=ETH3_MAC, node=node) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist(node_interfaces) # Make sure we re-used the existing MACs in the database. self.assertIsNotNone(reload_object(eth0)) self.assertIsNotNone(reload_object(eth1)) self.assertIsNotNone(reload_object(eth2)) # Make sure the interface that no longer exists has been removed. self.assertIsNone(reload_object(eth3)) def test__does_not_delete_virtual_interfaces_with_unique_mac(self): ETH0_MAC = self.EXPECTED_INTERFACES['eth0'].get_raw() ETH1_MAC = self.EXPECTED_INTERFACES['eth1'].get_raw() BOND_MAC = '00:00:00:00:01:02' node = factory.make_Node() eth0 = factory.make_Interface(mac_address=ETH0_MAC, node=node) eth1 = factory.make_Interface(mac_address=ETH1_MAC, node=node) vlan = factory.make_Interface( INTERFACE_TYPE.VLAN, node=node, parents=[eth0]) bond = factory.make_Interface( INTERFACE_TYPE.BOND, mac_address=BOND_MAC, node=node, parents=[eth1]) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) # Freshen the other objects, since they may have changed names. 
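# (reload_object is the MAAS test helper for re-fetching a model instance
# from the database; a rough sketch of its semantics, an assumption rather
# than a copy of the helper, is:
#     type(obj).objects.filter(id=obj.id).first()
# i.e. it returns None once the row has been deleted.)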
vlan = reload_object(vlan) bond = reload_object(bond) node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist( node_interfaces, {vlan.name: ETH0_MAC, bond.name: BOND_MAC}) def test__deletes_virtual_interfaces_linked_to_removed_macs(self): VLAN_MAC = '00:00:00:00:01:01' BOND_MAC = '00:00:00:00:01:02' node = factory.make_Node() eth0 = factory.make_Interface( name='eth0', mac_address=VLAN_MAC, node=node) eth1 = factory.make_Interface( name='eth1', mac_address=BOND_MAC, node=node) factory.make_Interface( INTERFACE_TYPE.VLAN, mac_address=VLAN_MAC, parents=[eth0]) factory.make_Interface( INTERFACE_TYPE.BOND, mac_address=BOND_MAC, parents=[eth1]) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) node_interfaces = Interface.objects.filter(node=node) self.assert_expected_interfaces_and_macs_exist(node_interfaces) def test__creates_discovered_ip_address(self): node = factory.make_Node() cidr = '192.168.0.3/24' subnet = factory.make_Subnet( cidr=cidr, vlan=VLAN.objects.get_default_vlan()) update_node_network_information(node, self.IP_ADDR_OUTPUT, 0) eth0 = Interface.objects.get(node=node, name='eth0') address = unicode(IPNetwork(cidr).ip) ipv4_ip = eth0.ip_addresses.get(ip=address) self.assertThat( ipv4_ip, MatchesStructure.byEquality( alloc_type=IPADDRESS_TYPE.DISCOVERED, subnet=subnet, ip=address)) maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/test_nodecommissionresult.py0000644000000000000000000001432113056115004026775 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the :class:`NodeResult` model.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from django.core.exceptions import ValidationError from django.http import Http404 from maasserver.testing.factory import factory from maasserver.utils.converters import XMLToYAML from maastesting.djangotestcase import DjangoTestCase from metadataserver.enum import RESULT_TYPE from metadataserver.fields import Bin from metadataserver.models import NodeResult from metadataserver.models.commissioningscript import ( LLDP_OUTPUT_NAME, LSHW_OUTPUT_NAME, ) class TestNodeResult(DjangoTestCase): """Test the NodeResult model.""" def test_unicode_represents_result(self): result = factory.make_NodeResult_for_commissioning() self.assertEqual( '%s/%s' % (result.node.system_id, result.name), unicode(result)) def test_can_store_data(self): node = factory.make_Node() name = factory.make_string() data = factory.make_bytes() factory.make_NodeResult_for_commissioning( node=node, name=name, data=data) ncr = NodeResult.objects.get(name=name) self.assertAttributes(ncr, dict(node=node, data=data)) def test_node_name_uniqueness(self): # You cannot have two result rows with the same name for the # same node. 
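# A minimal sketch of the constraint that makes the second row invalid,
# inferred from the behaviour below rather than copied from the model:
#     class Meta:
#         unique_together = ('node', 'name')
# Validating a second result with the same (node, name) pair then raises
# ValidationError.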
node = factory.make_Node() factory.make_NodeResult_for_commissioning(node=node, name="foo") self.assertRaises( ValidationError, factory.make_NodeResult_for_commissioning, node=node, name="foo") def test_different_nodes_can_have_same_data_name(self): node = factory.make_Node() ncr1 = factory.make_NodeResult_for_commissioning( node=node, name="foo") node2 = factory.make_Node() ncr2 = factory.make_NodeResult_for_commissioning( node=node2, name="foo") self.assertEqual(ncr1.name, ncr2.name) def test_get_data_as_html_returns_output(self): output = factory.make_string() result = factory.make_NodeResult_for_commissioning( data=output.encode('ascii')) self.assertEqual(output, result.get_data_as_html()) def test_get_data_as_yaml_html_returns_output(self): # Any well-formed XML works here, since `expected` is computed the # same way; the <foo> wrapper is a reconstruction, not significant. data = "<foo>bar</foo>".encode("utf-8") expected = XMLToYAML(data).convert() lshw_result = factory.make_NodeResult_for_commissioning( name=LSHW_OUTPUT_NAME, script_result=0, data=data) lldp_result = factory.make_NodeResult_for_commissioning( name=LLDP_OUTPUT_NAME, script_result=0, data=data) self.assertEqual(expected, lshw_result.get_data_as_yaml_html()) self.assertEqual(expected, lldp_result.get_data_as_yaml_html()) def test_get_data_as_html_escapes_binary(self): output = b'\x00\xff' result = factory.make_NodeResult_for_commissioning(data=output) html = result.get_data_as_html() self.assertIsInstance(html, unicode) # The nul byte turns into the zero character. The 0xff is an invalid # character and so becomes the Unicode "replacement character" 0xfffd. self.assertEqual('\x00\ufffd', html) def test_get_data_as_html_escapes_for_html(self): output = '<&>' result = factory.make_NodeResult_for_commissioning( data=output.encode('ascii')) self.assertEqual('&lt;&amp;&gt;', result.get_data_as_html()) class TestNodeResultManager(DjangoTestCase): """Test the manager utility for NodeResult.""" def test_clear_results_removes_rows(self): # clear_results should remove all a node's results. node = factory.make_Node() factory.make_NodeResult_for_commissioning(node=node) factory.make_NodeResult_for_commissioning(node=node) factory.make_NodeResult_for_commissioning(node=node) NodeResult.objects.clear_results(node) self.assertItemsEqual( [], NodeResult.objects.filter(node=node)) def test_clear_results_ignores_other_nodes(self): # clear_results should only remove results for the supplied # node. node1 = factory.make_Node() factory.make_NodeResult_for_commissioning(node=node1) node2 = factory.make_Node() factory.make_NodeResult_for_commissioning(node=node2) NodeResult.objects.clear_results(node1) self.assertTrue( NodeResult.objects.filter(node=node2).exists()) def test_store_data(self): node = factory.make_Node() name = factory.make_string(255) data = factory.make_bytes(1024 * 1024) script_result = randint(0, 10) result = NodeResult.objects.store_data( node, name=name, script_result=script_result, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) result_in_db = NodeResult.objects.get(node=node) self.assertAttributes(result_in_db, dict(name=name, data=data)) # store_data() returns the model object.
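# An illustrative sketch of the semantics store_data() must provide (an
# assumption for clarity, not the actual manager code):
#     result, _ = NodeResult.objects.update_or_create(
#         node=node, name=name,
#         defaults=dict(script_result=script_result,
#                       result_type=result_type, data=data))
#     return result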
self.assertEqual(result, result_in_db) def test_store_data_updates_existing(self): node = factory.make_Node() name = factory.make_string(255) script_result = randint(0, 10) factory.make_NodeResult_for_commissioning(node=node, name=name) data = factory.make_bytes(1024 * 1024) NodeResult.objects.store_data( node, name=name, script_result=script_result, result_type=RESULT_TYPE.COMMISSIONING, data=Bin(data)) self.assertAttributes( NodeResult.objects.get(node=node), dict(name=name, data=data, script_result=script_result)) def test_get_data(self): ncr = factory.make_NodeResult_for_commissioning() result = NodeResult.objects.get_data(ncr.node, ncr.name) self.assertEqual(ncr.data, result) def test_get_data_404s_when_not_found(self): ncr = factory.make_NodeResult_for_commissioning() self.assertRaises( Http404, NodeResult.objects.get_data, ncr.node, "bad name") maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/test_nodekey.py0000644000000000000000000000515213056115004024150 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :class:`NodeKey` model and manager.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maastesting.djangotestcase import DjangoTestCase from metadataserver.models import NodeKey from testtools.matchers import HasLength class TestNodeKeyManager(DjangoTestCase): """Test NodeKeyManager.""" def test_get_token_for_node_registers_node_key(self): node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) nodekey = NodeKey.objects.get(node=node, key=token.key) self.assertNotEqual(None, nodekey) self.assertEqual(token, nodekey.token) def test_get_node_for_key_finds_node(self): node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) self.assertEqual(node, NodeKey.objects.get_node_for_key(token.key)) def test_get_node_for_key_raises_DoesNotExist_if_key_not_found(self): non_key = factory.make_string() self.assertRaises( NodeKey.DoesNotExist, NodeKey.objects.get_node_for_key, non_key) def test_get_token_for_node_creates_token(self): node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) self.assertEqual(node, NodeKey.objects.get_node_for_key(token.key)) def test_get_token_for_node_returns_existing_token(self): node = factory.make_Node() original_token = NodeKey.objects.get_token_for_node(node) repeated_token = NodeKey.objects.get_token_for_node(node) self.assertEqual(original_token, repeated_token) def test_get_token_for_node_inverts_get_node_for_key(self): node = factory.make_Node() self.assertEqual( node, NodeKey.objects.get_node_for_key( NodeKey.objects.get_token_for_node(node).key)) def test_clear_token_for_node_deletes_related_NodeKey(self): node = factory.make_Node() NodeKey.objects.get_token_for_node(node) NodeKey.objects.clear_token_for_node(node) self.assertThat(NodeKey.objects.filter(node=node), HasLength(0)) def test_get_node_for_key_inverts_get_token_for_node(self): key = NodeKey.objects.get_token_for_node(factory.make_Node()).key self.assertEqual( key, NodeKey.objects.get_token_for_node( NodeKey.objects.get_node_for_key(key)).key) maas-1.9.5+bzr4599.orig/src/metadataserver/models/tests/test_nodeuserdata.py0000644000000000000000000001032413056115004025165 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :class:`NodeUserData` and manager.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maastesting.djangotestcase import DjangoTestCase from metadataserver.models import NodeUserData class TestNodeUserDataManager(DjangoTestCase): """Test NodeUserDataManager.""" def test_set_user_data_creates_new_nodeuserdata_if_needed(self): node = factory.make_Node() data = b'foo' NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get(node=node).data) def test_set_user_data_overwrites_existing_userdata(self): node = factory.make_Node() data = b'bar' NodeUserData.objects.set_user_data(node, b'old data') NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get(node=node).data) def test_set_user_data_leaves_data_for_other_nodes_alone(self): node = factory.make_Node() NodeUserData.objects.set_user_data(node, b'intact') NodeUserData.objects.set_user_data(factory.make_Node(), b'unrelated') self.assertEqual(b'intact', NodeUserData.objects.get(node=node).data) def test_set_user_data_to_None_removes_user_data(self): node = factory.make_Node() NodeUserData.objects.set_user_data(node, b'original') NodeUserData.objects.set_user_data(node, None) self.assertItemsEqual([], NodeUserData.objects.filter(node=node)) def test_set_user_data_to_None_when_none_exists_does_nothing(self): node = factory.make_Node() NodeUserData.objects.set_user_data(node, None) self.assertItemsEqual([], NodeUserData.objects.filter(node=node)) def test_get_user_data_retrieves_data(self): node = factory.make_Node() data = b'splat' NodeUserData.objects.set_user_data(node, data) self.assertEqual(data, NodeUserData.objects.get_user_data(node)) def test_get_user_data_raises_DoesNotExist_if_not_found(self): node = factory.make_Node() self.assertRaises( NodeUserData.DoesNotExist, NodeUserData.objects.get_user_data, node) def test_get_user_data_ignores_other_nodes(self): node = factory.make_Node() data = b'bzzz' NodeUserData.objects.set_user_data(node, data) NodeUserData.objects.set_user_data(factory.make_Node(), b'unrelated') self.assertEqual(data, NodeUserData.objects.get_user_data(node)) def test_has_user_data_returns_False_if_node_has_no_user_data(self): self.assertFalse( NodeUserData.objects.has_user_data(factory.make_Node())) def test_has_user_data_returns_True_if_node_has_user_data(self): node = factory.make_Node() NodeUserData.objects.set_user_data(node, b"This node has user data.") self.assertTrue(NodeUserData.objects.has_user_data(node)) def test_bulk_set_user_data(self): nodes = [factory.make_Node() for _ in xrange(5)] data = factory.make_bytes() NodeUserData.objects.bulk_set_user_data(nodes, data) for node in nodes: self.assertEqual(data, NodeUserData.objects.get_user_data(node)) def test_bulk_set_user_data_only_deletes_when_data_is_None(self): nodes = [factory.make_Node() for _ in xrange(5)] NodeUserData.objects.bulk_set_user_data(nodes, None) for node in nodes: self.assertRaises( NodeUserData.DoesNotExist, NodeUserData.objects.get_user_data, node) def test_bulk_set_user_data_with_preexisting_data(self): nodes = [factory.make_Node() for _ in xrange(2)] data1 = factory.make_bytes() NodeUserData.objects.bulk_set_user_data(nodes, data1) nodes.extend(factory.make_Node() for _ in xrange(3)) data2 = factory.make_bytes() 
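# The second bulk call below exercises both paths at once: it must update
# the two nodes that already carry user data and create rows for the three
# nodes added afterwards.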
NodeUserData.objects.bulk_set_user_data(nodes, data2) for node in nodes: self.assertEqual(data2, NodeUserData.objects.get_user_data(node)) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/__init__.py0000644000000000000000000000000013056115004021712 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/tests/models.py0000644000000000000000000000110713056115004021447 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test model for testing BinaryField.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "BinaryFieldModel", ] from django.db.models import Model from metadataserver.fields import BinaryField class BinaryFieldModel(Model): """Test model for BinaryField. Contains nothing but a BinaryField.""" data = BinaryField(null=True) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_address.py0000644000000000000000000001510413056115004022652 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test server-address-guessing logic.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from socket import gethostname from maastesting.factory import factory from maastesting.testcase import MAASTestCase from metadataserver import address from testtools.matchers import MatchesRegex def parse_locale_lines(output): """Parse lines of output from /bin/locale into a dict.""" return { key: value.strip('"') for key, value in [line.split('=') for line in output]} class TestAddress(MAASTestCase): def test_get_command_output_executes_command(self): self.assertEqual( ["Hello"], address.get_command_output('echo', 'Hello')) def test_get_command_output_does_not_expand_arguments(self): self.assertEqual(["$*"], address.get_command_output('echo', '$*')) def test_get_command_output_returns_sequence_of_lines(self): self.assertEqual( ['1', '2'], address.get_command_output('echo', '1\n2')) def test_get_command_output_uses_C_locale(self): locale = parse_locale_lines(address.get_command_output('locale')) self.assertEqual('C', locale['LC_CTYPE']) self.assertEqual('C', locale['LC_MESSAGES']) self.assertEqual('en_US.UTF-8', locale['LANG']) def test_find_default_interface_finds_default_interface(self): sample_ip_route = [ "default via 10.0.0.1 dev eth1 proto static", "169.254.0.0/16 dev eth2 scope link metric 1000", "10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.11 " "metric 2", "10.1.0.0/24 dev virbr0 proto kernel scope link src 10.1.0.1", "10.1.1.0/24 dev virbr1 proto kernel scope link src 10.1.1.1", ] self.assertEqual( 'eth1', address.find_default_interface(sample_ip_route)) def test_find_default_interface_finds_default_tagged_interface(self): sample_ip_route = [ "default via 10.20.64.1 dev eth0.2", "10.14.0.0/16 dev br0 proto kernel scope link src 10.14.4.1", "10.90.90.0/24 dev br0 proto kernel scope link src 10.90.90.1", "169.254.0.0/16 dev br0 scope link metric 1000", ] self.assertEqual( 'eth0.2', address.find_default_interface(sample_ip_route)) def test_find_default_interface_finds_default_aliased_interface(self): sample_ip_route = [ "default via 10.20.64.1 dev eth0:2", "10.14.0.0/16 dev br0 proto kernel scope link src 10.14.4.1", "10.90.90.0/24 dev br0 proto kernel scope link src 10.90.90.1", "169.254.0.0/16 dev 
br0 scope link metric 1000", ] self.assertEqual( 'eth0:2', address.find_default_interface(sample_ip_route)) def test_find_default_interface_makes_a_guess_if_no_default(self): sample_ip_route = [ "10.0.0.0/24 dev eth2 proto kernel scope link src 10.0.0.11 " "metric 2", "10.1.0.0/24 dev virbr0 proto kernel scope link src 10.1.0.1", "10.1.1.0/24 dev virbr1 proto kernel scope link src 10.1.1.1", ] self.assertEqual( 'eth2', address.find_default_interface(sample_ip_route)) def test_find_default_tagged_interface_makes_a_guess_if_no_default(self): sample_ip_route = [ "10.0.0.0/24 dev eth2.4 proto kernel scope link src 10.0.0.11 " "metric 2", "10.1.0.0/24 dev virbr0 proto kernel scope link src 10.1.0.1", "10.1.1.0/24 dev virbr1 proto kernel scope link src 10.1.1.1", ] self.assertEqual( 'eth2.4', address.find_default_interface(sample_ip_route)) def test_find_default_aliased_interface_makes_a_guess_if_no_default(self): sample_ip_route = [ "10.0.0.0/24 dev eth2:4 proto kernel scope link src 10.0.0.11 " "metric 2", "10.1.0.0/24 dev virbr0 proto kernel scope link src 10.1.0.1", "10.1.1.0/24 dev virbr1 proto kernel scope link src 10.1.1.1", ] self.assertEqual( 'eth2:4', address.find_default_interface(sample_ip_route)) def test_find_default_interface_returns_None_on_failure(self): self.assertIsNone(address.find_default_interface([])) def test_get_ip_address_finds_IP_address_of_interface(self): self.assertEqual('127.0.0.1', address.get_ip_address(b'lo')) def test_get_ip_address_prefers_v4_addresses_to_v6(self): addresses = [factory.make_ipv6_address() for _ in range(3)] # We add a deliberately low v6 address to show that the v4 # address is always preferred. ipv6_address = "::1" ipv4_address = factory.make_ipv4_address() addresses.append(ipv6_address) addresses.append(ipv4_address) self.patch( address, 'get_all_addresses_for_interface').return_value = ( addresses) self.assertEqual(ipv4_address, address.get_ip_address(b'lo')) def test_get_ip_address_returns_v6_address_if_no_v4_available(self): ipv6_address = factory.make_ipv6_address() self.patch( address, 'get_all_addresses_for_interface').return_value = ( [ipv6_address]) self.assertEqual(ipv6_address, address.get_ip_address(b'lo')) def test_get_ip_address_returns_consistent_result_from_address_set(self): addresses = [factory.make_ipv6_address() for _ in range(5)] expected_address = sorted(addresses)[0] for _ in range(5): random.shuffle(addresses) self.patch( address, 'get_all_addresses_for_interface').return_value = ( addresses) self.assertEqual( expected_address, address.get_ip_address(b'lo')) def test_get_ip_address_returns_None_on_failure(self): self.assertIsNone(address.get_ip_address(b'ethturboveyronsuper9')) def test_guess_server_host_finds_IP_address(self): self.assertThat( address.guess_server_host(), MatchesRegex("^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")) def test_guess_server_host_returns_hostname_as_last_ditch_guess(self): def return_empty_list(*args): return [] self.patch(address, 'get_command_output', return_empty_list) self.assertEqual(gethostname(), address.guess_server_host()) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_api.py0000644000000000000000000016003213056115004021777 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the metadata API.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import namedtuple import httplib from io import BytesIO import json import os.path import random import tarfile from django.conf import settings from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from maasserver import preseed as preseed_module from maasserver.clusterrpc.testing.boot_images import make_rpc_boot_image from maasserver.enum import ( NODE_STATUS, NODEGROUP_STATUS, NODEGROUPINTERFACE_MANAGEMENT, ) from maasserver.exceptions import ( MAASAPINotFound, Unauthorized, ) from maasserver.models import ( Event, SSHKey, Tag, ) from maasserver.models.node import Node from maasserver.rpc.testing.mixins import PreseedRPCMixin from maasserver.testing.config import RegionConfigurationFixture from maasserver.testing.factory import factory from maasserver.testing.oauthclient import OAuthAuthenticatedClient from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.djangotestcase import DjangoTestCase from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.utils import sample_binary_data from metadataserver import api from metadataserver.api import ( add_event_to_node_event_log, check_version, get_node_for_mac, get_node_for_request, get_queried_node, make_list_response, make_text_response, MetaDataHandler, poweroff as api_poweroff, UnknownMetadataVersion, ) from metadataserver.models import ( NodeKey, NodeResult, NodeUserData, ) from metadataserver.models.commissioningscript import ARCHIVE_PREFIX from metadataserver.nodeinituser import get_node_init_user from mock import ( ANY, Mock, ) from netaddr import IPNetwork from provisioningserver.events import ( EVENT_DETAILS, EVENT_TYPES, ) from testtools.matchers import ( Contains, ContainsAll, MatchesAll, Not, ) class TestHelpers(DjangoTestCase): """Tests for the API helper functions.""" def fake_request(self, **kwargs): """Produce a cheap fake request, fresh from the sweat shop. Pass as arguments any header items you want to include. """ return namedtuple('FakeRequest', ['META'])(kwargs) def test_make_text_response_presents_text_as_text_plain(self): input_text = "Hello." response = make_text_response(input_text) self.assertEqual('text/plain', response['Content-Type']) self.assertEqual(input_text, response.content) def test_make_list_response_presents_list_as_newline_separated_text(self): response = make_list_response(['aaa', 'bbb']) self.assertEqual('text/plain', response['Content-Type']) self.assertEqual("aaa\nbbb", response.content) def test_check_version_accepts_latest(self): check_version('latest') # The test is that we get here without exception. 
pass def test_check_version_reports_unknown_version(self): self.assertRaises(UnknownMetadataVersion, check_version, '1.0') def test_get_node_for_request_finds_node(self): node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) request = self.fake_request( HTTP_AUTHORIZATION=factory.make_oauth_header( oauth_token=token.key)) self.assertEqual(node, get_node_for_request(request)) def test_get_node_for_request_reports_missing_auth_header(self): self.assertRaises( Unauthorized, get_node_for_request, self.fake_request()) def test_get_node_for_mac_refuses_if_anonymous_access_disabled(self): self.patch(settings, 'ALLOW_UNSAFE_METADATA_ACCESS', False) self.assertRaises( PermissionDenied, get_node_for_mac, factory.make_mac_address()) def test_get_node_for_mac_raises_404_for_unknown_mac(self): self.assertRaises( MAASAPINotFound, get_node_for_mac, factory.make_mac_address()) def test_get_node_for_mac_finds_node_by_mac(self): node = factory.make_Node_with_Interface_on_Subnet() iface = node.get_boot_interface() self.assertEqual(iface.node, get_node_for_mac(iface.mac_address)) def test_get_queried_node_looks_up_by_mac_if_given(self): node = factory.make_Node_with_Interface_on_Subnet() iface = node.get_boot_interface() self.assertEqual( iface.node, get_queried_node(object(), for_mac=iface.mac_address)) def test_get_queried_node_looks_up_oauth_key_by_default(self): node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) request = self.fake_request( HTTP_AUTHORIZATION=factory.make_oauth_header( oauth_token=token.key)) self.assertEqual(node, get_queried_node(request)) def test_add_event_to_node_event_log(self): expected_type = { # These statuses have specific event types. NODE_STATUS.COMMISSIONING: EVENT_TYPES.NODE_COMMISSIONING_EVENT, NODE_STATUS.DEPLOYING: EVENT_TYPES.NODE_INSTALL_EVENT, # All other statuses generate NODE_STATUS_EVENT events. NODE_STATUS.NEW: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.FAILED_COMMISSIONING: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.MISSING: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.READY: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.RESERVED: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.ALLOCATED: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.DEPLOYED: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.RETIRED: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.BROKEN: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.FAILED_DEPLOYMENT: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.RELEASING: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.FAILED_RELEASING: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.DISK_ERASING: EVENT_TYPES.NODE_STATUS_EVENT, NODE_STATUS.FAILED_DISK_ERASING: EVENT_TYPES.NODE_STATUS_EVENT, } for status in expected_type: node = factory.make_Node(status=status) origin = factory.make_name('origin') action = factory.make_name('action') description = factory.make_name('description') add_event_to_node_event_log(node, origin, action, description) event = Event.objects.get(node=node) self.assertEqual(node, event.node) self.assertEqual(action, event.action) self.assertIn(origin, event.description) self.assertIn(description, event.description) self.assertEqual(expected_type[node.status], event.type.name) def make_node_client(node=None): """Create a test client logged in as if it were `node`.""" if node is None: node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) return OAuthAuthenticatedClient(get_node_init_user(), token) def call_signal(client=None, version='latest', files={}, **kwargs): """Call the API's signal method. 
:param client: Optional client to POST with. If omitted, will create one for a commissioning node. :param version: API version to post on. Defaults to "latest". :param files: Optional dict of files to attach. Maps file name to file contents. :param **kwargs: Any other keyword parameters are passed on directly to the "signal" call. """ if client is None: client = make_node_client(factory.make_Node( status=NODE_STATUS.COMMISSIONING)) params = { 'op': 'signal', 'status': 'OK', } params.update(kwargs) params.update({ name: factory.make_file_upload(name, content) for name, content in files.items() }) url = reverse('metadata-version', args=[version]) return client.post(url, params) class TestMetadataCommon(DjangoTestCase): """Tests for the common metadata/curtin-metadata API views.""" # The curtin-metadata and the metadata views are similar in every # aspect except the user-data end-point. The same tests are used to # test both end-points. scenarios = [ ('metadata', {'metadata_prefix': 'metadata'}), ('curtin-metadata', {'metadata_prefix': 'curtin-metadata'}), ] def get_metadata_name(self, name_suffix=''): """Return the Django name of the metadata view. :param name_suffix: Suffix of the view name. The default value is the empty string (get_metadata_name() will return the root of the metadata API in this case). Depending on the value of self.metadata_prefix, this will return the name of the metadata view or of the curtin-metadata view. """ return self.metadata_prefix + name_suffix def test_no_anonymous_access(self): url = reverse(self.get_metadata_name()) self.assertEqual( httplib.UNAUTHORIZED, self.client.get(url).status_code) def test_metadata_index_shows_latest(self): client = make_node_client() url = reverse(self.get_metadata_name()) self.assertIn('latest', client.get(url).content) def test_metadata_index_shows_only_known_versions(self): client = make_node_client() url = reverse(self.get_metadata_name()) for item in client.get(url).content.splitlines(): check_version(item) # The test is that we get here without exception. pass def test_version_index_shows_unconditional_entries(self): client = make_node_client() view_name = self.get_metadata_name('-version') url = reverse(view_name, args=['latest']) items = client.get(url).content.splitlines() self.assertThat(items, ContainsAll([ 'meta-data', 'maas-commissioning-scripts', ])) def test_version_index_does_not_show_user_data_if_not_available(self): client = make_node_client() view_name = self.get_metadata_name('-version') url = reverse(view_name, args=['latest']) items = client.get(url).content.splitlines() self.assertNotIn('user-data', items) def test_version_index_shows_user_data_if_available(self): node = factory.make_Node() NodeUserData.objects.set_user_data(node, b"User data for node") client = make_node_client(node) view_name = self.get_metadata_name('-version') url = reverse(view_name, args=['latest']) items = client.get(url).content.splitlines() self.assertIn('user-data', items) def test_meta_data_view_lists_fields(self): # Some fields only are returned if there is data related to them. 
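# For example, 'public-keys' is listed only when the node's owner has SSH
# keys on file, hence the user created with n_keys=2 below; the
# test_public_keys_* tests later in this class exercise both sides.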
user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = factory.make_Node(owner=user) client = make_node_client(node=node) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) response = client.get(url) self.assertIn('text/plain', response['Content-Type']) self.assertItemsEqual( MetaDataHandler.fields, response.content.split()) def test_meta_data_view_is_sorted(self): client = make_node_client() view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) response = client.get(url) attributes = response.content.split() self.assertEqual(sorted(attributes), attributes) def test_meta_data_unknown_item_is_not_found(self): client = make_node_client() view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'UNKNOWN-ITEM']) response = client.get(url) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_get_attribute_producer_supports_all_fields(self): handler = MetaDataHandler() producers = map(handler.get_attribute_producer, handler.fields) self.assertNotIn(None, producers) def test_meta_data_local_hostname_returns_fqdn(self): nodegroup = factory.make_NodeGroup( status=NODEGROUP_STATUS.ENABLED, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS) hostname = factory.make_string() domain = factory.make_string() node = factory.make_Node( hostname='%s.%s' % (hostname, domain), nodegroup=nodegroup) client = make_node_client(node) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'local-hostname']) response = client.get(url) self.assertEqual( (httplib.OK, node.fqdn), (response.status_code, response.content.decode('ascii'))) self.assertIn('text/plain', response['Content-Type']) def test_meta_data_instance_id_returns_system_id(self): node = factory.make_Node() client = make_node_client(node) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'instance-id']) response = client.get(url) self.assertEqual( (httplib.OK, node.system_id), (response.status_code, response.content.decode('ascii'))) self.assertIn('text/plain', response['Content-Type']) def test_public_keys_not_listed_for_node_without_public_keys(self): view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) client = make_node_client() response = client.get(url) self.assertNotIn( 'public-keys', response.content.decode('ascii').split('\n')) def test_public_keys_not_listed_for_comm_node_with_ssh_disabled(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = factory.make_Node( owner=user, status=NODE_STATUS.COMMISSIONING, enable_ssh=False) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) client = make_node_client(node=node) response = client.get(url) self.assertNotIn( 'public-keys', response.content.decode('ascii').split('\n')) def test_public_keys_listed_for_comm_node_with_ssh_enabled(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = factory.make_Node( owner=user, status=NODE_STATUS.COMMISSIONING, enable_ssh=True) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) client = make_node_client(node=node) response = client.get(url) self.assertIn( 'public-keys', response.content.decode('ascii').split('\n')) def test_public_keys_listed_for_node_with_public_keys(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = 
factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', '']) client = make_node_client(node=node) response = client.get(url) self.assertIn( 'public-keys', response.content.decode('ascii').split('\n')) def test_public_keys_for_node_without_public_keys_returns_empty(self): view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'public-keys']) client = make_node_client() response = client.get(url) self.assertEqual( (httplib.OK, ''), (response.status_code, response.content)) def test_public_keys_for_node_returns_list_of_keys(self): user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'public-keys']) client = make_node_client(node=node) response = client.get(url) self.assertEqual(httplib.OK, response.status_code) keys = SSHKey.objects.filter(user=user).values_list('key', flat=True) expected_response = '\n'.join(keys) self.assertItemsEqual( expected_response, response.content.decode('ascii')) self.assertIn('text/plain', response['Content-Type']) def test_public_keys_url_with_additional_slashes(self): # The metadata service also accepts urls with any number of additional # slashes after 'metadata': e.g. http://host/metadata///rest-of-url. user, _ = factory.make_user_with_keys(n_keys=2, username='my-user') node = factory.make_Node(owner=user) view_name = self.get_metadata_name('-meta-data') url = reverse(view_name, args=['latest', 'public-keys']) # Insert additional slashes. url = url.replace('metadata', 'metadata/////') client = make_node_client(node=node) response = client.get(url) keys = SSHKey.objects.filter(user=user).values_list('key', flat=True) self.assertItemsEqual( '\n'.join(keys), response.content.decode('ascii')) class TestMetadataUserData(DjangoTestCase): """Tests for the metadata user-data API endpoint.""" def test_user_data_view_returns_binary_data(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) NodeUserData.objects.set_user_data(node, sample_binary_data) client = make_node_client(node) response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual('application/octet-stream', response['Content-Type']) self.assertIsInstance(response.content, bytes) self.assertEqual( (httplib.OK, sample_binary_data), (response.status_code, response.content)) def test_poweroff_user_data_returned_if_unexpected_status(self): node = factory.make_Node(status=NODE_STATUS.READY) NodeUserData.objects.set_user_data(node, sample_binary_data) client = make_node_client(node) user_data = factory.make_name('user data').encode("ascii") self.patch(api_poweroff, 'generate_user_data').return_value = user_data response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual('application/octet-stream', response['Content-Type']) self.assertIsInstance(response.content, bytes) self.assertEqual( (httplib.OK, user_data), (response.status_code, response.content)) def test_user_data_for_node_without_user_data_returns_not_found(self): client = make_node_client( factory.make_Node(status=NODE_STATUS.COMMISSIONING)) response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual(httplib.NOT_FOUND, response.status_code) class TestMetadataUserDataStateChanges(MAASServerTestCase): """Tests for the metadata user-data API endpoint.""" def test_request_does_not_cause_status_change_if_not_deploying(self): status = 
factory.pick_enum( NODE_STATUS, but_not=[NODE_STATUS.DEPLOYING]) node = factory.make_Node(status=status) NodeUserData.objects.set_user_data(node, sample_binary_data) client = make_node_client(node) response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(status, reload_object(node).status) def test_request_causes_status_change_if_deploying(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) NodeUserData.objects.set_user_data(node, sample_binary_data) client = make_node_client(node) response = client.get(reverse('metadata-user-data', args=['latest'])) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.DEPLOYED, reload_object(node).status) class TestCurtinMetadataUserData(PreseedRPCMixin, DjangoTestCase): """Tests for the curtin-metadata user-data API endpoint.""" def test_curtin_user_data_view_returns_curtin_data(self): node = factory.make_Node(nodegroup=self.rpc_nodegroup, interface=True) factory.make_NodeGroupInterface( node.nodegroup, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) arch, subarch = node.architecture.split('/') boot_image = make_rpc_boot_image(purpose='xinstall') self.patch( preseed_module, 'get_boot_images_for').return_value = [boot_image] client = make_node_client(node) response = client.get( reverse('curtin-metadata-user-data', args=['latest'])) self.assertEqual(httplib.OK, response.status_code) self.assertIn("PREFIX='curtin'", response.content) class TestInstallingAPI(MAASServerTestCase): def test_other_user_than_node_cannot_signal_installation_result(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = OAuthAuthenticatedClient(factory.make_User()) response = call_signal(client) self.assertEqual(httplib.FORBIDDEN, response.status_code) self.assertEqual( NODE_STATUS.DEPLOYING, reload_object(node).status) def test_signaling_installation_result_does_not_affect_other_node(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = make_node_client( node=factory.make_Node(status=NODE_STATUS.DEPLOYING)) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.DEPLOYING, reload_object(node).status) def test_signaling_installation_success_leaves_node_deploying(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) def test_signaling_installation_success_does_not_populate_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) self.assertThat(populate_tags_for_single_node, MockNotCalled()) def test_signaling_installation_success_is_idempotent(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) call_signal(client, status='OK') response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) def test_signaling_installation_success_does_not_clear_owner(self): node = factory.make_Node( 
status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(node.owner, reload_object(node).owner) def test_signaling_installation_failure_makes_node_failed(self): node = factory.make_Node( status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) def test_signaling_installation_failure_is_idempotent(self): node = factory.make_Node( status=NODE_STATUS.DEPLOYING, owner=factory.make_User()) client = make_node_client(node=node) call_signal(client, status='FAILED') response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) class TestCommissioningAPI(MAASServerTestCase): def setUp(self): super(TestCommissioningAPI, self).setUp() self.patch(Node, 'stop_transition_monitor') self.patch(Node, 'release_leases') def test_commissioning_scripts(self): script = factory.make_CommissioningScript() response = make_node_client().get( reverse('commissioning-scripts', args=['latest'])) self.assertEqual( httplib.OK, response.status_code, "Unexpected response %d: %s" % (response.status_code, response.content)) self.assertIn( response['Content-Type'], { 'application/tar', 'application/x-gtar', 'application/x-tar', 'application/x-tgz', }) archive = tarfile.open(fileobj=BytesIO(response.content)) self.assertIn( os.path.join(ARCHIVE_PREFIX, script.name), archive.getnames()) def test_other_user_than_node_cannot_signal_commissioning_result(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = OAuthAuthenticatedClient(factory.make_User()) response = call_signal(client) self.assertEqual(httplib.FORBIDDEN, response.status_code) self.assertEqual( NODE_STATUS.COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_result_does_not_affect_other_node(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client( node=factory.make_Node(status=NODE_STATUS.COMMISSIONING)) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_OK_repopulates_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='OK', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) self.assertThat( populate_tags_for_single_node, MockCalledOnceWith(ANY, node)) def test_signaling_requires_status_code(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'signal'}) self.assertEqual(httplib.BAD_REQUEST, response.status_code) def test_signaling_rejects_unknown_status_code(self): response = call_signal(status=factory.make_string()) self.assertEqual(httplib.BAD_REQUEST, response.status_code) def test_signaling_refuses_if_node_in_unexpected_state(self): node = factory.make_Node(status=NODE_STATUS.NEW) client = 
make_node_client(node=node) response = call_signal(client) self.assertEqual( ( httplib.CONFLICT, "Node wasn't commissioning/installing (status is New)", ), (response.status_code, response.content)) def test_signaling_accepts_WORKING_status(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='WORKING') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.COMMISSIONING, reload_object(node).status) def test_signaling_stores_script_result(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) filename = factory.make_string() response = call_signal( client, script_result=script_result, files={filename: factory.make_string().encode('ascii')}) self.assertEqual(httplib.OK, response.status_code, response.content) result = NodeResult.objects.get(node=node) self.assertEqual(script_result, result.script_result) def test_signaling_stores_empty_script_result(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal( client, script_result=random.randint(0, 10), files={factory.make_string(): ''.encode('ascii')}) self.assertEqual(httplib.OK, response.status_code, response.content) result = NodeResult.objects.get(node=node) self.assertEqual('', result.data) def test_signaling_WORKING_keeps_owner(self): user = factory.make_User() node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) node.owner = user node.save() client = make_node_client(node=node) response = call_signal(client, status='WORKING') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(user, reload_object(node).owner) def test_signaling_commissioning_success_makes_node_Ready(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) def test_signalling_commissioning_success_cancels_monitor(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code, response.content) self.assertThat(node.stop_transition_monitor, MockCalledOnceWith()) def test_signaling_commissioning_success_is_idempotent(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) call_signal(client, status='OK') response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) def test_signaling_commissioning_success_clears_owner(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) node.owner = factory.make_User() node.save() client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertIsNone(reload_object(node).owner) def test_signaling_commissioning_failure_makes_node_Failed_Tests(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_failure_does_not_populate_tags(self): 
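# As the neighbouring tests show, a FAILED commissioning signal moves the
# node to FAILED_COMMISSIONING, cancels the transition monitor and clears
# the owner, but tag repopulation is reserved for the success path, so
# populate_tags_for_single_node must not be called here.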
populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertThat(populate_tags_for_single_node, MockNotCalled()) def test_signalling_commissioning_failure_cancels_monitor(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code, response.content) self.assertThat(node.stop_transition_monitor, MockCalledOnceWith()) def test_signaling_commissioning_failure_is_idempotent(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) call_signal(client, status='FAILED') response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) def test_signaling_commissioning_failure_sets_node_error(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) error_text = factory.make_string() response = call_signal(client, status='FAILED', error=error_text) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(error_text, reload_object(node).error) def test_signaling_commissioning_failure_clears_owner(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) node.owner = factory.make_User() node.save() client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertIsNone(reload_object(node).owner) def test_signaling_no_error_clears_existing_error(self): node = factory.make_Node( status=NODE_STATUS.COMMISSIONING, error=factory.make_string()) client = make_node_client(node=node) response = call_signal(client) self.assertEqual(httplib.OK, response.status_code) self.assertEqual('', reload_object(node).error) def test_signalling_stores_files_for_any_status(self): statuses = ['WORKING', 'OK', 'FAILED'] filename = factory.make_string() nodes = { status: factory.make_Node(status=NODE_STATUS.COMMISSIONING) for status in statuses} for status, node in nodes.items(): client = make_node_client(node=node) script_result = random.randint(0, 10) call_signal( client, status=status, script_result=script_result, files={filename: factory.make_bytes()}) self.assertEqual( {status: filename for status in statuses}, { status: NodeResult.objects.get(node=node).name for status, node in nodes.items()}) def test_signal_stores_file_contents(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) text = factory.make_string().encode('ascii') script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files={'file.txt': text}) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( text, NodeResult.objects.get_data(node, 'file.txt')) def test_signal_stores_binary(self): unicode_text = '<\u2621>' node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files={'file.txt': unicode_text.encode('utf-8')}) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( unicode_text.encode("utf-8"), NodeResult.objects.get_data(node, 
'file.txt')) def test_signal_stores_multiple_files(self): contents = { factory.make_string(): factory.make_string().encode('ascii') for counter in range(3)} node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files=contents) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( contents, { result.name: result.data for result in node.noderesult_set.all() }) def test_signal_stores_files_up_to_documented_size_limit(self): # The documented size limit for commissioning result files: # one megabyte. What happens above this limit is none of # anybody's business, but files up to this size should work. size_limit = 2 ** 20 contents = factory.make_string(size_limit, spaces=True) node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) script_result = random.randint(0, 10) response = call_signal( client, script_result=script_result, files={'output.txt': contents.encode('utf-8')}) self.assertEqual(httplib.OK, response.status_code) stored_data = NodeResult.objects.get_data( node, 'output.txt') self.assertEqual(size_limit, len(stored_data)) def test_signal_stores_virtual_tag_on_node_if_virtual(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) content = 'virtual'.encode('utf-8') response = call_signal( client, script_result=0, files={'00-maas-02-virtuality.out': content}) self.assertEqual(httplib.OK, response.status_code) node = reload_object(node) self.assertEqual( ["virtual"], [each_tag.name for each_tag in node.tags.all()]) def test_signal_removes_virtual_tag_on_node_if_not_virtual(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) tag, _ = Tag.objects.get_or_create(name='virtual') node.tags.add(tag) client = make_node_client(node=node) content = 'notvirtual'.encode('utf-8') response = call_signal( client, script_result=0, files={'00-maas-02-virtuality.out': content}) self.assertEqual(httplib.OK, response.status_code) node = reload_object(node) self.assertEqual( [], [each_tag.name for each_tag in node.tags.all()]) def test_signal_leaves_untagged_physical_node_unaltered(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) content = 'notvirtual'.encode('utf-8') response = call_signal( client, script_result=0, files={'00-maas-02-virtuality.out': content}) self.assertEqual(httplib.OK, response.status_code) node = reload_object(node) self.assertEqual(0, len(node.tags.all())) def test_signal_current_power_type_mscm_does_not_store_params(self): node = factory.make_Node( power_type="mscm", status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) response = call_signal( client, power_type="moonshot", power_parameters=json.dumps(params)) self.assertEqual(httplib.OK, response.status_code, response.content) node = reload_object(node) self.assertEqual("mscm", node.power_type) self.assertNotEqual(params, node.power_parameters) def test_signal_refuses_bad_power_type(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, power_type="foo") self.assertEqual( (httplib.BAD_REQUEST, "Bad power_type 'foo'"), (response.status_code, response.content)) def test_signal_power_type_stores_params(self): node = 
factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) response = call_signal( client, power_type="ipmi", power_parameters=json.dumps(params)) self.assertEqual(httplib.OK, response.status_code, response.content) node = reload_object(node) self.assertEqual("ipmi", node.power_type) self.assertEqual(params, node.power_parameters) def test_signal_power_type_lower_case_works(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) params = dict( power_address=factory.make_string(), power_user=factory.make_string(), power_pass=factory.make_string()) response = call_signal( client, power_type="ipmi", power_parameters=json.dumps(params)) self.assertEqual(httplib.OK, response.status_code, response.content) node = reload_object(node) self.assertEqual( params, node.power_parameters) def test_signal_invalid_power_parameters(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal( client, power_type="ipmi", power_parameters="badjson") self.assertEqual( (httplib.BAD_REQUEST, "Failed to parse JSON power_parameters"), (response.status_code, response.content)) def test_signal_calls_release_leases_if_not_WORKING(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code, response.content) self.assertThat(node.release_leases, MockCalledOnceWith()) def test_signal_does_not_call_release_leases_if_WORKING(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) response = call_signal(client, status='WORKING') self.assertEqual(httplib.OK, response.status_code, response.content) self.assertThat(node.release_leases, MockNotCalled()) def test_signal_doesnt_call_release_leases_if_not_commissioning(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code, response.content) self.assertThat(node.release_leases, MockNotCalled()) def test_signal_sets_default_storage_layout_if_OK(self): self.patch_autospec(Node, "set_default_storage_layout") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='OK', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) self.assertThat( Node.set_default_storage_layout, MockCalledOnceWith(node)) def test_signal_does_not_set_default_storage_layout_if_WORKING(self): self.patch_autospec(Node, "set_default_storage_layout") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='WORKING', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertThat( Node.set_default_storage_layout, MockNotCalled()) def test_signal_does_not_set_default_storage_layout_if_FAILED(self): self.patch_autospec(Node, "set_default_storage_layout") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='FAILED', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertThat( 
Node.set_default_storage_layout, MockNotCalled()) def test_signal_calls_sets_initial_network_config_if_OK(self): mock_set_initial_networking_configuration = self.patch_autospec( Node, "set_initial_networking_configuration") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='OK', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.READY, reload_object(node).status) self.assertThat( mock_set_initial_networking_configuration, MockCalledOnceWith(node)) def test_signal_doesnt_call_sets_initial_network_config_if_WORKING(self): mock_set_initial_networking_configuration = self.patch_autospec( Node, "set_initial_networking_configuration") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='WORKING', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertThat( mock_set_initial_networking_configuration, MockNotCalled()) def test_signal_doesnt_call_sets_initial_network_config_if_FAILED(self): mock_set_initial_networking_configuration = self.patch_autospec( Node, "set_initial_networking_configuration") node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node) response = call_signal(client, status='FAILED', script_result='0') self.assertEqual(httplib.OK, response.status_code) self.assertThat( mock_set_initial_networking_configuration, MockNotCalled()) class TestDiskErasingAPI(MAASServerTestCase): def test_signaling_erasing_failure_makes_node_failed_erasing(self): node = factory.make_Node( status=NODE_STATUS.DISK_ERASING, owner=factory.make_User()) client = make_node_client(node=node) response = call_signal(client, status='FAILED') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DISK_ERASING, reload_object(node).status) def test_signaling_erasing_ok_releases_node(self): node = factory.make_Node( status=NODE_STATUS.DISK_ERASING, owner=factory.make_User()) client = make_node_client(node=node) response = call_signal(client, status='OK') self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.RELEASING, reload_object(node).status) class TestByMACMetadataAPI(DjangoTestCase): def test_api_retrieves_node_metadata_by_mac(self): node = factory.make_Node_with_Interface_on_Subnet() iface = node.get_boot_interface() url = reverse( 'metadata-meta-data-by-mac', args=['latest', iface.mac_address, 'instance-id']) response = self.client.get(url) self.assertEqual( (httplib.OK, iface.node.system_id), (response.status_code, response.content)) def test_api_retrieves_node_userdata_by_mac(self): node = factory.make_Node_with_Interface_on_Subnet( status=NODE_STATUS.COMMISSIONING) iface = node.get_boot_interface() user_data = factory.make_string().encode('ascii') NodeUserData.objects.set_user_data(iface.node, user_data) url = reverse( 'metadata-user-data-by-mac', args=['latest', iface.mac_address]) response = self.client.get(url) self.assertEqual( (httplib.OK, user_data), (response.status_code, response.content)) def test_api_normally_disallows_anonymous_node_metadata_access(self): self.patch(settings, 'ALLOW_UNSAFE_METADATA_ACCESS', False) node = factory.make_Node_with_Interface_on_Subnet() iface = node.get_boot_interface() url = reverse( 'metadata-meta-data-by-mac', args=['latest', iface.mac_address, 'instance-id']) response = self.client.get(url) self.assertEqual(httplib.FORBIDDEN, response.status_code) 
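# ---------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the original test suite.
# It condenses the signal contract exercised by the tests above, using
# only helpers already defined in this module (factory, NODE_STATUS,
# make_node_client, call_signal, reload_object, httplib). The function
# name is hypothetical and exists purely for illustration.
def _example_commissioning_signal_roundtrip():
    """Walk a commissioning node through the signal statuses."""
    node = factory.make_Node(status=NODE_STATUS.COMMISSIONING)
    client = make_node_client(node=node)
    # 'WORKING' reports progress and leaves the node COMMISSIONING.
    assert call_signal(client, status='WORKING').status_code == httplib.OK
    # 'OK' completes commissioning and moves the node to READY;
    # 'FAILED' would move it to FAILED_COMMISSIONING instead.
    assert call_signal(client, status='OK').status_code == httplib.OK
    return reload_object(node).status  # Expected: NODE_STATUS.READY.
# ---------------------------------------------------------------------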
class TestNetbootOperationAPI(DjangoTestCase): def test_netboot_off(self): node = factory.make_Node(netboot=True) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'netboot_off'}) node = reload_object(node) self.assertFalse(node.netboot, response) def test_netboot_on(self): node = factory.make_Node(netboot=False) client = make_node_client(node=node) url = reverse('metadata-version', args=['latest']) response = client.post(url, {'op': 'netboot_on'}) node = reload_object(node) self.assertTrue(node.netboot, response) class TestAnonymousAPI(DjangoTestCase): def test_anonymous_netboot_off(self): node = factory.make_Node(netboot=True) anon_netboot_off_url = reverse( 'metadata-node-by-id', args=['latest', node.system_id]) response = self.client.post( anon_netboot_off_url, {'op': 'netboot_off'}) node = reload_object(node) self.assertEqual( (httplib.OK, False), (response.status_code, node.netboot), response) def test_anonymous_get_enlist_preseed(self): # The preseed for enlistment can be obtained anonymously. anon_enlist_preseed_url = reverse( 'metadata-enlist-preseed', args=['latest']) # Fake the preseed so we're just exercising the view. fake_preseed = factory.make_string() self.patch(api, "get_enlist_preseed", Mock(return_value=fake_preseed)) response = self.client.get( anon_enlist_preseed_url, {'op': 'get_enlist_preseed'}) self.assertEqual( (httplib.OK, "text/plain", fake_preseed), (response.status_code, response["Content-Type"], response.content), response) def test_anonymous_get_enlist_preseed_detects_request_origin(self): ng_url = 'http://%s' % factory.make_name('host') network = IPNetwork("10.1.1/24") ip = factory.pick_ip_in_network(network) factory.make_NodeGroup( maas_url=ng_url, network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) anon_enlist_preseed_url = reverse( 'metadata-enlist-preseed', args=['latest']) response = self.client.get( anon_enlist_preseed_url, {'op': 'get_enlist_preseed'}, REMOTE_ADDR=ip) self.assertThat(response.content, Contains(ng_url)) def test_anonymous_get_preseed(self): # The preseed for a node can be obtained anonymously. node = factory.make_Node() anon_node_url = reverse( 'metadata-node-by-id', args=['latest', node.system_id]) # Fake the preseed so we're just exercising the view. 
fake_preseed = factory.make_string()
        self.patch(api, "get_preseed", lambda node: fake_preseed)
        response = self.client.get(
            anon_node_url, {'op': 'get_preseed'})
        self.assertEqual(
            (httplib.OK, "text/plain", fake_preseed),
            (response.status_code,
             response["Content-Type"],
             response.content),
            response)

    def test_anonymous_netboot_off_adds_installation_finished_event(self):
        node = factory.make_Node(netboot=True)
        anon_netboot_off_url = reverse(
            'metadata-node-by-id', args=['latest', node.system_id])
        self.client.post(
            anon_netboot_off_url, {'op': 'netboot_off'})
        latest_event = Event.objects.filter(node=node).last()
        self.assertEqual(
            (
                EVENT_TYPES.NODE_INSTALLATION_FINISHED,
                EVENT_DETAILS[
                    EVENT_TYPES.NODE_INSTALLATION_FINISHED].description,
                "Node disabled netboot",
            ),
            (
                latest_event.type.name,
                latest_event.type.description,
                latest_event.description,
            ))


class TestEnlistViews(DjangoTestCase):
    """Tests for the enlistment metadata views."""

    def test_get_instance_id(self):
        # instance-id must be available.
        md_url = reverse(
            'enlist-metadata-meta-data', args=['latest', 'instance-id'])
        response = self.client.get(md_url)
        self.assertEqual(
            (httplib.OK, "text/plain"),
            (response.status_code, response["Content-Type"]))
        # Just insist the content is non-empty. It doesn't matter what it is.
        self.assertTrue(response.content)

    def test_get_hostname(self):
        # local-hostname must be available.
        md_url = reverse(
            'enlist-metadata-meta-data', args=['latest', 'local-hostname'])
        response = self.client.get(md_url)
        self.assertEqual(
            (httplib.OK, "text/plain"),
            (response.status_code, response["Content-Type"]))
        # Just insist the content is non-empty. It doesn't matter what it is.
        self.assertTrue(response.content)

    def test_public_keys_returns_empty(self):
        # An enlisting node has no SSH keys, but it does request them.
        # If the node insists, we give it the empty list.
md_url = reverse( 'enlist-metadata-meta-data', args=['latest', 'public-keys']) response = self.client.get(md_url) self.assertEqual( (httplib.OK, ""), (response.status_code, response.content)) def test_metadata_bogus_is_404(self): md_url = reverse( 'enlist-metadata-meta-data', args=['latest', 'BOGUS']) response = self.client.get(md_url) self.assertEqual(httplib.NOT_FOUND, response.status_code) def test_get_userdata(self): # instance-id must be available ud_url = reverse('enlist-metadata-user-data', args=['latest']) fake_preseed = factory.make_string() self.patch( api, "get_enlist_userdata", Mock(return_value=fake_preseed)) response = self.client.get(ud_url) self.assertEqual( (httplib.OK, "text/plain", fake_preseed), (response.status_code, response["Content-Type"], response.content), response) def test_get_userdata_detects_request_origin(self): nodegroup_url = 'http://%s' % factory.make_name('host') maas_url = factory.make_simple_http_url() self.useFixture(RegionConfigurationFixture(maas_url=maas_url)) network = IPNetwork("10.1.1/24") ip = factory.pick_ip_in_network(network) factory.make_NodeGroup( maas_url=nodegroup_url, network=network, management=NODEGROUPINTERFACE_MANAGEMENT.DHCP) url = reverse('enlist-metadata-user-data', args=['latest']) response = self.client.get(url, REMOTE_ADDR=ip) self.assertThat( response.content, MatchesAll(Contains(nodegroup_url), Not(Contains(maas_url)))) def test_metadata_list(self): # /enlist/latest/metadata request should list available keys md_url = reverse('enlist-metadata-meta-data', args=['latest', ""]) response = self.client.get(md_url) self.assertEqual( (httplib.OK, "text/plain"), (response.status_code, response["Content-Type"])) self.assertTrue('instance-id' in response.content.splitlines()) self.assertTrue('local-hostname' in response.content.splitlines()) def test_api_version_contents_list(self): # top level api (/enlist/latest/) must list 'metadata' and 'userdata' md_url = reverse('enlist-version', args=['latest']) response = self.client.get(md_url) self.assertEqual( (httplib.OK, "text/plain"), (response.status_code, response["Content-Type"])) self.assertTrue('user-data' in response.content.splitlines()) self.assertTrue('meta-data' in response.content.splitlines()) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_api_status.py0000644000000000000000000006477213056115004023420 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the metadata progress reporting API.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import base64 import bz2 import httplib import json import urllib from django.core.urlresolvers import reverse from maasserver.enum import NODE_STATUS from maasserver.models import ( Event, Node, Tag, ) from maasserver.testing.factory import factory from maasserver.testing.oauthclient import OAuthAuthenticatedClient from maasserver.testing.orm import reload_object from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from metadataserver import api from metadataserver.models import ( NodeKey, NodeResult, ) from metadataserver.nodeinituser import get_node_init_user from mock import ANY def make_node_client(node=None): """Create a test client logged in as if it were `node`.""" if node is None: node = factory.make_Node() token = NodeKey.objects.get_token_for_node(node) return OAuthAuthenticatedClient(get_node_init_user(), token) def call_status(client=None, node=None, payload=None): """Call the API's status endpoint. The API does not receive any form data, just a JSON encoding several values. """ if node is None: node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) if client is None: client = make_node_client(node) url = reverse('metadata-status', args=[node.system_id]) return client.post( url, content_type='application/json', data=json.dumps(payload)) class TestStatusAPI(MAASServerTestCase): def test_other_user_than_node_cannot_signal_installation_result(self): node = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = OAuthAuthenticatedClient(factory.make_User()) response = call_status(client, node) self.assertEqual(httplib.FORBIDDEN, response.status_code) self.assertEqual( NODE_STATUS.DEPLOYING, reload_object(node).status) # No node events were logged. self.assertFalse(Event.objects.filter(node=node).exists()) def test_status_installation_result_does_not_affect_other_node(self): node1 = factory.make_Node(status=NODE_STATUS.DEPLOYING) node2 = factory.make_Node(status=NODE_STATUS.DEPLOYING) client = make_node_client(node1) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node1, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.DEPLOYING, reload_object(node2).status) # Check last node1 event. self.assertEqual( "'curtin' Command Install", Event.objects.filter(node=node1).last().description) # There must me no events for node2. self.assertFalse(Event.objects.filter(node=node2).exists()) def test_status_installation_success_leaves_node_deploying(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(NODE_STATUS.DEPLOYING, reload_object(node).status) # Check last node event. 
self.assertEqual( "'curtin' Command Install", Event.objects.filter(node=node).last().description) def test_status_with_non_json_payload_fails(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } client = make_node_client(node) url = reverse('metadata-status', args=[node.system_id]) response = client.post( url, content_type='application/json', data=urllib.urlencode(payload)) self.assertEqual(httplib.BAD_REQUEST, response.status_code) def test_status_comissioning_success_populates_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertThat( populate_tags_for_single_node, MockCalledOnceWith(ANY, node)) def test_status_comissioning_success_sets_default_storage_layout(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) self.patch_autospec(Node, "set_default_storage_layout") client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertThat( Node.set_default_storage_layout, MockCalledOnceWith(node)) def test_status_comissioning_success_sets_node_network_configuration(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) mock_set_initial_networking_configuration = self.patch_autospec( Node, "set_initial_networking_configuration") client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertThat( mock_set_initial_networking_configuration, MockCalledOnceWith(node)) def test_status_commissioning_failure_leaves_node_failed(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) # Check last node event. 
self.assertEqual( "'curtin' Commissioning", Event.objects.filter(node=node).last().description) def test_status_commissioning_failure_clears_owner(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING, owner=user) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', } self.assertEqual(user, node.owner) # Node has an owner response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) self.assertIsNone(reload_object(node).owner) def test_status_installation_failure_leaves_node_failed(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) # Check last node event. self.assertEqual( "'curtin' Command Install", Event.objects.filter(node=node).last().description) def test_status_installation_fail_leaves_node_failed(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAIL', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) # Check last node event. 
self.assertEqual( "'curtin' Command Install", Event.objects.filter(node=node).last().description) def test_status_installation_failure_doesnt_clear_owner(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.DEPLOYING, owner=user) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } self.assertEqual(user, node.owner) # Node has an owner response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DEPLOYMENT, reload_object(node).status) self.assertIsNotNone(reload_object(node).owner) def test_status_commissioning_failure_does_not_populate_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_COMMISSIONING, reload_object(node).status) self.assertThat(populate_tags_for_single_node, MockNotCalled()) def test_status_erasure_failure_leaves_node_failed(self): node = factory.make_Node( interface=True, status=NODE_STATUS.DISK_ERASING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'cmd-erase', 'description': 'Erasing disk', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DISK_ERASING, reload_object(node).status) # Check last node event. self.assertEqual( "'curtin' Erasing disk", Event.objects.filter(node=node).last().description) def test_status_erasure_failure_does_not_populate_tags(self): populate_tags_for_single_node = self.patch( api, "populate_tags_for_single_node") node = factory.make_Node( interface=True, status=NODE_STATUS.DISK_ERASING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'cmd-erase', 'description': 'Erasing disk', } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DISK_ERASING, reload_object(node).status) self.assertThat(populate_tags_for_single_node, MockNotCalled()) def test_status_erasure_failure_clears_owner(self): user = factory.make_User() node = factory.make_Node( interface=True, status=NODE_STATUS.DISK_ERASING, owner=user) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'cmd-erase', 'description': 'Erasing disk', } self.assertEqual(user, node.owner) # Node has an owner response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( NODE_STATUS.FAILED_DISK_ERASING, reload_object(node).status) self.assertIsNone(reload_object(node).owner) def test_status_with_file_bad_encoder_fails(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) contents = 'These are the contents of the file.' 
encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "sample.txt", "encoding": "uuencode", "compression": "bzip2", "content": encoded_content } ] } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertEqual('Invalid encoding: uuencode', response.content) def test_status_with_file_bad_compression_fails(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) contents = 'These are the contents of the file.' encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "sample.txt", "encoding": "base64", "compression": "jpeg", "content": encoded_content } ] } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertEqual('Invalid compression: jpeg', response.content) def test_status_with_file_no_compression_succeeds(self): node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) contents = 'These are the contents of the file.' encoded_content = base64.encodestring(contents) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "sample.txt", "encoding": "base64", "content": encoded_content } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual(contents, NodeResult.objects.get(node=node).data) def test_status_with_file_invalid_statuses_fails(self): """Adding files should fail for every status that's neither COMMISSIONING nor DEPLOYING""" for node_status in [ NODE_STATUS.DEFAULT, NODE_STATUS.NEW, NODE_STATUS.FAILED_COMMISSIONING, NODE_STATUS.MISSING, NODE_STATUS.READY, NODE_STATUS.RESERVED, NODE_STATUS.DEPLOYED, NODE_STATUS.RETIRED, NODE_STATUS.BROKEN, NODE_STATUS.ALLOCATED, NODE_STATUS.FAILED_DEPLOYMENT, NODE_STATUS.RELEASING, NODE_STATUS.FAILED_RELEASING, NODE_STATUS.DISK_ERASING, NODE_STATUS.FAILED_DISK_ERASING]: node = factory.make_Node(interface=True, status=node_status) client = make_node_client(node=node) contents = 'These are the contents of the file.' encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "sample.txt", "encoding": "base64", "compression": "bzip2", "content": encoded_content } ] } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertEqual( 'Invalid status for saving files: %d' % node_status, response.content) def test_status_with_file_succeeds(self): """Adding files should succeed for every status that's either COMMISSIONING or DEPLOYING""" for node_status, target_status in [ (NODE_STATUS.COMMISSIONING, NODE_STATUS.FAILED_COMMISSIONING), (NODE_STATUS.DEPLOYING, NODE_STATUS.FAILED_DEPLOYMENT)]: node = factory.make_Node(interface=True, status=node_status) client = make_node_client(node=node) contents = 'These are the contents of the file.' 
encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "sample.txt", "encoding": "base64", "compression": "bzip2", "content": encoded_content } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) self.assertEqual( target_status, reload_object(node).status) # Check the node result. self.assertEqual(contents, NodeResult.objects.get(node=node).data) def test_status_with_results_succeeds(self): """Adding a script result should succeed""" node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) contents = 'These are the contents of the file.' encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "lshw", "encoding": "base64", "compression": "bzip2", "content": encoded_content, "result": -42 } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) # Check the node result. node_result = NodeResult.objects.get(node=node) self.assertEqual(contents, node_result.data) self.assertEqual(-42, node_result.script_result) def test_status_with_results_no_script_result_defaults_to_zero(self): """Adding a script result should succeed without a return code defaults it to zero.""" node = factory.make_Node( interface=True, status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) contents = 'These are the contents of the file.' encoded_content = base64.encodestring(bz2.compress(contents)) payload = { 'event_type': 'finish', 'result': 'FAILURE', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "lshw", "encoding": "base64", "compression": "bzip2", "content": encoded_content, } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) # Check the node result. 
node_result = NodeResult.objects.get(node=node) self.assertEqual(0, node_result.script_result) def test_status_with_missing_event_type_fails(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertIn('Missing parameter in status message', response.content) def test_status_with_missing_origin_fails(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'name': 'cmd-install', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertIn('Missing parameter in status message', response.content) def test_status_with_missing_name_fails(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'description': 'Command Install', } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertIn('Missing parameter in status message', response.content) def test_status_with_missing_description_fails(self): node = factory.make_Node(interface=True, status=NODE_STATUS.DEPLOYING) client = make_node_client(node=node) payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'cmd-install', } response = call_status(client, node, payload) self.assertEqual(httplib.BAD_REQUEST, response.status_code) self.assertIn('Missing parameter in status message', response.content) def test_status_stores_virtual_tag_on_node_if_virtual(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) client = make_node_client(node=node) content = 'virtual'.encode('utf-8') payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "00-maas-02-virtuality.out", "encoding": "base64", "content": base64.encodestring(content), } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) node = reload_object(node) self.assertEqual( ["virtual"], [each_tag.name for each_tag in node.tags.all()]) self.assertEqual(content, NodeResult.objects.get(node=node).data) self.assertEqual( "00-maas-02-virtuality.out", NodeResult.objects.get(node=node).name) def test_status_removes_virtual_tag_on_node_if_not_virtual(self): node = factory.make_Node(status=NODE_STATUS.COMMISSIONING) tag, _ = Tag.objects.get_or_create(name='virtual') node.tags.add(tag) client = make_node_client(node=node) content = 'notvirtual'.encode('utf-8') payload = { 'event_type': 'finish', 'result': 'SUCCESS', 'origin': 'curtin', 'name': 'commissioning', 'description': 'Commissioning', 'files': [ { "path": "00-maas-02-virtuality.out", "encoding": "base64", "content": base64.encodestring(content), } ] } response = call_status(client, node, payload) self.assertEqual(httplib.OK, response.status_code) node = reload_object(node) self.assertEqual( [], [each_tag.name for each_tag in node.tags.all()]) self.assertEqual(content, NodeResult.objects.get(node=node).data) self.assertEqual( "00-maas-02-virtuality.out", 
NodeResult.objects.get(node=node).name) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_fields.py0000644000000000000000000000741313056115004022477 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test custom field types.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from base64 import b64encode from maasserver.testing.testcase import MAASServerTestCase from maastesting.djangotestcase import TestModelMixin from maastesting.factory import factory from metadataserver.fields import ( Bin, BinaryField, ) from metadataserver.tests.models import BinaryFieldModel class TestBin(MAASServerTestCase): """Test Bin helper class.""" def test_is_basically_bytes(self): self.assertEqual(b"Hello", Bin(b"Hello")) def test_refuses_to_construct_from_unicode(self): self.assertRaises(AssertionError, Bin, "Hello") def test_refuses_to_construct_from_None(self): self.assertRaises(AssertionError, Bin, None) def test_emits_base64(self): # Piston hooks onto an __emittable__() method, if present. # Bin() returns a base-64 encoded string so that it can be # transmitted in JSON. self.assertEqual(b"", Bin(b"").__emittable__()) example_bytes = factory.make_bytes() self.assertEqual( b64encode(example_bytes), Bin(example_bytes).__emittable__()) class TestBinaryField(TestModelMixin, MAASServerTestCase): """Test BinaryField. Uses BinaryFieldModel test model.""" app = 'metadataserver.tests' def test_stores_and_retrieves_None(self): binary_item = BinaryFieldModel() self.assertIsNone(binary_item.data) binary_item.save() self.assertIsNone( BinaryFieldModel.objects.get(id=binary_item.id).data) def test_stores_and_retrieves_empty_data(self): binary_item = BinaryFieldModel(data=Bin(b'')) self.assertEqual(b'', binary_item.data) binary_item.save() self.assertEqual( b'', BinaryFieldModel.objects.get(id=binary_item.id).data) def test_does_not_truncate_at_zero_bytes(self): data = b"BEFORE THE ZERO\x00AFTER THE ZERO" binary_item = BinaryFieldModel(data=Bin(data)) self.assertEqual(data, binary_item.data) binary_item.save() self.assertEqual( data, BinaryFieldModel.objects.get(id=binary_item.id).data) def test_stores_and_retrieves_binary_data(self): data = b"\x01\x02\xff\xff\xfe\xff\xff\xfe" binary_item = BinaryFieldModel(data=Bin(data)) self.assertEqual(data, binary_item.data) binary_item.save() self.assertEqual( data, BinaryFieldModel.objects.get(id=binary_item.id).data) def test_returns_bytes_not_text(self): binary_item = BinaryFieldModel(data=Bin(b"Data")) binary_item.save() retrieved_data = BinaryFieldModel.objects.get(id=binary_item.id).data self.assertIsInstance(retrieved_data, bytes) def test_looks_up_data(self): data = b"Binary item" binary_item = BinaryFieldModel(data=Bin(data)) binary_item.save() self.assertEqual( binary_item, BinaryFieldModel.objects.get(data=Bin(data))) def test_get_default_returns_None(self): field = BinaryField(null=True) self.patch(field, "default", None) self.assertIsNone(field.get_default()) def test_get_default_returns_Bin(self): field = BinaryField(null=True) self.patch(field, "default", Bin(b"wotcha")) self.assertEqual(Bin(b"wotcha"), field.get_default()) def test_get_default_returns_Bin_from_bytes(self): field = BinaryField(null=True) self.patch(field, "default", b"wotcha") self.assertEqual(Bin(b"wotcha"), field.get_default()) 
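# ---------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the original file. Per
# the tests above: Bin marks raw bytes as binary so BinaryField stores
# them verbatim (no truncation at NUL, no text coercion), and its
# __emittable__() hook hands Piston a base64 string that is safe to
# embed in JSON. The function name is hypothetical.
def _example_bin_roundtrip(payload=b"BEFORE\x00AFTER\xff"):
    wrapped = Bin(payload)           # Asserts the input is bytes.
    assert wrapped == payload        # Bin behaves as plain bytes.
    return wrapped.__emittable__()   # Same as b64encode(payload).
# ---------------------------------------------------------------------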
maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_migrations.py0000644000000000000000000000132113056115004023375 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Sanity checks for database migrations. These tests need to be included in each of the MAAS applications that has South-managed database migrations. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.db_migrations import detect_sequence_clashes from maastesting.testcase import MAASTestCase class TestMigrations(MAASTestCase): def test_migrations_have_unique_numbers(self): self.assertEqual([], detect_sequence_clashes('metadataserver')) maas-1.9.5+bzr4599.orig/src/metadataserver/tests/test_nodeinituser.py0000644000000000000000000000230613056115004023735 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Model tests for metadata server.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from django.contrib.auth.models import User from maasserver.models import UserProfile from maasserver.testing.testcase import MAASServerTestCase from metadataserver.nodeinituser import ( get_node_init_user, user_name, ) from testtools import ExpectedException class TestNodeInitUser(MAASServerTestCase): """Test the special "user" that makes metadata requests from nodes.""" def test_always_returns_same_user(self): node_init_user = get_node_init_user() self.assertEqual(node_init_user.id, get_node_init_user().id) def test_holds_node_init_user(self): user = get_node_init_user() self.assertIsInstance(user, User) self.assertEqual(user_name, user.username) def test_node_init_user_has_no_profile(self): user = get_node_init_user() with ExpectedException(UserProfile.DoesNotExist): user.userprofile maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/__init__.py0000644000000000000000000000000013056115004022517 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/commissioning.py0000644000000000000000000000143113056115004023647 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """User data generation for Commissioning.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from metadataserver.user_data.snippets import get_userdata_template_dir from metadataserver.user_data.utils import ( generate_user_data as _generate_user_data, ) def generate_user_data(node): """Produce the main commissioning script. :rtype: `bytes` """ userdata_dir = get_userdata_template_dir() result = _generate_user_data( node, userdata_dir, 'user_data.template', 'user_data_config.template') return result maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/disk_erasing.py0000644000000000000000000000146313056115004023440 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Disk erasing userdata generation.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "generate_user_data", ] from metadataserver.user_data.snippets import get_userdata_template_dir from metadataserver.user_data.utils import ( generate_user_data as _generate_user_data, ) def generate_user_data(node): """Produce the disk erase script. :rtype: `bytes` """ userdata_dir = get_userdata_template_dir() result = _generate_user_data( node, userdata_dir, 'user_data_disk_erasing.template', 'user_data_config.template') return result maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/poweroff.py0000644000000000000000000000145113056115004022622 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Poweroff userdata generation.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "generate_user_data", ] from metadataserver.user_data.snippets import get_userdata_template_dir from metadataserver.user_data.utils import ( generate_user_data as _generate_user_data, ) def generate_user_data(node): """Produce the poweroff script. :rtype: `bytes` """ userdata_dir = get_userdata_template_dir() result = _generate_user_data( node, userdata_dir, 'user_data_poweroff.template', 'user_data_config.template') return result maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/snippets.py0000644000000000000000000000412713056115004022643 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Low-level routines for access to snippets. These are used by the user-data code, but also by `setup.py`. That's why importing this must not pull in any unnecessary framework modules etc. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'list_snippets', 'read_snippet', 'strip_name', 'get_snippet_context', 'get_userdata_template_dir', ] import os from provisioningserver.utils import locate_config from provisioningserver.utils.fs import read_text_file USERDATA_BASE_DIR = 'templates/commissioning-user-data' def get_userdata_template_dir(): """Return the absolute location of the userdata template directory.""" return locate_config(USERDATA_BASE_DIR) def get_snippet_context(snippets_dir=None, encoding='utf-8'): """Return the context of all of the snippets.""" if snippets_dir is None: snippets_dir = os.path.join(get_userdata_template_dir(), 'snippets') snippets = { strip_name(name): read_snippet(snippets_dir, name, encoding=encoding) for name in list_snippets(snippets_dir) } return snippets def read_snippet(snippets_dir, name, encoding='utf-8'): """Read a snippet file. :rtype: `unicode` """ return read_text_file(os.path.join(snippets_dir, name), encoding=encoding) def is_snippet(filename): """Does `filename` represent a valid snippet name?""" return all([ not filename.startswith('.'), filename != '__init__.py', filename != 'tests', not filename.endswith('.pyc'), not filename.endswith('~'), ]) def list_snippets(snippets_dir): """List names of available snippets.""" return filter(is_snippet, os.listdir(snippets_dir)) def strip_name(snippet_name): """Canonicalize a snippet name.""" # Dot suffixes do not work well in tempita variable names. 
return snippet_name.replace('.', '_') maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/0000755000000000000000000000000013056115004021562 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/utils.py0000644000000000000000000000571713056115004022144 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Generate commissioning user-data from template and code snippets. This combines the `user_data.template` and the snippets of code in the `snippets` directory into the main commissioning script. Its contents are not customizable. To inject custom code, use the :class:`CommissioningScript` model. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'generate_user_data', ] from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import os.path from maasserver.preseed import get_preseed_context from metadataserver.user_data.snippets import get_snippet_context import tempita ENCODING = 'utf-8' def generate_user_data(node, userdata_dir, userdata_template_name, config_template_name): """Produce a user_data script for use by commissioning and other operations. The main template file contains references to so-called ``snippets'' which are read in here, and substituted. In addition, the regular preseed context variables are available (such as 'http_proxy'). The final result is a MIME multipart message that consists of a 'cloud-config' part and an 'x-shellscript' part. This allows maximum flexibility with cloud-init as we read in a template 'user_data_config.template' to set cloud-init configs before the script is run. :rtype: `bytes` """ userdata_template_file = os.path.join( userdata_dir, userdata_template_name) config_template_file = os.path.join( userdata_dir, config_template_name) userdata_template = tempita.Template.from_filename( userdata_template_file, encoding=ENCODING) config_template = tempita.Template.from_filename( config_template_file, encoding=ENCODING) # The preseed context is a dict containing various configs that the # templates can use. nodegroup = node.nodegroup preseed_context = get_preseed_context(nodegroup=nodegroup) preseed_context['node'] = node # Render the snippets in the main template. snippets = get_snippet_context(encoding=ENCODING) snippets.update(preseed_context) userdata = userdata_template.substitute(snippets).encode(ENCODING) # Render the config. config = config_template.substitute(preseed_context) # Create a MIME multipart message from the config and the userdata. config_part = MIMEText(config, 'cloud-config', ENCODING) config_part.add_header( 'Content-Disposition', 'attachment; filename="config"') data_part = MIMEText(userdata, 'x-shellscript', ENCODING) data_part.add_header( 'Content-Disposition', 'attachment; filename="user_data.sh"') combined = MIMEMultipart() combined.attach(config_part) combined.attach(data_part) return combined.as_string() maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/__init__.py0000644000000000000000000000000013056115004023661 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/test_commissioning.py0000644000000000000000000000554613056115004026063 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test generation of commissioning user data.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.preseed import get_preseed_context from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from maastesting.matchers import MockCalledWith from metadataserver.user_data import utils from metadataserver.user_data.commissioning import generate_user_data from mock import Mock from testtools.matchers import ContainsAll class TestCommissioningUserData(MAASServerTestCase): def test_generate_user_data_produces_commissioning_script(self): # generate_user_data produces a commissioning script which contains # both definitions and use of various commands in python. node = factory.make_Node() user_data = generate_user_data(node) # On Vivid and above the email library defaults to encoding the MIME # data as base64. We only check the inner contents if its not base64. if "Content-Transfer-Encoding: base64" not in user_data: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', 'maas-get', 'maas-signal', 'maas-ipmi-autodetect', 'def authenticate_headers', 'def encode_multipart_data', })) else: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', })) def test_nodegroup_passed_to_get_preseed_context(self): # I don't care about what effect it has, I just want to know # that it was passed as it can affect the contents of # `server_host` in the context. utils.get_preseed_context = Mock( # Use the real return value as it contains data necessary to # render the template. return_value=get_preseed_context()) node = factory.make_Node() generate_user_data(node) self.assertThat( utils.get_preseed_context, MockCalledWith(nodegroup=node.nodegroup)) def test_generate_user_data_generates_mime_multipart(self): # The generate_user_data func should create a MIME multipart # message consisting of cloud-config and x-shellscript # attachments. node = factory.make_Node() self.assertThat( generate_user_data(node), ContainsAll({ 'multipart', 'Content-Type: text/cloud-config', 'Content-Type: text/x-shellscript', })) maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/test_disk_erasing.py0000644000000000000000000000267413056115004025646 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test generation of disk erasing user data.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from metadataserver.user_data.disk_erasing import generate_user_data from testtools.matchers import ContainsAll class TestDiskErasingUserData(MAASServerTestCase): def test_generate_user_data_produces_disk_erase_script(self): node = factory.make_Node() user_data = generate_user_data(node) # On Vivid and above the email library defaults to encoding the MIME # data as base64. We only check the inner contents if its not base64. 
if "Content-Transfer-Encoding: base64" not in user_data: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', 'maas-signal', 'erase_disks', 'def authenticate_headers', 'def encode_multipart_data', })) else: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', })) maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/test_poweroff.py0000644000000000000000000000252113056115004025022 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test generation of poweroff user data.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maasserver.testing.factory import factory from maasserver.testing.testcase import MAASServerTestCase from metadataserver.user_data.poweroff import generate_user_data from testtools.matchers import ContainsAll class TestPoweroffUserData(MAASServerTestCase): def test_generate_user_data_produces_poweroff_script(self): node = factory.make_Node() user_data = generate_user_data(node) # On Vivid and above the email library defaults to encoding the MIME # data as base64. We only check the inner contents if its not base64. if "Content-Transfer-Encoding: base64" not in user_data: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', 'Powering node off', 'poweroff', })) else: self.assertThat( user_data, ContainsAll({ 'config', 'user_data.sh', })) maas-1.9.5+bzr4599.orig/src/metadataserver/user_data/tests/test_snippets.py0000644000000000000000000000455113056115004025045 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Test the snippets-related support routines for commissioning user data.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os.path from maastesting.factory import factory from maastesting.testcase import MAASTestCase from metadataserver.user_data.snippets import ( get_snippet_context, is_snippet, list_snippets, read_snippet, strip_name, ) class TestSnippets(MAASTestCase): def test_read_snippet_reads_snippet_file(self): contents = factory.make_string() snippet = self.make_file(contents=contents) self.assertEqual( contents, read_snippet(os.path.dirname(snippet), os.path.basename(snippet))) def test_strip_name_leaves_simple_names_intact(self): simple_name = factory.make_string() self.assertEqual(simple_name, strip_name(simple_name)) def test_strip_name_replaces_dots(self): self.assertEqual('_x_y_', strip_name('.x.y.')) def test_is_snippet(self): are_snippets = { 'snippet': True, 'with-dash': True, 'module.py': True, '.backup': False, 'backup~': False, 'module.pyc': False, '__init__.pyc': False, 'tests': False, } self.assertEqual( are_snippets, {name: is_snippet(name) for name in are_snippets}) def test_list_snippets(self): snippets_dir = self.make_dir() factory.make_file(snippets_dir, 'snippet') factory.make_file(snippets_dir, '.backup.pyc') self.assertItemsEqual(['snippet'], list_snippets(snippets_dir)) def test_get_snippet_context(self): contents = factory.make_string() snippets_dir = self.make_dir() factory.make_file(snippets_dir, 'snippet.py', contents=contents) self.assertItemsEqual( {'snippet_py': contents}, get_snippet_context(snippets_dir=snippets_dir)) def test_get_snippet_context_empty_if_no_snippets(self): snippets_dir = self.make_dir() context = {} self.assertEqual( context, get_snippet_context(snippets_dir=snippets_dir)) maas-1.9.5+bzr4599.orig/src/provisioningserver/__init__.py0000644000000000000000000000135513056115004021534 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The MAAS Provisioning Server, now referred to as Cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from twisted.application.service import MultiService from twisted.internet.protocol import Factory # The cluster's services. This is initialised by # ProvisioningServiceMaker. services = MultiService() # Make t.i.protocol.Factory quiet. Its jabbering is mind-numbingly # useless. Factory.noisy = False try: import maasfascist maasfascist # Silence lint. except ImportError: pass maas-1.9.5+bzr4599.orig/src/provisioningserver/__main__.py0000644000000000000000000000247513056115004021521 0ustar 00000000000000#!/usr/bin/env python2.7 # Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Command-line interface for the MAAS provisioning component.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type from provisioningserver import security import provisioningserver.boot.install_bootloader import provisioningserver.boot.install_grub import provisioningserver.cluster_config_command import provisioningserver.dhcp.writer import provisioningserver.upgrade_cluster from provisioningserver.utils.script import ( AtomicDeleteScript, AtomicWriteScript, MainScript, ) script_commands = { 'atomic-write': AtomicWriteScript, 'atomic-delete': AtomicDeleteScript, 'check-for-shared-secret': security.CheckForSharedSecretScript, 'generate-dhcp-config': provisioningserver.dhcp.writer, 'install-shared-secret': security.InstallSharedSecretScript, 'install-uefi-config': provisioningserver.boot.install_grub, 'upgrade-cluster': provisioningserver.upgrade_cluster, 'config': provisioningserver.cluster_config_command, } main = MainScript(__doc__) for name, command in sorted(script_commands.items()): main.register(name, command) main() maas-1.9.5+bzr4599.orig/src/provisioningserver/auth.py0000644000000000000000000000150713056115004020735 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """API credentials for node-group workers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_maas_user_gpghome', ] from provisioningserver.path import get_path def get_maas_user_gpghome(): """Return the GPG directory for the `maas` user. Set $GPGHOME to this value ad-hoc when needed. """ return get_path('/var/lib/maas/gnupg') cache = {} # Cache key for the API credentials as last sent by the server. API_CREDENTIALS_CACHE_KEY = 'api_credentials' # Cache key for the uuid of the nodegroup that this worker manages. NODEGROUP_UUID_CACHE_KEY = 'nodegroup_uuid' maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/0000755000000000000000000000000013056115004020362 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/cluster_config_command.py0000644000000000000000000000630413056115004024500 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Change cluster controller configuration settings. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'add_arguments', 'run', ] from uuid import uuid4 from provisioningserver.config import ( ClusterConfiguration, UUID_NOT_SET, ) def update_maas_cluster_conf( url=None, uuid=None, init=None, tftp_port=None, tftp_root=None): """This function handles the logic behind using the parameters passed to run and setting / initializing values in the config backend. :param url: The MAAS URL to set. Does nothing if None. :param tftp_port: The tftp port number to set. Does nothing if None. :param tftp_root: The tftp root file path to set. Does nothing if None. :param uuid: The UUID to use for this cluster. Does nothing if None. :param init: Initializes the config backend with a new UUID if the backend does not currently have a value configured. NOTE: that the argument parser will not let uuid and init be passed at the same time, as these are mutually exclusive parameters. 
""" with ClusterConfiguration.open_for_update() as config: if url is not None: config.maas_url = url if uuid is not None: config.cluster_uuid = uuid if init: cur_uuid = config.cluster_uuid if cur_uuid == UUID_NOT_SET: config.cluster_uuid = unicode(uuid4()) if tftp_port is not None: config.tftp_port = tftp_port if tftp_root is not None: config.tftp_root = tftp_root all_arguments = ( '--region-url', '--uuid', '--init', '--tftp-port', '--tftp-root') def add_arguments(parser): """Add this command's options to the `ArgumentParser`. Specified by the `ActionScript` interface. """ parser.add_argument( '--region-url', action='store', required=False, help=('Change the URL where cluster controllers can reach the MAAS ' 'region controller.')) uuid_group = parser.add_mutually_exclusive_group() uuid_group.add_argument( '--uuid', action='store', required=False, help=('Change the cluster UUID. Pass AUTO to generate a new UUID if ' 'one is not already set.')) uuid_group.add_argument( '--init', action='store_true', required=False, help=('Generate a new UUID for this cluster controller.')) parser.add_argument( '--tftp-port', action='store', required=False, help=('The root directory for TFTP resources.')) parser.add_argument( '--tftp-root', action='store', required=False, help=('The root directory for TFTP resources.')) def run(args): """Update configuration settings.""" params = vars(args).copy() url = params.pop('region_url', None) uuid = params.pop('uuid', None) init = params.pop('init', None) tftp_port = params.pop('tftp_port', None) tftp_root = params.pop('tftp_root', None) update_maas_cluster_conf(url, uuid, init, tftp_port, tftp_root) maas-1.9.5+bzr4599.orig/src/provisioningserver/concurrency.py0000644000000000000000000000143613056115004022327 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Configuration relating to concurrency in the cluster controller. This module is intended as a place to define concurrency policies for code running in the cluster controller. Typically this will take the form of a Twisted concurrency primative, like `DeferredLock` or `DeferredSemaphore`. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "boot_images", "dhcp", ] from twisted.internet.defer import DeferredLock # Limit boot image imports to one at a time. boot_images = DeferredLock() # Limit DHCP changes to one at a time. dhcp = DeferredLock() maas-1.9.5+bzr4599.orig/src/provisioningserver/config.py0000644000000000000000000006566513056115004021260 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Configuration for the MAAS cluster. This module also contains the common library code that's used for configuration in both the region and the cluster. There are two styles of configuration object, one older and deprecated, and one new. The Old Way ----------- Configuration can be obtained through subclassing this module's `ConfigBase` validator class. It's pretty simple. Typical usage is:: >>> config = MyConfig.load_from_cache() {...} This reads in a configuration file from `MyConfig.DEFAULT_FILENAME` (see a note about that later). The configuration file is parsed as YAML, and a plain `dict` is returned with configuration nested within it. The configuration is validated at load time using `formencode`. 
The policy for validation is laid out in this module; see the various `formencode.Schema` subclasses. Configuration should be optional, and a sensible default should be provided in every instance. The defaults can be obtained from `MyConfig.get_defaults()`. An alternative to `MyConfig.load_from_cache()` is `MyConfig.load()`, which loads and validates a configuration file while bypassing the cache. See `ConfigBase` for other useful functions. `MyConfig.DEFAULT_FILENAME` is a class property, so does not need to be referenced via an instance of `MyConfig`. It refers to an environment variable named by `MyConfig.envvar` in the first instance, but should have a sensible default too. You can write to this property and it will update the environment so that child processes will also use the same configuration filename. To revert to the default - i.e. erase the environment variable - you can `del MyConfig.DEFAULT_FILENAME`. When testing, see `provisioningserver.testing.config.ConfigFixtureBase` to temporarily use a different configuration. The New Way ----------- There are two subclasses of this module's `Configuration` class, one for the region (`RegionConfiguration`) and for the cluster (`ClusterConfiguration`). Each defines a set of attributes which are the configuration variables: * If an attribute is declared as a `ConfigurationOption` then it's a read-write configuration option, and should have a sensible default if possible. * If an attribute is declared as a standard Python `property` then it's a read-only configuration option. A metaclass is also defined, which must inherit from `ConfigurationMeta`, to define a few other important options: * ``default`` is the default filename for the configuration database. * ``envvar`` is the name of an environment variable that, if defined, provides the filename for the configuration database. This is used in preference to ``default``. * ``backend`` is a factory that provides the storage mechanism. Currently you can choose from `ConfigurationFile` or `ConfigurationDatabase`. The latter is strongly recommended in preference to the former. An example:: class MyConfiguration(Configuration): class __metaclass__(ConfigurationMeta): envvar = "CONFIG_FILE" default = "/etc/myapp.conf" backend = ConfigurationDatabase images_dir = ConfigurationOption( "images_dir", "The directory in which to store images.", Directory(if_missing="/var/lib/myapp/images")) @property def png_dir(self): "The directory in which to store PNGs." return os.path.join(self.images_dir, "png") @property def gif_dir(self): "The directory in which to store GIFs." 
return os.path.join(self.images_dir, "gif") It can be used like so:: with MyConfiguration.open() as config: config.images_dir = "/var/www/example.com/images" print(config.png_dir, config.gif_dir) """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "BootSources", "ClusterConfiguration", "ConfigBase", "ConfigMeta", "is_dev_environment", "UUID_NOT_SET", ] from contextlib import ( closing, contextmanager, ) from copy import deepcopy from itertools import islice import json import logging import os from os import environ import os.path import re from shutil import copyfile import sqlite3 from threading import RLock from time import time import traceback import uuid from formencode import ( ForEach, Schema, ) from formencode.api import ( is_validator, NoDefault, ) from formencode.declarative import DeclarativeMeta from formencode.validators import ( Invalid, Number, Set, String, UnicodeString, URL, ) from provisioningserver.path import get_tentative_path from provisioningserver.utils.fs import ( atomic_write, ensure_dir, RunLock, ) import yaml logger = logging.getLogger(__name__) # Default result for cluster UUID if not set UUID_NOT_SET = '** UUID NOT SET **' class UUID(UnicodeString): """A validator for UUIDs The string must be a valid UUID. """ messages = dict(notUUID="%(value)r Failed to parse UUID") def validate_python(self, value, state=None): try: return uuid.UUID(unicode(value)) except: raise Invalid( self.message("notUUID", state, value=value), value, state) class Directory(UnicodeString): """A validator for a directory on the local filesystem. The directory must exist. """ messages = dict(notDir="%(value)r does not exist or is not a directory") def validate_python(self, value, state=None): if os.path.isdir(value): return value else: raise Invalid( self.message("notDir", state, value=value), value, state) class ExtendedURL(URL): """A validator URLs. This validator extends formencode.validators.URL by adding support for the general case of hostnames (i.e. hostnames containing numeric digits, hyphens, and hostnames of length 1), and ipv6 addresses with or without brackets. """ url_re = re.compile(r''' ^(http|https):// (?:[%:\w]*@)? # authenticator (?: # ip or domain (?P(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3} (?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))| (?P\[?(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}\]?)| (?P[a-z0-9][a-z0-9\-]{,62}\.)* # subdomain (?P[a-zA-Z0-9]{1,63}| [a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]) # tld or hostname ) (?::[0-9]{1,5})? # port # files/delims/etc (?P/[a-z0-9\-\._~:/\?#\[\]@!%\$&\'\(\)\*\+,;=]*)? 
$ ''', re.I | re.VERBOSE) class BootSourceSelection(Schema): """Configuration validator for boot source selection configuration.""" if_key_missing = None os = String(if_missing="*") release = String(if_missing="*") arches = Set(if_missing=["*"]) subarches = Set(if_missing=['*']) labels = Set(if_missing=['*']) class BootSource(Schema): """Configuration validator for boot source configuration.""" if_key_missing = None url = String( if_missing="http://maas.ubuntu.com/images/ephemeral-v2/releases/") keyring = String( if_missing="/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg") keyring_data = String(if_missing=None) selections = ForEach( BootSourceSelection, if_missing=[BootSourceSelection.to_python({})]) class ConfigBase: """Base configuration validator.""" @classmethod def parse(cls, stream): """Load a YAML configuration from `stream` and validate.""" return cls.to_python(yaml.safe_load(stream)) @classmethod def load(cls, filename=None): """Load a YAML configuration from `filename` and validate.""" if filename is None: filename = cls.DEFAULT_FILENAME with open(filename, "rb") as stream: return cls.parse(stream) @classmethod def _get_backup_name(cls, message, filename=None): if filename is None: filename = cls.DEFAULT_FILENAME return "%s.%s.bak" % (filename, message) @classmethod def create_backup(cls, message, filename=None): """Create a backup of the YAML configuration. The given 'message' will be used in the name of the backup file. """ backup_name = cls._get_backup_name(message, filename) if filename is None: filename = cls.DEFAULT_FILENAME copyfile(filename, backup_name) @classmethod def save(cls, config, filename=None): """Save a YAML configuration to `filename`, or to the default file.""" if filename is None: filename = cls.DEFAULT_FILENAME dump = yaml.safe_dump(config) atomic_write(dump, filename) _cache = {} _cache_lock = RLock() @classmethod def load_from_cache(cls, filename=None): """Load or return a previously loaded configuration. Keeps an internal cache of config files. If the requested config file is not in cache, it is loaded and inserted into the cache first. Each call returns a distinct (deep) copy of the requested config from the cache, so the caller can modify its own copy without affecting what other call sites see. This is thread-safe, so is okay to use from Django, for example. """ if filename is None: filename = cls.DEFAULT_FILENAME filename = os.path.abspath(filename) with cls._cache_lock: if filename not in cls._cache: with open(filename, "rb") as stream: cls._cache[filename] = cls.parse(stream) return deepcopy(cls._cache[filename]) @classmethod def flush_cache(cls, filename=None): """Evict a config file, or any cached config files, from cache.""" with cls._cache_lock: if filename is None: cls._cache.clear() else: if filename in cls._cache: del cls._cache[filename] @classmethod def field(target, *steps): """Obtain a field by following `steps`.""" for step in steps: target = target.fields[step] return target @classmethod def get_defaults(cls): """Return the default configuration.""" return cls.to_python({}) class ConfigMeta(DeclarativeMeta): """Metaclass for the root configuration schema.""" envvar = None # Set this in subtypes. default = None # Set this in subtypes. def _get_default_filename(cls): # Avoid circular imports. from provisioningserver.utils import locate_config # Get the configuration filename from the environment. Failing that, # look for the configuration in its default locations. 
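        # Lookup order sketch (editor's illustration): for `BootSources`
        # below, cls.envvar is "MAAS_BOOT_SOURCES_SETTINGS" and cls.default
        # is "sources.yaml"; an exported MAAS_BOOT_SOURCES_SETTINGS value
        # therefore wins over the located default.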
return environ.get(cls.envvar, locate_config(cls.default)) def _set_default_filename(cls, filename): # Set the configuration filename in the environment. environ[cls.envvar] = filename def _delete_default_filename(cls): # Remove any setting of the configuration filename from the # environment. environ.pop(cls.envvar, None) DEFAULT_FILENAME = property( _get_default_filename, _set_default_filename, _delete_default_filename, doc=( "The default config file to load. Refers to " "`cls.envvar` in the environment.")) class BootSources(ConfigBase, ForEach): """Configuration for boot sources.""" class __metaclass__(ConfigMeta): envvar = "MAAS_BOOT_SOURCES_SETTINGS" default = "sources.yaml" validators = [BootSource] ############################################################################### # New configuration API follows. ############################################################################### # Permit reads by members of the same group. default_file_mode = 0o640 def touch(path, mode=default_file_mode): """Ensure that `path` exists.""" os.close(os.open(path, os.O_CREAT | os.O_APPEND, mode)) class ConfigurationImmutable(Exception): """The configuration is read-only; it cannot be mutated.""" class ConfigurationDatabase: """Store configuration in an sqlite3 database.""" def __init__(self, database, mutable=False): self.database = database self.mutable = mutable with self.cursor() as cursor: cursor.execute( "CREATE TABLE IF NOT EXISTS configuration " "(id INTEGER PRIMARY KEY," " name TEXT NOT NULL UNIQUE," " data BLOB)") def cursor(self): return closing(self.database.cursor()) def __iter__(self): with self.cursor() as cursor: results = cursor.execute( "SELECT name FROM configuration").fetchall() return (name for (name,) in results) def __getitem__(self, name): with self.cursor() as cursor: data = cursor.execute( "SELECT data FROM configuration" " WHERE name = ?", (name,)).fetchone() if data is None: raise KeyError(name) else: return json.loads(data[0]) def __setitem__(self, name, data): if self.mutable: with self.cursor() as cursor: cursor.execute( "INSERT OR REPLACE INTO configuration (name, data) " "VALUES (?, ?)", (name, json.dumps(data))) else: raise ConfigurationImmutable( "%s: Cannot set `%s'." % (self, name)) def __delitem__(self, name): if self.mutable: with self.cursor() as cursor: cursor.execute( "DELETE FROM configuration" " WHERE name = ?", (name,)) else: raise ConfigurationImmutable( "%s: Cannot set `%s'." % (self, name)) def __unicode__(self): with self.cursor() as cursor: # https://www.sqlite.org/pragma.html#pragma_database_list databases = "; ".join( "%s=%s" % (name, ":memory:" if path == "" else path) for (_, name, path) in cursor.execute("PRAGMA database_list")) return "%s(%s)" % (self.__class__.__name__, databases) @classmethod @contextmanager def open(cls, dbpath): """Open a configuration database. **Note** that this returns a context manager which will open the database READ-ONLY. """ # Ensure `dbpath` exists... touch(dbpath) # before opening it with sqlite. database = sqlite3.connect(dbpath) try: yield cls(database, mutable=False) except: raise else: database.rollback() finally: database.close() @classmethod @contextmanager def open_for_update(cls, dbpath): """Open a configuration database. **Note** that this returns a context manager which will close the database on exit, COMMITTING changes if the exit is clean. """ # Ensure `dbpath` exists... touch(dbpath) # before opening it with sqlite. 
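        # Usage sketch (editor's illustration; the path is hypothetical):
        #
        #   with ConfigurationDatabase.open_for_update("/tmp/conf.db") as cfg:
        #       cfg["maas_url"] = "http://localhost:5240/MAAS"
        #
        # Changes are committed only when the block exits cleanly.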
database = sqlite3.connect(dbpath) try: yield cls(database, mutable=True) except: raise else: database.commit() finally: database.close() class ConfigurationFile: """Store configuration as YAML in a file. You should almost always prefer the `ConfigurationDatabase` variant above this. It provides things like transactions with optimistic write locking, synchronisation between processes, and all the goodies that come with a mature and battle-tested piece of kit such as SQLite3. This, by comparison, will clobber changes made in another thread or process without warning. We could add support for locking, even optimistic locking, but, you know, that's already been done: `ConfigurationDatabase` preceded this. Just use that. Really. Unless, you know, you've absolutely got to use this. """ def __init__(self, path, mutable=False): super(ConfigurationFile, self).__init__() self.config = {} self.dirty = False self.path = path self.mutable = mutable def __iter__(self): return iter(self.config) def __getitem__(self, name): return self.config[name] def __setitem__(self, name, data): if self.mutable: self.config[name] = data self.dirty = True else: raise ConfigurationImmutable( "%s: Cannot set `%s'." % (self, name)) def __delitem__(self, name): if self.mutable: if name in self.config: del self.config[name] self.dirty = True else: raise ConfigurationImmutable( "%s: Cannot set `%s'." % (self, name)) def load(self): """Load the configuration.""" with open(self.path, "rb") as fd: config = yaml.safe_load(fd) if config is None: self.config.clear() self.dirty = False elif isinstance(config, dict): self.config = config self.dirty = False else: raise ValueError( "Configuration in %s is not a mapping: %r" % (self.path, config)) def save(self): """Save the configuration.""" try: stat = os.stat(self.path) except OSError: mode = default_file_mode else: mode = stat.st_mode # Write, retaining the file's mode. atomic_write( yaml.safe_dump(self.config, default_flow_style=False), self.path, mode=mode) self.dirty = False def __unicode__(self): return "%s(%r)" % (self.__class__.__name__, self.path) @classmethod @contextmanager def open(cls, path): """Open a configuration file read-only. This avoids all the locking that happens in `open_for_update`. However, it will create the configuration file if it does not yet exist. **Note** that this returns a context manager which will DISCARD changes to the configuration on exit. """ # Ensure `path` exists... touch(path) # before loading it in. configfile = cls(path, mutable=False) configfile.load() yield configfile @classmethod @contextmanager def open_for_update(cls, path): """Open a configuration file. Locks are taken so that there can only be *one* reader or writer for a configuration file at a time. Where configuration files can be read by multiple concurrent processes it follows that each process should hold the file open for the shortest time possible. **Note** that this returns a context manager which will SAVE changes to the configuration on a clean exit. """ time_opened = None try: # Only one reader or writer at a time. with RunLock(path).wait(timeout=5.0): time_opened = time() # Ensure `path` exists... touch(path) # before loading it in. 
configfile = cls(path, mutable=True) configfile.load() try: yield configfile except: raise else: if configfile.dirty: configfile.save() finally: if time_opened is not None: time_open = time() - time_opened if time_open >= 2.5: mini_stack = ", from ".join( "%s:%d" % (fn, lineno) for fn, lineno, _, _ in islice(reversed(traceback.extract_stack()), 2, 5)) logger.warn( "Configuration file %s locked for %.1f seconds; this " "may starve other processes. Called from %s.", path, time_open, mini_stack) class ConfigurationMeta(type): """Metaclass for configuration objects. :cvar envvar: The name of the environment variable which will be used to store the filename of the configuration file. This can be passed in from the caller's environment. Setting `DEFAULT_FILENAME` updates this environment variable so that it's available to sub-processes. :cvar default: If the environment variable named by `envvar` is not set, this is used as the filename. :cvar backend: The class used to load the configuration. This must provide an ``open(filename)`` method that returns a context manager. This context manager must provide an object with a dict-like interface. """ envvar = None # Set this in subtypes. default = None # Set this in subtypes. backend = None # Set this in subtypes. def _get_default_filename(cls): # Get the configuration filename from the environment. Failing that, # look for the configuration in its default locations. filename = environ.get(cls.envvar) if filename is None or len(filename) == 0: return get_tentative_path(cls.default) else: return filename def _set_default_filename(cls, filename): # Set the configuration filename in the environment. environ[cls.envvar] = filename def _delete_default_filename(cls): # Remove any setting of the configuration filename from the # environment. environ.pop(cls.envvar, None) DEFAULT_FILENAME = property( _get_default_filename, _set_default_filename, _delete_default_filename, doc=( "The default configuration file to load. Refers to " "`cls.envvar` in the environment.")) class Configuration: """An object that holds configuration options. Configuration options should be defined by creating properties using `ConfigurationOption`. For example:: class ApplicationConfiguration(Configuration): application_name = ConfigurationOption( "application_name", "The name for this app, used in the UI.", validator=UnicodeString()) This can then be used like so:: config = ApplicationConfiguration(database) # database is dict-like. config.application_name = "Metal On A Plate" print(config.application_name) """ # Define this class variable in sub-classes. Using `ConfigurationMeta` as # a metaclass is a good way to achieve this. DEFAULT_FILENAME = None def __init__(self, store): """Initialise a new `Configuration` object. :param store: A dict-like object. """ super(Configuration, self).__init__() # Use the super-class's __setattr__() because it's redefined later on # to prevent accidentally setting attributes that are not options. super(Configuration, self).__setattr__("store", store) def __setattr__(self, name, value): """Prevent setting unrecognised options. Only options that have been declared on the class, using the `ConfigurationOption` descriptor for example, can be set. This is as much about preventing typos as anything else. 
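
        For example, given the `ApplicationConfiguration` sketch in the
        class docstring above (illustrative, not a real config)::

            config.application_name = "Metal On A Plate"  # declared; fine
            config.application_nmae = "oops"  # raises AttributeError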
""" if hasattr(self.__class__, name): super(Configuration, self).__setattr__(name, value) else: raise AttributeError( "%r object has no attribute %r" % ( self.__class__.__name__, name)) @classmethod @contextmanager def open(cls, filepath=None): if filepath is None: filepath = cls.DEFAULT_FILENAME ensure_dir(os.path.dirname(filepath)) with cls.backend.open(filepath) as store: yield cls(store) @classmethod @contextmanager def open_for_update(cls, filepath=None): if filepath is None: filepath = cls.DEFAULT_FILENAME ensure_dir(os.path.dirname(filepath)) with cls.backend.open_for_update(filepath) as store: yield cls(store) class ConfigurationOption: """Define a configuration option. This is for use with `Configuration` and its subclasses. """ def __init__(self, name, doc, validator): """Initialise a new `ConfigurationOption`. :param name: The name for this option. This is the name as which this option will be stored in the underlying `Configuration` object. :param doc: A description of the option. This is mandatory. :param validator: A `formencode.validators.Validator`. """ super(ConfigurationOption, self).__init__() assert isinstance(name, unicode) assert isinstance(doc, unicode) assert is_validator(validator) assert validator.if_missing is not NoDefault self.name = name self.__doc__ = doc self.validator = validator def __get__(self, obj, type=None): if obj is None: return self else: try: value = obj.store[self.name] except KeyError: return self.validator.if_missing else: return self.validator.from_python(value) def __set__(self, obj, value): obj.store[self.name] = self.validator.to_python(value) def __delete__(self, obj): del obj.store[self.name] class ClusterConfiguration(Configuration): """Local configuration for the MAAS cluster.""" class __metaclass__(ConfigurationMeta): envvar = "MAAS_CLUSTER_CONFIG" default = "/etc/maas/clusterd.conf" backend = ConfigurationFile maas_url = ConfigurationOption( "maas_url", "The HTTP URL for the MAAS region.", ExtendedURL( require_tld=False, if_missing="http://localhost:5240/MAAS")) # TFTP options. tftp_port = ConfigurationOption( "tftp_port", "The UDP port on which to listen for TFTP requests.", Number(min=0, max=(2 ** 16) - 1, if_missing=69)) tftp_root = ConfigurationOption( "tftp_root", "The root directory for TFTP resources.", Directory(if_missing=get_tentative_path( "/var/lib/maas/boot-resources/current"))) @property def tftp_generator_url(self): """The URL at which to obtain the TFTP options for a node.""" return "%s/api/1.0/pxeconfig/" % self.maas_url.rstrip("/") # GRUB options. @property def grub_root(self): "The root directory for GRUB resources." return os.path.join(self.tftp_root, "grub") # Cluster UUID Option cluster_uuid = ConfigurationOption( "cluster_uuid", "The UUID for this cluster controller", UUID(if_missing=UUID_NOT_SET)) def is_dev_environment(): """Is this the development environment, or production?""" try: from maastesting import root # noqa except: return False else: return True maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/0000755000000000000000000000000013056115004020335 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/diskless.py0000644000000000000000000002052013056115004021611 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Generate diskless image for system to boot.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'create_diskless_disk', 'delete_diskless_disk', ] import os from textwrap import dedent from provisioningserver.config import ClusterConfiguration from provisioningserver.drivers.diskless import DisklessDriverRegistry from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystemRegistry, ) from provisioningserver.logger import get_maas_logger from provisioningserver.utils.fs import ( atomic_symlink, atomic_write, ) from provisioningserver.utils.shell import call_and_check from twisted.python.filepath import FilePath maaslog = get_maas_logger("diskless") class DisklessError(Exception): """Error raised when issue occurs during a diskless task.""" def get_diskless_store(): """Return path to the diskless store. This is the location that all diskless links exist. It holds all of the currently in use disk for diskless booting. """ with ClusterConfiguration.open() as config: storage = FilePath(config.tftp_root).parent() return storage.child('diskless').child('store').path def compose_diskless_link_path(system_id): """Return path to the symbolic link for the given system_id. This is the link that will be written into the diskless store. It is used to reference what disks are currently being used for diskless booting. """ return os.path.join(get_diskless_store(), system_id) def create_diskless_link(system_id, storage_path): """Create symbolic link in the diskless store to the actual path of the backing store. Each diskless driver returns an absolute path to were the data can be accessed on the system. A symbolic link is made in the diskless store to reference this location, so it can be retrieved later by system_id. """ link_path = compose_diskless_link_path(system_id) if os.path.lexists(link_path): raise DisklessError( "Backend storage link already exists for: %s" % system_id) atomic_symlink(storage_path, link_path) def delete_diskless_link(system_id): """Delete symbolic link in the diskless store.""" link_path = compose_diskless_link_path(system_id) if os.path.lexists(link_path): os.unlink(link_path) def read_diskless_link(system_id): """Return actual path to the backing store, from the link in the diskless store.""" link_path = compose_diskless_link_path(system_id) if not os.path.lexists(link_path): return None return os.readlink(link_path) def get_diskless_target(system_id): """Get the iscsi target name for the node.""" prefix = 'iqn.2004-05.com.ubuntu:maas' return '%s:root-diskless-%s' % (prefix, system_id) def get_diskless_tgt_path(): """Return path to maas-diskless.tgt.""" with ClusterConfiguration.open() as config: storage = FilePath(config.tftp_root).parent() return storage.child('diskless').child('maas-diskless.tgt').path def tgt_entry(system_id, image): """Generate tgt target used for root disk Tgt target used by the node as its root disk. This function creates target description in a format used by tgt-admin. It uses system_id to generate target name and image as a path to image file which should be available. 
    :param system_id: Node system_id
    :param image: Path to the image which should be shared via tgt/iscsi
    :return: Tgt entry which can be written to tgt-admin configuration file
    """
    target = get_diskless_target(system_id)
    entry = dedent("""\
        <target {target}>
            readonly 0
            backing-store "{image}"
            driver iscsi
        </target>
        """).format(target=target, image=image)
    return entry


def compose_diskless_tgt_config():
    """Produce the contents of a diskless tgt conf file.

    :return: Contents for a `targets.conf` file.
    :rtype: bytes
    """
    tgt_entries = []
    for system_id in os.listdir(get_diskless_store()):
        image_path = compose_diskless_link_path(system_id)
        tgt_entries.append(tgt_entry(system_id, image_path))
    return ''.join(tgt_entries).encode('utf-8')


def reload_diskless_tgt():
    """Reload the diskless tgt config."""
    call_and_check([
        'sudo',
        '/usr/sbin/tgt-admin',
        '--conf', get_diskless_tgt_path(),
        '--update', 'ALL',
        ])


def update_diskless_tgt():
    """Re-writes the "maas-diskless.tgt" to include all targets that have
    symlinks in the diskless store. Reloads the tgt config."""
    tgt_path = get_diskless_tgt_path()
    tgt_config = compose_diskless_tgt_config()
    atomic_write(tgt_config, tgt_path, mode=0o644)
    reload_diskless_tgt()


def get_diskless_driver(driver):
    """Return the diskless driver object.

    :raise DisklessError: if driver does not exist.
    """
    driver_obj = DisklessDriverRegistry.get_item(driver)
    if driver_obj is None:
        raise DisklessError(
            "Cluster doesn't support diskless driver: %s" % driver)
    return driver_obj


def compose_source_path(osystem_name, arch, subarch, release, label):
    """Return path to the source file for the diskless boot image.

    Each diskless driver will use this source to initialize the disk.
    """
    osystem = OperatingSystemRegistry.get_item(osystem_name)
    if osystem is None:
        raise DisklessError(
            "OS doesn't exist in operating system registry: %s" %
            osystem_name)
    purposes = osystem.get_boot_image_purposes(arch, subarch, release, label)
    if BOOT_IMAGE_PURPOSE.DISKLESS not in purposes:
        raise DisklessError(
            "OS doesn't support diskless booting: %s" % osystem_name)
    root_path, _ = osystem.get_xinstall_parameters()
    with ClusterConfiguration.open() as config:
        return os.path.join(
            config.tftp_root, osystem_name, arch, subarch,
            release, label, root_path)


def create_diskless_disk(driver, driver_options, system_id,
                         osystem, arch, subarch, release, label):
    """Creates a disk using the `driver` for the `system_id`.

    This disk will be used for booting diskless."""
    source_path = compose_source_path(osystem, arch, subarch, release, label)
    if not os.path.exists(source_path):
        raise DisklessError("Boot resources doesn't exist: %s" % source_path)
    link_path = compose_diskless_link_path(system_id)
    if os.path.lexists(link_path):
        raise DisklessError("Disk already exists for node: %s" % system_id)

    # Create the disk with the driver, and place the link in diskless source.
    maaslog.info(
        "Creating disk for node %s using driver: %s", system_id, driver)
    driver_obj = get_diskless_driver(driver)
    disk_path = driver_obj.create_disk(
        system_id, source_path, **driver_options)
    if disk_path is None or not os.path.exists(disk_path):
        raise DisklessError(
            "Driver failed to create disk for node: %s" % system_id)
    create_diskless_link(system_id, disk_path)

    # Re-write the tgt config, to include the new disk for the node.
maaslog.info("Updating iSCSI targets.") update_diskless_tgt() def delete_diskless_disk(driver, driver_options, system_id): """Deletes the disk that was used by the node for diskless booting.""" link_path = compose_diskless_link_path(system_id) if not os.path.lexists(link_path): maaslog.warn("Disk already deleted for node: %s", system_id) return maaslog.info( "Destroying disk for node %s using driver: %s", system_id, driver) disk_path = read_diskless_link(system_id) if disk_path is None: raise DisklessError( "Failed to read diskless link for node: %s" % system_id) if os.path.exists(disk_path): driver_obj = get_diskless_driver(driver) driver_obj.delete_disk(system_id, disk_path, **driver_options) else: maaslog.warn(( "Assuming disk has already been removed " "for node %s by the driver: %s"), system_id, driver) delete_diskless_link(system_id) # Re-write the tgt config, to include only the remaining disks. maaslog.info("Updating iSCSI targets.") update_diskless_tgt() maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/0000755000000000000000000000000013056115004020203 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/0000755000000000000000000000000013056115004021075 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/events.py0000644000000000000000000003070213056115004021277 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Event catalog.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'EVENT_DETAILS', 'EVENT_TYPES', 'send_event_node', 'send_event_node_mac_address', ] from collections import namedtuple from logging import ( DEBUG, ERROR, INFO, WARN, ) from provisioningserver.logger.log import get_maas_logger from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import ( NoSuchEventType, NoSuchNode, ) from provisioningserver.rpc.region import ( RegisterEventType, SendEvent, SendEventMACAddress, ) from provisioningserver.utils.twisted import ( asynchronous, callOut, DeferredValue, ) from twisted.internet.defer import ( maybeDeferred, succeed, ) from twisted.python.failure import Failure maaslog = get_maas_logger("events") class EVENT_TYPES: # Power-related events. NODE_POWER_ON_STARTING = 'NODE_POWER_ON_STARTING' NODE_POWER_OFF_STARTING = 'NODE_POWER_OFF_STARTING' NODE_POWERED_ON = 'NODE_POWERED_ON' NODE_POWERED_OFF = 'NODE_POWERED_OFF' NODE_POWER_ON_FAILED = 'NODE_POWER_ON_FAILED' NODE_POWER_OFF_FAILED = 'NODE_POWER_OFF_FAILED' NODE_POWER_QUERY_FAILED = 'NODE_POWER_QUERY_FAILED' # PXE request event. NODE_PXE_REQUEST = 'NODE_PXE_REQUEST' # TFTP request event. NODE_TFTP_REQUEST = 'NODE_TFTP_REQUEST' # Other installation-related event types. NODE_INSTALLATION_FINISHED = "NODE_INSTALLATION_FINISHED" # Node status transition event. 
NODE_CHANGED_STATUS = "NODE_CHANGED_STATUS" # Node status events NODE_STATUS_EVENT = "NODE_STATUS_EVENT" NODE_COMMISSIONING_EVENT = "NODE_COMMISSIONING_EVENT" NODE_INSTALL_EVENT = "NODE_INSTALL_EVENT" # Node user request events REQUEST_NODE_START_COMMISSIONING = "REQUEST_NODE_START_COMMISSIONING" REQUEST_NODE_ABORT_COMMISSIONING = "REQUEST_NODE_ABORT_COMMISSIONING" REQUEST_NODE_ABORT_DEPLOYMENT = "REQUEST_NODE_ABORT_DEPLOYMENT" REQUEST_NODE_ACQUIRE = "REQUEST_NODE_ACQUIRE" REQUEST_NODE_ERASE_DISK = "REQUEST_NODE_ERASE_DISK" REQUEST_NODE_ABORT_ERASE_DISK = "REQUEST_NODE_ABORT_ERASE_DISK" REQUEST_NODE_RELEASE = "REQUEST_NODE_RELEASE" REQUEST_NODE_MARK_FAILED = "REQUEST_NODE_MARK_FAILED" REQUEST_NODE_MARK_BROKEN = "REQUEST_NODE_MARK_BROKEN" REQUEST_NODE_MARK_FIXED = "REQUEST_NODE_MARK_FIXED" REQUEST_NODE_START_DEPLOYMENT = "REQUEST_NODE_START_DEPLOYMENT" REQUEST_NODE_START = "REQUEST_NODE_START" REQUEST_NODE_STOP = "REQUEST_NODE_STOP" EventDetail = namedtuple("EventDetail", ("description", "level")) EVENT_DETAILS = { # Event type -> EventDetail mapping. EVENT_TYPES.NODE_POWER_ON_STARTING: EventDetail( description="Powering node on", level=INFO, ), EVENT_TYPES.NODE_POWER_OFF_STARTING: EventDetail( description="Powering node off", level=INFO, ), EVENT_TYPES.NODE_POWERED_ON: EventDetail( description="Node powered on", level=INFO, ), EVENT_TYPES.NODE_POWERED_OFF: EventDetail( description="Node powered off", level=INFO, ), EVENT_TYPES.NODE_POWER_ON_FAILED: EventDetail( description="Failed to power on node", level=ERROR, ), EVENT_TYPES.NODE_POWER_OFF_FAILED: EventDetail( description="Failed to power off node", level=ERROR, ), EVENT_TYPES.NODE_POWER_QUERY_FAILED: EventDetail( description="Failed to query node's BMC", level=WARN, ), EVENT_TYPES.NODE_TFTP_REQUEST: EventDetail( description="TFTP Request", level=DEBUG, ), EVENT_TYPES.NODE_PXE_REQUEST: EventDetail( description="PXE Request", level=INFO, ), EVENT_TYPES.NODE_INSTALLATION_FINISHED: EventDetail( description="Installation complete", level=INFO, ), EVENT_TYPES.NODE_CHANGED_STATUS: EventDetail( description="Node changed status", level=INFO, ), EVENT_TYPES.NODE_STATUS_EVENT: EventDetail( description="Node status event", level=DEBUG, ), EVENT_TYPES.NODE_COMMISSIONING_EVENT: EventDetail( description="Node commissioning", level=DEBUG, ), EVENT_TYPES.NODE_INSTALL_EVENT: EventDetail( description="Node installation", level=DEBUG, ), EVENT_TYPES.REQUEST_NODE_START_COMMISSIONING: EventDetail( description="User starting node commissioning", level=INFO, ), EVENT_TYPES.REQUEST_NODE_ABORT_COMMISSIONING: EventDetail( description="User aborting node commissioning", level=INFO, ), EVENT_TYPES.REQUEST_NODE_ABORT_DEPLOYMENT: EventDetail( description="User aborting deployment", level=INFO, ), EVENT_TYPES.REQUEST_NODE_ACQUIRE: EventDetail( description="User acquiring node", level=INFO, ), EVENT_TYPES.REQUEST_NODE_ERASE_DISK: EventDetail( description="User erasing disk", level=INFO, ), EVENT_TYPES.REQUEST_NODE_ABORT_ERASE_DISK: EventDetail( description="User aborting disk erase", level=INFO, ), EVENT_TYPES.REQUEST_NODE_RELEASE: EventDetail( description="User releasing node", level=INFO, ), EVENT_TYPES.REQUEST_NODE_MARK_FAILED: EventDetail( description="User marking node failed", level=INFO, ), EVENT_TYPES.REQUEST_NODE_MARK_BROKEN: EventDetail( description="User marking node broken", level=INFO, ), EVENT_TYPES.REQUEST_NODE_MARK_FIXED: EventDetail( description="User marking node fixed", level=INFO, ), EVENT_TYPES.REQUEST_NODE_START_DEPLOYMENT: EventDetail( 
description="User starting deployment", level=INFO, ), EVENT_TYPES.REQUEST_NODE_START: EventDetail( description="User powering up node", level=INFO, ), EVENT_TYPES.REQUEST_NODE_STOP: EventDetail( description="User powering down node", level=INFO, ), } class NodeEventHub: """Singleton for sending node events to the region. This automatically ensures that the event type is registered before sending logs to the region. """ def __init__(self): super(NodeEventHub, self).__init__() self._types_registering = dict() self._types_registered = set() @asynchronous def registerEventType(self, event_type): """Ensure that `event_type` is known to the region. This populates the cache used by `ensureEventTypeRegistered` but does not consult it; it always attempts to contact the region. :return: :class:`Deferred` """ details = EVENT_DETAILS[event_type] def register(client): return client( RegisterEventType, name=event_type, level=details.level, description=details.description) d = maybeDeferred(getRegionClient).addCallback(register) # Whatever happens, we are now done registering. d.addBoth(callOut, self._types_registering.pop, event_type) # On success, record that the event type has been registered. On # failure, ensure that the set of registered event types does NOT # contain the event type. d.addCallbacks( callback=callOut, callbackArgs=( self._types_registered.add, event_type), errback=callOut, errbackArgs=( self._types_registered.discard, event_type)) # Capture the result into a DeferredValue. result = DeferredValue() result.capture(d) # Keep track of it so that concurrent requests don't duplicate work. self._types_registering[event_type] = result return result.get() @asynchronous def ensureEventTypeRegistered(self, event_type): """Ensure that `event_type` is known to the region. This method keeps track of event types that it has already registered, and so can return in the affirmative without needing to contact the region. :return: :class:`Deferred` """ if event_type in self._types_registered: return succeed(None) elif event_type in self._types_registering: return self._types_registering[event_type].get() else: return self.registerEventType(event_type) def _checkEventTypeRegistered(self, failure, event_type): """Check if the event type is NOT registered after all. Maybe someone has monkeyed about with the region database and removed the event type? In any case, if we see `NoSuchEventType` coming back from a `SendEvent` or `SendEventMACAddress` call we discard the event type from the set of registered types. Subsequent logging calls will cause this class to attempt to register the event type again. As of MAAS 1.9 the region will no longer signal `NoSuchEventType` or `NoSuchNode` errors because database activity is not performed before returning. This method thus exists for compatibility with pre-1.9 region controllers only. All failures, including `NoSuchEventType`, are passed through. """ if failure.check(NoSuchEventType): self._types_registered.discard(event_type) return failure @asynchronous def logByID(self, event_type, system_id, description=""): """Send the given node event to the region. The node is specified by its ID. :param event_type: The type of the event. :type event_type: unicode :param system_id: The system ID of the node. :type system_id: unicode :param description: An optional description of the event. 
:type description: unicode """ def send(_): client = getRegionClient() return client( SendEvent, system_id=system_id, type_name=event_type, description=description) d = self.ensureEventTypeRegistered(event_type).addCallback(send) d.addErrback(self._checkEventTypeRegistered, event_type) return d @asynchronous def logByMAC(self, event_type, mac_address, description=""): """Send the given node event to the region. The node is specified by its MAC address. :param event_type: The type of the event. :type event_type: unicode :param mac_address: The MAC address of the node. :type mac_address: unicode :param description: An optional description of the event. :type description: unicode """ def send(_): client = getRegionClient() return client( SendEventMACAddress, mac_address=mac_address, type_name=event_type, description=description) d = self.ensureEventTypeRegistered(event_type).addCallback(send) d.addErrback(self._checkEventTypeRegistered, event_type) # Suppress NoSuchNode. This happens during enlistment because the # region does not yet know of the node; it's quite normal. Logging # tracebacks telling us about it is not useful. Perhaps the region # should store these logs anyway. Then, if and when the node is # enlisted, logs prior to enlistment can be seen. d.addErrback(Failure.trap, NoSuchNode) return d # Singleton. nodeEventHub = NodeEventHub() @asynchronous def send_event_node(event_type, system_id, hostname, description=''): """Send the given node event to the region. :param event_type: The type of the event. :type event_type: unicode :param system_id: The system ID of the node of the event. :type system_id: unicode :param hostname: Ignored! :param description: An optional description of the event. :type description: unicode """ return nodeEventHub.logByID(event_type, system_id, description) @asynchronous def send_event_node_mac_address(event_type, mac_address, description=''): """Send the given node event to the region for the given mac address. :param event_type: The type of the event. :type event_type: unicode :param mac_address: The MAC Address of the node of the event. :type mac_address: unicode :param description: An optional description of the event. :type description: unicode """ return nodeEventHub.logByMAC(event_type, mac_address, description) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/0000755000000000000000000000000013056115004022256 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/kernel_opts.py0000644000000000000000000001550013056115004022317 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Generate kernel command-line options for inclusion in PXE configs.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'compose_kernel_command_line', 'KernelParameters', 'prefix_target_name', ] from collections import namedtuple import os import curtin from provisioningserver.drivers import ArchitectureRegistry from provisioningserver.logger import get_maas_logger maaslog = get_maas_logger("kernel_opts") class EphemeralImagesDirectoryNotFound(Exception): """The ephemeral images directory cannot be found.""" KernelParametersBase = namedtuple( "KernelParametersBase", ( "osystem", # Operating system, e.g. "ubuntu" "arch", # Machine architecture, e.g. "i386" "subarch", # Machine subarchitecture, e.g. "generic" "release", # OS release, e.g. 
"precise" "label", # Image label, e.g. "release" "purpose", # Boot purpose, e.g. "commissioning" "hostname", # Machine hostname, e.g. "coleman" "domain", # Machine domain name, e.g. "example.com" "preseed_url", # URL from which a preseed can be obtained. "log_host", # Host/IP to which syslog can be streamed. "fs_host", # Host/IP on which ephemeral filesystems are hosted. "extra_opts", # String of extra options to supply, will be appended # verbatim to the kernel command line )) class KernelParameters(KernelParametersBase): # foo._replace() is just ugly, so alias it to __call__. __call__ = KernelParametersBase._replace def compose_preseed_opt(preseed_url): """Compose a kernel option for preseed URL. :param preseed_url: The URL from which a preseed can be fetched. """ # See https://help.ubuntu.com/12.04/installation-guide # /i386/preseed-using.html#preseed-auto return "auto url=%s" % preseed_url def compose_locale_opt(): locale = 'en_US' return "locale=%s" % locale def compose_logging_opts(log_host): return [ 'log_host=%s' % log_host, 'log_port=%d' % 514, ] def get_last_directory(root): """Return the last directory from the directories in the given root. This is used to get the most recent ephemeral import directory. The ephemeral directories are named after the release date: 20120424, 20120424, 20120301, etc. so fetching the last one (sorting by name) returns the most recent. """ dirs = [ os.path.join(root, directory) for directory in os.listdir(root)] dirs = filter(os.path.isdir, dirs) return sorted(dirs)[-1] ISCSI_TARGET_NAME_PREFIX = "iqn.2004-05.com.ubuntu:maas" def get_ephemeral_name(osystem, arch, subarch, release, label): """Return the name of the most recent ephemeral image.""" return "ephemeral-%s-%s-%s-%s-%s" % ( osystem, arch, subarch, release, label ) def compose_hostname_opts(params): """Return list of hostname/domain options based on `params`. The domain is omitted if `params` does not include it. """ options = [ 'hostname=%s' % params.hostname, ] if params.domain is not None: options.append('domain=%s' % params.domain) return options def prefix_target_name(name): """Prefix an ISCSI target name with the standard target-name prefix.""" return "%s:%s" % (ISCSI_TARGET_NAME_PREFIX, name) def compose_purpose_opts(params): """Return the list of the purpose-specific kernel options.""" if params.purpose in ["commissioning", "xinstall", "enlist"]: # These are kernel parameters read by the ephemeral environment. tname = prefix_target_name( get_ephemeral_name( params.osystem, params.arch, params.subarch, params.release, params.label)) kernel_params = [ # Read by the open-iscsi initramfs code. "iscsi_target_name=%s" % tname, "iscsi_target_ip=%s" % params.fs_host, "iscsi_target_port=3260", "iscsi_initiator=%s" % params.hostname, # Read by cloud-initramfs-dyn-netconf and klibc's ipconfig # in the initramfs. "ip=::::%s:BOOTIF" % params.hostname, # kernel / udev name iscsi devices with this path "ro root=/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-1" % ( params.fs_host, "3260", tname), # Read by overlayroot package. "overlayroot=tmpfs", # Read by cloud-init. "cloud-config-url=%s" % params.preseed_url, ] return kernel_params else: # These are options used by the Debian Installer. return [ "netcfg/choose_interface=auto", # Use the text installer, display only critical messages. 
"text priority=critical", compose_preseed_opt(params.preseed_url), compose_locale_opt(), ] + compose_hostname_opts(params) def compose_arch_opts(params): """Return any architecture-specific options required""" arch_subarch = '%s/%s' % (params.arch, params.subarch) resource = ArchitectureRegistry.get_item(arch_subarch) if resource is not None and resource.kernel_options is not None: return resource.kernel_options else: return [] CURTIN_KERNEL_CMDLINE_NAME = 'KERNEL_CMDLINE_COPY_TO_INSTALL_SEP' def get_curtin_kernel_cmdline_sep(): """Return the separator for passing extra parameters to the kernel.""" return getattr( curtin, CURTIN_KERNEL_CMDLINE_NAME, '--') def compose_kernel_command_line(params): """Generate a line of kernel options for booting `node`. :type params: `KernelParameters`. """ options = [] # nomodeset prevents video mode switching. options += ["nomodeset"] options += compose_purpose_opts(params) # Note: logging opts are not respected by ephemeral images, so # these are actually "purpose_opts" but were left generic # as it would be nice to have. options += compose_logging_opts(params.log_host) options += compose_arch_opts(params) cmdline_sep = get_curtin_kernel_cmdline_sep() if params.extra_opts: # Using --- before extra opts makes both d-i and Curtin install # them into the grub config when installing an OS, thus causing # the options to "stick" when local booting later. # see LP: #1402042 for info on '---' versus '--' options.append(cmdline_sep) options.append(params.extra_opts) kernel_opts = ' '.join(options) maaslog.debug( '%s: kernel parameters %s "%s"' % (cmdline_sep, params.hostname, kernel_opts)) return kernel_opts maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/0000755000000000000000000000000013056115004020676 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/monkey.py0000644000000000000000000000274113056115004021277 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Monkey patch for the MAAS provisioning server""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "add_term_error_code_to_tftp", "force_simplestreams_to_use_urllib2", ] import sys def force_simplestreams_to_use_urllib2(): """Monkey-patch `simplestreams` to use `urllib2`. This prevents the use of `requests` which /may/ be helping simplestreams to lose file-descriptors. """ import simplestreams.contentsource if sys.version_info > (3, 0): import urllib.request as urllib_request import urllib.error as urllib_error else: import urllib2 as urllib_request urllib_error = urllib_request vars(simplestreams.contentsource).update( URL_READER=simplestreams.contentsource.Urllib2UrlReader, URL_READER_CLASSNAME="Urllib2UrlReader", urllib_error=urllib_error, urllib_request=urllib_request) def add_term_error_code_to_tftp(): """Add error code 8 to TFT server as introduced by RFC 2347. Manually apply the fix to python-tx-tftp landed in https://github.com/shylent/python-tx-tftp/pull/20 """ import tftp.datagram if tftp.datagram.errors.get(8) is None: tftp.datagram.errors[8] = ( "Terminate transfer due to option negotiation") maas-1.9.5+bzr4599.orig/src/provisioningserver/network.py0000644000000000000000000003020013056115004021455 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Discover networks attached to this cluster controller. A cluster controller uses this when registering itself with the region controller. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'discover_networks', ] from itertools import chain import json from operator import attrgetter from netaddr import ( IPAddress, IPNetwork, ) from netifaces import ( AF_INET, AF_INET6, ifaddresses, interfaces, ) from provisioningserver.utils.ipaddr import ( annotate_with_driver_information, parse_ip_addr, ) from provisioningserver.utils.network import clean_up_netifaces_address from provisioningserver.utils.shell import call_and_check class AttachedNetwork: """A network as found attached to a network interface.""" def __init__(self, interface, ip=None, subnet_mask=None): self.interface = interface self.ip = ip self.subnet_mask = subnet_mask @classmethod def from_address(cls, interface_name, address): """Construct `AttachedNetwork` from address as found by `netifaces`.""" addr = address.get('addr') if addr is not None: addr = clean_up_netifaces_address(addr, interface_name) return cls(interface_name, ip=addr, subnet_mask=address.get('netmask')) def is_relevant(self): """Could this be a network that MAAS is interested in?""" if self.interface == 'lo': # Loopback device. Not useful for nodes. return False if self.ip is None: # Interface has no address. Not usable. return False ipaddress = IPAddress(self.ip) if ipaddress.version == 4 and not self.subnet_mask: # IPv4 network has no broadcast address configured. Not usable. return False if ipaddress.is_link_local(): # Link-local address. MAAS doesn't know how to manage these. return False # Met all these requirements? Then this is a relevant network. return True def as_dict(self): """Return information as a dictionary. The return value's format is suitable as an interface description for use with the `register` API call. """ return { 'interface': self.interface, 'ip': self.ip, 'subnet_mask': self.subnet_mask, } def get_ip_network(self): """Return `IPNetwork` for this network.""" return IPNetwork('%s/%s' % (self.ip, self.subnet_mask)).cidr def get_interface_info(interface): """Return a list of `AttachedNetwork` for the named `interface`.""" ipv4_addrs = ifaddresses(interface).get(AF_INET, []) ipv6_addrs = ifaddresses(interface).get(AF_INET6, []) return [ AttachedNetwork.from_address(interface, address) for address in ipv4_addrs + ipv6_addrs ] def filter_unique_networks(networks): """Return only distinct networks out of `networks`. If two entries are on the same network (even if the entries' IP addresses differ), only one of them will be returned. :param networks: Iterable of `AttachedNetwork` that pass the `is_relevant` test. :return: List of `AttachedNetwork`. """ known_ip_networks = set() unique_networks = [] for network in sorted(networks, key=attrgetter('ip')): ip_network = network.get_ip_network() if ip_network not in known_ip_networks: unique_networks.append(network) known_ip_networks.add(ip_network) return unique_networks def discover_networks(): """Find the networks attached to this system. :return: A list of dicts, each containing keys `interface`, `ip`, and `subnet_mask`. 
""" networks = chain.from_iterable( get_interface_info(interface) for interface in interfaces()) networks = [network for network in networks if network.is_relevant()] networks = filter_unique_networks(networks) return [network.as_dict() for network in networks] def get_ip_addr_json(): """Returns this system's local IP address information, in JSON format. :raises:ExternalProcessError: if IP address information could not be gathered. """ ip_addr_output = call_and_check(["/sbin/ip", "addr"]) ifaces = parse_ip_addr(ip_addr_output) ifaces = annotate_with_driver_information(ifaces) ip_addr_json = json.dumps(ifaces) return ip_addr_json def _filter_managed_networks_by_ifname(networks): """ Given the specified list of networks, filters the list of networks and returns any that may be physical interfaces (based on the interface name). :param networks: A list of network dictionaries. Must contain an 'interface' key containing the interface name. :return: The filtered list. """ return [ network for network in networks if 'interface' in network and (network['interface'].startswith('eth') or network['interface'].startswith('en') or network['interface'].startswith('em') or network['interface'].startswith('vlan') or network['interface'].startswith('bond')) ] def _annotate_network_with_interface_information(network, addr_info): """Adds a 'type' field to a specified dictionary which represents a network interface. """ iface = addr_info.get(network['interface'], None) if iface is not None and 'type' in iface: network['type'] = iface['type'] if 'vid' in iface: network['vid'] = iface['vid'] if 'bridged_interfaces' in iface: network['bridged_interfaces'] = ' '.join( iface['bridged_interfaces']) if 'bonded_interfaces' in iface: network['bonded_interfaces'] = ' '.join( iface['bonded_interfaces']) if 'parent' in iface: network['parent'] = iface['parent'] return network def _bridges_a_physical_interface(ifname, addr_info): """Returns True if the bridge interface with the specified name bridges at least one physical Ethernet interface. Otherwise, returns False. """ bridge_interface = addr_info.get(ifname) for interface_name in bridge_interface.get('bridged_interfaces', []): iface = addr_info.get(interface_name, {}) if iface.get('type') == 'ethernet.physical': return True return False def _belongs_to_a_vlan(ifname, addr_info): """Returns True if the interface with the specified name is needed because a VLAN interface depends on it. """ for interface_name in addr_info: iface = addr_info.get(interface_name, {}) if iface.get('type') == 'ethernet.vlan': if iface.get('parent') == ifname: return True return False def _network_name(network): """Returns interface name for the specified network. (removes a trailing alias, if present.) """ return network['interface'].split(':')[0] def _should_manage_network(network, addr_info): """Returns True if this network should be managed; otherwise returns False. """ ifname = _network_name(network) addrinfo = addr_info.get(ifname, {}) iftype = addrinfo.get('type', '') # In general, only physical Ethernet interfaces, VLANs, and bonds # are going to be managed. Since they are most likely irrelevant, (and # we don't want them to create superfluous subnets) filter out virtual # interfaces (whose specific type cannot be determined) and bridges. # However, reconsider bridges as "possibly managed" if they are # present in support of a physical Ethernet device, or a VLAN is # defined on top of the bridge. 
    return (
        addrinfo and
        (_belongs_to_a_vlan(ifname, addr_info) or
         iftype == 'ethernet.physical' or
         iftype == 'ethernet.vlan' or
         iftype == 'ethernet.bond' or
         (iftype == 'ethernet.bridge' and
          _bridges_a_physical_interface(ifname, addr_info))
         )
    )


def _filter_and_annotate_managed_networks(networks, ip_addr_json):
    """
    Given the specified list of networks and corresponding JSON, filters
    the list of networks and returns any that are known to be physical
    interfaces. (based on driver information gathered from /sys)

    Also annotates the list of networks with each network's type.

    :param networks: A list of network dictionaries. Must contain an
        'interface' key containing the interface name.
    :param ip_addr_json: A JSON string returned from `get_ip_addr_json()`.
    :return: The filtered list.
    """
    addr_info = json.loads(ip_addr_json)
    assert isinstance(addr_info, dict)
    return [
        _annotate_network_with_interface_information(network, addr_info)
        for network in networks
        if _should_manage_network(network, addr_info)
    ]


def filter_and_annotate_networks(networks, ip_addr_json=None):
    """
    Given the specified list of networks and optional corresponding JSON,
    filters the list of networks and returns any that may correspond to
    managed networks. (that is, any physical Ethernet interfaces, plus
    bonds and VLANs.)

    If no interfaces are found, fall back to using the interface name to
    filter the list in a reasonable manner. (this allows support for running
    on LXCs, where all interfaces may be virtual.)

    Also annotates the list of networks with their type, and other metadata
    such as VLAN VID, bonded/bridged interfaces, or parent.

    :param networks: A list of network dictionaries. Must contain an
        'interface' key containing the interface name.
    :param ip_addr_json: A JSON string returned from `get_ip_addr_json()`.
    :return: The filtered list.
    """
    assert networks is not None
    if ip_addr_json is None:
        return _filter_managed_networks_by_ifname(networks)
    else:
        physical_networks = _filter_and_annotate_managed_networks(
            networks, ip_addr_json)
        if len(physical_networks) > 0:
            return physical_networks
        else:
            # Since we couldn't find anything, fall back to using the
            # heuristic based on names. (we could be running inside a
            # container with only virtual interfaces, etc.)
            return _filter_managed_networks_by_ifname(networks)


def _get_interface_type_priority(iface):
    """Returns a sort key based on interface types we prefer to process
    first when adding them to a NodeGroup. The most important thing is that
    we need to process VLANs last, since they require the underlying Fabric
    to be created first.
    """
    iftype = iface.get('type')
    # Physical interfaces first, followed by bonds, followed by bridges.
    # VLAN interfaces last.
    # This will ensure that underlying Fabric objects can be created before
    # any VLANs that may belong to each Fabric.
    if iftype == "ethernet.physical":
        return 0
    elif iftype == "ethernet.wireless":
        return 1
    elif iftype == "ethernet":
        return 2
    elif iftype == "ethernet.bond":
        return 3
    elif iftype == "ethernet.bridge":
        return 4
    elif iftype == "ethernet.vlan":
        return 5
    else:
        # We don't really care what the sort order is; they should be
        # filtered out anyway.
        return -1


def _network_priority_sort_key(iface):
    """Returns a sort key used for processing interfaces before adding them
    to a NodeGroup. First sorts by interface type, then interface name, then
    address family.

    (Since MAAS usually manages IPv4 addresses, and we have a name
    disambiguation function that can produce somewhat unfriendly names, make
    sure the IPv4 interfaces get to go first.)
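
    For example (hypothetical interfaces): an IPv4 entry for a physical
    'eth0' sorts before its IPv6 entry, and both sort before any
    'ethernet.vlan' entry, since VLANs must wait for their underlying
    fabrics to exist.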
""" return ( _get_interface_type_priority(iface), iface['interface'], IPAddress(iface['ip']).version ) def sort_networks_by_priority(networks): """Sorts the specified list of networks in the order in which we would prefer to add them to a NodeGroup.""" return sorted(networks, key=_network_priority_sort_key) maas-1.9.5+bzr4599.orig/src/provisioningserver/path.py0000644000000000000000000000465313056115004020735 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Compute paths relative to root.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_path', 'get_tentative_path', ] import os from provisioningserver.utils.fs import ensure_dir def get_root(): """Return ``MAAS_ROOT`` if set, else "/".""" root = os.getenv('MAAS_ROOT') if root is None: return "/" elif len(root) == 0: return "/" else: return root def get_tentative_path(*path_elements): """Return an absolute path based on the `MAAS_ROOT` environment variable. Use this to compute paths like ``/var/lib/maas/gnupg``, so that demo and development environments can redirect them to a playground location. For example: * If ``MAAS_ROOT`` is set to ``/tmp/maasroot``, then ``get_path()`` will return ``/tmp/maasroot`` and ``get_path('/var/lib/maas')`` will return ``/tmp/maasroot/var/lib/maas``. * If ``MAAS_ROOT`` is not set, you just get (a normalised version of) the location you passed in; just ``get_path()`` will always return the root directory. This call may have minor side effects: it reads environment variables and the current working directory. Side effects during imports are bad, so avoid using this in global variables. Instead of exporting a variable that holds your path, export a getter function that returns your path. Add caching if it becomes a performance problem. """ # Strip off a leading slash from the given path, if any. If it were left # in, it would override preceding path elements and MAAS_ROOT would be # ignored later on. The dot is there to make the call work even with zero # path elements. path = os.path.join('.', *path_elements).lstrip('/') path = os.path.join(get_root(), path) return os.path.abspath(path) def get_path(*path_elements): """Return an absolute path based on the `MAAS_ROOT` environment variable. This also ensures that the parent directory of the resultant path exists and is a directory. See `get_tentative_path` for details. """ path = get_tentative_path(*path_elements) # Make sure that the path to the file actually exists. ensure_dir(os.path.dirname(path)) return path maas-1.9.5+bzr4599.orig/src/provisioningserver/plugin.py0000644000000000000000000002032013056115004021264 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Twisted Application Plugin code for the MAAS provisioning server""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ProvisioningServiceMaker", ] from errno import ENOPROTOOPT import socket from socket import error as socket_error from provisioningserver.config import ClusterConfiguration from provisioningserver.monkey import ( add_term_error_code_to_tftp, force_simplestreams_to_use_urllib2, ) from provisioningserver.utils.debug import ( register_sigusr2_thread_dump_handler, ) from twisted.application.internet import TCPServer from twisted.application.service import IServiceMaker from twisted.internet import reactor from twisted.plugin import IPlugin from twisted.python import usage from twisted.web.resource import Resource from twisted.web.server import Site from zope.interface import implementer def serverFromString(description): """Lazy import from `provisioningserver.utils.introspect`.""" from provisioningserver.utils import introspect return introspect.serverFromString(description) class Options(usage.Options): """Command line options for the provisioning server.""" optParameters = [ ["introspect", None, None, ("Allow introspection, allowing unhindered access to the internals " "of MAAS. This should probably only be used for debugging. Supply " "an argument in 'endpoint' form; the document 'Getting Connected " "with Endpoints' on the Twisted Wiki may help."), serverFromString], ] @implementer(IServiceMaker, IPlugin) class ProvisioningServiceMaker: """Create a service for the Twisted plugin.""" options = Options def __init__(self, name, description): self.tapname = name self.description = description def _makeSiteService(self, papi_xmlrpc, config): """Create the site service.""" site_root = Resource() site_root.putChild("api", papi_xmlrpc) site = Site(site_root) site_port = config["port"] site_interface = config["interface"] site_service = TCPServer(site_port, site, interface=site_interface) site_service.setName("site") return site_service def _makeImageService(self, resource_root): from provisioningserver.pserv_services.image import ( BootImageEndpointService) from twisted.internet.endpoints import AdoptedStreamServerEndpoint port = 5248 # config["port"] # Make a socket with SO_REUSEPORT set so that we can run multiple we # applications. This is easier to do from outside of Twisted as there's # not yet official support for setting socket options. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) except socket_error as e: # Python's socket module was compiled using modern headers # thus defining SO_REUSEPORT would cause issues as it might # running in older kernel that does not support SO_REUSEPORT. # XXX andreserl 2015-04-08 bug=1441684: We need to add a warning # log message when we see this error, and a test for it. if e.errno != ENOPROTOOPT: raise e s.bind(('0.0.0.0', port)) # Use a backlog of 50, which seems to be fairly common. s.listen(50) # Adopt this socket into Twisted's reactor. site_endpoint = AdoptedStreamServerEndpoint( reactor, s.fileno(), s.family) site_endpoint.port = port # Make it easy to get the port number. site_endpoint.socket = s # Prevent garbage collection. 
image_service = BootImageEndpointService( resource_root=resource_root, endpoint=site_endpoint) image_service.setName("image_service") return image_service def _makeTFTPService( self, cluster_uuid, tftp_root, tftp_port, tftp_generator): """Create the dynamic TFTP service.""" from provisioningserver.pserv_services.tftp import TFTPService tftp_service = TFTPService( resource_root=tftp_root, port=tftp_port, generator=tftp_generator, uuid=cluster_uuid) tftp_service.setName("tftp") return tftp_service def _makeImageDownloadService(self, rpc_service, cluster_uuid, tftp_root): from provisioningserver.pserv_services.image_download_service \ import ImageDownloadService image_download_service = ImageDownloadService( rpc_service, cluster_uuid, tftp_root, reactor) image_download_service.setName("image_download") return image_download_service def _makeLeaseUploadService(self, rpc_service, cluster_uuid): from provisioningserver.pserv_services.lease_upload_service \ import LeaseUploadService lease_upload_service = LeaseUploadService( rpc_service, reactor, cluster_uuid) lease_upload_service.setName("lease_upload") return lease_upload_service def _makeNodePowerMonitorService(self, cluster_uuid): from provisioningserver.pserv_services.node_power_monitor_service \ import NodePowerMonitorService node_monitor = NodePowerMonitorService(cluster_uuid, reactor) node_monitor.setName("node_monitor") return node_monitor def _makeRPCService(self): from provisioningserver.rpc.clusterservice import ClusterClientService rpc_service = ClusterClientService(reactor) rpc_service.setName("rpc") return rpc_service def _makeDHCPProbeService(self, rpc_service, cluster_uuid): from provisioningserver.pserv_services.dhcp_probe_service \ import DHCPProbeService dhcp_probe_service = DHCPProbeService( rpc_service, reactor, cluster_uuid) dhcp_probe_service.setName("dhcp_probe") return dhcp_probe_service def _makeServiceMonitorService(self): from provisioningserver.pserv_services.service_monitor_service \ import ServiceMonitorService service_monitor = ServiceMonitorService(reactor) service_monitor.setName("service_monitor") return service_monitor def _makeIntrospectionService(self, endpoint): from provisioningserver.utils import introspect introspect_service = ( introspect.IntrospectionShellService( location="cluster", endpoint=endpoint, namespace={})) introspect_service.setName("introspect") return introspect_service def _makeServices(self, config): # Several services need to make use of the RPC service. rpc_service = self._makeRPCService() yield rpc_service # Other services that make up the MAAS Region Controller. yield self._makeDHCPProbeService(rpc_service, config.cluster_uuid) yield self._makeLeaseUploadService(rpc_service, config.cluster_uuid) yield self._makeNodePowerMonitorService(config.cluster_uuid) yield self._makeServiceMonitorService() yield self._makeImageDownloadService( rpc_service, config.cluster_uuid, config.tftp_root) # The following are network-accessible services. 
        yield self._makeImageService(config.tftp_root)
        yield self._makeTFTPService(
            config.cluster_uuid, config.tftp_root, config.tftp_port,
            config.tftp_generator_url)

    def makeService(self, options):
        """Construct the MAAS Cluster service."""
        register_sigusr2_thread_dump_handler()
        force_simplestreams_to_use_urllib2()
        add_term_error_code_to_tftp()

        from provisioningserver import services
        with ClusterConfiguration.open() as config:
            for service in self._makeServices(config):
                service.setServiceParent(services)

        if options["introspect"] is not None:
            introspect = self._makeIntrospectionService(options["introspect"])
            introspect.setServiceParent(services)

        return services
maas-1.9.5+bzr4599.orig/src/provisioningserver/power/0000755000000000000000000000000013056115004020553 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/0000755000000000000000000000000013056115004022461 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/0000755000000000000000000000000013056115004020203 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/security.py0000644000000000000000000001332413056115004021643 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Cluster security code."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
)

str = None

__metaclass__ = type
__all__ = [
    "calculate_digest",
    "get_shared_secret_filesystem_path",
    "get_shared_secret_from_filesystem",
]

from binascii import (
    a2b_hex,
    b2a_hex,
)
import errno
from hashlib import sha256
from hmac import HMAC
from os import fchmod
from os.path import dirname
from sys import (
    stderr,
    stdin,
)

from provisioningserver.path import get_path
from provisioningserver.utils.fs import (
    ensure_dir,
    FileLock,
    read_text_file,
    write_text_file,
)


def to_hex(b):
    """Convert a byte string to hex encoding."""
    assert isinstance(b, bytes), "%r is not a byte string" % (b,)
    return b2a_hex(b).decode("ascii")


def to_bin(u):
    """Convert a hex-encoded, ASCII-only unicode string back to bytes."""
    assert isinstance(u, unicode), "%r is not a unicode string" % (u,)
    # Strip ASCII whitespace from u before converting.
    return a2b_hex(u.encode("ascii").strip())


def get_shared_secret_filesystem_path():
    """Return the path to shared-secret on the filesystem."""
    return get_path("var", "lib", "maas", "secret")


def get_shared_secret_from_filesystem():
    """Load the secret from the filesystem.

    `get_shared_secret_filesystem_path` defines where the file will be
    written. If the directory does not already exist, this will attempt to
    create it, including all parent directories.

    :return: A byte string of arbitrary length.
    """
    secret_path = get_shared_secret_filesystem_path()
    ensure_dir(dirname(secret_path))
    with FileLock(secret_path).wait(10):
        # Load secret from the filesystem, if it exists.
        try:
            secret_hex = read_text_file(secret_path)
        except IOError as e:
            if e.errno == errno.ENOENT:
                return None
            else:
                raise
        else:
            return to_bin(secret_hex)


def set_shared_secret_on_filesystem(secret):
    """Write the secret to the filesystem.

    `get_shared_secret_filesystem_path` defines where the file will be
    written. If the directory does not already exist, this will attempt to
    create it, including all parent directories.

    :type secret: A byte string of arbitrary length.
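
    A minimal usage sketch (the secret value is illustrative only):

        set_shared_secret_on_filesystem(b'not-a-real-secret')
        assert get_shared_secret_from_filesystem() == b'not-a-real-secret'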
""" secret_path = get_shared_secret_filesystem_path() ensure_dir(dirname(secret_path)) secret_hex = to_hex(secret) with FileLock(secret_path).wait(10): # Ensure that the file has sensible permissions. with open(secret_path, "ab") as secret_f: fchmod(secret_f.fileno(), 0o640) # Write secret to the filesystem. write_text_file(secret_path, secret_hex) def calculate_digest(secret, message, salt): """Calculate a SHA-256 HMAC digest for the given data.""" assert isinstance(secret, bytes), "%r is not a byte string." % (secret,) assert isinstance(message, bytes), "%r is not byte string." % (message,) assert isinstance(salt, bytes), "%r is not a byte string." % (salt,) hmacr = HMAC(secret, digestmod=sha256) hmacr.update(message) hmacr.update(salt) return hmacr.digest() class InstallSharedSecretScript: """Install a shared-secret onto a cluster. This class conforms to the contract that :py:func:`MainScript.register` requires. """ @staticmethod def add_arguments(parser): """Initialise options for storing a shared-secret. :param parser: An instance of :class:`ArgumentParser`. """ @staticmethod def run(args): """Install a shared-secret to this cluster. When invoked interactively, you'll be prompted to enter the secret. Otherwise the secret will be read from the first line of stdin. In both cases, the secret must be hex/base16 encoded. """ # Obtain the secret from the invoker. if stdin.isatty(): try: secret_hex = raw_input("Secret (hex/base16 encoded): ") except EOFError: print() # So that the shell prompt appears on the next line. raise SystemExit(1) except KeyboardInterrupt: print() # So that the shell prompt appears on the next line. raise else: secret_hex = stdin.readline() # Decode and install the secret. try: secret = secret_hex.strip().decode("hex") except TypeError as error: print("Secret could not be decoded:", unicode(error), file=stderr) raise SystemExit(1) else: set_shared_secret_on_filesystem(secret) shared_secret_path = get_shared_secret_filesystem_path() print("Secret installed to %s." % shared_secret_path) raise SystemExit(0) class CheckForSharedSecretScript: """Check for the presence of a shared-secret on a cluster. This class conforms to the contract that :py:func:`MainScript.register` requires. """ @staticmethod def add_arguments(parser): """Initialise options for checking the presence of a shared-secret. :param parser: An instance of :class:`ArgumentParser`. """ @staticmethod def run(args): """Check for the presence of a shared-secret on this cluster. Exits 0 (zero) if a shared-secret has been installed. """ if get_shared_secret_from_filesystem() is None: print("Shared-secret is NOT installed.") raise SystemExit(1) else: print("Shared-secret is installed.") raise SystemExit(0) maas-1.9.5+bzr4599.orig/src/provisioningserver/service_monitor.py0000644000000000000000000003337513056115004023213 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Services monitor ensures services are in their expected state.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "service_monitor", ] from collections import defaultdict import os from subprocess import ( PIPE, Popen, STDOUT, ) from threading import Lock from provisioningserver.drivers.service import ( SERVICE_STATE, ServiceRegistry, ) from provisioningserver.logger.log import get_maas_logger from provisioningserver.utils import get_init_system from provisioningserver.utils.twisted import ( asynchronous, synchronous, ) from twisted.internet.threads import deferToThread maaslog = get_maas_logger("service_monitor") class UnknownServiceError(Exception): """Raised when a check is called for a service the `ServiceMonitor` does not know about.""" class ServiceActionError(Exception): """Raised when a service has failed to perform an action successfully.""" class ServiceParsingError(Exception): """Raised when the `ServiceMonitor` is unable to parse the status of a service.""" class ServiceNotOnError(Exception): """Raised when a service is not expected to be on, but a restart is performed.""" class ServiceMonitor: """Monitors all services from the `ServiceRegistry` to make sure they remain in their expected state. Actions are performed on the services to keep the services in their desired state.""" # Used to convert the upstart state to the `SERVICE_STATE` enum. UPSTART_TO_STATE = { "start": SERVICE_STATE.ON, "stop": SERVICE_STATE.OFF, } # Used to log when the process state is not expected for the active state. UPSTART_PROCESS_STATE = { SERVICE_STATE.ON: "running", SERVICE_STATE.OFF: "waiting", } # Used to convert the systemd state to the `SERVICE_STATE` enum. SYSTEMD_TO_STATE = { "active": SERVICE_STATE.ON, "inactive": SERVICE_STATE.OFF, } # Used to log when the process state is not expected for the active state. SYSTEMD_PROCESS_STATE = { SERVICE_STATE.ON: "running", SERVICE_STATE.OFF: "dead", } def __init__(self, init_system=None): if init_system is None: init_system = get_init_system() self.init_system = init_system self.service_locks = defaultdict(Lock) # A shared lock for critical sections. self._lock = Lock() def _get_service_lock(self, service): """Return the lock for service.""" with self._lock: return self.service_locks[service] def _get_service_by_name(self, name): """Return service from its name in the `ServiceRegistry`.""" service = ServiceRegistry.get_item(name) if service is None: raise UnknownServiceError( "Service '%s' is not registered." % name) return service @synchronous def get_service_state(self, name): service = ServiceRegistry.get_item(name) if service is None: raise UnknownServiceError( "Service '%s' is not registered." % name) return self._get_service_status(service)[0] @synchronous def ensure_all_services(self): """Ensures that all services from the `ServiceRegistry` are in their desired state.""" for name, service in sorted(ServiceRegistry): try: self.ensure_service(name) except ServiceActionError: # ensure_service method already logs this error. Just catch # it here and ignore it. 
                pass
            except Exception as e:
                maaslog.error(
                    "While monitoring service '%s' an error was "
                    "encountered: %s", service.service_name, e)

    @synchronous
    def ensure_service(self, name):
        """Ensures that a service is in its desired state."""
        service = self._get_service_by_name(name)
        with self._get_service_lock(name):
            self._ensure_service(service)

    @asynchronous
    def async_ensure_service(self, name):
        """Asynchronously ensures that a service is in its desired state."""
        return deferToThread(self.ensure_service, name)

    @synchronous
    def restart_service(self, name):
        """Restart service.

        Service will only be restarted if its expected state is ON.
        `ServiceNotOnError` will be raised if restart is called and the
        service's expected state is not ON.
        """
        service = self._get_service_by_name(name)
        if service.get_expected_state() != SERVICE_STATE.ON:
            raise ServiceNotOnError(
                "Service '%s' is not on, unable to restart." % (
                    service.service_name))
        with self._get_service_lock(name):
            self._service_action(service, "restart")
            active_state, process_state = self._get_service_status(service)
            if active_state != SERVICE_STATE.ON:
                error_msg = (
                    "Service '%s' failed to restart. Its current state "
                    "is '%s' and '%s'." % (
                        service.service_name, active_state, process_state))
                maaslog.error(error_msg)
                raise ServiceActionError(error_msg)
            else:
                maaslog.info(
                    "Service '%s' has been restarted. Its current state "
                    "is '%s' and '%s'." % (
                        service.service_name, active_state, process_state))

    @asynchronous
    def async_restart_service(self, name):
        """Asynchronously restart the service."""
        return deferToThread(self.restart_service, name)

    def _exec_service_action(self, service_name, action):
        """Perform the action with the service command.

        :return: tuple (exit code, output)
        """
        # Force systemd to output in UTF-8 by selecting the C.UTF-8 locale.
        # This doesn't have any effect on upstart.
        env = os.environ.copy()
        env["LANG"] = "C.UTF-8"
        env["LC_ALL"] = "C.UTF-8"
        process = Popen(
            ["sudo", "service", service_name, action],
            stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, env=env)
        output, _ = process.communicate()
        return process.wait(), output.decode("utf-8").strip()

    def _service_action(self, service, action):
        """Start or stop the service."""
        exit_code, output = self._exec_service_action(
            service.service_name, action)
        if exit_code != 0:
            error_msg = (
                "Service '%s' failed to %s: %s" % (
                    service.service_name, action, output))
            maaslog.error(error_msg)
            raise ServiceActionError(error_msg)

    def _get_service_status(self, service):
        """Return service status based on the init system."""
        if self.init_system == "systemd":
            return self._get_systemd_service_status(
                service.service_name)
        elif self.init_system == "upstart":
            return self._get_upstart_service_status(
                service.service_name)

    def _get_systemd_service_status(self, service_name):
        exit_code, output = self._exec_service_action(service_name, "status")
        # Ignore the exit_code because systemd will return non-zero for
        # anything other than an active service.

        # Parse the output of the command to determine the active status and
        # the current state of the service.
# # output for running service looks like: # tgt.service - LSB: iscsi target daemon # Loaded: loaded (/etc/init.d/tgt) # Active: active (running) since Fri 2015-05-15 15:08:26 UTC; 7s ago # Docs: man:systemd-sysv-generator(8) # # output for stopped service looks like: # tgt.service - LSB: iscsi target daemon # Loaded: loaded (/etc/init.d/tgt) # Active: inactive (dead) # Docs: man:systemd-sysv-generator(8) # # output for unknown service looks like: # missing.service # Loaded: not-found (Reason: No such file or directory) # Active: inactive (dead) for line in output.splitlines(): line = line.strip() if line.startswith("Loaded"): load_status = line.split()[1] if load_status != "loaded": raise UnknownServiceError("'%s' is unknown to systemd." % ( service_name)) if line.startswith("Active"): active_split = line.split() active_state, process_state = ( active_split[1], active_split[2].lstrip('(').rstrip(')')) active_state_enum = self.SYSTEMD_TO_STATE.get(active_state) if active_state_enum is None: raise ServiceParsingError( "Unable to parse the active state from systemd for " "service '%s', active state reported as '%s'." % ( service_name, active_state)) return active_state_enum, process_state raise ServiceParsingError( "Unable to parse the output from systemd for service '%s'." % ( service_name)) def _get_upstart_service_status(self, service_name): exit_code, output = self._exec_service_action(service_name, "status") if exit_code != 0: raise UnknownServiceError("'%s' is unknown to upstart." % ( service_name)) for line in output.splitlines(): if not line.startswith('sudo:'): active_state, process_state = self._parse_upstart_status_line( line, service_name) break active_state_enum = self.UPSTART_TO_STATE.get(active_state) if active_state_enum is None: raise ServiceParsingError( "Unable to parse the active state from upstart for " "service '%s', active state reported as '%s'." % ( service_name, active_state)) return active_state_enum, process_state def _parse_upstart_status_line(self, output, service_name): # output looks like: # tgt start/running, process 29993 # split to get the active_state/process_state try: output_split = output.split(",")[0].split()[1].split("/") active_state, process_state = output_split[0], output_split[1] except IndexError: raise ServiceParsingError( "Unable to parse the output from upstart for service '%s'." % ( service_name)) return active_state, process_state def _get_expected_process_state(self, active_state): """Return the expected process state for the `active_state` based on the init system being used.""" if self.init_system == "systemd": return self.SYSTEMD_PROCESS_STATE[active_state] elif self.init_system == "upstart": return self.UPSTART_PROCESS_STATE[active_state] def _ensure_service(self, service): """Ensure that the service is set to the correct state. We only ensure that the service is at its expected state. The current init system will control its process state and it should reach its expected process state based on the service's current active state. 
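
        For example (hypothetical): if a service's expected state is ON but
        the init system reports it as 'inactive' and 'dead', a "start"
        action is issued and the status is then checked again.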
""" expected_state = service.get_expected_state() active_state, process_state = self._get_service_status(service) if active_state == expected_state: expected_process_state = self._get_expected_process_state( active_state) if process_state != expected_process_state: maaslog.warn( "Service '%s' is %s but not in the expected state of " "'%s', its current state is '%s'.", service.service_name, active_state, expected_process_state, process_state) else: maaslog.debug( "Service '%s' is %s and '%s'.", service.service_name, active_state, process_state) else: # Service is not at its expected active state. Log the action that # will be taken to place the service in its correct state. if expected_state == SERVICE_STATE.ON: action, log_action = ("start", "started") elif expected_state == SERVICE_STATE.OFF: action, log_action = ("stop", "stopped") maaslog.info( "Service '%s' is not %s, it will be %s.", service.service_name, expected_state, log_action) # Perform the required action to get the service to reach # its target state. self._service_action(service, action) # Check that the service has remained at its target state. active_state, process_state = self._get_service_status(service) if active_state != expected_state: error_msg = ( "Service '%s' failed to %s. Its current state " "is '%s' and '%s'." % ( service.service_name, action, active_state, process_state)) maaslog.error(error_msg) raise ServiceActionError(error_msg) else: maaslog.info( "Service '%s' has been %s and is '%s'." % ( service.service_name, log_action, process_state)) # Global service monitor. service_monitor = ServiceMonitor() maas-1.9.5+bzr4599.orig/src/provisioningserver/tags.py0000644000000000000000000002742413056115004020740 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Cluster-side evaluation of tags.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'merge_details', 'merge_details_cleanly', 'process_node_tags', ] from collections import OrderedDict from functools import partial import httplib import urllib2 import bson from lxml import etree from provisioningserver.logger import get_maas_logger from provisioningserver.utils import classify from provisioningserver.utils.xpath import try_match_xpath import simplejson as json maaslog = get_maas_logger("tag_processing") # An example laptop's lshw XML dump was 135kB. An example lab's LLDP # XML dump was 1.6kB. A batch size of 100 would mean downloading ~14MB # from the region controller, which seems workable. The previous batch # size of 1000 would have resulted in a ~140MB download, which, on the # face of it, appears excessive. DEFAULT_BATCH_SIZE = 100 # A content-type: function mapping that can decode data of that type. decoders = { "application/json": lambda data: json.loads(data), "application/bson": lambda data: bson.BSON(data).decode(), } def process_response(response): """All responses should be httplib.OK. Additionally, `decoders` will be consulted in an attempt to decode the content. If it can't be decoded it will be returned as bytes. :param response: The result of MAASClient.get/post/etc. :type response: urllib2.addinfourl (a file-like object that has a .code attribute.) 
""" if response.code != httplib.OK: text_status = httplib.responses.get(response.code, '') message = '%s, expected 200 OK' % text_status raise urllib2.HTTPError( response.url, response.code, message, response.headers, response.fp) content = response.read() content_type = response.headers.gettype() if content_type in decoders: decode = decoders[content_type] return decode(content) else: return content def get_nodes_for_node_group(client, nodegroup_uuid): """Retrieve the UUIDs of nodes in a particular group. :param client: MAAS client instance :param nodegroup_uuid: Node group for which to retrieve nodes :return: List of UUIDs for nodes in nodegroup """ path = '/api/1.0/nodegroups/%s/' % (nodegroup_uuid) return process_response(client.get(path, op='list_nodes')) def get_details_for_nodes(client, nodegroup_uuid, system_ids): """Retrieve details for a set of nodes. :param client: MAAS client :param system_ids: List of UUIDs of systems for which to fetch LLDP data :return: Dictionary mapping node UUIDs to details, e.g. LLDP output """ path = '/api/1.0/nodegroups/%s/' % (nodegroup_uuid,) return process_response(client.post( path, op='details', system_ids=system_ids)) def post_updated_nodes(client, tag_name, tag_definition, uuid, added, removed): """Update the nodes relevant for a particular tag. :param client: MAAS client :param tag_name: Name of tag :param tag_definition: Definition of the tag, used to assure that the work being done matches the current value. :param uuid: NodeGroup uuid of this worker. Needed for security permissions. (The nodegroup worker is only allowed to touch nodes in its nodegroup, otherwise you need to be a superuser.) :param added: Set of nodes to add :param removed: Set of nodes to remove """ path = '/api/1.0/tags/%s/' % (tag_name,) maaslog.debug( "Updating nodes for %s %s, adding %s removing %s" % (tag_name, uuid, len(added), len(removed))) try: return process_response(client.post( path, op='update_nodes', as_json=True, nodegroup=uuid, definition=tag_definition, add=added, remove=removed)) except urllib2.HTTPError as e: if e.code == httplib.CONFLICT: if e.fp is not None: msg = e.fp.read() else: msg = e.msg maaslog.info("Got a CONFLICT while updating tag: %s", msg) return {} raise def _details_prepare_merge(details): # We may mutate the details later, so copy now to prevent # affecting the caller's data. details = details.copy() # Prepare an nsmap in an OrderedDict. This ensures that lxml # serializes namespace declarations in a stable order. nsmap = OrderedDict((ns, ns) for ns in sorted(details)) # Root everything in a namespace-less element. Setting the nsmap # here ensures that prefixes are preserved when dumping later. # This element will be replaced by the root of the lshw detail. # However, if there is no lshw detail, this root element shares # its tag with the tag of an lshw XML tree, so that XPath # expressions written with the lshw tree in mind will still work # without it, e.g. "/list//{lldp}something". root = etree.Element("list", nsmap=nsmap) # We have copied details, and root is new. return details, root def _details_make_backwards_compatible(details, root): # For backward-compatibilty, if lshw details are available, these # should form the root of the composite document. xmldata = details.get("lshw") if xmldata is not None: try: lshw = etree.fromstring(xmldata) except etree.XMLSyntaxError as e: maaslog.warn("Invalid lshw details: %s", e) del details["lshw"] # Don't process again later. 
        else:
            # We're throwing away the existing root, but we can adopt
            # its nsmap by becoming its child.
            root.append(lshw)
            root = lshw

    # We may have mutated details and root.
    return details, root


def _details_do_merge(details, root):
    # Merge the remaining details into the composite document.
    for namespace in sorted(details):
        xmldata = details[namespace]
        if xmldata is not None:
            try:
                detail = etree.fromstring(xmldata)
            except etree.XMLSyntaxError as e:
                maaslog.warn("Invalid %s details: %s", namespace, e)
            else:
                # Add the namespace to all unqualified elements.
                for elem in detail.iter("{}*"):
                    elem.tag = etree.QName(namespace, elem.tag)
                root.append(detail)

    # Re-home `root` in a new tree. This ensures that XPath
    # expressions like "/some-tag" work correctly. Without this, when
    # there's well-formed lshw data -- see the backward-compatibility
    # hack further up -- expressions would be evaluated from the first
    # root created in this function, even though that root is now the
    # parent of the current `root`.
    return etree.ElementTree(root)


def merge_details(details):
    """Merge node details into a single XML document.

    `details` should be of the form::

        {"name": xml-as-bytes, "name2": xml-as-bytes, ...}

    where `name` is the namespace (and prefix) where each detail's XML
    should be placed in the composite document; elements in each detail
    document without a namespace are moved into that namespace.

    The ``lshw`` detail is treated specially, purely for backwards
    compatibility. If present, it forms the root of the composite
    document, without any namespace changes, plus it will be included
    in the composite document in the ``lshw`` namespace.

    The returned document is always rooted with a ``list`` element.
    """
    details, root = _details_prepare_merge(details)
    details, root = _details_make_backwards_compatible(details, root)
    return _details_do_merge(details, root)


def merge_details_cleanly(details):
    """Merge node details into a single XML document.

    `details` should be of the form::

        {"name": xml-as-bytes, "name2": xml-as-bytes, ...}

    where `name` is the namespace (and prefix) where each detail's XML
    should be placed in the composite document; elements in each detail
    document without a namespace are moved into that namespace.

    This is similar to `merge_details`, but the ``lshw`` detail is not
    treated specially. The result of this function is not compatible with
    XPath expressions created for old releases of MAAS.

    The returned document is always rooted with a ``list`` element.
    """
    details, root = _details_prepare_merge(details)
    return _details_do_merge(details, root)


def gen_batch_slices(count, size):
    """Generate `slice`s to split `count` objects into batches.

    The batches will be evenly distributed; no batch will differ in
    length from any other by more than 1.

    Note that the slices returned include a step. This means that
    slicing a list with the aid of this function then concatenating
    the results will not give you the same list. All the elements will
    be present, but not in the same order.

    :return: An iterator of `slice`s.
    """
    batch_count, remaining = divmod(count, size)
    batch_count += 1 if remaining > 0 else 0
    for batch in xrange(batch_count):
        yield slice(batch, None, batch_count)


def gen_batches(things, batch_size):
    """Split `things` into even batches of <= `batch_size`.

    Note that batches are calculated by `get_batch_slices` which does
    not guarantee ordering.

    :type things: `list`, or anything else that can be sliced.

    :return: An iterator of `slice`s of `things`.
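
    An illustrative example of the interleaved batching:

        >>> list(gen_batches([1, 2, 3, 4, 5], 2))
        [[1, 4], [2, 5], [3]]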
""" slices = gen_batch_slices(len(things), batch_size) return (things[s] for s in slices) def gen_node_details(client, nodegroup_uuid, batches): """Fetch node details. This lazily fetches data in batches, but this detail is hidden from callers. :return: An iterator of ``(system-id, details-document)`` tuples. """ get_details = partial(get_details_for_nodes, client, nodegroup_uuid) for batch in batches: for system_id, details in get_details(batch).iteritems(): yield system_id, merge_details(details) def process_all(client, tag_name, tag_definition, nodegroup_uuid, system_ids, xpath, batch_size=None): maaslog.debug( "processing %d system_ids for tag %s nodegroup %s", len(system_ids), tag_name, nodegroup_uuid) if batch_size is None: batch_size = DEFAULT_BATCH_SIZE batches = gen_batches(system_ids, batch_size) node_details = gen_node_details(client, nodegroup_uuid, batches) nodes_matched, nodes_unmatched = classify( partial(try_match_xpath, xpath, logger=maaslog), node_details) # Upload all updates for one nodegroup at one time. This should be no more # than ~41*10,000 = 410kB. That should take <1s even on a 10Mbit network. # This also allows us to track if a nodegroup has been processed in the DB, # without having to add another API call. post_updated_nodes( client, tag_name, tag_definition, nodegroup_uuid, nodes_matched, nodes_unmatched) def process_node_tags( tag_name, tag_definition, tag_nsmap, client, nodegroup_uuid, batch_size=None): """Update the nodes for a new/changed tag definition. :param client: A `MAASClient` used to fetch the node's details via calls to the web API. :param nodegroup_uuid: The UUID for this cluster. :param tag_name: Name of the tag to update nodes for :param tag_definition: Tag definition :param batch_size: Size of batch """ # We evaluate this early, so we can fail before sending a bunch of data to # the server xpath = etree.XPath(tag_definition, namespaces=tag_nsmap) # Get nodes to process system_ids = get_nodes_for_node_group(client, nodegroup_uuid) process_all( client, tag_name, tag_definition, nodegroup_uuid, system_ids, xpath, batch_size=batch_size) maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/0000755000000000000000000000000013056115004021074 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/0000755000000000000000000000000013056115004020561 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/upgrade_cluster.py0000644000000000000000000002132313056115004023162 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Management command: upgrade the cluster. This module implements the `ActionScript` interface for pserv commands. Use the upgrade-cluster command when the MAAS code has been updated (e.g. while installing a package ugprade, from the packaging) to perform any data migrations that the new version may require. This maintains a list of upgrade hooks, each representing a data migration that was needed at some point in development of the MAAS cluster codebase. All these hooks get run, in chronological order. There is no record of updates that have already been performed; each hook figures out for itself whether its migration is needed. Backwards migrations are not supported. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'add_arguments', 'run', ] import os from os import makedirs import shutil from subprocess import check_call from textwrap import dedent from provisioningserver.auth import get_maas_user_gpghome from provisioningserver.boot.tftppath import ( drill_down, list_subdirs, ) from provisioningserver.config import ClusterConfiguration from provisioningserver.import_images.boot_resources import ( update_targets_conf, write_targets_conf, ) from provisioningserver.logger import get_maas_logger maaslog = get_maas_logger("cluster_upgrade") def make_maas_own_boot_resources(): """Upgrade hook: make the `maas` user the owner of the boot resources.""" # This reduces the privileges required for importing and managing images. with ClusterConfiguration.open() as config: boot_resources_storage = config.tftp_root if os.path.isdir(boot_resources_storage): check_call(['chown', '-R', 'maas', boot_resources_storage]) def create_gnupg_home(): """Upgrade hook: create maas user's GNUPG home directory.""" gpghome = get_maas_user_gpghome() if not os.path.isdir(gpghome): makedirs(gpghome) if os.geteuid() == 0: # Make the maas user the owner of its GPG home. Do this only if # running as root; otherwise it would probably fail. We want to # be able to start a development instance without triggering that. check_call(['chown', 'maas:maas', gpghome]) # Path to obsolete boot-resources configuration. BOOTRESOURCES_FILE = '/etc/maas/bootresources.yaml' # Recognisable header, to be prefixed to BOOTRESOURCES_FILE as part of the # warning that the file is obsolete. The retire_bootresources_yaml upgrade # hook will prefix this header and further details to the file, if and only # if this header is not yet present. BOOTRESOURCES_HEADER = "# THIS FILE IS OBSOLETE." # Warning, to be prefixed to BOOTRESOURCES_FILE as an indication that the # file is obsolete. BOOTRESOURCES_WARNING = BOOTRESOURCES_HEADER + '\n' + dedent("""\ # # The configuration below is no longer in use, and can be removed. By # default, cluster controllers now import images for all supported Ubuntu # LTS releases in all supported architectures. # # Imports can now be configured through the MAAS region controller API: # See http://maas.ubuntu.com/docs/api.html#boot-source # # To do this, first POST to the nodegroup's boot-sources endpoint (e.g. # http:///api/1.0/nodegroups//boot-sources), and then POST # to the resulting boot source to define selections. Each cluster can have # any number of boot sources, and each boot source can have any number of # selections, as in the old configuration. # # The same thing can be done using the command-line front-end for the API: # # maas boot-sources create \\ # url= keyring_filename= # # Here, # * is your login profile in the 'maas' command. # * is the UUID of the cluster. # * is the source's path as found in this config file. # * is the keyring entry as found in this config file. # # Full documentation can be found at http://maas.ubuntu.com/docs/ # # The maas-import-pxe-files import script has been removed. Instead use # the MAAS web UI, web API, or the "maas" command to trigger manual # imports. # """) + '\n' def retire_bootresources_yaml(): """Upgrade hook: mark `/etc/maas/bootresources.yaml` as obsolete. Prefixes `BOOTRESOURCES_WARNING` to the config file, if present. 
    This file was temporarily used in MAAS 1.5 to let users restrict
    which boot resources should be downloaded, where from, and to where in
    the filesystem. The settings have been replaced with model classes.
    """
    if not os.path.isfile(BOOTRESOURCES_FILE):
        return
    header = BOOTRESOURCES_HEADER.encode('ascii')
    warning = BOOTRESOURCES_WARNING.encode('ascii')
    with open(BOOTRESOURCES_FILE, 'r+b') as old_config:
        old_contents = old_config.read()
        if old_contents.startswith(header):
            # Warning is already there.
            return
        old_config.seek(0)
        old_config.write(warning)
        old_config.write(old_contents)


def filter_out_directories_with_extra_levels(paths):
    """Remove paths that contain directories with more levels. We don't want
    to move other operating systems under the ubuntu directory."""
    with ClusterConfiguration.open() as config:
        tftp_root = config.tftp_root
    for arch, subarch, release, label in paths:
        path = os.path.join(tftp_root, arch, subarch, release, label)
        if len(list_subdirs(path)) == 0:
            yield (arch, subarch, release, label)


def migrate_architectures_into_ubuntu_directory():
    """Upgrade hook: move architecture folders under the ubuntu folder.

    With the support of multiple operating systems, the structure of the
    boot resources directory gained another level in the hierarchy.
    Previously the hierarchy was arch/subarch/release/label; it has now been
    modified to os/arch/subarch/release/label.

    Before multiple operating systems, only Ubuntu was supported. Check if
    folders have the structure arch/subarch/release/label and move them into
    the ubuntu folder, making the final path
    ubuntu/arch/subarch/release/label.
    """
    with ClusterConfiguration.open() as config:
        current_dir = config.tftp_root
    if not os.path.isdir(current_dir):
        return
    # If the ubuntu folder already exists, then there is no reason to
    # continue.
    if 'ubuntu' in list_subdirs(current_dir):
        return

    # Starting point for iteration: paths that contain only the
    # top-level subdirectory of tftproot, i.e. the architecture name.
    potential_arches = list_subdirs(current_dir)
    paths = [[subdir] for subdir in potential_arches]

    # Extend paths deeper into the filesystem, through the levels that
    # represent sub-architecture, release, and label.
    # Any directory that doesn't extend this deep isn't a boot image.
    for level in ['subarch', 'release', 'label']:
        paths = drill_down(current_dir, paths)
    paths = filter_out_directories_with_extra_levels(paths)

    # Extract only the top directories (arch) from the paths, as we only
    # need their names to move them into the new 'ubuntu' folder.
    arches = {arch for arch, _, _, _ in paths}
    if len(arches) == 0:
        return

    # Create the ubuntu directory and move the architecture folders under
    # that directory.
    ubuntu_dir = os.path.join(current_dir, 'ubuntu')
    os.mkdir(ubuntu_dir)
    for arch in arches:
        shutil.move(os.path.join(current_dir, arch), ubuntu_dir)

    # Re-write the maas.tgt to point to the new location for the ubuntu boot
    # resources.
    write_targets_conf(current_dir)
    update_targets_conf(current_dir)


# Upgrade hooks, from oldest to newest. The hooks are callables, taking no
# arguments. They are called in order.
#
# Each hook figures out for itself whether its changes are needed. There is
# no record of previous upgrades.
UPGRADE_HOOKS = [
    make_maas_own_boot_resources,
    create_gnupg_home,
    retire_bootresources_yaml,
    migrate_architectures_into_ubuntu_directory,
]


def add_arguments(parser):
    """Add this command's options to the `ArgumentParser`.

    Specified by the `ActionScript` interface.
    """
    # This command accepts no arguments.
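
# An illustrative sketch of the hook contract (not a real hook): each entry
# in UPGRADE_HOOKS is a no-argument callable that decides for itself whether
# its migration is needed, e.g.:
#
#     def migrate_example_layout():
#         """Upgrade hook: purely hypothetical example."""
#         old_dir = '/var/lib/maas/old-layout'  # hypothetical path
#         if os.path.isdir(old_dir):
#             shutil.move(old_dir, '/var/lib/maas/new-layout')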
# The docstring for the "run" function is also the command's documentation. def run(args): """Perform any data migrations needed for upgrading this cluster.""" for hook in UPGRADE_HOOKS: maaslog.info("Cluster upgrade hook %s started." % hook.__name__) hook() maaslog.info("Cluster upgrade hook %s finished." % hook.__name__) maas-1.9.5+bzr4599.orig/src/provisioningserver/utils/0000755000000000000000000000000013056115004020557 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/__init__.py0000644000000000000000000002200413056115004022471 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Boot Methods.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "BootMethod", "BootMethodRegistry", ] from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from errno import ENOENT from io import BytesIO from os import path from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.kernel_opts import compose_kernel_command_line from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.region import GetArchiveMirrors from provisioningserver.utils import ( locate_config, tftp, ) from provisioningserver.utils.network import find_mac_via_arp from provisioningserver.utils.registry import Registry from provisioningserver.utils.twisted import asynchronous import tempita from tftp.backend import IReader from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from zope.interface import implementer @asynchronous def get_archive_mirrors(): client = getRegionClient() return client(GetArchiveMirrors) @asynchronous(timeout=10) @inlineCallbacks def get_main_archive_url(): mirrors = yield get_archive_mirrors() main_url = mirrors['main'].geturl() returnValue(main_url) @asynchronous(timeout=10) @inlineCallbacks def get_ports_archive_url(): mirrors = yield get_archive_mirrors() ports_url = mirrors['ports'].geturl() returnValue(ports_url) @implementer(IReader) class BytesReader: def __init__(self, data): super(BytesReader, self).__init__() self.buffer = BytesIO(data) self.size = len(data) def read(self, size): return self.buffer.read(size) def finish(self): self.buffer.close() class BootMethodError(Exception): """Exception raised for errors from a BootMethod.""" class BootMethodInstallError(BootMethodError): """Exception raised for errors from a BootMethod performing install_bootloader. """ def get_parameters(match): """Helper that gets the matched parameters from the regex match. """ return { key: value for key, value in match.groupdict().items() if value is not None } def gen_template_filenames(purpose, arch, subarch): """List possible template filenames. :param purpose: The boot purpose, e.g. "local". :param arch: Main machine architecture. :param subarch: Sub-architecture, or "generic" if there is none. Returns a list of possible PXE template filenames using the following lookup order: config.{purpose}.{arch}.{subarch}.template config.{purpose}.{arch}.template config.{purpose}.template config.template """ elements = [purpose, arch, subarch] while len(elements) >= 1: yield "config.%s.template" % ".".join(elements) elements.pop() yield "config.template" def get_remote_mac(): """Gets the requestors MAC address from arp cache. 
    This is used when the DHCP lease file is not up-to-date soon enough
    to extract the MAC address from the IP address assigned by DHCP.
    """
    remote_host, remote_port = tftp.get_remote_address()
    return find_mac_via_arp(remote_host)


class BootMethod:
    """Skeleton for a boot method."""

    __metaclass__ = ABCMeta

    # Path prefix that is used for the pxelinux.cfg. Used for
    # the dhcpd.conf that is generated.
    path_prefix = None

    # Arches for which this boot method needs to install boot loaders.
    bootloader_arches = []

    @abstractproperty
    def name(self):
        """Name of the boot method."""

    @abstractproperty
    def bios_boot_method(self):
        """Method used by the bios to boot. E.g. `pxe`."""

    @abstractproperty
    def template_subdir(self):
        """Name of template sub-directory."""

    @abstractproperty
    def bootloader_path(self):
        """Relative path from tftproot to boot loader."""

    @abstractproperty
    def arch_octet(self):
        """Architecture type that supports this method. Used for the
        dhcpd.conf file that is generated. Must be in the format XX:XX.
        """

    @abstractmethod
    def match_path(self, backend, path):
        """Checks path for a file the boot method needs to handle.

        :param backend: requesting backend
        :param path: requested path
        :return: dict of match params from path, None if no match
        """

    @abstractmethod
    def get_reader(self, backend, kernel_params, **extra):
        """Gets the reader the backend will use for this combination of
        boot method, kernel parameters, and extra parameters.

        :param backend: requesting backend
        :param kernel_params: An instance of `KernelParameters`.
        :param extra: Allow for other arguments. This is a safety valve;
            parameters generated in another component (for example, see
            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
        """

    @abstractmethod
    def install_bootloader(self, destination):
        """Installs the required files for this boot method into the
        destination.

        :param destination: path to install bootloader
        """

    def get_template_dir(self):
        """Gets the template directory for the boot method."""
        return locate_config("templates/%s" % self.template_subdir)

    def get_template(self, purpose, arch, subarch):
        """Gets the best available template for the boot method.

        Templates are loaded each time here so that they can be changed on
        the fly without restarting the provisioning server.

        :param purpose: The boot purpose, e.g. "local".
        :param arch: Main machine architecture.
        :param subarch: Sub-architecture, or "generic" if there is none.
        :return: `tempita.Template`
        """
        pxe_templates_dir = self.get_template_dir()
        for filename in gen_template_filenames(purpose, arch, subarch):
            template_name = path.join(pxe_templates_dir, filename)
            try:
                return tempita.Template.from_filename(
                    template_name, encoding="UTF-8")
            except IOError as error:
                if error.errno != ENOENT:
                    raise
        else:
            error = (
                "No PXE template found in %r for:\n"
                "  Purpose: %r, Arch: %r, Subarch: %r\n"
                "This can happen if you manually power up a node when its "
                "state is not one that allows it. Is the node in the "
                "'New' or 'Ready' states? It needs to be Enlisting, "
                "Commissioning or Allocated." % (
                    pxe_templates_dir, purpose, arch, subarch))

            raise AssertionError(error)

    def compose_template_namespace(self, kernel_params):
        """Composes the namespace variables that are used by a boot
        method template.
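
        For example (hypothetical values): with osystem='ubuntu',
        arch='amd64', subarch='generic', release='trusty', label='release'
        and a non-install purpose, kernel_path(params) would yield
        'ubuntu/amd64/generic/trusty/release/boot-kernel'.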
""" dtb_subarchs = ['xgene-uboot-mustang'] def image_dir(params): return compose_image_path( params.osystem, params.arch, params.subarch, params.release, params.label) def initrd_path(params): if params.purpose == "install": return "%s/di-initrd" % image_dir(params) else: return "%s/boot-initrd" % image_dir(params) def kernel_path(params): if params.purpose == "install": return "%s/di-kernel" % image_dir(params) else: return "%s/boot-kernel" % image_dir(params) def dtb_path(params): if params.subarch in dtb_subarchs: if params.purpose == "install": return "%s/di-dtb" % image_dir(params) else: return "%s/boot-dtb" % image_dir(params) def kernel_command(params): return compose_kernel_command_line(params) namespace = { "initrd_path": initrd_path, "kernel_command": kernel_command, "kernel_params": kernel_params, "kernel_path": kernel_path, "dtb_path": dtb_path, } return namespace class BootMethodRegistry(Registry): """Registry for boot method classes.""" # Import the supported boot methods after defining BootMethod. from provisioningserver.boot.pxe import PXEBootMethod from provisioningserver.boot.uefi import UEFIBootMethod from provisioningserver.boot.uefi_arm64 import UEFIARM64BootMethod from provisioningserver.boot.powerkvm import PowerKVMBootMethod from provisioningserver.boot.powernv import PowerNVBootMethod from provisioningserver.boot.windows import WindowsPXEBootMethod builtin_boot_methods = [ PXEBootMethod(), UEFIBootMethod(), UEFIARM64BootMethod(), PowerKVMBootMethod(), PowerNVBootMethod(), WindowsPXEBootMethod(), ] for method in builtin_boot_methods: BootMethodRegistry.register_item(method.name, method) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/install_bootloader.py0000644000000000000000000000446413056115004024624 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Install a pre-boot loader for TFTP download.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "install_bootloader", "make_destination", ] import filecmp import os.path from shutil import copyfile from provisioningserver.boot.tftppath import locate_tftp_path def make_destination(tftproot): """Locate a loader's destination, creating the directory if needed. :param tftproot: The root directory served up by the TFTP server, e.g. /var/lib/maas/tftp/. :return: Full path describing the directory that the installed loader should end up having. """ path = locate_tftp_path('', tftproot=tftproot) directory = os.path.dirname(path) if not os.path.isdir(directory): os.makedirs(directory) return directory def are_identical_files(old, new): """Are `old` and `new` identical? If `old` does not exist, the two are considered different (`new` is assumed to exist). """ if os.path.isfile(old): return filecmp.cmp(old, new, shallow=False) else: return False def install_bootloader(loader, destination): """Install bootloader file at path `loader` as `destination`. Installation will be atomic. If an identical loader is already installed, it will be left untouched. However it is still conceivable, depending on the TFTP implementation, that a download that is already in progress may suddenly start receiving data from the new file instead of the one it originally started downloading. :param loader: Name of loader to install. :param destination: Loader's intended filename, including full path, where it will become available over TFTP. 
""" if are_identical_files(destination, loader): return # Copy new loader next to the old one, to ensure that it is on the # same filesystem. Once it is, we can replace the old one with an # atomic rename operation. temp_file = '%s.new' % destination if os.path.exists(temp_file): os.remove(temp_file) copyfile(loader, temp_file) os.rename(temp_file, destination) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/install_grub.py0000644000000000000000000000236013056115004023422 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Install a GRUB2 pre-boot loader config for TFTP download.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "add_arguments", "run", ] import os.path from provisioningserver.boot.install_bootloader import make_destination from provisioningserver.config import ClusterConfiguration from provisioningserver.utils.fs import write_text_file CONFIG_FILE = """ # MAAS GRUB2 pre-loader configuration file # Load based on MAC address first. configfile (pxe)/grub/grub.cfg-${net_default_mac} # Failed to load based on MAC address. # Load amd64 by default, UEFI only supported by 64-bit configfile (pxe)/grub/grub.cfg-default-amd64 """ def add_arguments(parser): pass def run(args): """Install a GRUB2 pre-boot loader config into the TFTP directory structure. """ with ClusterConfiguration.open() as config: destination_path = make_destination(config.grub_root) destination_file = os.path.join(destination_path, 'grub.cfg') write_text_file(destination_file, CONFIG_FILE) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/powerkvm.py0000644000000000000000000000705713056115004022617 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """PowerKVM and PowerVM Boot Method""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'PowerKVMBootMethod', ] import glob import os.path from textwrap import dedent from urlparse import urlparse from provisioningserver.boot import ( BootMethod, BootMethodInstallError, get_ports_archive_url, utils, ) from provisioningserver.boot.install_bootloader import install_bootloader from provisioningserver.utils.fs import tempdir from provisioningserver.utils.shell import call_and_check GRUB_CONFIG = dedent("""\ configfile (pxe)/grub/grub.cfg-${net_default_mac} configfile (pxe)/grub/grub.cfg-default-ppc64el """) class PowerKVMBootMethod(BootMethod): name = "powerkvm" bios_boot_method = "powerkvm" template_subdir = None bootloader_path = "bootppc64.bin" bootloader_arches = ['ppc64el'] arch_octet = "00:0C" def match_path(self, backend, path): """Doesn't need to do anything, as the UEFIBootMethod provides the grub implementation needed. """ return None def get_reader(self, backend, kernel_params, **extra): """Doesn't need to do anything, as the UEFIBootMethod provides the grub implementation needed. """ return None def install_bootloader(self, destination): """Installs the required files for PowerKVM/PowerVM booting into the tftproot. 
""" ports_archive_url = get_ports_archive_url() archive_url = ports_archive_url.strip(urlparse(ports_archive_url).path) with tempdir() as tmp: # Download the grub-ieee1275-bin package data, filename = utils.get_updates_package( 'grub-ieee1275-bin', archive_url, 'main', 'ppc64el') if data is None: raise BootMethodInstallError( 'Failed to download grub-ieee1275-bin package from ' 'the archive.') grub_output = os.path.join(tmp, filename) with open(grub_output, 'wb') as stream: stream.write(data) # Extract the package with dpkg, and install the shim call_and_check(["dpkg", "-x", grub_output, tmp]) # Output the embedded config, so grub-mkimage can use it config_output = os.path.join(tmp, 'grub.cfg') with open(config_output, 'wb') as stream: stream.write(GRUB_CONFIG.encode('utf-8')) # Get list of grub modules module_dir = os.path.join( tmp, 'usr', 'lib', 'grub', 'powerpc-ieee1275') modules = [] for module_path in glob.glob(os.path.join(module_dir, '*.mod')): module_filename = os.path.basename(module_path) module_name, _ = os.path.splitext(module_filename) modules.append(module_name) # Generate the grub bootloader mkimage_output = os.path.join(tmp, self.bootloader_path) args = [ 'grub-mkimage', '-o', mkimage_output, '-O', 'powerpc-ieee1275', '-d', module_dir, '-c', config_output, ] call_and_check(args + modules) install_bootloader( mkimage_output, os.path.join(destination, self.bootloader_path)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/powernv.py0000644000000000000000000001131113056115004022431 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """PowerNV Boot Method""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'PowerNVBootMethod', ] import re from provisioningserver.boot import ( BootMethod, BytesReader, get_parameters, get_remote_mac, ) from provisioningserver.boot.pxe import ( ARP_HTYPE, re_mac_address, ) from provisioningserver.kernel_opts import compose_kernel_command_line from tftp.backend import FilesystemReader # The pxelinux.cfg path is prefixed with the architecture for the # PowerNV nodes. This prefix is set by the path-prefix dhcpd option. # We assume that the ARP HTYPE (hardware type) that PXELINUX sends is # always Ethernet. re_config_file = r''' # Optional leading slash(es). ^/* ppc64el # PowerNV pxe prefix, set by dhcpd / pxelinux[.]cfg # PXELINUX expects this. / (?: # either a MAC {htype:02x} # ARP HTYPE. - (?P{re_mac_address.pattern}) # Capture MAC. | # or "default" default ) $ ''' re_config_file = re_config_file.format( htype=ARP_HTYPE.ETHERNET, re_mac_address=re_mac_address) re_config_file = re.compile(re_config_file, re.VERBOSE) def format_bootif(mac): """Formats a mac address into the BOOTIF format, expected by the linux kernel.""" mac = mac.replace(':', '-') mac = mac.lower() return '%02x-%s' % (ARP_HTYPE.ETHERNET, mac) class PowerNVBootMethod(BootMethod): name = "powernv" bios_boot_method = "powernv" template_subdir = "pxe" bootloader_path = "pxelinux.0" arch_octet = "00:0E" path_prefix = "ppc64el/" def get_params(self, backend, path): """Gets the matching parameters from the requested path.""" match = re_config_file.match(path) if match is not None: return get_parameters(match) if path.lstrip('/').startswith(self.path_prefix): return {'path': path} return None def match_path(self, backend, path): """Checks path for the configuration file that needs to be generated. 
        :param backend: requesting backend
        :param path: requested path
        :return: dict of match params from path, None if no match
        """
        params = self.get_params(backend, path)
        if params is None:
            return None
        params['arch'] = "ppc64el"
        if 'mac' not in params:
            mac = get_remote_mac()
            if mac is not None:
                params['mac'] = mac
        return params

    def get_reader(self, backend, kernel_params, **extra):
        """Render a configuration file as a unicode string.

        :param backend: requesting backend
        :param kernel_params: An instance of `KernelParameters`.
        :param extra: Allow for other arguments. This is a safety valve;
            parameters generated in another component (for example, see
            `TFTPBackend.get_config_reader`) won't cause this to break.
        """
        # Due to the path prefix, all requested files from the client will
        # contain that prefix. Removing the prefix from the path will return
        # the correct path in the tftp root.
        if 'path' in extra:
            path = extra['path']
            path = path.replace(self.path_prefix, '', 1)
            target_path = backend.base.descendant(path.split('/'))
            return FilesystemReader(target_path)

        # Return empty config for PowerNV local. PowerNV fails to
        # support the LOCALBOOT flag. Empty config will allow it
        # to select the first device.
        if kernel_params.purpose == 'local':
            return BytesReader("".encode("utf-8"))

        template = self.get_template(
            kernel_params.purpose, kernel_params.arch,
            kernel_params.subarch)
        namespace = self.compose_template_namespace(kernel_params)

        # Modify the kernel_command to inject the BOOTIF. PowerNV fails to
        # support the IPAPPEND pxelinux flag.
        def kernel_command(params):
            cmd_line = compose_kernel_command_line(params)
            if 'mac' in extra:
                mac = extra['mac']
                mac = format_bootif(mac)
                return '%s BOOTIF=%s' % (cmd_line, mac)
            return cmd_line

        namespace['kernel_command'] = kernel_command
        return BytesReader(template.substitute(namespace).encode("utf-8"))

    def install_bootloader(self, destination):
        """Does nothing. No extra boot files are required. All of the boot
        files from PXEBootMethod will suffice."""
maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/pxe.py0000644000000000000000000001232613056115004021534 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""PXE Boot Method"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'PXEBootMethod',
    ]

from itertools import repeat
import os.path
import re

from provisioningserver.boot import (
    BootMethod,
    BytesReader,
    get_parameters,
    )
from provisioningserver.boot.install_bootloader import install_bootloader
from provisioningserver.utils.fs import atomic_symlink

# Bootloader file names to install.
BOOTLOADERS = ['pxelinux.0', 'chain.c32', 'ifcpu64.c32']

# Possible locations in which to find the bootloader files. Search these
# in this order for each file.  (This exists because locations differ
# across Ubuntu releases.)
BOOTLOADER_DIRS = [
    '/usr/lib/PXELINUX',
    '/usr/lib/syslinux',
    '/usr/lib/syslinux/modules/bios'
]

# List of possible directories in which to find additional bootloader files.
# The first existing directory will be symlinked to /syslinux/ inside
# the TFTP root directory.
SYSLINUX_DIRS = [
    # Location for syslinux version 6 (the version in Utopic).
    '/usr/lib/syslinux/modules/bios',
    # Location for syslinux version 4 (the version in Trusty).
    '/usr/lib/syslinux'
]


class ARP_HTYPE:
    """ARP Hardware Type codes."""

    ETHERNET = 0x01


# PXELINUX represents a MAC address in IEEE 802 hyphen-separated
# format. See http://www.syslinux.org/wiki/index.php/PXELINUX.
re_mac_address_octet = r'[0-9a-f]{2}'
re_mac_address = re.compile(
    "-".join(repeat(re_mac_address_octet, 6)))

# We assume that the ARP HTYPE (hardware type) that PXELINUX sends is
# always Ethernet.
re_config_file = r'''
    # Optional leading slash(es).
    ^/*
    pxelinux[.]cfg    # PXELINUX expects this.
    /
    (?: # either a MAC
        {htype:02x}    # ARP HTYPE.
        -
        (?P<mac>{re_mac_address.pattern})    # Capture MAC.
      | # or "default"
        default
          (?: # perhaps with specified arch, with a separator of either '-'
              # or '.', since the spec was changed and both are unambiguous
            [.-](?P<arch>\w+) # arch
            (?:-(?P<subarch>\w+))? # optional subarch
          )?
    )
    $
'''

re_config_file = re_config_file.format(
    htype=ARP_HTYPE.ETHERNET, re_mac_address=re_mac_address)
re_config_file = re.compile(re_config_file, re.VERBOSE)


class PXEBootMethod(BootMethod):

    name = "pxe"
    bios_boot_method = "pxe"
    template_subdir = "pxe"
    bootloader_arches = ['i386', 'amd64']
    bootloader_path = "pxelinux.0"
    arch_octet = "00:00"

    def match_path(self, backend, path):
        """Checks path for the configuration file that needs to be
        generated.

        :param backend: requesting backend
        :param path: requested path
        :return: dict of match params from path, None if no match
        """
        match = re_config_file.match(path)
        if match is None:
            return None
        return get_parameters(match)

    def get_reader(self, backend, kernel_params, **extra):
        """Render a configuration file as a unicode string.

        :param backend: requesting backend
        :param kernel_params: An instance of `KernelParameters`.
        :param extra: Allow for other arguments. This is a safety valve;
            parameters generated in another component (for example, see
            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
        """
        template = self.get_template(
            kernel_params.purpose, kernel_params.arch,
            kernel_params.subarch)
        namespace = self.compose_template_namespace(kernel_params)
        return BytesReader(template.substitute(namespace).encode("utf-8"))

    def locate_bootloader(self, bootloader):
        """Search BOOTLOADER_DIRS for bootloader.

        :return: The full file path where the bootloader was found, or None.
        """
        for dir in BOOTLOADER_DIRS:
            filename = os.path.join(dir, bootloader)
            if os.path.exists(filename):
                return filename
        return None

    def locate_syslinux_dir(self):
        """Search for an existing directory among SYSLINUX_DIRS."""
        for bootloader_dir in SYSLINUX_DIRS:
            if os.path.exists(bootloader_dir):
                return bootloader_dir
        return None

    def install_bootloader(self, destination):
        """Installs the required files and symlinks into the tftproot."""
        for bootloader in BOOTLOADERS:
            # locate_bootloader might return None, but we're happy to let
            # that traceback here, as it should never happen unless there's
            # a serious problem with packaging.
            bootloader_src = self.locate_bootloader(bootloader)
            bootloader_dst = os.path.join(destination, bootloader)
            install_bootloader(bootloader_src, bootloader_dst)

        # Create /syslinux/ symlink. PXELINUX tries this subdirectory
        # when trying to fetch files for PXE-booting.
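        # (atomic_symlink is expected to create the link under a temporary
        # name and rename it into place, so an existing /syslinux/ link is
        # replaced atomically rather than left half-updated.)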
bootloader_dir = self.locate_syslinux_dir() if bootloader_dir is not None: atomic_symlink( bootloader_dir, os.path.join(destination, 'syslinux')) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/0000755000000000000000000000000013056115004021524 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tftppath.py0000644000000000000000000002160613056115004022573 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Construct TFTP paths for boot files.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'compose_image_path', 'list_boot_images', 'list_subdirs', 'locate_tftp_path', ] import errno from itertools import chain import os.path from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystemRegistry, ) from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.helpers import ImageSpec from provisioningserver.logger import get_maas_logger maaslog = get_maas_logger("tftp") def compose_image_path(osystem, arch, subarch, release, label): """Compose the TFTP path for a PXE kernel/initrd directory. The path returned is relative to the TFTP root, as it would be identified by clients on the network. :param osystem: Operating system. :param arch: Main machine architecture. :param subarch: Sub-architecture, or "generic" if there is none. :param release: Operating system release, e.g. "precise". :param label: Release label, e.g. "release" or "alpha-2". :return: Path for the corresponding image directory (containing a kernel and initrd) as exposed over TFTP. """ # This is a TFTP path, not a local filesystem path, so hard-code the slash. return '/'.join([osystem, arch, subarch, release, label]) def locate_tftp_path(path, tftproot): """Return the local filesystem path corresponding to `path`. The return value gives the filesystem path where you'd have to put a file if you wanted it made available over TFTP as `path`. :param path: Path as used in the TFTP protocol for which you want the local filesystem equivalent. Pass `None` to get the root of the TFTP hierarchy. :param tftproot: The TFTP root directory. """ if path is None: return tftproot return os.path.join(tftproot, path.lstrip('/')) def is_visible_subdir(directory, subdir): """Is `subdir` a non-hidden sub-directory of `directory`?""" if subdir.startswith('.'): return False else: return os.path.isdir(os.path.join(directory, subdir)) def list_subdirs(directory): """Return a list of non-hidden directories in `directory`.""" return [ subdir for subdir in os.listdir(directory) if is_visible_subdir(directory, subdir) ] def extend_path(directory, path): """Dig one directory level deeper on `os.path.join(directory, *path)`. If `path` is a list of consecutive path elements drilling down from `directory`, return a list of sub-directory paths leading one step further down. :param directory: Base directory that `path` is relative to. :param path: A path to a subdirectory of `directory`, represented as a list of path elements relative to `directory`. :return: A list of paths that go one sub-directory level further down from `path`. """ return [ path + [subdir] for subdir in list_subdirs(os.path.join(directory, *path))] def drill_down(directory, paths): """Find the extensions of `paths` one level deeper into the filesystem. 
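
    For example (paths and subarches are illustrative)::

        drill_down('/tftproot', [['ubuntu', 'amd64']])
        # -> [['ubuntu', 'amd64', 'generic'], ['ubuntu', 'amd64', 'hwe-t']]
        # i.e. one entry per visible subdirectory of /tftproot/ubuntu/amd64
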
    :param directory: Base directory that each path in `paths` is
        relative to.
    :param paths: A list of "path lists."  Each path list is a list of
        path elements drilling down into the filesystem from `directory`.
    :return: A list of paths, each of which drills one level deeper down
        into the filesystem hierarchy than the originals in `paths`.
    """
    return list(chain.from_iterable(
        extend_path(directory, path) for path in paths))


def extract_metadata(metadata, params):
    """Examine the maas.meta file for any required metadata.

    :param metadata: contents of the maas.meta file
    :param params: A dict of path components for the image
        (architecture, subarchitecture, release and label).
    :return: a dict of name/value metadata pairs.  Currently, only
        "subarches" is extracted.
    """
    mapping = BootImageMapping.load_json(metadata)

    image = ImageSpec(
        os=params["osystem"],
        arch=params["architecture"],
        subarch=params["subarchitecture"],
        release=params["release"],
        label=params["label"],
        )
    try:
        # On upgrade from 1.5 to 1.6, the subarches key does not exist in
        # the maas.meta file. Without this catch, boot images will fail to
        # be reported until the boot images are imported again.
        subarches = mapping.mapping[image]['subarches']
    except KeyError:
        return {}

    return dict(supported_subarches=subarches)


def extract_image_params(path, maas_meta):
    """Represent a list of TFTP path elements as a list of boot-image dicts.

    :param path: Tuple or list that consists of a full [osystem,
        architecture, subarchitecture, release] that identify a kind of
        boot for which we may need an image.
    :param maas_meta: Contents of the maas.meta file.  This may be an
        empty string.

    :return: A list of dicts, each of which may also include additional
        items of meta-data that are not elements in the path, such as
        "subarches".
    """
    osystem, arch, subarch, release, label = path
    osystem_obj = OperatingSystemRegistry.get_item(osystem, default=None)
    if osystem_obj is None:
        return []

    purposes = osystem_obj.get_boot_image_purposes(
        arch, subarch, release, label)

    # Expand the path into a list of dicts, one for each boot purpose.
    params = []
    for purpose in purposes:
        image = dict(
            osystem=osystem, architecture=arch, subarchitecture=subarch,
            release=release, label=label, purpose=purpose)
        if purpose == BOOT_IMAGE_PURPOSE.XINSTALL:
            xinstall_path, xinstall_type = osystem_obj.get_xinstall_parameters(
                arch, subarch, release, label)
            image['xinstall_path'] = xinstall_path
            image['xinstall_type'] = xinstall_type
        else:
            image['xinstall_path'] = ''
            image['xinstall_type'] = ''
        params.append(image)

    # Merge in the meta-data.
    for image_dict in params:
        metadata = extract_metadata(maas_meta, image_dict)
        image_dict.update(metadata)

    return params


def maas_meta_file_path(tftproot):
    """Return a string containing the full path to maas.meta."""
    return os.path.join(tftproot, 'maas.meta')


def maas_meta_last_modified(tftproot):
    """Return time of last modification of maas.meta.

    The time is the same as returned from getmtime() (seconds since epoch),
    or None if the file doesn't exist.

    :param tftproot: The TFTP root path.
    """
    meta_file = maas_meta_file_path(tftproot)
    try:
        return os.path.getmtime(meta_file)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return None
        raise


def list_boot_images(tftproot):
    """List the available boot images.

    :param tftproot: TFTP root directory.
    :return: A list of dicts, describing boot images as consumed by the
        `report_boot_images` API call.
    """
    # The sub-directories directly under tftproot, if they contain
    # images, represent operating systems.
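    # The expected on-disk layout (values are illustrative) is
    #   <tftproot>/<osystem>/<arch>/<subarch>/<release>/<label>/
    # e.g. ubuntu/amd64/generic/trusty/release, mirroring
    # compose_image_path() above.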
    try:
        potential_osystems = list_subdirs(tftproot)
    except OSError as exception:
        if exception.errno == errno.ENOENT:
            # Directory does not exist, so return empty list.
            maaslog.warning(
                "No boot images have been imported from the region.")
            return []

        # Other error. Propagate.
        raise

    # Starting point for iteration: paths that contain only the
    # top-level subdirectory of tftproot, i.e. the architecture name.
    paths = [[subdir] for subdir in potential_osystems]

    # Extend paths deeper into the filesystem, through the levels that
    # represent architecture, sub-architecture, release, and label.
    # Any directory that doesn't extend this deep isn't a boot image.
    for level in ['arch', 'subarch', 'release', 'label']:
        paths = drill_down(tftproot, paths)

    # Get hold of image meta-data stored in the maas.meta file.
    meta_file_path = maas_meta_file_path(tftproot)
    try:
        with open(meta_file_path, "rb") as f:
            metadata = f.read()
    except IOError as e:
        if e.errno != errno.ENOENT:
            # Unexpected error, propagate.
            raise
        # No meta file (yet), it means no import has run so just skip
        # it.
        metadata = ""

    # Each path we find this way should be a boot image.
    # This gets serialised to JSON, so we really have to return a list, not
    # just any iterable.
    return list(chain.from_iterable(
        extract_image_params(path, metadata)
        for path in paths))
maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/uefi.py0000644000000000000000000001323513056115004021670 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""UEFI Boot Method"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'UEFIBootMethod',
    ]

from itertools import repeat
import os.path
import re
from textwrap import dedent

from provisioningserver.boot import (
    BootMethod,
    BootMethodInstallError,
    BytesReader,
    get_main_archive_url,
    get_parameters,
    utils,
    )
from provisioningserver.boot.install_bootloader import (
    install_bootloader,
    make_destination,
    )
from provisioningserver.utils.fs import tempdir
from provisioningserver.utils.shell import call_and_check

CONFIG_FILE = dedent("""
    # MAAS GRUB2 pre-loader configuration file

    # Load based on MAC address first.
    configfile (pxe)/grub/grub.cfg-${net_default_mac}

    # Failed to load based on MAC address.
    # Load amd64 by default, UEFI only supported by 64-bit
    configfile (pxe)/grub/grub.cfg-default-amd64
    """)

# GRUB EFINET represents a MAC address in IEEE 802 colon-separated
# format. Required for UEFI as GRUB2 only presents the MAC address
# in colon-separated format.
re_mac_address_octet = r'[0-9a-f]{2}'
re_mac_address = re.compile(
    ':'.join(repeat(re_mac_address_octet, 6)))

# Match the grub/grub.cfg-* request for UEFI (aka. GRUB2)
re_config_file = r'''
    # Optional leading slash(es).
    ^/*
    grub/grub[.]cfg   # UEFI (aka. GRUB2) expects this.
    -
    (?: # either a MAC
        (?P<mac>{re_mac_address.pattern}) # Capture UEFI MAC.
      | # or "default"
        default
          (?: # perhaps with specified arch, with a separator of '-'
            [-](?P<arch>\w+) # arch
            (?:-(?P<subarch>\w+))? # optional subarch
          )?
    )
    $
'''

re_config_file = re_config_file.format(
    re_mac_address=re_mac_address)
re_config_file = re.compile(re_config_file, re.VERBOSE)


class UEFIBootMethod(BootMethod):

    name = "uefi"
    bios_boot_method = "uefi"
    template_subdir = "uefi"
    bootloader_arches = ['amd64']
    bootloader_path = "bootx64.efi"
    arch_octet = "00:07"  # AMD64 EFI

    def match_path(self, backend, path):
        """Checks path for the configuration file that needs to be
        generated.
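
        For example (a sketch; the MAC address is illustrative)::

            match_path(backend, 'grub/grub.cfg-aa:bb:cc:dd:ee:ff')
            # -> {'mac': 'aa-bb-cc-dd-ee-ff'}
            # (colons rewritten to hyphens, as the rest of MAAS expects)
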
:param backend: requesting backend :param path: requested path :return: dict of match params from path, None if no match """ match = re_config_file.match(path) if match is None: return None params = get_parameters(match) # MAC address is in the wrong format, fix it mac = params.get("mac") if mac is not None: params["mac"] = mac.replace(':', '-') return params def get_reader(self, backend, kernel_params, **extra): """Render a configuration file as a unicode string. :param backend: requesting backend :param kernel_params: An instance of `KernelParameters`. :param extra: Allow for other arguments. This is a safety valve; parameters generated in another component (for example, see `TFTPBackend.get_boot_method_reader`) won't cause this to break. """ template = self.get_template( kernel_params.purpose, kernel_params.arch, kernel_params.subarch) namespace = self.compose_template_namespace(kernel_params) return BytesReader(template.substitute(namespace).encode("utf-8")) def install_bootloader(self, destination): """Installs the required files for UEFI booting into the tftproot. """ archive_url = get_main_archive_url() with tempdir() as tmp: # Download the shim-signed package data, filename = utils.get_updates_package( 'shim-signed', archive_url, 'main', 'amd64') if data is None: raise BootMethodInstallError( 'Failed to download shim-signed package from ' 'the archive.') shim_output = os.path.join(tmp, filename) with open(shim_output, 'wb') as stream: stream.write(data) # Extract the package with dpkg, and install the shim call_and_check(["dpkg", "-x", shim_output, tmp]) install_bootloader( os.path.join(tmp, 'usr', 'lib', 'shim', 'shim.efi.signed'), os.path.join(destination, self.bootloader_path)) # Download the grub-efi-amd64-signed package. data, filename = utils.get_updates_package( 'grub-efi-amd64-signed', archive_url, 'main', 'amd64') if data is None: raise BootMethodInstallError( 'Failed to download grub-efi-amd64-signed package from ' 'the archive.') grub_output = os.path.join(tmp, filename) with open(grub_output, 'wb') as stream: stream.write(data) # Extract the package with dpkg. call_and_check(["dpkg", "-x", grub_output, tmp]) # Install the grub boot loader. grub_signed = os.path.join( tmp, 'usr', 'lib', 'grub', 'x86_64-efi-signed', 'grubnetx64.efi.signed') install_bootloader( grub_signed, os.path.join(destination, 'grubx64.efi')) config_path = os.path.join(destination, 'grub') config_dst = os.path.join(config_path, 'grub.cfg') make_destination(config_path) with open(config_dst, 'wb') as stream: stream.write(CONFIG_FILE.encode("utf-8")) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/uefi_arm64.py0000644000000000000000000001007513056115004022700 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""UEFI ARM64 Boot Method""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'UEFIARM64BootMethod', ] import glob import os.path from textwrap import dedent from urlparse import urlparse from provisioningserver.boot import ( BootMethodInstallError, get_ports_archive_url, utils, ) from provisioningserver.boot.install_bootloader import install_bootloader from provisioningserver.boot.uefi import UEFIBootMethod from provisioningserver.utils.fs import tempdir from provisioningserver.utils.shell import call_and_check CONFIG_FILE_ARM64 = dedent(""" # MAAS GRUB2 pre-loader configuration file # Load based on MAC address first. configfile (pxe)/grub/grub.cfg-${net_default_mac} # Failed to load based on MAC address. # Load arm64 by default, UEFI only supported by 64-bit configfile (pxe)/grub/grub.cfg-default-arm64 """) class UEFIARM64BootMethod(UEFIBootMethod): name = "uefi_arm64" bios_boot_method = "uefi" template_subdir = "uefi" bootloader_arches = ['arm64'] bootloader_path = "grubaa64.efi" arch_octet = "00:0B" # ARM64 EFI def match_path(self, backend, path): """Doesn't need to do anything, as the UEFIBootMethod provides the grub implementation needed. """ return None def get_reader(self, backend, kernel_params, **extra): """Doesn't need to do anything, as the UEFIBootMethod provides the grub implementation needed. """ return None def install_bootloader(self, destination): """Installs the required files for UEFI ARM64 booting into the tftproot. """ ports_archive_url = get_ports_archive_url() archive_url = ports_archive_url.strip(urlparse(ports_archive_url).path) with tempdir() as tmp: # Download the grub-efi-arm64-bin package data, filename = utils.get_updates_package( 'grub-efi-arm64-bin', archive_url, 'main', 'arm64') if data is None: raise BootMethodInstallError( 'Failed to download grub-efi-arm64-bin package from ' 'the archive.') grub_output = os.path.join(tmp, filename) with open(grub_output, 'wb') as stream: stream.write(data) # Extract the package with dpkg call_and_check(["dpkg", "-x", grub_output, tmp]) # Output the embedded config, so grub-mkimage can use it config_output = os.path.join(tmp, 'grub.cfg') with open(config_output, 'wb') as stream: stream.write(CONFIG_FILE_ARM64.encode('utf-8')) # Get list of grub modules module_dir = os.path.join( tmp, 'usr', 'lib', 'grub', 'arm64-efi') modules = [] for module_path in glob.glob(os.path.join(module_dir, '*.mod')): module_filename = os.path.basename(module_path) module_name, _ = os.path.splitext(module_filename) # XXX newell 2015-04-28 bug=1459871,1459872: The module # skipping logic below can be removed once the listed bugs have # been fixed and released. See listed bugs for details. if module_name in ('setjmp', 'setjmp_test', 'progress'): continue modules.append(module_name) # Generate the grub bootloader mkimage_output = os.path.join(tmp, self.bootloader_path) args = [ 'grub-mkimage', '-o', mkimage_output, '-O', 'arm64-efi', '-d', module_dir, '-c', config_output, ] call_and_check(args + modules) install_bootloader( mkimage_output, os.path.join(destination, self.bootloader_path)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/utils.py0000644000000000000000000001143013056115004022073 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Utilities that BootMethod's can use.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'get_distro_release', 'get_package', 'get_updates_package', ] import gzip import hashlib import os from platform import linux_distribution import re import StringIO import urllib2 from provisioningserver.utils.fs import tempdir from provisioningserver.utils.shell import call_and_check def urljoin(*args): return '/'.join(s.strip('/') for s in args) def get_distro_release(): """Returns the release name for the current distribution.""" distname, version, codename = linux_distribution() return codename def get_file(url): """Downloads the file from the URL. :param url: URL to download file :return: File data, or None """ # Build a new opener so that the environment is checked for proxy # URLs. Using urllib2.urlopen() means that we'd only be using the # proxies as defined when urlopen() was called the first time. response = urllib2.build_opener().open(url) return response.read() def get_md5sum(data): """Returns the md5sum for the provided data.""" md5 = hashlib.md5() md5.update(data) return md5.hexdigest() def gpg_verify_data(signature, data_file): """Verify's data using the signature.""" with tempdir() as tmp: sig_out = os.path.join(tmp, 'verify.gpg') with open(sig_out, 'wb') as stream: stream.write(signature) data_out = os.path.join(tmp, 'verify') with open(data_out, 'wb') as stream: stream.write(data_file) call_and_check([ "gpgv", "--keyring", "/etc/apt/trusted.gpg", sig_out, data_out ]) def decompress_packages(packages): compressed = StringIO.StringIO(packages) decompressed = gzip.GzipFile(fileobj=compressed) return unicode(decompressed.read(), errors='ignore') def get_packages(archive, component, architecture, release=None): """Gets the packages list from the archive.""" release = get_distro_release() if release is None else release url = urljoin(archive, 'dists', release) release_url = urljoin(url, 'Release') release_file = get_file(release_url) release_file_gpg = get_file('%s.gpg' % release_url) gpg_verify_data(release_file_gpg, release_file) # Download the packages and verify that md5sum matches path = '%s/binary-%s/Packages.gz' % (component, architecture) packages_url = urljoin(url, path) packages = get_file(packages_url) md5sum = re.search( r"^\s*?([a-zA-Z0-9]{32})\s+?[0-9]+\s+%s$" % path, release_file, re.MULTILINE).group(1) if get_md5sum(packages) != md5sum: raise ValueError("%s failed checksum." 
% packages_url) return decompress_packages(packages) def get_package_info(package, archive, component, architecture, release=None): """Gets the package information.""" release = get_distro_release() if release is None else release packages = get_packages(archive, component, architecture, release=release) info = re.search( r"^(Package: %s.*?)\n\n" % package, packages, re.MULTILINE | re.DOTALL) if info is None: return None info = info.group(1) data = {} for line in info.splitlines(): key, value = line.split(':', 1) data[key] = value.strip() return data def get_package(package, archive, component, architecture, release=None): """Downloads the package from the archive.""" release = get_distro_release() if release is None else release package = get_package_info( package, archive, component, architecture, release=release) if package is None: return None, None # Download the package and check checksum path = package['Filename'] filename = os.path.basename(path) url = urljoin(archive, path) deb = get_file(url) md5 = get_md5sum(deb) if md5 != package['MD5sum']: raise ValueError("%s failed checksum." % filename) return deb, filename def get_updates_package(package, archive, component, architecture, release=None): """Downloads the package from the {release}-updates if it exists, if not fails back to {release} archive. """ release = get_distro_release() if release is None else release releases = ['%s-updates' % release, release] for release in releases: deb, filename = get_package( package, archive, component, architecture, release=release) if deb is not None: return deb, filename return None, None maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/windows.py0000644000000000000000000002607313056115004022436 0ustar 00000000000000# Copyright 2014-2015 Cloudbase Solutions SRL. # Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Windows PXE Boot Method""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'WindowsPXEBootMethod', ] import os.path import re import shutil import sys from provisioningserver.boot import ( BootMethod, BootMethodError, BytesReader, get_remote_mac, ) from provisioningserver.config import ClusterConfiguration from provisioningserver.logger.log import get_maas_logger from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import NoSuchNode from provisioningserver.rpc.region import RequestNodeInfoByMACAddress from provisioningserver.utils import tftp from provisioningserver.utils.fs import tempdir from provisioningserver.utils.twisted import ( asynchronous, deferred, ) from tftp.backend import FilesystemReader from twisted.internet.defer import ( inlineCallbacks, returnValue, succeed, ) from twisted.python.filepath import FilePath maaslog = get_maas_logger("windows") # These files do not exist in the tftproot. WindowsPXEBootMethod # handles access to these files returning the correct version # of the file for the booting version of Windows. # # Note: Each version of Windows can have different content for # these files. STATIC_FILES = [ 'pxeboot.0', 'bootmgr.exe', '\\boot\\bcd', '\\boot\\winpe.wim', '\\boot\\boot.sdi', '\\boot\\font\\wgl4_boot.ttf', ] def get_hivex_module(): """Returns the hivex module if avaliable. python-hivex is an optional dependency, but it is needed before MAAS can boot Windows. 
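
    A minimal guarded-use sketch (mirroring how match_path below reacts
    when the module is absent)::

        hivex = get_hivex_module()
        if hivex is None:
            raise BootMethodError('python-hivex package is missing.')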
""" if 'hivex' not in sys.modules: try: __import__('hivex') except ImportError: return None return sys.modules['hivex'] def load_hivex(*args, **kwargs): """Returns the Hivex object.""" module = get_hivex_module() if module is None: return None return module.Hivex(*args, **kwargs) @asynchronous def request_node_info_by_mac_address(mac_address): """Request node info for the given mac address. :param mac_address: The MAC Address of the node of the event. :type mac_address: unicode """ if mac_address is None: maaslog.debug("Cannot determine node; MAC address is unknown.") return succeed(None) client = getRegionClient() d = client(RequestNodeInfoByMACAddress, mac_address=mac_address) def eb_request_node_info(failure): failure.trap(NoSuchNode) maaslog.debug("Node doesn't exist for MAC address: %s", mac_address) return None return d.addErrback(eb_request_node_info) class Bcd: """Allows modification of the load options in a Windows boot configuration data file. References: http://msdn.microsoft.com/en-us/library/windows/desktop/ - aa362652(v=vs.85).aspx - aa362641(v=vs.85).aspx """ GUID_WINDOWS_BOOTMGR = '{9dea862c-5cdd-4e70-acc1-f32b344d4795}' BOOT_MGR_DISPLAY_ORDER = '24000001' LOAD_OPTIONS = '12000030' def __init__(self, filename): self.hive = load_hivex(filename, write=True) # uids objects = self._get_root_objects() self.uids = {} for i in self.hive.node_children(objects): self.uids[self.hive.node_name(i)] = self.hive.node_children(i) # default bootloader mgr = self.uids[self.GUID_WINDOWS_BOOTMGR][1] bootmgr_elems = dict([(self.hive.node_name(i), i) for i in self.hive.node_children(mgr)]) self.loader = self._get_loader(bootmgr_elems) def _get_root_elements(self): """Gets the root from the hive.""" root = self.hive.root() r_elems = {} for i in self.hive.node_children(root): name = self.hive.node_name(i) r_elems[name] = i return r_elems def _get_root_objects(self): """Gets the root objects.""" elems = self._get_root_elements() return elems['Objects'] def _get_loader(self, bootmgr_elems): """Get default bootloader.""" (val,) = self.hive.node_values( bootmgr_elems[self.BOOT_MGR_DISPLAY_ORDER]) loader = self.hive.value_multiple_strings(val)[0] return loader def _get_loader_elems(self): """Get elements present in default boot loader. We need this in order to determine the loadoptions key. """ return dict( [(self.hive.node_name(i), i) for i in self.hive.node_children(self.uids[self.loader][1])]) def _get_load_options_key(self): """Gets the key containing the load options we want to edit.""" load_elem = self._get_loader_elems() load_option_key = load_elem.get(self.LOAD_OPTIONS, None) return load_option_key def set_load_options(self, value): """Sets the loadoptions value to param:value.""" h = self._get_load_options_key() if h is None: # No load options key in the hive, add the key # so the value can be set. h = self.hive.node_add_child( self.uids[self.loader][1], self.LOAD_OPTIONS) k_type = 1 key = "Element" data = { 't': k_type, 'key': key, # Windows only accepts utf-16le in load options. 
            'value': value.decode('utf-8').encode('utf-16le'),
            }

        self.hive.node_set_value(h, data)
        self.hive.commit(None)


class WindowsPXEBootMethod(BootMethod):

    name = "windows"
    bios_boot_method = "pxe"
    template_subdir = "windows"
    bootloader_path = "pxeboot.0"
    arch_octet = None

    @deferred
    def get_node_info(self):
        """Gets node information via the remote MAC."""
        remote_mac = get_remote_mac()
        return request_node_info_by_mac_address(remote_mac)

    def clean_path(self, path):
        """Converts a Windows path into a Unix path and strips the
        boot subdirectory from the path.
        """
        path = path.lower().replace('\\', '/')
        if path[0:6] == "/boot/":
            path = path[6:]
        return path

    @inlineCallbacks
    def match_path(self, backend, path):
        """Checks path to see if the boot method should handle
        the requested file.

        :param backend: requesting backend
        :param path: requested path
        :return: dict of match params from path, None if no match
        """
        # If the node is requesting the initial bootloader, then we
        # need to see if this node is set to boot Windows first.
        local_host, local_port = tftp.get_local_address()
        if path == 'pxelinux.0':
            data = yield self.get_node_info()
            if data is None:
                returnValue(None)

            # Only provide the Windows bootloader when installing;
            # PXELINUX chainloading will work for the rest of the time.
            purpose = data.get('purpose')
            if purpose != 'install':
                returnValue(None)

            osystem = data.get('osystem')
            if osystem == 'windows':
                # python-hivex is needed to continue.
                if get_hivex_module() is None:
                    raise BootMethodError('python-hivex package is missing.')

                returnValue({
                    'mac': data.get('mac'),
                    'path': self.bootloader_path,
                    'local_host': local_host,
                    })
        # Fix the paths for the other static files Windows requests.
        elif path.lower() in STATIC_FILES:
            returnValue({
                'mac': get_remote_mac(),
                'path': self.clean_path(path),
                'local_host': local_host,
                })
        returnValue(None)

    def get_reader(self, backend, kernel_params, **extra):
        """Render a configuration file as a unicode string.

        :param backend: requesting backend
        :param kernel_params: An instance of `KernelParameters`.
        :param extra: Allow for other arguments. This is a safety valve;
            parameters generated in another component (for example, see
            `TFTPBackend.get_boot_method_reader`) won't cause this to break.
        """
        path = extra['path']
        if path == 'bcd':
            local_host = extra['local_host']
            return self.compose_bcd(kernel_params, local_host)
        return self.output_static(kernel_params, path)

    def install_bootloader(self, destination):
        """Installs the required files for Windows booting into the
        tftproot.

        Does nothing. Windows requires manual installation of bootloader
        files, due to licensing.
        """

    def compose_preseed_url(self, url):
        """Modifies the URL to replace all forward slashes with
        backslashes, and prepends the ^ character to any upper-case
        characters.

        Boot load options of Windows will all be upper-cased as Windows
        does not care about case, and what gets exposed in the registry
        is all uppercase. MAAS requires a case-sensitive URL.

        The Windows install script extracts the preseed URL, and any
        character prefixed with ^ is then uppercased, so that the URL
        is correct.
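
        For example (an illustrative URL; each '/' becomes a backslash
        and each upper-case letter gains a '^' prefix)::

            compose_preseed_url('http://maas/MAAS')
            # -> 'http:\\maas\^M^A^A^S'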
""" url = url.replace('/', '\\') return re.sub(r"([A-Z])", r"^\1", url) def get_resource_path(self, kernel_params, path): """Gets the resource path from the kernel param.""" with ClusterConfiguration.open() as config: resources = config.tftp_root return os.path.join( resources, 'windows', kernel_params.arch, kernel_params.subarch, kernel_params.release, kernel_params.label, path) def compose_bcd(self, kernel_params, local_host): """Composes the Windows boot configuration data. :param kernel_params: An instance of `KernelParameters`. :return: Binary data """ preseed_url = self.compose_preseed_url(kernel_params.preseed_url) release_path = "%s\\source" % kernel_params.release remote_path = "\\\\%s\\reminst" % local_host loadoptions = "%s;%s;%s" % \ (remote_path, release_path, preseed_url) # Generate the bcd file. bcd_template = self.get_resource_path(kernel_params, "bcd") if not os.path.isfile(bcd_template): raise BootMethodError( "Failed to find bcd template: %s" % bcd_template) with tempdir() as tmp: bcd_tmp = os.path.join(tmp, "bcd") shutil.copyfile(bcd_template, bcd_tmp) bcd = Bcd(bcd_tmp) bcd.set_load_options(loadoptions) with open(bcd_tmp, 'rb') as stream: return BytesReader(stream.read()) def output_static(self, kernel_params, path): """Outputs the static file based on the version of Windows.""" actual_path = self.get_resource_path(kernel_params, path) return FilesystemReader(FilePath(actual_path)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/__init__.py0000644000000000000000000000000013056115004023623 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_boot.py0000644000000000000000000001601413056115004024102 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.boot`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import errno import os from urlparse import urlparse from fixtures import EnvironmentVariableFixture from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) import mock from provisioningserver import boot from provisioningserver.boot import ( BootMethod, BytesReader, gen_template_filenames, get_main_archive_url, get_ports_archive_url, get_remote_mac, ) from provisioningserver.rpc import region from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture import tempita from twisted.internet.defer import ( inlineCallbacks, succeed, ) from twisted.python import context class FakeBootMethod(BootMethod): name = "fake" bios_boot_method = "fake" template_subdir = "fake" bootloader_path = "fake.efi" arch_octet = "00:00" def match_path(self, backend, path): return {} def get_reader(backend, kernel_params, **extra): return BytesReader("") def install_bootloader(): pass class TestBootMethod(MAASTestCase): """Test for `BootMethod` in `provisioningserver.boot`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @inlineCallbacks def test_get_remote_mac(self): remote_host = factory.make_ipv4_address() call_context = { "local": ( factory.make_ipv4_address(), factory.pick_port()), "remote": ( remote_host, factory.pick_port()), } mock_find = self.patch(boot, 'find_mac_via_arp') yield context.call(call_context, get_remote_mac) self.assertThat(mock_find, MockCalledOnceWith(remote_host)) def test_gen_template_filenames(self): purpose = factory.make_name("purpose") arch, subarch = factory.make_names("arch", "subarch") expected = [ "config.%s.%s.%s.template" % (purpose, arch, subarch), "config.%s.%s.template" % (purpose, arch), "config.%s.template" % (purpose, ), "config.template", ] observed = gen_template_filenames(purpose, arch, subarch) self.assertSequenceEqual(expected, list(observed)) def test_get_pxe_template(self): method = FakeBootMethod() purpose = factory.make_name("purpose") arch, subarch = factory.make_names("arch", "subarch") filename = factory.make_name("filename") # Set up the mocks that we've patched in. gen_filenames = self.patch(boot, "gen_template_filenames") gen_filenames.return_value = [filename] from_filename = self.patch(tempita.Template, "from_filename") from_filename.return_value = mock.sentinel.template # The template returned matches the return value above. template = method.get_template(purpose, arch, subarch) self.assertEqual(mock.sentinel.template, template) # gen_pxe_template_filenames is called to obtain filenames. gen_filenames.assert_called_once_with(purpose, arch, subarch) # Tempita.from_filename is called with an absolute path derived from # the filename returned from gen_pxe_template_filenames. 
from_filename.assert_called_once_with( os.path.join(method.get_template_dir(), filename), encoding="UTF-8") def make_fake_templates_dir(self, method): """Set up a fake templates dir, and return its path.""" fake_root = self.make_dir() fake_etc_maas = os.path.join(fake_root, "etc", "maas") self.useFixture(EnvironmentVariableFixture('MAAS_ROOT', fake_root)) fake_templates = os.path.join( fake_etc_maas, 'templates/%s' % method.template_subdir) os.makedirs(fake_templates) return fake_templates def test_get_template_gets_default_if_available(self): # If there is no template matching the purpose, arch, and subarch, # but there is a completely generic template, then get_pxe_template() # falls back to that as the default. method = FakeBootMethod() templates_dir = self.make_fake_templates_dir(method) generic_template = factory.make_file(templates_dir, 'config.template') purpose = factory.make_name("purpose") arch, subarch = factory.make_names("arch", "subarch") self.assertEqual( generic_template, method.get_template(purpose, arch, subarch).name) def test_get_template_not_found(self): # It is a critical and unrecoverable error if the default template # is not found. method = FakeBootMethod() self.make_fake_templates_dir(method) self.assertRaises( AssertionError, method.get_template, *factory.make_names("purpose", "arch", "subarch")) def test_get_templates_only_suppresses_ENOENT(self): # The IOError arising from trying to load a template that doesn't # exist is suppressed, but other errors are not. method = FakeBootMethod() from_filename = self.patch(tempita.Template, "from_filename") from_filename.side_effect = IOError() from_filename.side_effect.errno = errno.EACCES self.assertRaises( IOError, method.get_template, *factory.make_names("purpose", "arch", "subarch")) class TestGetArchiveUrl(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def patch_rpc_methods(self, return_value=None): fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop(region.GetArchiveMirrors) protocol.GetArchiveMirrors.return_value = return_value return protocol, connecting @inlineCallbacks def test_get_main_archive_url(self): mirrors = { 'main': urlparse(factory.make_url('ports')), 'ports': urlparse(factory.make_url('ports')), } return_value = succeed(mirrors) protocol, connecting = self.patch_rpc_methods(return_value) self.addCleanup((yield connecting)) value = yield get_main_archive_url() expected_url = mirrors['main'].geturl() self.assertEqual(expected_url, value) @inlineCallbacks def test_get_ports_archive_url(self): mirrors = { 'main': urlparse(factory.make_url('ports')), 'ports': urlparse(factory.make_url('ports')), } return_value = succeed(mirrors) protocol, connecting = self.patch_rpc_methods(return_value) self.addCleanup((yield connecting)) value = yield get_ports_archive_url() expected_url = mirrors['ports'].geturl() self.assertEqual(expected_url, value) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_install_bootloader.py0000644000000000000000000000566213056115004027026 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the install_pxe_bootloader command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os.path from maastesting.factory import factory from maastesting.testcase import MAASTestCase from maastesting.utils import ( age_file, get_write_time, ) from provisioningserver.boot.install_bootloader import ( install_bootloader, make_destination, ) from testtools.matchers import ( DirExists, FileContains, ) class TestInstallBootloader(MAASTestCase): def test_integration(self): loader_contents = factory.make_string() loader = self.make_file(contents=loader_contents) destination = self.make_file() install_bootloader(loader, destination) self.assertThat(destination, FileContains(loader_contents)) def test_make_destination_creates_directory_if_not_present(self): tftproot = self.make_dir() dest = make_destination(tftproot) self.assertThat(dest, DirExists()) def test_make_destination_returns_existing_directory(self): tftproot = self.make_dir() make_destination(tftproot) dest = make_destination(tftproot) self.assertThat(dest, DirExists()) def test_install_bootloader_installs_new_bootloader(self): contents = factory.make_string() loader = self.make_file(contents=contents) install_dir = self.make_dir() dest = os.path.join(install_dir, factory.make_name('loader')) install_bootloader(loader, dest) self.assertThat(dest, FileContains(contents)) def test_install_bootloader_replaces_bootloader_if_changed(self): contents = factory.make_string() loader = self.make_file(contents=contents) dest = self.make_file(contents="Old contents") install_bootloader(loader, dest) self.assertThat(dest, FileContains(contents)) def test_install_bootloader_skips_if_unchanged(self): contents = factory.make_string() dest = self.make_file(contents=contents) age_file(dest, 100) original_write_time = get_write_time(dest) loader = self.make_file(contents=contents) install_bootloader(loader, dest) self.assertThat(dest, FileContains(contents)) self.assertEqual(original_write_time, get_write_time(dest)) def test_install_bootloader_sweeps_aside_dot_new_if_any(self): contents = factory.make_string() loader = self.make_file(contents=contents) dest = self.make_file(contents="Old contents") temp_file = '%s.new' % dest factory.make_file( os.path.dirname(temp_file), name=os.path.basename(temp_file)) install_bootloader(loader, dest) self.assertThat(dest, FileContains(contents)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_install_grub.py0000644000000000000000000000236513056115004025630 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the install_grub command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os.path from maastesting.factory import factory from maastesting.testcase import MAASTestCase import provisioningserver.boot.install_grub from provisioningserver.boot.tftppath import locate_tftp_path from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.utils.script import MainScript from testtools.matchers import FileExists class TestInstallGrub(MAASTestCase): def test_integration(self): tftproot = self.make_dir() self.useFixture(ClusterConfigurationFixture(tftp_root=tftproot)) action = factory.make_name("action") script = MainScript(action) script.register(action, provisioningserver.boot.install_grub) script.execute((action,)) config_filename = os.path.join('grub', 'grub.cfg') self.assertThat( locate_tftp_path( config_filename, tftproot=tftproot), FileExists()) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_powerkvm.py0000644000000000000000000000627413056115004025020 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.boot.powerkvm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from contextlib import contextmanager import os from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.boot import ( BootMethodInstallError, powerkvm as powerkvm_module, utils, ) from provisioningserver.boot.powerkvm import ( GRUB_CONFIG, PowerKVMBootMethod, ) from provisioningserver.tests.test_kernel_opts import make_kernel_parameters class TestPowerKVMBootMethod(MAASTestCase): """Tests `provisioningserver.boot.powerkvm.PowerKVMBootMethod`.""" def test_match_path_returns_None(self): method = PowerKVMBootMethod() paths = [factory.make_string() for _ in range(3)] for path in paths: self.assertEqual(None, method.match_path(None, path)) def test_get_reader_returns_None(self): method = PowerKVMBootMethod() params = [make_kernel_parameters() for _ in range(3)] for param in params: self.assertEqual(None, method.get_reader(None, params)) def test_install_bootloader_get_package_raises_error(self): method = PowerKVMBootMethod() self.patch(powerkvm_module, 'get_ports_archive_url') self.patch(utils, 'get_updates_package').return_value = (None, None) self.assertRaises( BootMethodInstallError, method.install_bootloader, None) def test_install_bootloader(self): method = PowerKVMBootMethod() filename = factory.make_name('dpkg') data = factory.make_string() tmp = self.make_dir() dest = self.make_dir() @contextmanager def tempdir(): try: yield tmp finally: pass mock_get_ports_archive_url = self.patch( powerkvm_module, 'get_ports_archive_url') mock_get_ports_archive_url.return_value = 'http://ports.ubuntu.com' mock_get_updates_package = self.patch(utils, 'get_updates_package') mock_get_updates_package.return_value = (data, filename) self.patch(powerkvm_module, 'call_and_check') self.patch(powerkvm_module, 'tempdir').side_effect = tempdir mock_install_bootloader = self.patch( powerkvm_module, 'install_bootloader') method.install_bootloader(dest) with open(os.path.join(tmp, filename), 'rb') as stream: saved_data = stream.read() self.assertEqual(data, saved_data) with 
open(os.path.join(tmp, 'grub.cfg'), 'rb') as stream: saved_config = stream.read().decode('utf-8') self.assertEqual(GRUB_CONFIG, saved_config) mkimage_expected = os.path.join(tmp, method.bootloader_path) dest_expected = os.path.join(dest, method.bootloader_path) self.assertThat( mock_install_bootloader, MockCalledOnceWith(mkimage_expected, dest_expected)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_powernv.py0000644000000000000000000003301713056115004024641 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.boot.powernv`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os import re from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.boot import ( BytesReader, powernv as powernv_module, ) from provisioningserver.boot.powernv import ( ARP_HTYPE, format_bootif, PowerNVBootMethod, re_config_file, ) from provisioningserver.boot.tests.test_pxe import parse_pxe_config from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.pserv_services.tftp import TFTPBackend from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools.matchers import ( IsInstance, MatchesAll, MatchesRegex, Not, StartsWith, ) def compose_config_path(mac): """Compose the TFTP path for a PowerNV PXE configuration file. The path returned is relative to the TFTP root, as it would be identified by clients on the network. :param mac: A MAC address, in IEEE 802 hyphen-separated form, corresponding to the machine for which this configuration is relevant. This relates to PXELINUX's lookup protocol. :return: Path for the corresponding PXE config file as exposed over TFTP. """ # Not using os.path.join: this is a TFTP path, not a native path. Yes, in # practice for us they're the same. We always assume that the ARP HTYPE # (hardware type) that PXELINUX sends is Ethernet. return "ppc64el/pxelinux.cfg/{htype:02x}-{mac}".format( htype=ARP_HTYPE.ETHERNET, mac=mac) def get_example_path_and_components(): """Return a plausible path and its components. The path is intended to match `re_config_file`, and the components are the expected groups from a match. 
""" components = {"mac": factory.make_mac_address("-")} config_path = compose_config_path(components["mac"]) return config_path, components class TestPowerNVBootMethod(MAASTestCase): def make_tftp_root(self): """Set, and return, a temporary TFTP root directory.""" tftproot = self.make_dir() self.useFixture(ClusterConfigurationFixture(tftp_root=tftproot)) return tftproot def test_compose_config_path_follows_maas_pxe_directory_layout(self): name = factory.make_name('config') self.assertEqual( 'ppc64el/pxelinux.cfg/%02x-%s' % (ARP_HTYPE.ETHERNET, name), compose_config_path(name)) def test_compose_config_path_does_not_include_tftp_root(self): tftproot = self.make_tftp_root() name = factory.make_name('config') self.assertThat( compose_config_path(name), Not(StartsWith(tftproot))) def test_bootloader_path(self): method = PowerNVBootMethod() self.assertEqual('pxelinux.0', method.bootloader_path) def test_bootloader_path_does_not_include_tftp_root(self): tftproot = self.make_tftp_root() method = PowerNVBootMethod() self.assertThat( method.bootloader_path, Not(StartsWith(tftproot))) def test_name(self): method = PowerNVBootMethod() self.assertEqual('powernv', method.name) def test_template_subdir(self): method = PowerNVBootMethod() self.assertEqual('pxe', method.template_subdir) def test_arch_octet(self): method = PowerNVBootMethod() self.assertEqual('00:0E', method.arch_octet) def test_path_prefix(self): method = PowerNVBootMethod() self.assertEqual('ppc64el/', method.path_prefix) class TestPowerNVBootMethodMatchPath(MAASTestCase): """Tests for `provisioningserver.boot.powernv.PowerNVBootMethod.match_path`. """ def test_match_path_pxe_config_with_mac(self): method = PowerNVBootMethod() config_path, expected = get_example_path_and_components() params = method.match_path(None, config_path) expected['arch'] = 'ppc64el' self.assertEqual(expected, params) def test_match_path_pxe_config_without_mac(self): method = PowerNVBootMethod() fake_mac = factory.make_mac_address() self.patch(powernv_module, 'get_remote_mac').return_value = fake_mac config_path = 'ppc64el/pxelinux.cfg/default' params = method.match_path(None, config_path) expected = { 'arch': 'ppc64el', 'mac': fake_mac, } self.assertEqual(expected, params) def test_match_path_pxe_prefix_request(self): method = PowerNVBootMethod() fake_mac = factory.make_mac_address() self.patch(powernv_module, 'get_remote_mac').return_value = fake_mac file_path = 'ppc64el/file' params = method.match_path(None, file_path) expected = { 'arch': 'ppc64el', 'mac': fake_mac, 'path': file_path, } self.assertEqual(expected, params) class TestPowerNVBootMethodRenderConfig(MAASTestCase): """Tests for `provisioningserver.boot.powernv.PowerNVBootMethod.get_reader` """ def test_get_reader_install(self): # Given the right configuration options, the PXE configuration is # correctly rendered. method = PowerNVBootMethod() params = make_kernel_parameters(self, purpose="install") output = method.get_reader(backend=None, kernel_params=params) # The output is a BytesReader. self.assertThat(output, IsInstance(BytesReader)) output = output.read(10000) # The template has rendered without error. PXELINUX configurations # typically start with a DEFAULT line. self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. 
image_dir = compose_image_path( osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( MatchesRegex( r'.*^\s+KERNEL %s/di-kernel$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+INITRD %s/di-initrd$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+APPEND .+?$', re.MULTILINE | re.DOTALL))) def test_get_reader_with_extra_arguments_does_not_affect_output(self): # get_reader() allows any keyword arguments as a safety valve. method = PowerNVBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters(self, purpose="install"), } # Capture the output before sprinkling in some random options. output_before = method.get_reader(**options).read(10000) # Sprinkle some magic in. options.update( (factory.make_name("name"), factory.make_name("value")) for _ in range(10)) # Capture the output after sprinkling in some random options. output_after = method.get_reader(**options).read(10000) # The generated template is the same. self.assertEqual(output_before, output_after) def test_get_reader_with_local_purpose(self): # If purpose is "local", the output should be the empty string. method = PowerNVBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters(purpose="local"), } output = method.get_reader(**options).read(10000) self.assertEqual("", output) def test_get_reader_appends_bootif(self): method = PowerNVBootMethod() fake_mac = factory.make_mac_address() params = make_kernel_parameters(self, purpose="install") output = method.get_reader( backend=None, kernel_params=params, arch='ppc64el', mac=fake_mac) output = output.read(10000) config = parse_pxe_config(output) expected = 'BOOTIF=%s' % format_bootif(fake_mac) self.assertIn(expected, config['execute']['APPEND']) def test_format_bootif_replaces_colon(self): fake_mac = factory.make_mac_address() self.assertEqual( '01-%s' % fake_mac.replace(':', '-').lower(), format_bootif(fake_mac)) def test_format_bootif_makes_mac_address_lower(self): fake_mac = factory.make_mac_address() fake_mac = fake_mac.upper() self.assertEqual( '01-%s' % fake_mac.replace(':', '-').lower(), format_bootif(fake_mac)) class TestPowerNVBootMethodPathPrefix(MAASTestCase): """Tests for `provisioningserver.boot.powernv.PowerNVBootMethod.get_reader`.
""" def test_get_reader_path_prefix(self): data = factory.make_string().encode("ascii") temp_file = self.make_file(name="example", contents=data) temp_dir = os.path.dirname(temp_file) backend = TFTPBackend( temp_dir, "http://nowhere.example.com/", sentinel.uuid) method = PowerNVBootMethod() options = { 'backend': backend, 'kernel_params': make_kernel_parameters(), 'path': 'ppc64el/example', } reader = method.get_reader(**options) self.addCleanup(reader.finish) self.assertEqual(len(data), reader.size) self.assertEqual(data, reader.read(len(data))) self.assertEqual(b"", reader.read(1)) def test_get_reader_path_prefix_only_removes_first_occurrence(self): data = factory.make_string().encode("ascii") temp_dir = self.make_dir() temp_subdir = os.path.join(temp_dir, 'ppc64el') os.mkdir(temp_subdir) factory.make_file(temp_subdir, "example", data) backend = TFTPBackend( temp_dir, "http://nowhere.example.com/", sentinel.uuid) method = PowerNVBootMethod() options = { 'backend': backend, 'kernel_params': make_kernel_parameters(), 'path': 'ppc64el/ppc64el/example', } reader = method.get_reader(**options) self.addCleanup(reader.finish) self.assertEqual(len(data), reader.size) self.assertEqual(data, reader.read(len(data))) self.assertEqual(b"", reader.read(1)) class TestPowerNVBootMethodRegex(MAASTestCase): """Tests for `provisioningserver.boot.powernv.PowerNVBootMethod.re_config_file`. """ def test_re_config_file_is_compatible_with_config_path_generator(self): # The regular expression for extracting components of the file path is # compatible with the PXE config path generator. for iteration in range(10): config_path, args = get_example_path_and_components() match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_with_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's a leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = get_example_path_and_components() # Ensure there's a leading slash. config_path = "/" + config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_without_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's no leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = get_example_path_and_components() # Ensure there's no leading slash. config_path = config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_matches_classic_pxelinux_cfg(self): # The default config path is simply "pxelinux.cfg" (without # leading slash). The regex matches this. 
mac = 'aa-bb-cc-dd-ee-ff' match = re_config_file.match('ppc64el/pxelinux.cfg/01-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac}, match.groupdict()) def test_re_config_file_matches_pxelinux_cfg_with_leading_slash(self): mac = 'aa-bb-cc-dd-ee-ff' match = re_config_file.match('/ppc64el/pxelinux.cfg/01-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac}, match.groupdict()) def test_re_config_file_does_not_match_non_config_file(self): self.assertIsNone(re_config_file.match('ppc64el/pxelinux.cfg/kernel')) def test_re_config_file_does_not_match_file_in_root(self): self.assertIsNone(re_config_file.match('01-aa-bb-cc-dd-ee-ff')) def test_re_config_file_does_not_match_file_not_in_pxelinux_cfg(self): self.assertIsNone(re_config_file.match('foo/01-aa-bb-cc-dd-ee-ff')) def test_re_config_file_with_default(self): match = re_config_file.match('ppc64el/pxelinux.cfg/default') self.assertIsNotNone(match) self.assertEqual({'mac': None}, match.groupdict()) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_pxe.py0000644000000000000000000005446513056115004023745 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the pxe boot method.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import OrderedDict import os import re from maastesting.factory import factory from maastesting.matchers import MockCallsMatch from maastesting.testcase import MAASTestCase import mock from provisioningserver import kernel_opts from provisioningserver.boot import ( BytesReader, pxe as pxe_module, ) from provisioningserver.boot.pxe import ( ARP_HTYPE, BOOTLOADERS, PXEBootMethod, re_config_file, ) from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools.matchers import ( Contains, ContainsAll, IsInstance, MatchesAll, MatchesRegex, Not, SamePath, StartsWith, ) def compose_config_path(mac): """Compose the TFTP path for a PXE configuration file. The path returned is relative to the TFTP root, as it would be identified by clients on the network. :param mac: A MAC address, in IEEE 802 hyphen-separated form, corresponding to the machine for which this configuration is relevant. This relates to PXELINUX's lookup protocol. :return: Path for the corresponding PXE config file as exposed over TFTP. """ # Not using os.path.join: this is a TFTP path, not a native path. Yes, in # practice for us they're the same. We always assume that the ARP HTYPE # (hardware type) that PXELINUX sends is Ethernet. return "pxelinux.cfg/{htype:02x}-{mac}".format( htype=ARP_HTYPE.ETHERNET, mac=mac) class TestPXEBootMethod(MAASTestCase): def make_tftp_root(self): """Set, and return, a temporary TFTP root directory.""" tftproot = self.make_dir() self.useFixture(ClusterConfigurationFixture(tftp_root=tftproot)) return tftproot def make_dummy_bootloader_sources(self, destination, loader_names): """install_bootloader requires real files to exist; this method creates them in the requested location.
:return: list of created filenames """ created = [] for loader in loader_names: name = factory.make_file(destination, loader) created.append(name) return created def test_compose_config_path_follows_maas_pxe_directory_layout(self): name = factory.make_name('config') self.assertEqual( 'pxelinux.cfg/%02x-%s' % (ARP_HTYPE.ETHERNET, name), compose_config_path(name)) def test_compose_config_path_does_not_include_tftp_root(self): tftproot = self.make_tftp_root() name = factory.make_name('config') self.assertThat( compose_config_path(name), Not(StartsWith(tftproot))) def test_bootloader_path(self): method = PXEBootMethod() self.assertEqual('pxelinux.0', method.bootloader_path) def test_bootloader_path_does_not_include_tftp_root(self): tftproot = self.make_tftp_root() method = PXEBootMethod() self.assertThat( method.bootloader_path, Not(StartsWith(tftproot))) def test_name(self): method = PXEBootMethod() self.assertEqual('pxe', method.name) def test_template_subdir(self): method = PXEBootMethod() self.assertEqual('pxe', method.template_subdir) def test_arch_octet(self): method = PXEBootMethod() self.assertEqual('00:00', method.arch_octet) def test_locate_bootloader(self): # Put all the BOOTLOADERS except one in dir1, and the last in # dir2. dir1 = self.make_dir() dir2 = self.make_dir() dirs = [dir1, dir2] self.patch(pxe_module, "BOOTLOADER_DIRS", dirs) self.make_dummy_bootloader_sources(dir1, BOOTLOADERS[:-1]) [displaced_loader] = self.make_dummy_bootloader_sources( dir2, BOOTLOADERS[-1:]) method = PXEBootMethod() observed = method.locate_bootloader(BOOTLOADERS[-1]) self.assertEqual(displaced_loader, observed) def test_locate_bootloader_returns_None_if_not_found(self): method = PXEBootMethod() self.assertIsNone(method.locate_bootloader("foo")) def test_install_bootloader_installs_to_destination(self): # Disable the symlink creation. self.patch(pxe_module, "SYSLINUX_DIRS", []) tftproot = self.make_tftp_root() source_dir = self.make_dir() self.patch(pxe_module, "BOOTLOADER_DIRS", [source_dir]) self.make_dummy_bootloader_sources(source_dir, BOOTLOADERS) install_bootloader_call = self.patch(pxe_module, "install_bootloader") method = PXEBootMethod() method.install_bootloader(tftproot) expected = [ mock.call( os.path.join(source_dir, bootloader), os.path.join(tftproot, bootloader) ) for bootloader in BOOTLOADERS] self.assertThat( install_bootloader_call, MockCallsMatch(*expected)) def test_locate_syslinux_dir_returns_dir(self): dir1 = self.make_dir() dir2 = self.make_dir() dirs = [dir1, dir2] self.patch(pxe_module, "SYSLINUX_DIRS", dirs) method = PXEBootMethod() found_dir = method.locate_syslinux_dir() self.assertEqual(dir1, found_dir) def test_install_bootloader_creates_symlink(self): # Disable the copying of the bootloaders. self.patch(pxe_module, "BOOTLOADERS", []) target_dir = self.make_dir() self.patch(pxe_module, "SYSLINUX_DIRS", [target_dir]) tftproot = self.make_tftp_root() method = PXEBootMethod() method.install_bootloader(tftproot) syslinux_dir = os.path.join(tftproot, 'syslinux') self.assertThat(syslinux_dir, SamePath(target_dir)) def parse_pxe_config(text): """Parse a PXE config file. Returns a structure like the following, defining the sections:: {"section_label": {"KERNEL": "...", "INITRD": "...", ...}, ...} Additionally, the returned dict - which is actually an `OrderedDict`, as are all mappings returned from this function - has a `header` attribute. This is an `OrderedDict` of the settings in the top part of the PXE config file, the part before any labelled sections. 
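For example, parsing the fragment::

    DEFAULT execute
    LABEL execute
    KERNEL linux

yields ``{"execute": {"KERNEL": "linux"}}``, with a `header` of
``{"DEFAULT": "execute"}``.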
""" result = OrderedDict() sections = re.split("^LABEL ", text, flags=re.MULTILINE) for index, section in enumerate(sections): elements = [ line.split(None, 1) for line in section.splitlines() if line and not line.isspace() ] if index == 0: result.header = OrderedDict(elements) else: [label] = elements.pop(0) if label in result: raise AssertionError( "Section %r already defined" % label) result[label] = OrderedDict(elements) return result class TestParsePXEConfig(MAASTestCase): """Tests for `parse_pxe_config`.""" def test_parse_with_no_header(self): config = parse_pxe_config("LABEL foo\nOPTION setting") self.assertEqual({"foo": {"OPTION": "setting"}}, config) self.assertEqual({}, config.header) def test_parse_with_no_labels(self): config = parse_pxe_config("OPTION setting") self.assertEqual({"OPTION": "setting"}, config.header) self.assertEqual({}, config) class TestPXEBootMethodRender(MAASTestCase): """Tests for `provisioningserver.boot.pxe.PXEBootMethod.render`.""" def test_get_reader_install(self): # Given the right configuration options, the PXE configuration is # correctly rendered. method = PXEBootMethod() params = make_kernel_parameters(self, purpose="install") output = method.get_reader(backend=None, kernel_params=params) # The output is a BytesReader. self.assertThat(output, IsInstance(BytesReader)) output = output.read(10000) # The template has rendered without error. PXELINUX configurations # typically start with a DEFAULT line. self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. image_dir = compose_image_path( osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( MatchesRegex( r'.*^\s+KERNEL %s/di-kernel$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+INITRD %s/di-initrd$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+APPEND .+?$', re.MULTILINE | re.DOTALL))) def test_get_reader_install_mustang_dtb(self): # Architecture specific test. # Given the right configuration options, the PXE configuration is # correctly rendered for Mustang. method = PXEBootMethod() params = make_kernel_parameters( testcase=self, osystem="ubuntu", arch="arm64", subarch="xgene-uboot-mustang", purpose="install") output = method.get_reader(backend=None, kernel_params=params) # The output is a BytesReader. self.assertThat(output, IsInstance(BytesReader)) output = output.read(10000) # The template has rendered without error. PXELINUX configurations # typically start with a DEFAULT line. self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. image_dir = compose_image_path( osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( MatchesRegex( r'.*^\s+KERNEL %s/di-kernel$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+INITRD %s/di-initrd$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+FDT %s/di-dtb$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+APPEND .+?$', re.MULTILINE | re.DOTALL))) def test_get_reader_xinstall_mustang_dtb(self): # Architecture specific test. # Given the right configuration options, the PXE configuration is # correctly rendered for Mustang. 
method = PXEBootMethod() params = make_kernel_parameters( testcase=self, osystem="ubuntu", arch="arm64", subarch="xgene-uboot-mustang", purpose="xinstall") output = method.get_reader(backend=None, kernel_params=params) # The output is a BytesReader. self.assertThat(output, IsInstance(BytesReader)) output = output.read(10000) # The template has rendered without error. PXELINUX configurations # typically start with a DEFAULT line. self.assertThat(output, StartsWith("DEFAULT ")) # The PXE parameters are all set according to the options. image_dir = compose_image_path( osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( MatchesRegex( r'.*^\s+KERNEL %s/boot-kernel$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+INITRD %s/boot-initrd$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+FDT %s/boot-dtb$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+APPEND .+?$', re.MULTILINE | re.DOTALL))) def test_get_reader_with_extra_arguments_does_not_affect_output(self): # get_reader() allows any keyword arguments as a safety valve. method = PXEBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters(self, purpose="install"), } # Capture the output before sprinkling in some random options. output_before = method.get_reader(**options).read(10000) # Sprinkle some magic in. options.update( (factory.make_name("name"), factory.make_name("value")) for _ in range(10)) # Capture the output after sprinkling in some random options. output_after = method.get_reader(**options).read(10000) # The generated template is the same. self.assertEqual(output_before, output_after) def test_get_reader_with_local_purpose(self): # If purpose is "local", the config.localboot.template should be # used. method = PXEBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters(purpose="local"), } output = method.get_reader(**options).read(10000) self.assertIn("LOCALBOOT 0", output) def test_get_reader_with_local_purpose_i386_arch(self): # Intel i386 is a special case and needs to use the chain.c32 # loader as the LOCALBOOT PXE directive is unreliable. method = PXEBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters( arch="i386", purpose="local"), } output = method.get_reader(**options).read(10000) self.assertIn("chain.c32", output) self.assertNotIn("LOCALBOOT", output) def test_get_reader_with_local_purpose_amd64_arch(self): # Like i386, amd64 is a special case and needs to use the chain.c32 # loader, as the LOCALBOOT PXE directive is unreliable.
method = PXEBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters( arch="amd64", purpose="local"), } output = method.get_reader(**options).read(10000) self.assertIn("chain.c32", output) self.assertNotIn("LOCALBOOT", output) class TestPXEBootMethodRenderConfigScenarios(MAASTestCase): """Tests for `provisioningserver.boot.pxe.PXEBootMethod.render_config`.""" scenarios = [ ("commissioning", dict(purpose="commissioning")), ("xinstall", dict(purpose="xinstall")), ] def test_get_reader_scenarios(self): method = PXEBootMethod() get_ephemeral_name = self.patch(kernel_opts, "get_ephemeral_name") get_ephemeral_name.return_value = factory.make_name("ephemeral") osystem = factory.make_name('osystem') arch = factory.make_name('arch') subarch = factory.make_name('subarch') options = { "backend": None, "kernel_params": make_kernel_parameters( testcase=self, osystem=osystem, subarch=subarch, arch=arch, purpose=self.purpose), } output = method.get_reader(**options).read(10000) config = parse_pxe_config(output) # The default section is defined. default_section_label = config.header["DEFAULT"] self.assertThat(config, Contains(default_section_label)) default_section = dict(config[default_section_label]) contains_arch_path = StartsWith("%s/%s/%s" % (osystem, arch, subarch)) self.assertThat(default_section["KERNEL"], contains_arch_path) self.assertThat(default_section["INITRD"], contains_arch_path) self.assertEquals("2", default_section["IPAPPEND"]) class TestPXEBootMethodRenderConfigScenariosEnlist(MAASTestCase): def test_get_reader_scenarios(self): # The commissioning config uses an extra PXELINUX module to auto # select between i386 and amd64. method = PXEBootMethod() get_ephemeral_name = self.patch(kernel_opts, "get_ephemeral_name") get_ephemeral_name.return_value = factory.make_name("ephemeral") osystem = factory.make_name('osystem') options = { "backend": None, "kernel_params": make_kernel_parameters( testcase=self, osystem=osystem, subarch="generic", purpose='enlist'), } output = method.get_reader(**options).read(10000) config = parse_pxe_config(output) # The default section is defined. default_section_label = config.header["DEFAULT"] self.assertThat(config, Contains(default_section_label)) default_section = config[default_section_label] # The default section uses the ifcpu64 module, branching to the "i386" # or "amd64" labels accordingly. self.assertEqual("ifcpu64.c32", default_section["KERNEL"]) self.assertEqual( ["amd64", "--", "i386"], default_section["APPEND"].split()) # Both "i386" and "amd64" sections exist. self.assertThat(config, ContainsAll(("i386", "amd64"))) # Each section defines KERNEL, INITRD, and APPEND settings. The # KERNEL and INITRD ones contain paths referring to their # architectures. for section_label in ("i386", "amd64"): section = config[section_label] self.assertThat( section, ContainsAll(("KERNEL", "INITRD", "APPEND"))) contains_arch_path = StartsWith( "%s/%s/" % (osystem, section_label)) self.assertThat(section["KERNEL"], contains_arch_path) self.assertThat(section["INITRD"], contains_arch_path) self.assertIn("APPEND", section) class TestPXEBootMethodRegex(MAASTestCase): """Tests for `provisioningserver.boot.pxe.PXEBootMethod.re_config_file`.""" @staticmethod def get_example_path_and_components(): """Return a plausible path and its components. The path is intended to match `re_config_file`, and the components are the expected groups from a match. 
""" components = {"mac": factory.make_mac_address("-"), "arch": None, "subarch": None} config_path = compose_config_path(components["mac"]) return config_path, components def test_re_config_file_is_compatible_with_config_path_generator(self): # The regular expression for extracting components of the file path is # compatible with the PXE config path generator. for iteration in range(10): config_path, args = self.get_example_path_and_components() match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_with_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's a leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = self.get_example_path_and_components() # Ensure there's a leading slash. config_path = "/" + config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_without_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's no leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = self.get_example_path_and_components() # Ensure there's no leading slash. config_path = config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_matches_classic_pxelinux_cfg(self): # The default config path is simply "pxelinux.cfg" (without # leading slash). The regex matches this. mac = 'aa-bb-cc-dd-ee-ff' match = re_config_file.match('pxelinux.cfg/01-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_matches_pxelinux_cfg_with_leading_slash(self): mac = 'aa-bb-cc-dd-ee-ff' match = re_config_file.match('/pxelinux.cfg/01-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_does_not_match_non_config_file(self): self.assertIsNone(re_config_file.match('pxelinux.cfg/kernel')) def test_re_config_file_does_not_match_file_in_root(self): self.assertIsNone(re_config_file.match('01-aa-bb-cc-dd-ee-ff')) def test_re_config_file_does_not_match_file_not_in_pxelinux_cfg(self): self.assertIsNone(re_config_file.match('foo/01-aa-bb-cc-dd-ee-ff')) def test_re_config_file_with_default(self): match = re_config_file.match('pxelinux.cfg/default') self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_with_default_arch(self): arch = factory.make_name('arch', sep='') match = re_config_file.match('pxelinux.cfg/default.%s' % arch) self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': arch, 'subarch': None}, match.groupdict()) def test_re_config_file_with_default_arch_and_subarch(self): arch = factory.make_name('arch', sep='') subarch = factory.make_name('subarch', sep='') match = re_config_file.match( 'pxelinux.cfg/default.%s-%s' % (arch, subarch)) self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': arch, 'subarch': subarch}, match.groupdict()) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_tftppath.py0000644000000000000000000004356713056115004025006 0ustar 00000000000000# Copyright 
2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the tftppath module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import errno import os.path from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import Mock from provisioningserver.boot import tftppath from provisioningserver.boot.tftppath import ( compose_image_path, drill_down, extend_path, extract_image_params, extract_metadata, is_visible_subdir, list_boot_images, list_subdirs, locate_tftp_path, maas_meta_last_modified, ) from provisioningserver.drivers.osystem import OperatingSystemRegistry from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.helpers import ImageSpec from provisioningserver.import_images.testing.factory import ( make_image_spec, set_resource, ) from provisioningserver.testing.boot_images import ( make_boot_image_storage_params, make_image, ) from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.testing.os import make_osystem from testtools.matchers import ( Not, StartsWith, ) from testtools.testcase import ExpectedException class TestTFTPPath(MAASTestCase): def setUp(self): super(TestTFTPPath, self).setUp() self.tftproot = self.make_dir() self.useFixture(ClusterConfigurationFixture(tftp_root=self.tftproot)) def make_image_dir(self, image_params, tftproot): """Fake a boot image matching `image_params` under `tftproot`.""" image_dir = locate_tftp_path( compose_image_path( osystem=image_params['osystem'], arch=image_params['architecture'], subarch=image_params['subarchitecture'], release=image_params['release'], label=image_params['label']), tftproot) os.makedirs(image_dir) factory.make_file(image_dir, 'linux') factory.make_file(image_dir, 'initrd.gz') def make_meta_file(self, image_params, image_resource, tftproot): image = ImageSpec( os=image_params["osystem"], arch=image_params["architecture"], subarch=image_params["subarchitecture"], release=image_params["release"], label=image_params["label"]) mapping = BootImageMapping() mapping.setdefault(image, image_resource) maas_meta = mapping.dump_json() with open(os.path.join(tftproot, "maas.meta"), "wb") as f: f.write(maas_meta) def test_maas_meta_last_modified_returns_modification_time(self): path = factory.make_file(self.tftproot, name="maas.meta") expected = os.path.getmtime(path) observed = maas_meta_last_modified(self.tftproot) self.assertEqual(expected, observed) def test_maas_meta_last_modified_returns_None_if_no_file(self): observed = maas_meta_last_modified( os.path.join(self.tftproot, "maas.meta")) self.assertIsNone(observed) def test_maas_meta_last_modified_reraises_non_ENOENT(self): path = factory.make_file(self.tftproot, name="maas.meta") oserror = OSError() oserror.errno = errno.E2BIG self.patch(os.path, 'getmtime').side_effect = oserror self.assertRaises(OSError, maas_meta_last_modified, path) def test_compose_image_path_follows_storage_directory_layout(self): osystem = factory.make_name('osystem') arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') self.assertEqual( '%s/%s/%s/%s/%s' % (osystem, arch, subarch, release, label), compose_image_path(osystem, arch, subarch, release, label)) def 
test_compose_image_path_does_not_include_tftp_root(self): osystem = factory.make_name('osystem') arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') self.assertThat( compose_image_path(osystem, arch, subarch, release, label), Not(StartsWith(self.tftproot))) def test_locate_tftp_path_prefixes_tftp_root(self): pxefile = factory.make_name('pxefile') self.assertEqual( os.path.join(self.tftproot, pxefile), locate_tftp_path(pxefile, tftproot=self.tftproot)) def test_locate_tftp_path_returns_root_when_path_is_None(self): self.assertEqual( self.tftproot, locate_tftp_path(None, tftproot=self.tftproot)) def test_list_boot_images_copes_with_missing_directory(self): self.assertEqual([], list_boot_images(factory.make_string())) def test_list_boot_images_passes_on_other_exceptions(self): error = OSError(errno.EACCES, "Deliberate error for testing.") self.patch(tftppath, 'list_subdirs', Mock(side_effect=error)) with ExpectedException(OSError): list_boot_images(factory.make_string()) def test_list_boot_images_copes_with_empty_directory(self): self.assertEqual([], list_boot_images(self.tftproot)) def test_list_boot_images_copes_with_unexpected_files(self): os.makedirs(os.path.join(self.tftproot, factory.make_name('empty'))) factory.make_file(self.tftproot) self.assertEqual([], list_boot_images(self.tftproot)) def test_list_boot_images_finds_boot_image(self): params = make_boot_image_storage_params() self.make_image_dir(params, self.tftproot) purposes = ['install', 'commissioning', 'xinstall'] make_osystem(self, params['osystem'], purposes) self.assertItemsEqual( [make_image(params, purpose) for purpose in purposes], list_boot_images(self.tftproot)) def test_list_boot_images_enumerates_boot_images(self): purposes = ['install', 'commissioning', 'xinstall'] params = [make_boot_image_storage_params() for counter in range(3)] for param in params: self.make_image_dir(param, self.tftproot) make_osystem(self, param['osystem'], purposes) self.assertItemsEqual( [ make_image(param, purpose) for param in params for purpose in purposes ], list_boot_images(self.tftproot)) def test_list_boot_images_merges_maas_meta_data(self): params = make_boot_image_storage_params() self.make_image_dir(params, self.tftproot) # The required metadata is called "subarches" in maas.meta metadata = dict(subarches=factory.make_name("subarches")) self.make_meta_file(params, metadata, self.tftproot) purposes = ['install', 'commissioning', 'xinstall'] make_osystem(self, params['osystem'], purposes) # The API requires "supported_subarches". 
expected_metadata = dict(supported_subarches=metadata["subarches"]) self.assertItemsEqual( [make_image(params, purpose, expected_metadata) for purpose in purposes], list_boot_images(self.tftproot)) def test_list_boot_images_empty_on_missing_osystems(self): params = [make_boot_image_storage_params() for counter in range(3)] for param in params: self.make_image_dir(param, self.tftproot) self.assertItemsEqual([], list_boot_images(self.tftproot)) def test_is_visible_subdir_ignores_regular_files(self): plain_file = self.make_file() self.assertFalse( is_visible_subdir( os.path.dirname(plain_file), os.path.basename(plain_file))) def test_is_visible_subdir_ignores_hidden_directories(self): base_dir = self.make_dir() hidden_dir = factory.make_name('.') os.makedirs(os.path.join(base_dir, hidden_dir)) self.assertFalse(is_visible_subdir(base_dir, hidden_dir)) def test_is_visible_subdir_recognizes_subdirectory(self): base_dir = self.make_dir() subdir = factory.make_name('subdir') os.makedirs(os.path.join(base_dir, subdir)) self.assertTrue(is_visible_subdir(base_dir, subdir)) def test_list_subdirs_lists_empty_directory(self): self.assertItemsEqual([], list_subdirs(self.make_dir())) def test_list_subdirs_lists_subdirs(self): base_dir = self.make_dir() factory.make_file(base_dir, factory.make_name('plain-file')) subdir = factory.make_name('subdir') os.makedirs(os.path.join(base_dir, subdir)) self.assertItemsEqual([subdir], list_subdirs(base_dir)) def test_extend_path_finds_path_extensions(self): base_dir = self.make_dir() subdirs = [ factory.make_name('subdir-%d' % counter) for counter in range(3)] for subdir in subdirs: os.makedirs(os.path.join(base_dir, subdir)) self.assertItemsEqual( [[os.path.basename(base_dir), subdir] for subdir in subdirs], extend_path( os.path.dirname(base_dir), [os.path.basename(base_dir)])) def test_extend_path_builds_on_given_paths(self): base_dir = self.make_dir() lower_dir = factory.make_name('lower') subdir = factory.make_name('sub') os.makedirs(os.path.join(base_dir, lower_dir, subdir)) self.assertEqual( [[lower_dir, subdir]], extend_path(base_dir, [lower_dir])) def test_extend_path_stops_if_no_subdirs_found(self): self.assertItemsEqual([], extend_path(self.make_dir(), [])) def test_drill_down_follows_directory_tree(self): base_dir = self.make_dir() lower_dir = factory.make_name('lower') os.makedirs(os.path.join(base_dir, lower_dir)) subdirs = [ factory.make_name('subdir-%d' % counter) for counter in range(3)] for subdir in subdirs: os.makedirs(os.path.join(base_dir, lower_dir, subdir)) self.assertItemsEqual( [[lower_dir, subdir] for subdir in subdirs], drill_down(base_dir, [[lower_dir]])) def test_drill_down_ignores_subdir_not_in_path(self): base_dir = self.make_dir() irrelevant_dir = factory.make_name('irrelevant') irrelevant_subdir = factory.make_name('subdir') relevant_dir = factory.make_name('relevant') relevant_subdir = factory.make_name('subdir') os.makedirs(os.path.join(base_dir, irrelevant_dir, irrelevant_subdir)) os.makedirs(os.path.join(base_dir, relevant_dir, relevant_subdir)) self.assertEqual( [[relevant_dir, relevant_subdir]], drill_down(base_dir, [[relevant_dir]])) def test_drill_down_drops_paths_that_do_not_go_deep_enough(self): base_dir = self.make_dir() shallow_dir = factory.make_name('shallow') os.makedirs(os.path.join(base_dir, shallow_dir)) deep_dir = factory.make_name('deep') subdir = factory.make_name('sub') os.makedirs(os.path.join(base_dir, deep_dir, subdir)) self.assertEqual( [[deep_dir, subdir]], drill_down(base_dir, [[shallow_dir], [deep_dir]])) 
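# A minimal sketch of the traversal behaviour pinned down by the
# extend_path/drill_down tests above, assuming only that a "visible
# subdirectory" means a non-hidden directory entry (both helpers are
# illustrative, not the tftppath implementation):
import os


def list_subdirs_sketch(directory):
    # Visible (non-hidden) subdirectories of `directory`.
    return [
        name for name in os.listdir(directory)
        if not name.startswith('.')
        and os.path.isdir(os.path.join(directory, name))
    ]


def drill_down_sketch(base_dir, paths):
    # Extend every known path by one directory level; a path with no
    # subdirectories is dropped rather than padded, matching
    # test_drill_down_drops_paths_that_do_not_go_deep_enough.
    return [
        path + [subdir]
        for path in paths
        for subdir in list_subdirs_sketch(os.path.join(base_dir, *path))
    ]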
def test_extract_metadata(self): resource = dict( subarches=factory.make_name("subarch"), other_item=factory.make_name("other"), ) image = make_image_spec() mapping = set_resource(image_spec=image, resource=resource) metadata = mapping.dump_json() # Lack of consistency across maas in naming arch vs architecture # and subarch vs subarchitecture means I can't just do a simple # dict parameter expansion here. params = { "osystem": image.os, "architecture": image.arch, "subarchitecture": image.subarch, "release": image.release, "label": image.label, } extracted_data = extract_metadata(metadata, params) # We only expect the supported_subarches key from the resource data. expected = dict(supported_subarches=resource["subarches"]) self.assertEqual(expected, extracted_data) def test_extract_metadata_handles_missing_subarch(self): resource = dict( other_item=factory.make_name("other"), ) image = make_image_spec() mapping = set_resource(image_spec=image, resource=resource) metadata = mapping.dump_json() # Lack of consistency across maas in naming arch vs architecture # and subarch vs subarchitecture means I can't just do a simple # dict parameter expansion here. params = { "osystem": image.os, "architecture": image.arch, "subarchitecture": image.subarch, "release": image.release, "label": image.label, } self.assertEqual({}, extract_metadata(metadata, params)) def _make_path(self): osystem = factory.make_name("os") arch = factory.make_name("arch") subarch = factory.make_name("subarch") release = factory.make_name("release") label = factory.make_name("label") path = (osystem, arch, subarch, release, label) return path, osystem, arch, subarch, release, label def _patch_osystem_registry(self, values, xinstall_params=None): get_item = self.patch(OperatingSystemRegistry, "get_item") item_mock = Mock() item_mock.get_boot_image_purposes.return_value = values if xinstall_params is not None: item_mock.get_xinstall_parameters.return_value = xinstall_params get_item.return_value = item_mock def test_extract_image_params_with_no_metadata(self): path, osystem, arch, subarch, release, label = self._make_path() # Patch OperatingSystemRegistry to return a fixed list of # values. purpose1 = factory.make_name("purpose") purpose2 = factory.make_name("purpose") xi_purpose = "xinstall" xi_path = factory.make_name("xi_path") xi_type = factory.make_name("xi_type") purposes = [purpose1, purpose2, xi_purpose] self._patch_osystem_registry( purposes, xinstall_params=(xi_path, xi_type)) params = extract_image_params(path, "") self.assertItemsEqual( [ { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": purpose1, "xinstall_path": '', "xinstall_type": '', }, { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": purpose2, "xinstall_path": '', "xinstall_type": '', }, { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": xi_purpose, "xinstall_path": xi_path, "xinstall_type": xi_type, }, ], params) def test_extract_image_params_with_metadata(self): path, osystem, arch, subarch, release, label = self._make_path() # Patch OperatingSystemRegistry to return a fixed list of # values. 
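# Only the "xinstall" purpose carries the registry's xinstall_path and
# xinstall_type through to the result; every other purpose gets empty
# strings, as the expected dicts below spell out.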
purpose1 = factory.make_name("purpose") purpose2 = factory.make_name("purpose") xi_purpose = "xinstall" xi_path = factory.make_name("xi_path") xi_type = factory.make_name("xi_type") purposes = [purpose1, purpose2, xi_purpose] self._patch_osystem_registry( purposes, xinstall_params=(xi_path, xi_type)) # Create some maas.meta content. image = ImageSpec( os=osystem, arch=arch, subarch=subarch, release=release, label=label) image_resource = dict(subarches=factory.make_name("subarches")) mapping = BootImageMapping() mapping.setdefault(image, image_resource) maas_meta = mapping.dump_json() params = extract_image_params(path, maas_meta) self.assertItemsEqual( [ { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": purpose1, "xinstall_path": '', "xinstall_type": '', "supported_subarches": image_resource["subarches"], }, { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": purpose2, "xinstall_path": '', "xinstall_type": '', "supported_subarches": image_resource["subarches"], }, { "osystem": osystem, "architecture": arch, "subarchitecture": subarch, "release": release, "label": label, "purpose": xi_purpose, "xinstall_path": xi_path, "xinstall_type": xi_type, "supported_subarches": image_resource["subarches"], }, ], params) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_uefi.py0000644000000000000000000003035613056115004024074 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.boot.uefi`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from contextlib import contextmanager import os import re from maastesting.factory import factory from maastesting.matchers import MockCallsMatch from maastesting.testcase import MAASTestCase from mock import call from provisioningserver.boot import ( BootMethodInstallError, BytesReader, uefi as uefi_module, utils, ) from provisioningserver.boot.tftppath import compose_image_path from provisioningserver.boot.uefi import ( re_config_file, UEFIBootMethod, ) from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools.matchers import ( ContainsAll, IsInstance, MatchesAll, MatchesRegex, StartsWith, ) def compose_config_path(mac=None, arch=None, subarch=None): """Compose the TFTP path for a UEFI configuration file. The path returned is relative to the TFTP root, as it would be identified by clients on the network. :param mac: A MAC address, in IEEE 802 colon-separated form, corresponding to the machine for which this configuration is relevant. :param arch: Architecture for the booting machine, for UEFI this is always amd64. :param subarch: Sub-architecture type, this is normally always generic. :return: Path for the corresponding PXE config file as exposed over TFTP. """ if mac is not None: return "grub/grub.cfg-{mac}".format(mac=mac) if arch is not None: if subarch is None: subarch = "generic" return "grub/grub.cfg-{arch}-{subarch}".format( arch=arch, subarch=subarch) return "grub/grub.cfg" class TestUEFIBootMethodRender(MAASTestCase): """Tests for `provisioningserver.boot.uefi.UEFIBootMethod.render`.""" def test_get_reader(self): # Given the right configuration options, the UEFI configuration is # correctly rendered. 
method = UEFIBootMethod() params = make_kernel_parameters(purpose="install") output = method.get_reader(backend=None, kernel_params=params) # The output is a BytesReader. self.assertThat(output, IsInstance(BytesReader)) output = output.read(10000) # The template has rendered without error. GRUB configurations # typically start by setting the default menu entry. self.assertThat(output, StartsWith("set default=\"0\"")) # The UEFI parameters are all set according to the options. image_dir = compose_image_path( osystem=params.osystem, arch=params.arch, subarch=params.subarch, release=params.release, label=params.label) self.assertThat( output, MatchesAll( MatchesRegex( r'.*^\s+linux %s/di-kernel .+?$' % re.escape(image_dir), re.MULTILINE | re.DOTALL), MatchesRegex( r'.*^\s+initrd %s/di-initrd$' % re.escape(image_dir), re.MULTILINE | re.DOTALL))) def test_get_reader_with_extra_arguments_does_not_affect_output(self): # get_reader() allows any keyword arguments as a safety valve. method = UEFIBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters(purpose="install"), } # Capture the output before sprinkling in some random options. output_before = method.get_reader(**options).read(10000) # Sprinkle some magic in. options.update( (factory.make_name("name"), factory.make_name("value")) for _ in range(10)) # Capture the output after sprinkling in some random options. output_after = method.get_reader(**options).read(10000) # The generated template is the same. self.assertEqual(output_before, output_after) def test_get_reader_with_local_purpose(self): # If purpose is "local", the config.localboot.template should be # used. method = UEFIBootMethod() options = { "backend": None, "kernel_params": make_kernel_parameters( purpose="local", arch="amd64"), } output = method.get_reader(**options).read(10000) self.assertIn("chainloader /efi/ubuntu/shimx64.efi", output) def test_get_reader_with_enlist_purpose(self): # If purpose is "enlist", the config.enlist.template should be # used. method = UEFIBootMethod() params = make_kernel_parameters( purpose="enlist", arch="amd64") options = { "backend": None, "kernel_params": params, } output = method.get_reader(**options).read(10000) self.assertThat(output, ContainsAll( [ "menuentry 'Enlist'", "%s/%s/%s" % (params.osystem, params.arch, params.subarch), "boot-kernel", ])) def test_get_reader_with_commissioning_purpose(self): # If purpose is "commissioning", the config.commissioning.template # should be used. method = UEFIBootMethod() params = make_kernel_parameters( purpose="commissioning", arch="amd64") options = { "backend": None, "kernel_params": params, } output = method.get_reader(**options).read(10000) self.assertThat(output, ContainsAll( [ "menuentry 'Commission'", "%s/%s/%s" % (params.osystem, params.arch, params.subarch), "boot-kernel", ])) class TestUEFIBootMethodRegex(MAASTestCase): """Tests `provisioningserver.boot.uefi.UEFIBootMethod.re_config_file`.""" @staticmethod def get_example_path_and_components(): """Return a plausible UEFI path and its components. The path is intended to match `re_config_file`, and the components are the expected groups from a match. """ components = {"mac": factory.make_mac_address(":"), "arch": None, "subarch": None} config_path = compose_config_path(components["mac"]) return config_path, components def test_re_config_file_is_compatible_with_cfg_path_generator(self): # The regular expression for extracting components of the file path is # compatible with the PXE config path generator.
for iteration in range(10): config_path, args = self.get_example_path_and_components() match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_with_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's a leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = self.get_example_path_and_components() # Ensure there's a leading slash. config_path = "/" + config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_without_leading_slash(self): # The regular expression for extracting components of the file path # doesn't care if there's no leading forward slash; the TFTP server is # easy on this point, so it makes sense to be also. config_path, args = self.get_example_path_and_components() # Ensure there's no leading slash. config_path = config_path.lstrip("/") match = re_config_file.match(config_path) self.assertIsNotNone(match, config_path) self.assertEqual(args, match.groupdict()) def test_re_config_file_matches_classic_grub_cfg(self): # The default config path is simply "grub.cfg-{mac}" (without # leading slash). The regex matches this. mac = 'aa:bb:cc:dd:ee:ff' match = re_config_file.match('grub/grub.cfg-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_matches_grub_cfg_with_leading_slash(self): mac = 'aa:bb:cc:dd:ee:ff' match = re_config_file.match( '/grub/grub.cfg-%s' % mac) self.assertIsNotNone(match) self.assertEqual({'mac': mac, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_does_not_match_default_grub_config_file(self): self.assertIsNone(re_config_file.match('grub/grub.cfg')) def test_re_config_file_with_default(self): match = re_config_file.match('grub/grub.cfg-default') self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': None, 'subarch': None}, match.groupdict()) def test_re_config_file_with_default_arch(self): arch = factory.make_name('arch', sep='') match = re_config_file.match('grub/grub.cfg-default-%s' % arch) self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': arch, 'subarch': None}, match.groupdict()) def test_re_config_file_with_default_arch_and_subarch(self): arch = factory.make_name('arch', sep='') subarch = factory.make_name('subarch', sep='') match = re_config_file.match( 'grub/grub.cfg-default-%s-%s' % (arch, subarch)) self.assertIsNotNone(match) self.assertEqual( {'mac': None, 'arch': arch, 'subarch': subarch}, match.groupdict()) class TestUEFIBootMethod(MAASTestCase): """Tests `provisioningserver.boot.uefi.UEFIBootMethod`.""" def test_install_bootloader_get_package_raises_error(self): method = UEFIBootMethod() self.patch(uefi_module, 'get_main_archive_url') self.patch(utils, 'get_updates_package').return_value = (None, None) self.assertRaises( BootMethodInstallError, method.install_bootloader, None) def test_install_bootloader(self): method = UEFIBootMethod() shim_filename = factory.make_name('shim-signed') shim_data = factory.make_string() grub_filename = factory.make_name('grub-efi-amd64-signed') grub_data = factory.make_string() tmp = self.make_dir() dest = self.make_dir() @contextmanager def tempdir(): try: yield tmp finally: pass mock_get_main_archive_url = self.patch( uefi_module, 
'get_main_archive_url') mock_get_main_archive_url.return_value = 'http://archive.ubuntu.com' mock_get_updates_package = self.patch(utils, 'get_updates_package') mock_get_updates_package.side_effect = [ (shim_data, shim_filename), (grub_data, grub_filename), ] self.patch(uefi_module, 'call_and_check') self.patch(uefi_module, 'tempdir').side_effect = tempdir mock_install_bootloader = self.patch( uefi_module, 'install_bootloader') method.install_bootloader(dest) with open(os.path.join(tmp, shim_filename), 'rb') as stream: saved_shim_data = stream.read() self.assertEqual(shim_data, saved_shim_data) with open(os.path.join(tmp, grub_filename), 'rb') as stream: saved_grub_data = stream.read() self.assertEqual(grub_data, saved_grub_data) shim_expected = os.path.join( tmp, "usr", "lib", "shim", "shim.efi.signed") shim_dest_expected = os.path.join(dest, method.bootloader_path) grub_expected = os.path.join( tmp, "usr", "lib", "grub", "x86_64-efi-signed", "grubnetx64.efi.signed") grub_dest_expected = os.path.join(dest, "grubx64.efi") self.assertThat( mock_install_bootloader, MockCallsMatch( call(shim_expected, shim_dest_expected), call(grub_expected, grub_dest_expected))) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_uefi_arm64.py0000644000000000000000000000633613056115004025106 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.boot.uefi_arm64`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from contextlib import contextmanager import os from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.boot import ( BootMethodInstallError, uefi_arm64 as uefi_arm64_module, utils, ) from provisioningserver.boot.uefi_arm64 import ( CONFIG_FILE_ARM64, UEFIARM64BootMethod, ) from provisioningserver.tests.test_kernel_opts import make_kernel_parameters class TestUEFIARM64BootMethod(MAASTestCase): """Tests `provisioningserver.boot.uefi_arm64.UEFIARM64BootMethod`.""" def test_match_path_returns_None(self): method = UEFIARM64BootMethod() paths = [factory.make_string() for _ in range(3)] for path in paths: self.assertEqual(None, method.match_path(None, path)) def test_get_reader_returns_None(self): method = UEFIARM64BootMethod() params = [make_kernel_parameters() for _ in range(3)] for param in params: self.assertEqual(None, method.get_reader(None, param)) def test_install_bootloader_get_package_raises_error(self): method = UEFIARM64BootMethod() self.patch(uefi_arm64_module, 'get_ports_archive_url') self.patch(utils, 'get_updates_package').return_value = (None, None) self.assertRaises( BootMethodInstallError, method.install_bootloader, None) def test_install_bootloader(self): method = UEFIARM64BootMethod() filename = factory.make_name('dpkg') data = factory.make_string() tmp = self.make_dir() dest = self.make_dir() @contextmanager def tempdir(): try: yield tmp finally: pass mock_get_ports_archive_url = self.patch( uefi_arm64_module, 'get_ports_archive_url') mock_get_ports_archive_url.return_value = 'http://ports.ubuntu.com' mock_get_updates_package = self.patch(utils, 'get_updates_package') mock_get_updates_package.return_value = (data, filename) self.patch(uefi_arm64_module, 'call_and_check') self.patch(uefi_arm64_module, 'tempdir').side_effect = tempdir mock_install_bootloader =
self.patch( uefi_arm64_module, 'install_bootloader') method.install_bootloader(dest) with open(os.path.join(tmp, filename), 'rb') as stream: saved_data = stream.read() self.assertEqual(data, saved_data) with open(os.path.join(tmp, 'grub.cfg'), 'rb') as stream: saved_config = stream.read().decode('utf-8') self.assertEqual(CONFIG_FILE_ARM64, saved_config) mkimage_expected = os.path.join(tmp, method.bootloader_path) dest_expected = os.path.join(dest, method.bootloader_path) self.assertThat( mock_install_bootloader, MockCalledOnceWith(mkimage_expected, dest_expected)) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_utils.py0000644000000000000000000001403113056115004024274 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.boot.utils`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import call from provisioningserver.boot import utils class TestBootMethodUtils(MAASTestCase): """Test for `BootMethod` in `provisioningserver.boot.utils`.""" def test_get_packages(self): archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") release_gpg = factory.make_string() packages_gz = factory.make_string() url = utils.urljoin(archive, 'dists', release) release_url = utils.urljoin(url, 'Release') release_gpg_url = utils.urljoin(url, 'Release.gpg') packages_path = '%s/binary-%s/Packages.gz' % (comp, arch) packages_url = utils.urljoin(url, packages_path) packages_gz_md5 = utils.get_md5sum(packages_gz) release_data = " %s 012 %s" % (packages_gz_md5, packages_path) get_file = self.patch(utils, "get_file") get_file.side_effect = [release_data, release_gpg, packages_gz] verify_data = self.patch(utils, "gpg_verify_data") decompress = self.patch(utils, "decompress_packages") utils.get_packages(archive, comp, arch, release) self.assertThat( verify_data, MockCalledOnceWith(release_gpg, release_data)) self.assertThat( decompress, MockCalledOnceWith(packages_gz)) self.assertThat( get_file, MockCallsMatch( call(release_url), call(release_gpg_url), call(packages_url))) def test_get_packages_errors_on_invalid_checksum(self): archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") release_gpg = factory.make_string() packages_gz = factory.make_string() packages_path = '%s/binary-%s/Packages.gz' % (comp, arch) packages_gz_md5 = utils.get_md5sum(packages_gz + '0') release_data = " %s 012 %s" % (packages_gz_md5, packages_path) get_file = self.patch(utils, "get_file") get_file.side_effect = [release_data, release_gpg, packages_gz] self.patch(utils, "gpg_verify_data") self.patch(utils, "decompress_packages") self.assertRaises( ValueError, utils.get_packages, archive, comp, arch, release) def test_get_package_info(self): package = factory.make_name("package") archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") package_items = {} package_list = "Package: %s\n" % package for _ in range(5): key, value = factory.make_names("key", "value") package_items[key] = value package_list += "%s: %s\n" % (key, value) package_list += "\n" get_packages = self.patch(utils, "get_packages") 
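        # Aside: a hedged sketch of the stanza parsing exercised by this
        # test -- not the provisioningserver.boot.utils implementation; the
        # helper name `parse_stanza` is illustrative only.  A Packages index
        # stanza is a block of "Key: value" lines like the one built above.
        def parse_stanza(stanza_text):
            # Split each non-empty line on the first ": " and collect the
            # fields into a dict, e.g. {'Package': 'foo', 'MD5sum': '...'}.
            return dict(
                line.split(': ', 1)
                for line in stanza_text.splitlines()
                if line.strip() != '')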
get_packages.return_value = package_list output = utils.get_package_info( package, archive, comp, arch, release) self.assertEqual(package, output['Package']) for key, value in package_items.items(): self.assertEqual(value, output[key]) def test_get_package(self): package = factory.make_name("package") filename = factory.make_name("filename") archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") package_data = factory.make_string() package_md5 = utils.get_md5sum(package_data) package_info = { 'Package': package, 'Filename': filename, 'MD5sum': package_md5 } get_package_info = self.patch(utils, "get_package_info") get_package_info.return_value = package_info get_file = self.patch(utils, "get_file") get_file.return_value = package_data data, fn = utils.get_package( package, archive, comp, arch, release) url = utils.urljoin(archive, filename) self.assertThat(get_file, MockCalledOnceWith(url)) self.assertEqual(package_data, data) self.assertEqual(filename, fn) def test_get_package_errors_on_invalid_checksum(self): package = factory.make_name("package") filename = factory.make_name("filename") archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") package_data = factory.make_string() package_md5 = utils.get_md5sum(package_data + '0') package_info = { 'Package': package, 'Filename': filename, 'MD5sum': package_md5 } get_package_info = self.patch(utils, "get_package_info") get_package_info.return_value = package_info get_file = self.patch(utils, "get_file") get_file.return_value = package_data self.assertRaises( ValueError, utils.get_package, package, archive, comp, arch, release) def test_get_updates_package(self): package = factory.make_name("package") archive = factory.make_name("archive") comp, arch, release = factory.make_names("comp", "arch", "release") get_package = self.patch(utils, "get_package") get_package.return_value = (None, None) utils.get_updates_package(package, archive, comp, arch, release) updates = '%s-updates' % release self.assertThat( get_package, MockCallsMatch( call(package, archive, comp, arch, release=updates), call(package, archive, comp, arch, release=release))) maas-1.9.5+bzr4599.orig/src/provisioningserver/boot/tests/test_windows.py0000644000000000000000000003550313056115004024635 0ustar 00000000000000# Copyright 2014-2015 Cloudbase Solutions SRL. # Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
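# Aside: test_get_updates_package above exercises a pocket-fallback pattern --
# try "<release>-updates" first, then the plain release pocket.  A minimal
# self-contained sketch of that pattern (the helper names are illustrative,
# not the provisioningserver API):

def get_with_fallback(fetch, package, release):
    # `fetch` returns (data, filename), with (None, None) meaning "not
    # found"; the first pocket that yields data wins.
    for pocket in ('%s-updates' % release, release):
        data, filename = fetch(package, pocket)
        if data is not None:
            return data, filename
    return None, None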
"""Tests for `provisioningserver.boot.windows`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import logging import os import shutil from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import ( always_fail_with, always_succeed_with, ) import mock from mock import sentinel from provisioningserver.boot import ( BootMethodError, BytesReader, windows as windows_module, ) from provisioningserver.boot.windows import ( Bcd, WindowsPXEBootMethod, ) from provisioningserver.rpc.exceptions import NoSuchNode from provisioningserver.rpc.region import RequestNodeInfoByMACAddress from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools.deferredruntest import extract_result from testtools.matchers import Is from tftp.backend import FilesystemReader from twisted.internet.defer import inlineCallbacks class TestBcd(MAASTestCase): def configure_hivex(self): mock_hivex = mock.MagicMock() self.patch(windows_module, 'load_hivex').return_value = mock_hivex mock_hivex.node_name.side_effect = ['Objects', Bcd.GUID_WINDOWS_BOOTMGR, Bcd.BOOT_MGR_DISPLAY_ORDER] mock_hivex.node_children.side_effect = [ [factory.make_name('objects')], [factory.make_name('object')], ['value0', factory.make_UUID()], [factory.make_name('element')]] mock_hivex.node_values.return_value = [factory.make_name('val')] def configure_bcd(self, uids=None): self.configure_hivex() filename = factory.make_name('filename') bcd = Bcd(filename) bcd.uids = mock.MagicMock(spec=dict) if uids is None: uids = [factory.make_name('uid'), factory.make_name('uid')] bcd.uids.__getitem__.return_value = uids bcd.hive = mock.MagicMock() return bcd def test_get_loader(self): bcd = self.configure_bcd() mock_elem = factory.make_name('elem') bootmgr_elems = mock.MagicMock(spec=dict) bootmgr_elems.__getitem__.return_value = mock_elem mock_node_value = factory.make_name('node_value') bcd.hive.node_values.return_value = [mock_node_value] mock_string = factory.make_name('strings') bcd.hive.value_multiple_strings.return_value = [mock_string] response = bcd._get_loader(bootmgr_elems) self.assertThat(bcd.hive.node_values, MockCalledOnceWith(mock_elem)) self.assertThat( bcd.hive.value_multiple_strings, MockCalledOnceWith(mock_node_value)) self.assertEqual(mock_string, response) def test_get_loader_elems(self): mock_uid_0 = factory.make_name('uid') mock_uid_1 = factory.make_name('uid') bcd = self.configure_bcd(uids=[mock_uid_0, mock_uid_1]) mock_child = factory.make_name('child') bcd.hive.node_children.side_effect = [[mock_child]] mock_name = factory.make_name('name') bcd.hive.node_name.return_value = mock_name response = bcd._get_loader_elems() self.assertThat(bcd.hive.node_children, MockCalledOnceWith(mock_uid_1)) self.assertThat(bcd.hive.node_name, MockCalledOnceWith(mock_child)) self.assertEqual(response, {mock_name: mock_child}) def test_get_load_options_key(self): bcd = self.configure_bcd() fake_load_elem = factory.make_name('load_elem') mock_load_elem = mock.MagicMock() mock_load_elem.get.return_value = fake_load_elem mock_get_loader_elems = self.patch(Bcd, '_get_loader_elems') mock_get_loader_elems.return_value = mock_load_elem response = bcd._get_load_options_key() self.assertThat( mock_get_loader_elems, 
MockCalledOnceWith()) self.assertThat( mock_load_elem.get, MockCalledOnceWith(bcd.LOAD_OPTIONS, None)) self.assertEqual(response, fake_load_elem) def test_set_load_options(self): mock_uid_0 = factory.make_name('uid') mock_uid_1 = factory.make_name('uid') bcd = self.configure_bcd(uids=[mock_uid_0, mock_uid_1]) fake_value = factory.make_name('value') mock_get_load_options_key = self.patch(Bcd, '_get_load_options_key') mock_get_load_options_key.return_value = None fake_child = factory.make_name('child') bcd.hive.node_add_child.return_value = fake_child bcd.set_load_options(value=fake_value) compare = {'t': 1, 'key': "Element", 'value': fake_value.decode('utf-8').encode('utf-16le'), } self.assertThat( mock_get_load_options_key, MockCalledOnceWith()) self.assertThat( bcd.hive.node_add_child, MockCalledOnceWith(mock_uid_1, bcd.LOAD_OPTIONS)) self.assertThat( bcd.hive.node_set_value, MockCalledOnceWith(fake_child, compare)) self.assertThat(bcd.hive.commit, MockCalledOnceWith(None)) class TestRequestNodeInfoByMACAddress(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test__returns_None_when_MAC_is_None(self): logger = self.useFixture(FakeLogger("maas", logging.DEBUG)) d = windows_module.request_node_info_by_mac_address(None) self.assertThat(extract_result(d), Is(None)) self.assertDocTestMatches( "Cannot determine node; MAC address is unknown.", logger.output) def test__returns_None_when_node_not_found(self): logger = self.useFixture(FakeLogger("maas", logging.DEBUG)) client = self.patch(windows_module, "getRegionClient").return_value client.side_effect = always_fail_with(NoSuchNode()) mac = factory.make_mac_address() d = windows_module.request_node_info_by_mac_address(mac) self.assertThat(extract_result(d), Is(None)) self.assertDocTestMatches( "Node doesn't exist for MAC address: %s" % mac, logger.output) def test__returns_output_from_RequestNodeInfoByMACAddress(self): client = self.patch(windows_module, "getRegionClient").return_value client.side_effect = always_succeed_with(sentinel.node_info) d = windows_module.request_node_info_by_mac_address(sentinel.mac) self.assertThat(extract_result(d), Is(sentinel.node_info)) self.assertThat(client, MockCalledOnceWith( RequestNodeInfoByMACAddress, mac_address=sentinel.mac)) class TestWindowsPXEBootMethod(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): self.patch(windows_module, 'get_hivex_module') super(TestWindowsPXEBootMethod, self).setUp() def test_clean_path(self): method = WindowsPXEBootMethod() parts = [factory.make_string() for _ in range(3)] dirty_path = '\\'.join(parts) valid_path = dirty_path.lower().replace('\\', '/') clean_path = method.clean_path(dirty_path) self.assertEqual(valid_path, clean_path) def test_clean_path_strip_boot(self): method = WindowsPXEBootMethod() dirty_path = '\\Boot\\BCD' clean_path = method.clean_path(dirty_path) self.assertEqual('bcd', clean_path) def test_get_node_info(self): method = WindowsPXEBootMethod() mac = factory.make_mac_address() self.patch(windows_module, 'get_remote_mac').return_value = mac mock_request_node_info = self.patch( windows_module, 'request_node_info_by_mac_address') method.get_node_info() self.assertThat( mock_request_node_info, MockCalledOnceWith(mac)) @inlineCallbacks def test_match_path_pxelinux(self): method = WindowsPXEBootMethod() method.remote_path = factory.make_string() mock_mac = factory.make_mac_address() mock_get_node_info = self.patch(method, 'get_node_info') mock_get_node_info.return_value = { 'purpose': 
'install',
            'osystem': 'windows',
            'mac': mock_mac,
        }
        params = yield method.match_path(None, 'pxelinux.0')
        self.assertEqual(mock_mac, params['mac'])
        self.assertEqual(method.bootloader_path, params['path'])

    @inlineCallbacks
    def test_match_path_pxelinux_only_on_install(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, 'get_node_info')
        mock_get_node_info.return_value = {
            'purpose': factory.make_string(),
            'osystem': 'windows',
            'mac': mock_mac,
        }
        params = yield method.match_path(None, 'pxelinux.0')
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_missing_hivex(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, 'get_node_info')
        mock_get_node_info.return_value = {
            'purpose': factory.make_string(),
            'osystem': 'windows',
            'mac': mock_mac,
        }
        self.patch(windows_module, 'HAVE_HIVEX', False)
        params = yield method.match_path(None, 'pxelinux.0')
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_only_on_windows(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(method, 'get_node_info')
        mock_get_node_info.return_value = {
            'purpose': 'install',
            'osystem': factory.make_string(),
            'mac': mock_mac,
        }
        params = yield method.match_path(None, 'pxelinux.0')
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_pxelinux_get_node_info_None(self):
        method = WindowsPXEBootMethod()
        method.remote_path = factory.make_string()
        mock_get_node_info = self.patch(method, 'get_node_info')
        mock_get_node_info.return_value = None
        params = yield method.match_path(None, 'pxelinux.0')
        self.assertEqual(params, None)

    @inlineCallbacks
    def test_match_path_static_file(self):
        method = WindowsPXEBootMethod()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(windows_module, 'get_remote_mac')
        mock_get_node_info.return_value = mock_mac
        params = yield method.match_path(None, 'bootmgr.exe')
        self.assertEqual(mock_mac, params['mac'])
        self.assertEqual('bootmgr.exe', params['path'])

    @inlineCallbacks
    def test_match_path_static_file_clean_path(self):
        method = WindowsPXEBootMethod()
        mock_mac = factory.make_mac_address()
        mock_get_node_info = self.patch(windows_module, 'get_remote_mac')
        mock_get_node_info.return_value = mock_mac
        params = yield method.match_path(None, '\\Boot\\BCD')
        self.assertEqual(mock_mac, params['mac'])
        self.assertEqual('bcd', params['path'])

    def test_get_reader_bcd(self):
        method = WindowsPXEBootMethod()
        mock_compose_bcd = self.patch(method, 'compose_bcd')
        local_host = factory.make_ipv4_address()
        kernel_params = make_kernel_parameters(osystem='windows')
        method.get_reader(
            None, kernel_params, path='bcd', local_host=local_host)
        self.assertThat(
            mock_compose_bcd, MockCalledOnceWith(kernel_params, local_host))

    def test_get_reader_static_file(self):
        method = WindowsPXEBootMethod()
        mock_path = factory.make_name('path')
        mock_output_static = self.patch(method, 'output_static')
        kernel_params = make_kernel_parameters(osystem='windows')
        method.get_reader(None, kernel_params, path=mock_path)
        self.assertThat(
            mock_output_static, MockCalledOnceWith(kernel_params, mock_path))

    def test_compose_preseed_url(self):
        url = 'http://localhost/MAAS'
        expected = 'http:\\\\localhost\\^M^A^A^S'
        method = WindowsPXEBootMethod()
        output = method.compose_preseed_url(url)
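        # Aside: a hedged sketch of the escaping this test expects, inferred
        # from the `expected` string above rather than from the windows
        # module itself; `escape_preseed_url` is an illustrative name.
        def escape_preseed_url(url):
            # Swap URL path separators for backslashes, then caret-escape
            # uppercase letters ('MAAS' becomes '^M^A^A^S').
            escaped = url.replace('/', '\\')
            return ''.join(
                ('^' + char) if char.isupper() else char
                for char in escaped)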
self.assertEqual(expected, output) def test_compose_bcd(self): method = WindowsPXEBootMethod() local_host = factory.make_ipv4_address() kernel_params = make_kernel_parameters() fake_output = factory.make_string().encode('utf-8') self.patch(os.path, 'isfile').return_value = True self.patch(shutil, 'copyfile') self.patch(windows_module, 'Bcd') with mock.patch( 'provisioningserver.boot.windows.open', mock.mock_open(read_data=fake_output), create=True): output = method.compose_bcd(kernel_params, local_host) self.assertTrue(isinstance(output, BytesReader)) self.assertEqual(fake_output, output.read(-1)) def test_compose_bcd_missing_template(self): method = WindowsPXEBootMethod() self.patch(method, 'get_resource_path').return_value = '' local_host = factory.make_ipv4_address() kernel_params = make_kernel_parameters() self.assertRaises( BootMethodError, method.compose_bcd, kernel_params, local_host) def test_get_resouce_path(self): fake_tftproot = self.make_dir() self.useFixture(ClusterConfigurationFixture(tftp_root=fake_tftproot)) method = WindowsPXEBootMethod() fake_path = factory.make_name('path') fake_kernelparams = make_kernel_parameters() result = method.get_resource_path(fake_kernelparams, fake_path) expected = os.path.join( fake_tftproot, 'windows', fake_kernelparams.arch, fake_kernelparams.subarch, fake_kernelparams.release, fake_kernelparams.label, fake_path) self.assertEqual(expected, result) def test_output_static(self): method = WindowsPXEBootMethod() contents = factory.make_string() temp_dir = self.make_dir() filename = factory.make_file(temp_dir, "resource", contents=contents) self.patch(method, 'get_resource_path').return_value = filename result = method.output_static(None, None) self.assertIsInstance(result, FilesystemReader) self.assertEqual(contents, result.read(10000)) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/__init__.py0000644000000000000000000000501113056115004022443 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Monitored service driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DHCPv4Server", "DHCPv6Server", ] from abc import ( ABCMeta, abstractproperty, ) from provisioningserver.path import get_path # Location of the DHCPv4 configuration file. DHCPv4_CONFIG_FILE = '/var/lib/maas/dhcpd.conf' # Location of the DHCPv4 interfaces file. DHCPv4_INTERFACES_FILE = '/var/lib/maas/dhcpd-interfaces' # Location of the DHCPv6 configuration file. DHCPv6_CONFIG_FILE = '/var/lib/maas/dhcpd6.conf' # Location of the DHCPv6 interfaces file. DHCPv6_INTERFACES_FILE = '/var/lib/maas/dhcpd6-interfaces' # Message to put in the DHCP config file when the DHCP server gets stopped. DISABLED_DHCP_SERVER = "# DHCP server stopped and disabled." class DHCPServer: """Represents the settings for a DHCP server. :cvar descriptive_name: A name to use for this server in human-readable texts. :cvar template_basename: The base filename for the template to use when generating configuration for this server. :cvar interfaces_filename: The full path and filename for the server's interfaces file. :cvar config_filename: The full path and filename for the server's configuration file. :ivar omapi_key: The OMAPI secret key for the server. 
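    A hypothetical instantiation of one of the concrete subclasses defined
    below (the key value here is illustrative, not a real secret):

        server = DHCPv4Server(omapi_key="a+randomly/generated+key==")
        server.config_filename  # e.g. '/var/lib/maas/dhcpd.conf'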
""" __metaclass__ = ABCMeta descriptive_name = abstractproperty() template_basename = abstractproperty() interfaces_filename = abstractproperty() config_filename = abstractproperty() dhcp_service = abstractproperty() def __init__(self, omapi_key): super(DHCPServer, self).__init__() self.omapi_key = omapi_key class DHCPv4Server(DHCPServer): """Represents the settings for a DHCPv4 server. See `DHCPServer`. """ descriptive_name = "DHCPv4" template_basename = 'dhcpd.conf.template' interfaces_filename = get_path(DHCPv4_INTERFACES_FILE) config_filename = get_path(DHCPv4_CONFIG_FILE) dhcp_service = "dhcp4" class DHCPv6Server(DHCPServer): """Represents the settings for a DHCPv6 server. See `DHCPServer`. """ descriptive_name = "DHCPv6" template_basename = 'dhcpd6.conf.template' interfaces_filename = get_path(DHCPv6_INTERFACES_FILE) config_filename = get_path(DHCPv6_CONFIG_FILE) dhcp_service = "dhcp6" maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/config.py0000644000000000000000000000614013056115004022155 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Write config output for ISC DHCPD.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DHCPConfigError", "get_config", ] from itertools import ( chain, repeat, ) from platform import linux_distribution from provisioningserver.boot import BootMethodRegistry from provisioningserver.utils import locate_config import tempita # Location of DHCP templates, relative to the configuration directory. TEMPLATES_DIR = "templates/dhcp" # Used to generate the conditional bootloader behaviour CONDITIONAL_BOOTLOADER = """ {{behaviour}} option arch = {{arch_octet}} { filename \"{{bootloader}}\"; {{if path_prefix}} option path-prefix \"{{path_prefix}}\"; {{endif}} } """ # Used to generate the PXEBootLoader special case PXE_BOOTLOADER = """ else { filename \"{{bootloader}}\"; {{if path_prefix}} option path-prefix \"{{path_prefix}}\"; {{endif}} } """ class DHCPConfigError(Exception): """Exception raised for errors processing the DHCP config.""" def compose_conditional_bootloader(): output = "" behaviour = chain(["if"], repeat("elsif")) for name, method in BootMethodRegistry: if name != "pxe" and method.arch_octet is not None: output += tempita.sub( CONDITIONAL_BOOTLOADER, behaviour=next(behaviour), arch_octet=method.arch_octet, bootloader=method.bootloader_path, path_prefix=method.path_prefix, ).strip() + ' ' # The PXEBootMethod is used in an else statement for the generated # dhcpd config. This ensures that a booting node that does not # provide an architecture octet, or architectures that emulate # pxelinux can still boot. pxe_method = BootMethodRegistry.get_item('pxe') if pxe_method is not None: output += tempita.sub( PXE_BOOTLOADER, bootloader=pxe_method.bootloader_path, path_prefix=pxe_method.path_prefix, ).strip() return output.strip() def get_config(template_name, **params): """Return a DHCP config file based on the supplied parameters. :param template_name: Template file name: `dhcpd.conf.template` for the IPv4 template, `dhcpd6.conf.template` for the IPv6 template. :param **params: Variables to be substituted into the template. :return: A full configuration, as unicode text. 
""" template_file = locate_config(TEMPLATES_DIR, template_name) params['bootloader'] = compose_conditional_bootloader() params['platform_codename'] = linux_distribution()[2] params.setdefault("ntp_server") try: template = tempita.Template.from_filename( template_file, encoding="UTF-8") return template.substitute(**params) except (KeyError, NameError) as error: raise DHCPConfigError(*error.args) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/detect.py0000644000000000000000000003021413056115004022157 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Utilities and helpers to help discover DHCP servers on your network.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'probe_dhcp', ] from contextlib import contextmanager import errno import fcntl import httplib import json from random import randint import socket import struct from urllib2 import ( HTTPError, URLError, ) from apiclient.maas_client import ( MAASClient, MAASDispatcher, MAASOAuth, ) from provisioningserver.logger import get_maas_logger maaslog = get_maas_logger("dhcp.detect") def make_transaction_ID(): """Generate a random DHCP transaction identifier.""" transaction_id = b'' for _ in range(4): transaction_id += struct.pack(b'!B', randint(0, 255)) return transaction_id class DHCPDiscoverPacket: """A representation of a DHCP_DISCOVER packet. :param my_mac: The MAC address to which the dhcp server should respond. Normally this is the MAC of the interface you're using to send the request. """ def __init__(self, my_mac): self.transaction_ID = make_transaction_ID() self.packed_mac = self.string_mac_to_packed(my_mac) self._build() @classmethod def string_mac_to_packed(cls, mac): """Convert a string MAC address to 6 hex octets. 
:param mac: A MAC address in the format AA:BB:CC:DD:EE:FF :return: a byte string of length 6 """ packed = b'' for pair in mac.split(':'): hex_octet = int(pair, 16) packed += struct.pack(b'!B', hex_octet) return packed def _build(self): self.packet = b'' self.packet += b'\x01' # Message type: Boot Request (1) self.packet += b'\x01' # Hardware type: Ethernet self.packet += b'\x06' # Hardware address length: 6 self.packet += b'\x00' # Hops: 0 self.packet += self.transaction_ID self.packet += b'\x00\x00' # Seconds elapsed: 0 # Bootp flags: 0x8000 (Broadcast) + reserved flags self.packet += b'\x80\x00' self.packet += b'\x00\x00\x00\x00' # Client IP address: 0.0.0.0 self.packet += b'\x00\x00\x00\x00' # Your (client) IP address: 0.0.0.0 self.packet += b'\x00\x00\x00\x00' # Next server IP address: 0.0.0.0 self.packet += b'\x00\x00\x00\x00' # Relay agent IP address: 0.0.0.0 self.packet += self.packed_mac # Client hardware address padding: 00000000000000000000 self.packet += b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' self.packet += b'\x00' * 67 # Server host name not given self.packet += b'\x00' * 125 # Boot file name not given self.packet += b'\x63\x82\x53\x63' # Magic cookie: DHCP # Option: (t=53,l=1) DHCP Message Type = DHCP Discover self.packet += b'\x35\x01\x01' self.packet += b'\x3d\x06' + self.packed_mac # Option: (t=55,l=3) Parameter Request List self.packet += b'\x37\x03\x03\x01\x06' self.packet += b'\xff' # End Option class DHCPOfferPacket: """A representation of a DHCP_OFFER packet.""" def __init__(self, data): self.transaction_ID = data[4:8] self.dhcp_server_ID = socket.inet_ntoa(data[245:249]) # UDP ports for the BOOTP protocol. Used for discovery requests. BOOTP_SERVER_PORT = 67 BOOTP_CLIENT_PORT = 68 # ioctl request for requesting IP address. SIOCGIFADDR = 0x8915 # ioctl request for requesting hardware (MAC) address. SIOCGIFHWADDR = 0x8927 def get_interface_MAC(sock, interface): """Obtain a network interface's MAC address, as a string.""" ifreq = struct.pack(b'256s', interface.encode('ascii')[:15]) info = fcntl.ioctl(sock.fileno(), SIOCGIFHWADDR, ifreq) mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1] return mac def get_interface_IP(sock, interface): """Obtain an IP address for a network interface, as a string.""" ifreq = struct.pack( b'16sH14s', interface.encode('ascii')[:15], socket.AF_INET, b'\x00' * 14) info = fcntl.ioctl(sock, SIOCGIFADDR, ifreq) ip = struct.unpack(b'16sH2x4s8x', info)[2] return socket.inet_ntoa(ip) @contextmanager def udp_socket(): """Open, and later close, a UDP socket.""" sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # We're going to bind to the BOOTP/DHCP client socket, where dhclient may # also be listening, even if it's operating on a different interface! # The SO_REUSEADDR option makes this possible. sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) yield sock sock.close() def request_dhcp(interface): """Broadcast a DHCP discovery request. Return DHCP transaction ID.""" with udp_socket() as sock: sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) mac = get_interface_MAC(sock, interface) bind_address = get_interface_IP(sock, interface) discover = DHCPDiscoverPacket(mac) sock.bind((bind_address, BOOTP_CLIENT_PORT)) sock.sendto(discover.packet, ('', BOOTP_SERVER_PORT)) return discover.transaction_ID def receive_offers(transaction_id): """Receive DHCP offers. Return set of offering servers.""" servers = set() with udp_socket() as sock: # The socket we use for receiving DHCP offers must be bound to IF_ANY. 
        sock.bind(('', BOOTP_CLIENT_PORT))
        try:
            while True:
                sock.settimeout(3)
                data = sock.recv(1024)
                offer = DHCPOfferPacket(data)
                if offer.transaction_ID == transaction_id:
                    servers.add(offer.dhcp_server_ID)
        except socket.timeout:
            # No more offers. Done.
            return servers


def probe_dhcp(interface):
    """Look for a DHCP server on the network.

    This must be run with privileges to broadcast from the BOOTP port, which
    typically requires root.  It may fail to bind to that port if a DHCP
    client is running on that same interface.

    :param interface: Network interface name, e.g. "eth0", attached to the
        network you wish to probe.
    :return: Set of discovered DHCP servers.

    :exception IOError: If the interface does not have an IP address.
    """
    # There is a small race window here, after we close the first socket and
    # before we bind the second one.  Hopefully executing a few lines of code
    # will be faster than communication over the network.
    # UDP is not reliable at any rate.  If detection is important, we should
    # send out repeated requests.
    transaction_id = request_dhcp(interface)
    return receive_offers(transaction_id)


def process_request(client_func, *args, **kwargs):
    """Run a MAASClient query and check for common errors.

    :return: None if there is an error, otherwise the decoded response body.
    """
    try:
        response = client_func(*args, **kwargs)
    except (HTTPError, URLError) as e:
        maaslog.warning("Failed to contact region controller:\n%s", e)
        return None
    code = response.getcode()
    if code != httplib.OK:
        maaslog.error(
            "Failed talking to region controller, it returned:\n%s\n%s",
            code, response.read())
        return None
    try:
        raw_data = response.read()
        if len(raw_data) > 0:
            data = json.loads(raw_data)
        else:
            return None
    except ValueError as e:
        maaslog.error(
            "Failed to decode response from region controller:\n%s", e)
        return None
    return data


def determine_cluster_interfaces(knowledge):
    """Given server knowledge, determine network interfaces on this cluster.

    :return: a list of tuples of (interface name, ip) for all interfaces.
    :note: this uses an API call and not local probing because the region
        controller has the definitive and final say in what does and doesn't
        exist.
    """
    api_path = (
        'api/1.0/nodegroups/%s/interfaces/' % knowledge['nodegroup_uuid'])
    oauth = MAASOAuth(*knowledge['api_credentials'])
    client = MAASClient(oauth, MAASDispatcher(), knowledge['maas_url'])
    interfaces = process_request(client.get, api_path, 'list')
    if interfaces is None:
        return None
    interface_names = sorted(
        (interface['interface'], interface['ip'])
        for interface in interfaces
        if interface['interface'] != '')
    return interface_names


def probe_interface(interface, ip):
    """Probe the given interface for DHCP servers.

    :param interface: interface as returned from determine_cluster_interfaces
    :param ip: ip as returned from determine_cluster_interfaces
    :return: A set of IP addresses of detected servers.

    :note: Any servers running on the IP address of the local host are
        filtered out as they will be the MAAS DHCP server.
    """
    try:
        servers = probe_dhcp(interface)
    except IOError as e:
        servers = set()
        if e.errno == errno.EADDRNOTAVAIL:
            # Errno EADDRNOTAVAIL is "Cannot assign requested address"
            # which we need to ignore; it means the interface has no IP
            # and there's no need to scan this interface as it's not in
            # use.
            maaslog.debug(
                "Ignoring DHCP scan for %s, it has no IP address", interface)
        elif e.errno == errno.ENODEV:
            # Errno ENODEV is "no such device".  This seems an odd situation
            # since we're scanning detected devices, so this is probably
            # a bug.
            maaslog.error(
                "Ignoring DHCP scan for %s, it no longer exists. Check "
                "your cluster interfaces configuration.", interface)
        else:
            raise
    # Using servers.discard(ip) here breaks Mock in the tests, so
    # we're creating a copy of the set instead.
    results = servers.difference([ip])
    return results


def update_region_controller(knowledge, interface, server):
    """Update the region controller with the status of the probe.

    :param knowledge: dictionary of server info
    :param interface: name of interface, e.g. eth0
    :param server: IP address of detected DHCP server, or None
    """
    api_path = 'api/1.0/nodegroups/%s/interfaces/%s/' % (
        knowledge['nodegroup_uuid'], interface)
    oauth = MAASOAuth(*knowledge['api_credentials'])
    client = MAASClient(oauth, MAASDispatcher(), knowledge['maas_url'])
    if server is None:
        server = ''
    process_request(
        client.post, api_path, 'report_foreign_dhcp',
        foreign_dhcp_ip=server)


def periodic_probe_task(api_knowledge):
    """Probe for DHCP servers and set NodeGroupInterface.foreign_dhcp.

    This should be run periodically so that the database has an up-to-date
    view of any rogue DHCP servers on the network.

    NOTE: This uses blocking I/O with sequential polling of interfaces, and
    hence doesn't scale well.  It's a future improvement to make to throw it
    in parallel threads or async I/O.

    :param api_knowledge: A dict of the information needed to be able to
        make requests to the region's REST API.
    """
    # Determine all the active interfaces on this cluster (nodegroup).
    interfaces = determine_cluster_interfaces(api_knowledge)
    if interfaces is None:
        maaslog.info("No interfaces on cluster, not probing DHCP.")
        return

    # Iterate over interfaces and probe each one.
    for interface, ip in interfaces:
        try:
            servers = probe_interface(interface, ip)
        except socket.error:
            maaslog.error(
                "Failed to probe sockets; did you configure authbind as per "
                "HACKING.txt?")
            return
        else:
            if len(servers) > 0:
                # Only send one, if it gets cleared out then the
                # next detection pass will send a different one, if it
                # still exists.
                update_region_controller(
                    api_knowledge, interface, servers.pop())
            else:
                update_region_controller(api_knowledge, interface, None)
maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/leases.py0000644000000000000000000001247413056115004022173 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Send lease updates to the server.

This code runs inside node-group workers.  It watches for changes to DHCP
leases, and notifies the MAAS server so that it can rewrite DNS zone files
as appropriate.

Leases in this module are represented as dicts, mapping each leased IP
address to the MAC address that it belongs to.

The modification time and leases of the last-uploaded leases are cached, so
as to suppress unwanted redundant updates.  This cache is updated *before*
the actual upload, so as to prevent thundering-herd problems: if an upload
takes too long for whatever reason, subsequent updates should not be
uploaded until the first upload is done.  Some uploads may be lost due to
concurrency or failures, but the situation will right itself eventually.
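
For example (hypothetical addresses), a snapshot of the parsed leases might
map IPs to MACs like this:

    {'192.168.1.101': 'aa:bb:cc:dd:ee:ff'}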
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'check_lease_changes', 'record_lease_state', ] from collections import defaultdict import errno from os import ( fstat, stat, ) import sys from provisioningserver.dhcp.leases_parser_fast import parse_leases from provisioningserver.logger import get_maas_logger from provisioningserver.utils.shell import objectfork maaslog = get_maas_logger("dhcp.leases") # Cache for leases, and lease times. cache = defaultdict() # Cache key for the modification time on last-processed leases file. LEASES_TIME_CACHE_KEY = 'leases_time' # Cache key for the leases as last parsed. LEASES_CACHE_KEY = 'recorded_leases' def get_leases_file(): """Return the location of the DHCP leases file.""" # This used to be configuration-based so that the development env could # have a different location. However, nobody seems to be provisioning from # a dev environment so it's hard-coded until that need arises, as # converting to the pserv config would be wasted work right now. return "/var/lib/maas/dhcp/dhcpd.leases" def get_leases_timestamp(): """Return the last modification timestamp of the DHCP leases file. None will be returned if the DHCP lease file cannot be found. """ try: return stat(get_leases_file()).st_mtime except OSError as exception: # Return None only if the exception is a "No such file or # directory" exception. if exception.errno == errno.ENOENT: return None else: raise def parse_leases_file(): """Parse the DHCP leases file. :return: A tuple: (timestamp, leases). The `timestamp` is the last modification time of the leases file, and `leases` is a list containing tuples of leased IP addresses and their associated MAC addresses. None will be returned if the DHCP lease file cannot be found. """ try: with open(get_leases_file(), 'rb') as leases_file: contents = leases_file.read().decode('utf-8') return fstat(leases_file.fileno()).st_mtime, parse_leases(contents) except IOError as exception: # Return None only if the exception is a "No such file or # directory" exception. if exception.errno == errno.ENOENT: return None else: raise def check_lease_changes(): """Has the DHCP leases file changed in any significant way?""" # These variables are shared between worker threads/processes. # A bit of inconsistency due to concurrent updates is not a problem, # but read them both at once here to reduce the scope for trouble. previous_leases = cache.get(LEASES_CACHE_KEY) previous_leases_time = cache.get(LEASES_TIME_CACHE_KEY) if get_leases_timestamp() == previous_leases_time: return None exc_info = None # Keep track of exceptions in the *parent*. with objectfork() as (pid, recv, send): if pid == 0: # Child, where we'll do the parsing. send(parse_leases_file()) else: # Parent, where we'll receive the results. try: parse_result = recv() except: # This probably means that the child has crashed, but keep # hold of this exception for later in case it is a problem # only in the parent. exc_info = sys.exc_info() # On exit from the objectfork() context above the child process will be # terminated and any errors it encountered will be propagated into the # parent process; this block will not be reached. However it's possible # that the parent alone has crashed, and we propagate its error now. 
    if exc_info is not None:
        # Re-raise the parent's exception with its original traceback
        # (Python 2 three-argument raise); raising the tuple itself
        # would be a TypeError.
        raise exc_info[0], exc_info[1], exc_info[2]

    if parse_result is not None:
        timestamp, leases = parse_result
        if leases == previous_leases:
            return None
        else:
            return timestamp, leases
    else:
        return None


def record_lease_state(last_change, leases):
    """Record a snapshot of the state of DHCP leases.

    :param last_change: Modification date on the leases file with the given
        leases.
    :param leases: A dict mapping each leased IP address to the MAC address
        that it has been assigned to.
    """
    cache[LEASES_TIME_CACHE_KEY] = last_change
    cache[LEASES_CACHE_KEY] = leases
maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/leases_parser.py0000644000000000000000000001723213056115004023544 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Parser for ISC dhcpd leases file.

The parser is very minimal.  All we really care about is which IP
addresses are currently associated with which respective MAC addresses.
The parser works out no other information than that, and does not
pretend to parse the full format of the leases file faithfully.
"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'parse_leases',
    ]

from collections import OrderedDict
from datetime import datetime

from pyparsing import (
    CaselessKeyword,
    Dict,
    Group,
    oneOf,
    QuotedString,
    Regex,
    restOfLine,
    Suppress,
    ZeroOrMore,
    )

LEASE_TIME_FORMAT = '%w %Y/%m/%d %H:%M:%S'

ip = Regex("[:0-9a-fA-F][-:.0-9a-fA-F]{2,38}")
mac = Regex("[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}")
hardware_type = Regex('[A-Za-z0-9_-]+')
args = Regex('[^"{;]+') | QuotedString('"')
expiry = Regex('[0-9]\s+[0-9/-]+\s+[0-9:]+') | 'never'
identifier = Regex("[A-Za-z_][0-9A-Za-z_-]*")
set_statement = (
    CaselessKeyword('set') + identifier + Suppress('=') + QuotedString('"'))

# For our purposes, leases and host declarations are similar enough that
# we can parse them as the same construct with different names.
lease_or_host = oneOf(['lease', 'host'], caseless=True)

hardware = CaselessKeyword("hardware") + hardware_type("type") + mac("mac")
fixed_address4 = CaselessKeyword("fixed-address") + ip("address")
fixed_address6 = CaselessKeyword("fixed-address6") + ip("address6")
ends = CaselessKeyword("ends") + expiry("expiry")
deleted = CaselessKeyword("deleted")

lone_statement_names = [
    'abandoned',
    'bootp',
    'deleted',
    'dynamic',
    'reserved',
    ]
lone_statement = oneOf(lone_statement_names, caseless=True)

other_statement_names = [
    'atsfp',
    'binding',
    'bootp',
    'client-hostname',
    'cltt',
    'ddns-client-fqdn',
    'ddns-fwd-name',
    'ddns-rev-name',
    'ddns-text',
    'next',
    'option',
    'reserved',
    'rewind',
    'starts',
    'tstp',
    'tsfp',
    'uid',
    'vendor-class-identifier',
    ]
other_statement = oneOf(other_statement_names, caseless=True) + args

lease_statement = (
    hardware | fixed_address4 | fixed_address6 | deleted | ends |
    set_statement | lone_statement | other_statement
    ) + Suppress(';')

lease_parser = (
    lease_or_host("lease_or_host") + ip("host") + Suppress('{') +
    Dict(ZeroOrMore(Group(lease_statement))) +
    Suppress('}')
    )
lease_parser.ignore('#' + restOfLine)


def is_lease(entry):
    """Is `entry` a lease declaration?"""
    entry_type = entry.lease_or_host.lower()
    assert entry_type in {'host', 'lease'}, (
        "Unknown entry type (not a host or lease): %s" % entry_type)
    return entry_type == 'lease'


def is_host(entry):
    """Is `entry` a host declaration?"""
    return not is_lease(entry)


def get_expiry_date(lease):
    """Get the expiry date for a lease, if any.
:param lease: A lease as returned by the parser. :return: A UTC-based timestamp representing the lease's moment of expiry, or None if the lease has no expiry date. """ assert is_lease(lease) end_time = getattr(lease, 'ends', '') if end_time is None or len(end_time) == 0: binding_state = getattr(lease, 'binding state', '') binding_state = binding_state.lower() if binding_state == 'free': # For a 'free' lease, the release time is the 'starts' time. start_time = getattr(lease, 'starts', '') if start_time is None or len(start_time) == 0: return None return datetime.strptime(start_time, LEASE_TIME_FORMAT) elif end_time.lower() == 'never': return None else: return datetime.strptime(end_time, LEASE_TIME_FORMAT) def has_expired(lease, now): """Has `lease` expired? :param lease: A lease as returned by the parser. :param now: The current UTC-based timestamp to check expiry against. :return: Whether the lease has expired. """ assert is_lease(lease) # Deleted entries are definitely expired. if 'deleted' in lease: return True expiry_date = get_expiry_date(lease) if expiry_date is None: return False else: return expiry_date < now def gather_leases(hosts_and_leases): """Find current leases among `hosts_and_leases`.""" now = datetime.utcnow() # Ensure we have the most recent MAC leased to each IP address. leases = OrderedDict() for lease in filter(is_lease, hosts_and_leases): lease_mac = get_lease_mac(lease) lease_ip = lease.host if not has_expired(lease, now) and lease_mac is not None: leases[lease_ip] = lease_mac else: if lease_ip in leases: del leases[lease_ip] return [(ip, mac) for ip, mac in leases.items()] def get_host_mac(host): """Get the MAC address from a host declaration. For a rubout this is the 'host' record.""" assert is_host(host) if 'deleted' in host: host = getattr(host, 'host', None) if host in (None, '', b''): return None else: return host # In this case, it's stored the same way a lease MAC is stored. return get_lease_mac(host) def get_lease_mac(host): """Get the MAC address from a lease declaration.""" hardware = getattr(host, 'hardware', None) if hardware in (None, '', b''): return None else: return hardware.mac def get_host_key(host): """Get the key from a host declaration. The key can be the IP or the MAC depending on which version of MAAS created the host map. """ host = getattr(host, 'host', None) if host in (None, '', b''): return None else: return host def get_host_ip(host): """Get the IP address from a host declaration. A rubout has none.""" assert is_host(host) if 'deleted' in host: return None fields = ['fixed-address', 'fixed-address6'] for field in fields: address = getattr(host, field, None) if address not in (None, '', b''): return address return None def gather_hosts(hosts_and_leases): """Find current host declarations among `hosts_and_leases`.""" # Get MAC address mappings for host entries. A newer entry # overwrites an older one for the same IP address. A rubout entry # will have no IP address. host_maps = OrderedDict() for host in filter(is_host, hosts_and_leases): host_maps[get_host_key(host)] = (get_host_mac(host), get_host_ip(host)) # Now filter out mappings where the last entry was a rubout. return [ (val[1], val[0]) for _, val in host_maps.items() if val[1] and val[0] ] def combine_entries(entries): """Combine the hosts and leases declarations in a parsed leases file. :param entries: Parsed host/leases entries from a leases file. 
:return: A list mapping leased IP addresses to the respective MAC addresses that currently own them (regardless of whether they were found in a lease or in a host declaration). """ leases = gather_leases(entries) return leases + gather_hosts(entries) def parse_leases(leases_contents): """Parse contents of a leases file. :param leases_contents: Contents (as unicode) of the leases file. :return: A list mapping each currently leased IP address to the MAC address that it is associated with, with possible duplicates. """ entries = lease_parser.searchString(leases_contents) return combine_entries(entries) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/leases_parser_fast.py0000644000000000000000000000602213056115004024554 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """A speedier version of `leases_parser`. This extracts the relevant stanzas from a leases file, keeping only the most recent "host" and "lease" entries, then uses the existing and properly defined but slow parser to parse them. This massively speeds up parsing a leases file that contains a modest number of unique host and lease entries, but has become very large because of churn. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'parse_leases', ] from collections import ( defaultdict, OrderedDict, ) from datetime import datetime from itertools import chain import re from provisioningserver.dhcp.leases_parser import ( get_host_ip, get_host_mac, get_lease_mac, has_expired, is_host, is_lease, lease_parser, ) re_entry = re.compile( r''' ^\s* # Ignore leading whitespace on each line. (host|lease) # Look only for host or lease stanzas. \s+ # Mandatory whitespace. ([0-9a-fA-F.:-]+) # Capture the IP/MAC address for this stanza. \s*{ # Optional whitespace then an opening brace. ''', re.MULTILINE | re.DOTALL | re.VERBOSE) def find_lease_starts(leases_contents): results = defaultdict(dict) for match in re_entry.finditer(leases_contents): stanza, address = match.groups() results[stanza][match.start()] = address return chain.from_iterable( mapping.keys() for mapping in results.itervalues()) def extract_leases(leases_contents): starts = find_lease_starts(leases_contents) for start in sorted(starts): record = lease_parser.scanString(leases_contents[start:]) try: token, _, _ = next(record) except StopIteration: pass else: yield token def parse_leases(leases_contents): leases = OrderedDict() hosts = [] now = datetime.utcnow() for entry in extract_leases(leases_contents): if is_lease(entry): mac = get_lease_mac(entry) if not has_expired(entry, now) and mac is not None: leases[entry.host] = entry.hardware.mac else: # Expired or released lease. if entry.host in leases: del leases[entry.host] elif is_host(entry): mac = get_host_mac(entry) ip = get_host_ip(entry) if ip and mac: # A host entry came later than a lease entry for the same IP # address. Letting them both stay will confuse MAAS by allowing # an ephemeral lease to exist at the same time as a static # host map. if ip in leases: del leases[ip] hosts.append((ip, mac)) results = leases.items() results.extend(hosts) return results maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/omshell.py0000644000000000000000000002340313056115004022354 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
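# Aside: leases_parser_fast above illustrates a general two-phase pattern --
# a cheap regex scan locates candidate stanzas, then the expensive parser
# runs only at those offsets.  A simplified, self-contained sketch of the
# scan step (it keeps only the newest offset per address, a deliberate
# simplification of find_lease_starts; the names are illustrative):

import re

_stanza_re = re.compile(
    r'^\s*(host|lease)\s+([0-9a-fA-F.:-]+)\s*{', re.MULTILINE)


def latest_stanza_offsets(contents):
    # Later entries in a dhcpd.leases file supersede earlier ones, so keep
    # the last offset seen for each (kind, address) pair.
    latest = {}
    for match in _stanza_re.finditer(contents):
        latest[match.groups()] = match.start()
    return sorted(latest.values())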
"""Python wrapper around the `omshell` utility which amends objects inside the DHCP server. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "generate_omapi_key", "Omshell", ] import os import re from subprocess import ( PIPE, Popen, ) from textwrap import dedent from provisioningserver.logger.log import get_maas_logger from provisioningserver.utils import parse_key_value_file from provisioningserver.utils.fs import tempdir from provisioningserver.utils.shell import ( call_and_check, ExternalProcessError, ) maaslog = get_maas_logger("dhcp.omshell") bad_key_pattern = re.compile("[+/]no|no[+/]", flags=re.IGNORECASE) def call_dnssec_keygen(tmpdir): path = os.environ.get("PATH", "").split(os.pathsep) path.append("/usr/sbin") env = dict(os.environ, PATH=os.pathsep.join(path)) return call_and_check( ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', '-b', '512', '-n', 'HOST', '-K', tmpdir, '-q', 'omapi_key'], env=env) def run_repeated_keygen(tmpdir): # omshell has a bug where if the chars '/' or '+' appear either # side of the word 'no' (in any case), it throws an error like # "partial base64 value left over". We check for that here and # repeatedly generate a new key until a good one is generated. key = None while key is None: key_id = call_dnssec_keygen(tmpdir) # Locate the file that was written and strip out the Key: field in # it. if not key_id: raise AssertionError("dnssec-keygen didn't generate anything") key_id = key_id.strip() # Remove trailing newline. key_file_name = os.path.join(tmpdir, key_id + '.private') parsing_error = False try: config = parse_key_value_file(key_file_name) except ValueError: parsing_error = True if parsing_error or 'Key' not in config: raise AssertionError( "Key field not found in output from dnssec-keygen") key = config['Key'] if bad_key_pattern.search(key) is not None: # Force a retry. os.remove(key_file_name) # Stop dnssec_keygen complaints. key = None return key def generate_omapi_key(): """Generate a HMAC-MD5 key by calling out to the dnssec-keygen tool. :return: The shared key suitable for OMAPI access. :type: string """ # dnssec-keygen writes out files to a specified directory, so we # need to make a temp directory for that. # This relies on the temporary directory being accessible only to its # owner. temp_prefix = "%s." % os.path.basename(__file__) with tempdir(prefix=temp_prefix) as tmpdir: key = run_repeated_keygen(tmpdir) return key class Omshell: """Wrap up the omshell utility in Python. 'omshell' is an external executable that communicates with a DHCP daemon and manipulates its objects. This class wraps up the commands necessary to add and remove host maps (MAC to IP). :param server_address: The address for the DHCP server (ip or hostname) :param shared_key: An HMAC-MD5 key generated by dnssec-keygen like: $ dnssec-keygen -r /dev/urandom -a HMAC-MD5 -b 512 -n HOST omapi_key $ cat Komapi_key.+*.private |grep ^Key|cut -d ' ' -f2- It must match the key set in the DHCP server's config which looks like this: omapi-port 7911; key omapi_key { algorithm HMAC-MD5; secret "XXXXXXXXX"; #<-The output from the generated key above. 
}; omapi-key omapi_key; """ def __init__(self, server_address, shared_key): self.server_address = server_address self.shared_key = shared_key self.command = ["omshell"] def _run(self, stdin): proc = Popen(self.command, stdin=PIPE, stdout=PIPE) stdout, stderr = proc.communicate(stdin) if proc.poll() != 0: raise ExternalProcessError(proc.returncode, self.command, stdout) return proc.returncode, stdout def try_connection(self): # Don't pass the omapi_key as its not needed to just try to connect. stdin = dedent("""\ server {self.server_address} connect """) stdin = stdin.format(self=self) returncode, output = self._run(stdin) # If the omshell worked, the last line should reference a null # object. We need to strip blanks, newlines and '>' characters # for this to work. lines = output.strip('\n >').splitlines() try: last_line = lines[-1] except IndexError: last_line = "" if "obj: %s" % (mac_address, ip_address)) name = mac_address.replace(':', '-') stdin = dedent("""\ server {self.server_address} key omapi_key {self.shared_key} connect new host set ip-address = {ip_address} set hardware-address = {mac_address} set hardware-type = 1 set name = "{name}" create """) stdin = stdin.format( self=self, ip_address=ip_address, mac_address=mac_address, name=name) returncode, output = self._run(stdin) # If the call to omshell doesn't result in output containing the # magic string 'hardware-type' then we can be reasonably sure # that the 'create' command failed. Unfortunately there's no # other output like "successful" to check so this is the best we # can do. if "hardware-type" in output: # Success. pass elif "can't open object: I/O error" in output: # Host map already existed. Treat as success. pass else: raise ExternalProcessError(returncode, self.command, output) def remove(self, mac_address): # The "name" is not a host name; it's an identifier used within # the DHCP server. We use the MAC address. Prior to 1.9, MAAS using # the IPs as the key but changing to using MAC addresses allows the # DHCP service to give all the NICs of a bond the same IP address. # The only caveat of this change is that the remove() method needs # to be able to deal with legacy host mappings (using IP as # the key) and new host mappings (using the MAC as the key). # This is achieved by sending both the IP and the MAC: one of them will # be the key for the mapping (it will be the IP if the record was # created with by an old version of MAAS and the MAC otherwise). maaslog.debug("Removing host mapping key=%s" % mac_address) mac_address = mac_address.replace(':', '-') stdin = dedent("""\ server {self.server_address} key omapi_key {self.shared_key} connect new host set name = "{mac_address}" open remove """) stdin = stdin.format( self=self, mac_address=mac_address) returncode, output = self._run(stdin) # If the omshell worked, the last line should reference a null # object. We need to strip blanks, newlines and '>' characters # for this to work. lines = output.strip('\n >').splitlines() try: last_line = lines[-1] except IndexError: last_line = "" if "obj: ', BOOTP_SERVER_PORT), args[1]) def test_returns_transaction_id(self): patch_socket(self) self.patch_interface_MAC() self.patch_interface_IP() transaction_id = self.patch_transaction_ID() interface = factory.make_name('interface') self.assertEqual(transaction_id, request_dhcp(interface)) class FakePacketReceiver: """Fake callable to substitute for a socket's `recv`. Returns the given packets on successive calls. When it runs out, raises a timeout. 
""" def __init__(self, packets=None): if packets is None: packets = [] self.calls = [] self.packets = list(packets) def __call__(self, recv_size): self.calls.append(recv_size) if len(self.packets) == 0: raise socket.timeout() else: return self.packets.pop(0) class TestReceiveOffers(MAASTestCase): """Tests for `receive_offers`.""" def patch_recv(self, sock, num_packets=0): """Patch up socket's `recv` to return `num_packets` arbitrary packets. After that, further calls to `recv` will raise a timeout. """ packets = [factory.make_bytes() for _ in range(num_packets)] receiver = FakePacketReceiver(packets) self.patch(sock, 'recv', receiver) return receiver def patch_offer_packet(self): """Patch a mock `DHCPOfferPacket`.""" transaction_id = factory.make_bytes(4) packet = mock.MagicMock() packet.transaction_ID = transaction_id packet.dhcp_server_ID = factory.make_ipv4_address() self.patch(detect_module, 'DHCPOfferPacket').return_value = packet return packet def test_receives_from_socket(self): sock = patch_socket(self) receiver = self.patch_recv(sock) transaction_id = self.patch_offer_packet().transaction_ID receive_offers(transaction_id) self.assertEqual( [mock.call(socket.AF_INET, socket.SOCK_DGRAM)], socket.socket.mock_calls) self.assertEqual( [mock.call(('', BOOTP_CLIENT_PORT))], sock.bind.mock_calls) self.assertEqual([1024], receiver.calls) def test_returns_empty_if_nothing_received(self): sock = patch_socket(self) self.patch_recv(sock) transaction_id = self.patch_offer_packet().transaction_ID self.assertEqual(set(), receive_offers(transaction_id)) def test_processes_offer(self): sock = patch_socket(self) self.patch_recv(sock, 1) packet = self.patch_offer_packet() self.assertEqual( {packet.dhcp_server_ID}, receive_offers(packet.transaction_ID)) def test_ignores_other_transactions(self): sock = patch_socket(self) self.patch_recv(sock, 1) self.patch_offer_packet() other_transaction_id = factory.make_bytes(4) self.assertEqual(set(), receive_offers(other_transaction_id)) def test_propagates_errors_other_than_timeout(self): class InducedError(Exception): """Deliberately induced error for testing.""" sock = patch_socket(self) sock.recv = mock.MagicMock(side_effect=InducedError) self.assertRaises( InducedError, receive_offers, factory.make_bytes(4)) class MockResponse: # This implements just enough to look lke a urllib2 response object. def __init__(self, code=None, response=None): if code is None: code = httplib.OK self.code = code if response is None: response = "" self.response = response def getcode(self): return self.code def read(self): return self.response class TestPeriodicTask(PservTestCase): def setUp(self): # Initialise the knowledge cache. 
super(TestPeriodicTask, self).setUp() self.maaslog = self.useFixture(FakeLogger("maas.dhcp.detect")) uuid = factory.make_UUID() maas_url = 'http://%s.example.com/%s/' % ( factory.make_name('host'), factory.make_string(), ) api_credentials = make_api_credentials() self.useFixture(ClusterConfigurationFixture(maas_url=maas_url)) self.knowledge = dict( nodegroup_uuid=uuid, api_credentials=api_credentials, maas_url=maas_url) def make_fake_interfaces_response(self, interfaces_pairs): stanzas = [] for interfaces_pair in interfaces_pairs: stanza = textwrap.dedent(""" {{ "ip_range_high": null, "ip_range_low": null, "broadcast_ip": null, "ip": "{1}", "subnet_mask": "255.255.255.0", "management": 0, "interface": "{0}" }}""").format(*interfaces_pair) stanzas.append(stanza) interfaces_json = "[" interfaces_json += ",".join(stanzas) interfaces_json += "]" return interfaces_json def patch_fake_interfaces_list(self, interfaces_pairs): # Set up the api client to return a fake set of interfaces. # Determine_cluster_interfaces calls the API to discover what # interfaces are available, so any test code that calls it # should first call this helper to set up the required fake response. interfaces_json = self.make_fake_interfaces_response(interfaces_pairs) self.patch(MAASClient, 'get').return_value = MockResponse( httplib.OK, interfaces_json) def test_determine_cluster_interfaces_returns_interface_names(self): eth0_addr = factory.make_ipv4_address() wlan0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list( [("eth0", eth0_addr), ("wlan0", wlan0_addr)]) self.assertEqual( [("eth0", eth0_addr), ("wlan0", wlan0_addr)], determine_cluster_interfaces(self.knowledge)) def test_probe_interface_returns_empty_set_when_nothing_detected(self): eth0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list([("eth0", eth0_addr)]) self.patch(detect_module, 'probe_dhcp').return_value = set() interfaces = determine_cluster_interfaces(self.knowledge) results = probe_interface(*interfaces[0]) self.assertEqual(set(), results) def test_probe_interface_returns_empty_set_when_IP_missing(self): # If the interface being probed has no IP address, the # request_dhcr() method will raise IOError with errno 99. Make # sure this is caught and ignored. eth0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list([("eth0", eth0_addr)]) ioerror = IOError( errno.EADDRNOTAVAIL, "Cannot assign requested address") self.patch(fcntl, 'ioctl').side_effect = ioerror interfaces = determine_cluster_interfaces(self.knowledge) results = probe_interface(*interfaces[0]) self.assertEqual(set(), results) def test_probe_interface_returns_empty_set_when_device_missing(self): # If the interface being probed does not exist, the # request_dhcp() method will raise IOError with errno 19. Make # sure this is caught and ignored. eth0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list([("eth0", eth0_addr)]) ioerror = IOError(errno.ENODEV, "No such device") self.patch(fcntl, 'ioctl').side_effect = ioerror interfaces = determine_cluster_interfaces(self.knowledge) results = probe_interface(*interfaces[0]) self.assertEqual(set(), results) def test_probe_interface_returns_populated_set(self): # Test that the detected DHCP server is returned. 
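        # probe_dhcp is patched to report a (made-up) foreign DHCP server
        # at 10.2.2.2; probe_interface should return it unchanged.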
eth0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list([("eth0", eth0_addr)]) self.patch( detect_module, 'probe_dhcp').return_value = {'10.2.2.2'} interfaces = determine_cluster_interfaces(self.knowledge) results = probe_interface(*interfaces[0]) self.assertEqual({'10.2.2.2'}, results) def test_probe_interface_filters_interface_own_ip(self): # Test that the interface shows the detected DHCP server except # if it is the same IP as the interface's. eth0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list([("eth0", eth0_addr)]) detected_dhcp = eth0_addr self.patch(detect_module, 'probe_dhcp').return_value = {detected_dhcp} interfaces = determine_cluster_interfaces(self.knowledge) results = probe_interface(*interfaces[0]) self.assertEqual(set(), results) def test_determine_cluster_interfaces_catchs_HTTPError_in_MASClient(self): self.patch(MAASClient, 'get').side_effect = urllib2.HTTPError( mock.sentinel, mock.sentinel, mock.sentinel, mock.sentinel, mock.sentinel) determine_cluster_interfaces(self.knowledge) self.assertIn( "Failed to contact region controller:", self.maaslog.output) def test_determine_cluster_interfaces_catches_URLError_in_MASClient(self): self.patch(MAASClient, 'get').side_effect = urllib2.URLError( mock.sentinel.arg1) determine_cluster_interfaces(self.knowledge) self.assertIn( "Failed to contact region controller:", self.maaslog.output) def test_determine_cluster_interfaces_catches_non_OK_response(self): self.patch(MAASClient, 'get').return_value = MockResponse( httplib.NOT_FOUND, "error text") determine_cluster_interfaces(self.knowledge) self.assertIn( "Failed talking to region controller, it returned:", self.maaslog.output) def test_update_region_controller_sets_detected_dhcp(self): mocked_post = self.patch(MAASClient, 'post') mocked_post.return_value = MockResponse() detected_server = factory.make_ipv4_address() update_region_controller(self.knowledge, "eth0", detected_server) uuid = self.knowledge['nodegroup_uuid'] self.assertThat(mocked_post, MockCalledOnceWith( 'api/1.0/nodegroups/%s/interfaces/eth0/' % uuid, 'report_foreign_dhcp', foreign_dhcp_ip=detected_server)) def test_update_region_controller_clears_detected_dhcp(self): mocked_post = self.patch(MAASClient, 'post') mocked_post.return_value = MockResponse() detected_server = None update_region_controller(self.knowledge, "eth0", detected_server) uuid = self.knowledge['nodegroup_uuid'] self.assertThat(mocked_post, MockCalledOnceWith( 'api/1.0/nodegroups/%s/interfaces/eth0/' % uuid, 'report_foreign_dhcp', foreign_dhcp_ip='')) def test_update_region_controller_catches_HTTPError_in_MAASClient(self): self.patch(MAASClient, 'post').side_effect = urllib2.HTTPError( mock.sentinel, mock.sentinel, mock.sentinel, mock.sentinel, mock.sentinel) update_region_controller(self.knowledge, "eth0", None) self.assertIn( "Failed to contact region controller:", self.maaslog.output) def test_update_region_controller_catches_URLError_in_MAASClient(self): self.patch(MAASClient, 'post').side_effect = urllib2.URLError( mock.sentinel.arg1) update_region_controller(self.knowledge, "eth0", None) self.assertIn( "Failed to contact region controller:", self.maaslog.output) def test_update_region_controller_catches_non_OK_response(self): mock_response = MockResponse(httplib.NOT_FOUND, "error text") self.patch(MAASClient, 'post').return_value = mock_response update_region_controller(self.knowledge, "eth0", None) self.assertIn( "Failed talking to region controller, it returned:", self.maaslog.output) def 
test_periodic_probe_task_exits_if_no_interfaces(self): mocked = self.patch(detect_module, 'probe_interface') self.patch( detect_module, 'determine_cluster_interfaces').return_value = None periodic_probe_task(self.knowledge) self.assertFalse(mocked.called) def test_periodic_probe_task_updates_region_with_detected_server(self): eth0_addr = factory.make_ipv4_address() wlan0_addr = factory.make_ipv4_address() detected_server = factory.make_ipv4_address() self.patch_fake_interfaces_list( [("eth0", eth0_addr), ("wlan0", wlan0_addr)]) self.patch( detect_module, 'probe_dhcp').return_value = {detected_server} mocked_update = self.patch(detect_module, 'update_region_controller') periodic_probe_task(self.knowledge) calls = [ mock.call(self.knowledge, 'eth0', detected_server), mock.call(self.knowledge, 'wlan0', detected_server), ] mocked_update.assert_has_calls(calls, any_order=True) def test_periodic_probe_task_updates_region_with_no_detected_server(self): eth0_addr = factory.make_ipv4_address() wlan0_addr = factory.make_ipv4_address() self.patch_fake_interfaces_list( [("eth0", eth0_addr), ("wlan0", wlan0_addr)]) self.patch( detect_module, 'probe_dhcp').return_value = set() mocked_update = self.patch(detect_module, 'update_region_controller') periodic_probe_task(self.knowledge) calls = [ mock.call(self.knowledge, 'eth0', None), mock.call(self.knowledge, 'wlan0', None), ] mocked_update.assert_has_calls(calls, any_order=True) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/tests/test_leases.py0000644000000000000000000001737613056115004024402 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the report_leases task.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import ( datetime, timedelta, ) import errno import os from textwrap import dedent from maastesting.factory import factory from maastesting.utils import ( age_file, get_write_time, ) from mock import Mock from provisioningserver.dhcp import leases as leases_module from provisioningserver.dhcp.leases import ( cache, check_lease_changes, LEASES_CACHE_KEY, LEASES_TIME_CACHE_KEY, parse_leases_file, record_lease_state, ) from provisioningserver.testing.testcase import PservTestCase class TestHelpers(PservTestCase): def test_record_lease_state_records_time_and_leases(self): time = datetime.utcnow() leases = factory.make_random_leases() record_lease_state(time, leases) self.assertEqual( (time, leases), ( cache.get(LEASES_TIME_CACHE_KEY), cache.get(LEASES_CACHE_KEY), )) class StopExecuting(BaseException): """Exception class to stop execution at a desired point. This is deliberately not just an :class:`Exception`. We want to interrupt the code that's being tested, not just exercise its error-handling capabilities. """ class TestUpdateLeases(PservTestCase): def redirect_parser(self, path): """Make the leases parser read from a file at `path`.""" self.patch(leases_module, 'get_leases_file').return_value = path def fake_leases_file(self, leases=None, age=None): """Fake the presence of a leases file. This does not go through the leases parser. It patches out the leases parser with a fake that returns the lease data you pass in here. :param leases: Dict of leases (mapping IP addresses to MACs). :param age: Number of seconds since last modification to leases file. :return: Path/name of temporary file. 
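
        Illustrative use (the addresses are made up):

            leases_file = self.fake_leases_file(
                {'192.168.1.5': 'aa:bb:cc:dd:ee:ff'}, age=30)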
""" if leases is None: leases = {} leases = leases.copy() leases_file = self.make_file() if age is not None: age_file(leases_file, age) timestamp = get_write_time(leases_file) self.redirect_parser(leases_file) self.patch( leases_module, 'parse_leases_file', lambda: (timestamp, leases)) return leases_file def write_leases_file(self, contents): """Create a leases file, and cause the parser to read from it. This patches out the leases parser to read from the new file. :param contents: Text contents for the leases file. :return: Path of temporary leases file. """ leases_file = self.make_file( contents=dedent(contents).encode('utf-8')) self.redirect_parser(leases_file) return leases_file def set_lease_state(self, time=None, leases=None): """Set the recorded state of DHCP leases. This is similar to record_lease_state, except it will patch() the state so that it gets reset at the end of the test. Using this will prevent recorded lease state from leaking into other tests. """ cache[LEASES_TIME_CACHE_KEY] = time cache[LEASES_CACHE_KEY] = leases def test_record_lease_state_sets_leases_and_timestamp(self): time = datetime.utcnow() leases = factory.make_random_leases() self.set_lease_state() record_lease_state(time, leases) self.assertEqual( (time, leases), ( cache.get(LEASES_TIME_CACHE_KEY), cache.get(LEASES_CACHE_KEY), )) def test_check_lease_changes_returns_tuple_if_no_state_cached(self): self.set_lease_state() leases = factory.make_random_leases() leases_file = self.fake_leases_file(leases) self.assertEqual( (get_write_time(leases_file), leases), check_lease_changes()) def test_check_lease_changes_returns_tuple_if_lease_changed(self): ip = factory.make_ipv4_address() leases = {ip: factory.make_mac_address()} self.set_lease_state( datetime.utcnow() - timedelta(seconds=10), leases.copy()) leases[ip] = factory.make_mac_address() leases_file = self.fake_leases_file(leases) self.assertEqual( (get_write_time(leases_file), leases), check_lease_changes()) def redirect_parser_to_non_existing_file(self): file_name = self.make_file() os.remove(file_name) self.redirect_parser(file_name) def test_parse_leases_file_returns_None_if_file_does_not_exist(self): self.redirect_parser_to_non_existing_file() self.assertIsNone(parse_leases_file()) def test_get_leases_timestamp_returns_None_if_file_does_not_exist(self): self.redirect_parser_to_non_existing_file() self.assertIsNone(parse_leases_file()) def test_parse_leases_file_errors_if_unexpected_exception(self): exception = IOError(errno.EBUSY, factory.make_string()) self.patch(leases_module, 'open', Mock(side_effect=exception)) self.assertRaises(IOError, parse_leases_file) def test_get_leases_timestamp_errors_if_unexpected_exception(self): exception = OSError(errno.EBUSY, factory.make_string()) self.patch(leases_module, 'open', Mock(side_effect=exception)) self.assertRaises(OSError, parse_leases_file) def test_check_lease_changes_returns_tuple_if_lease_added(self): leases = factory.make_random_leases() self.set_lease_state( datetime.utcnow() - timedelta(seconds=10), leases.copy()) leases[factory.make_ipv4_address()] = factory.make_mac_address() leases_file = self.fake_leases_file(leases) self.assertEqual( (get_write_time(leases_file), leases), check_lease_changes()) def test_check_lease_returns_None_if_lease_file_does_not_exist(self): self.redirect_parser_to_non_existing_file() self.assertIsNone(check_lease_changes()) def test_check_lease_changes_returns_tuple_if_leases_dropped(self): self.set_lease_state( datetime.utcnow() - timedelta(seconds=10), 
factory.make_random_leases()) leases_file = self.fake_leases_file({}) self.assertEqual( (get_write_time(leases_file), {}), check_lease_changes()) def test_check_lease_changes_returns_None_if_no_change(self): leases = factory.make_random_leases() leases_file = self.fake_leases_file(leases) self.set_lease_state(get_write_time(leases_file), leases.copy()) self.assertIsNone(check_lease_changes()) def test_check_lease_changes_ignores_irrelevant_changes(self): leases = factory.make_random_leases() self.fake_leases_file(leases, age=10) self.set_lease_state(datetime.utcnow(), leases.copy()) self.assertIsNone(check_lease_changes()) def test_parse_leases_file_parses_leases(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases_file = self.write_leases_file("""\ lease %(ip)s { starts 5 2010/01/01 00:00:01; ends never; tstp 6 2010/01/02 05:00:00; tsfp 6 2010/01/02 05:00:00; binding state free; hardware ethernet %(mac)s; } """ % params) self.assertEqual( (get_write_time(leases_file), [(params['ip'], params['mac'])]), parse_leases_file()) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/tests/test_leases_parser.py0000644000000000000000000006524013056115004025747 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the DHCP leases parser.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import namedtuple from datetime import datetime from textwrap import dedent from maastesting.factory import factory from maastesting.testcase import MAASTestCase from netaddr import IPAddress from provisioningserver.dhcp import ( leases_parser, leases_parser_fast, ) from provisioningserver.dhcp.leases_parser import ( combine_entries, gather_hosts, gather_leases, get_expiry_date, get_host_ip, get_host_mac, has_expired, is_host, is_lease, lease_parser, ) class Lease(object): def __init__( self, lease_or_host, host, fixed_address, hardware, starts, ends, binding_state=None): self.lease_or_host = lease_or_host self.host = host self.hardware = hardware self.starts = starts setattr(self, 'fixed-address', fixed_address) self.ends = ends if binding_state is not None: setattr(self, 'binding state', binding_state) def __iter__(self): return iter(self.__dict__.keys()) def fake_parsed_lease(ip=None, mac=None, starts=None, ends=None, entry_type='lease', state=None): """Fake a lease as produced by the parser.""" if ip is None: ip = factory.make_ipv4_address() if mac is None: mac = factory.make_mac_address() Hardware = namedtuple('Hardware', ['mac']) lease = Lease( entry_type, ip, ip, Hardware(mac), starts, ends, binding_state=state) return lease def fake_parsed_host(ip=None, mac=None): """Fake a host declaration as produced by the parser.""" return fake_parsed_lease(mac=mac, ip=ip, entry_type='host') class Rubout(object): def __init__(self, lease_or_host, host): self.lease_or_host = lease_or_host self.host = host self.deleted = 'true' def __iter__(self): return iter(self.__dict__.keys()) def get_fake_parsed_rubouts(ip=None, mac=None): """Returns 2 rubouts, one for the given IP and one for the given MAC. Rubouts now come in pairs: one with the IP as the key to cope with old-style host map declarations and one with the MAC as the key to deal with recent host map declarations. 
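
    Illustrative call (hypothetical values): get_fake_parsed_rubouts(
    ip='10.0.0.2', mac='aa:bb:cc:dd:ee:ff') returns one rubout keyed on
    the IP and one keyed on the MAC, in that order.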
""" if ip is None: ip = factory.make_ipv4_address() if mac is None: mac = factory.make_mac_address() return [ fake_parsed_rubout(key=ip), fake_parsed_rubout(key=mac), ] def fake_parsed_rubout(key=None): """Fake a "rubout" host declaration.""" if key is None: key = factory.make_ipv4_address() rubout = Rubout('host', key) return rubout class TestLeasesParsers(MAASTestCase): scenarios = ( ("original", dict(parse=leases_parser.parse_leases)), ("fast", dict(parse=leases_parser_fast.parse_leases)), ) sample_lease_entry = dedent("""\ lease %(ip)s { starts 5 2010/01/01 00:00:01; ends never; tstp 6 2010/01/02 05:00:00; tsfp 6 2010/01/02 05:00:00; atsfp 6 2010/01/02 05:00:00; cltt 1 2010/01/02 05:00:00; binding state free; next binding state free; rewind binding state free; hardware ethernet %(mac)s; uid "\001\000\234\002\242\2020"; set vendorclass = "PXEClient:Arch:00000:UNDI:002001"; client-hostname foo; abandoned; option agent.circuit-id thing; option agent.remote-id thing; ddns-text foo; ddns-fwd-name foo; ddns-client-fqdn foo; ddns-rev-name foo; vendor-class-identifier foo; bootp; reserved; } """) sample_host_entry = dedent("""\ host %(mac)s { dynamic; hardware ethernet %(mac)s; fixed-address%(six)s %(ip)s; } """) def make_host_entry(self, ip, mac=None): """Create a host entry with the given IP and MAC addresses. The host entry will be in IPv4 or IPv6 depending on `ip`. """ if mac is None: mac = factory.make_mac_address() params = { 'ip': unicode(ip), 'mac': mac, } # The "six" parameter is suffixed to the fixed-address keyword: # empty string for IPv4, or "6" for IPv6. if IPAddress(ip).version == 6: params['six'] = '6' else: params['six'] = '' return self.sample_host_entry % params def make_lease_entry(self, ip, mac): """Create a lease entry mapping an IP address to a MAC address.""" params = { 'ip': unicode(ip), 'mac': mac, } return self.sample_lease_entry % params def test_parse_leases_copes_with_empty_file(self): self.assertEqual([], self.parse("")) def test_parse_leases_parses_IPv4_lease(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() leases = self.parse(self.make_lease_entry(ip, mac)) self.assertEqual([(ip, mac)], leases) def test_parse_leases_parses_IPv6_lease(self): ip = unicode(factory.make_ipv6_address()) mac = factory.make_mac_address() leases = self.parse(self.make_lease_entry(ip, mac)) self.assertEqual([(ip, mac)], leases) def test_parse_leases_parses_IPv4_host(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() lease = self.make_host_entry(ip, mac) leases = self.parse(lease) self.assertEqual([(ip, mac)], leases) def test_parse_leases_parses_IPv6_host(self): ip = factory.make_ipv6_address() mac = factory.make_mac_address() leases = self.parse(self.make_host_entry(ip, mac)) self.assertEqual([(unicode(ip), mac)], leases) def test_parse_leases_parses_full_sized_IPv6_address(self): ip = 'fc00:0001:0000:0000:0000:0000:0000:0000' leases = self.parse(self.make_host_entry(ip)) self.assertEqual([ip], [ipx for ipx, mac in leases]) def test_parse_leases_copes_with_misleading_values(self): params = { 'ip1': factory.make_ipv4_address(), 'mac1': factory.make_mac_address(), 'ip2': factory.make_ipv4_address(), 'mac2': factory.make_mac_address(), } leases = self.parse(dedent("""\ host %(mac1)s { dynamic; ### NOTE the following value has a closing brace, and ### also looks like a host record. uid "foo}host 12.34.56.78 { }"; hardware ethernet %(mac1)s; fixed-address %(ip1)s; } ### NOTE the extra indent on the line below. 
host %(mac2)s { dynamic; hardware ethernet %(mac2)s; fixed-address %(ip2)s; } """ % params)) self.assertEqual( [(params['ip1'], params['mac1']), (params['ip2'], params['mac2'])], leases) def test_parse_leases_parses_host_rubout(self): leases = self.parse(dedent("""\ host %s { deleted; } """ % factory.make_mac_address())) self.assertEqual([], leases) def test_parse_leases_ignores_incomplete_lease_at_end(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), 'incomplete_ip': factory.make_ipv4_address(), } leases = self.parse(dedent("""\ lease %(ip)s { hardware ethernet %(mac)s; } lease %(incomplete_ip)s { starts 5 2010/01/01 00:00:05; """ % params)) self.assertEqual([(params['ip'], params['mac'])], leases) def test_parse_leases_ignores_comments(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ # Top comment (ignored). lease %(ip)s { # End-of-line comment (ignored). # Comment in lease block (ignored). hardware ethernet %(mac)s; # EOL comment in lease (ignored). } # Comment right after closing brace (ignored). # End comment (ignored). """ % params)) self.assertEqual([(params['ip'], params['mac'])], leases) def test_parse_leases_ignores_expired_leases(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ lease %(ip)s { hardware ethernet %(mac)s; ends 1 2001/01/01 00:00:00; } """ % params)) self.assertEqual([], leases) def test_parse_leases_treats_never_as_eternity(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ lease %(ip)s { hardware ethernet %(mac)s; ends never; } """ % params)) self.assertEqual([(params['ip'], params['mac'])], leases) def test_parse_leases_treats_missing_end_date_as_eternity(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ lease %(ip)s { hardware ethernet %(mac)s; } """ % params)) self.assertEqual([(params['ip'], params['mac'])], leases) def test_parse_leases_takes_current_lease_for_address(self): # Note: a previous version of this test case checked that two leases # can exist at the same time for a single IP address. This has been # removed in order to prevent stale entries from polluting MAAS's # discoveries with old information. To support bonds, static host # mappings will still allow more than one MAC address to be assigned to # the same IP address. 
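        # Concretely: of the two lease stanzas below for the same IP,
        # only the later, still-active one should survive parsing.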
params = { 'ip': factory.make_ipv4_address(), 'old_owner': factory.make_mac_address(), 'new_owner': factory.make_mac_address(), } leases = self.parse(dedent("""\ lease %(ip)s { hardware ethernet %(old_owner)s; ends 0 1990/01/01 00:00:00; } lease %(ip)s { hardware ethernet %(new_owner)s; } """ % params)) self.assertEqual( [(params['ip'], params['new_owner'])], leases) def test_multiple_host_declarations_are_reported(self): params = { 'ip': factory.make_ipv4_address(), 'bondmac1': factory.make_mac_address(), 'bondmac2': factory.make_mac_address(), } leases = self.parse(dedent("""\ host %(bondmac1)s { hardware ethernet %(bondmac1)s; fixed-address %(ip)s; } host %(bondmac2)s { hardware ethernet %(bondmac2)s; fixed-address %(ip)s; } """ % params)) self.assertEqual([ (params['ip'], params['bondmac1']), (params['ip'], params['bondmac2']) ], leases) def test_parse_leases_recognizes_host_deleted_statement_as_rubout(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ host %(ip)s { dynamic; hardware ethernet %(mac)s; fixed-address %(ip)s; deleted; } """ % params)) self.assertEqual([], leases) def test_host_declaration_is_like_an_unexpired_lease(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } leases = self.parse(dedent("""\ host %(ip)s { hardware ethernet %(mac)s; fixed-address %(ip)s; } """ % params)) self.assertEqual([(params['ip'], params['mac'])], leases) class TestLeasesParserFast(MAASTestCase): def test_handles_dash_separator_for_host_mapping(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() mac_dash = mac.replace(":", "-") leases = leases_parser_fast.parse_leases(dedent("""\ host %s { hardware ethernet %s; fixed-address %s; } """ % (mac_dash, mac, ip))) self.assertEqual([(ip, mac)], leases) def test_multiple_host_declarations_are_reported(self): params = { 'ip': factory.make_ipv4_address(), 'bondmac1': factory.make_mac_address(), 'bondmac2': factory.make_mac_address(), } leases = leases_parser_fast.parse_leases(dedent("""\ host %(bondmac1)s { hardware ethernet %(bondmac1)s; fixed-address %(ip)s; } host %(bondmac2)s { hardware ethernet %(bondmac2)s; fixed-address %(ip)s; } """ % params)) self.assertEqual([ (params['ip'], params['bondmac1']), (params['ip'], params['bondmac2']) ], leases) def test_expired_lease_does_not_shadow_earlier_host_stanza(self): params = { 'ip': factory.make_ipv4_address(), 'mac1': factory.make_mac_address(), 'mac2': factory.make_mac_address(), } leases = leases_parser_fast.parse_leases(dedent("""\ host %(mac1)s { dynamic; hardware ethernet %(mac1)s; fixed-address %(ip)s; } lease %(ip)s { starts 5 2010/01/01 00:00:01; ends 1 2010/01/01 00:00:02; hardware ethernet %(mac2)s; } """ % params)) # The lease has expired so it doesn't shadow the host stanza, # and so the MAC returned is from the host stanza. self.assertEqual([(params["ip"], params["mac1"])], leases) def test_active_lease_shadows_earlier_host_stanza(self): params = { 'ip': factory.make_ipv4_address(), 'mac1': factory.make_mac_address(), 'mac2': factory.make_mac_address(), } leases = leases_parser_fast.parse_leases(dedent("""\ host %(mac1)s { dynamic; hardware ethernet %(mac1)s; fixed-address %(ip)s; } lease %(ip)s { starts 5 2010/01/01 00:00:01; hardware ethernet %(mac2)s; } """ % params)) # The lease hasn't expired, so shadows the earlier host stanza. 
# (But since the host mapping has not been removed, it takes precedence # by coming later in the list, since that should better describe the # intent of the MAAS administrator.) self.assertEqual( [(params["ip"], params["mac2"]), (params["ip"], params["mac1"])], leases) def test_host_stanza_replaces_earlier_active_lease(self): params = { 'ip': factory.make_ipv4_address(), 'mac1': factory.make_mac_address(), 'mac2': factory.make_mac_address(), } leases = leases_parser_fast.parse_leases(dedent("""\ lease %(ip)s { starts 5 2010/01/01 00:00:01; hardware ethernet %(mac2)s; } host %(ip)s { dynamic; hardware ethernet %(mac1)s; fixed-address %(ip)s; } """ % params)) # The lease hasn't expired, but the host entry is later. So the host # mapping takes precedence. self.assertEqual( [(params["ip"], params["mac1"])], leases) def test_released_lease_with_no_end_time_is_released(self): params = { 'ip': factory.make_ipv4_address(), 'mac1': factory.make_mac_address(), 'mac2': factory.make_mac_address(), } leases = leases_parser_fast.parse_leases(dedent("""\ lease %(ip)s { starts 5 2010/01/01 00:00:01; hardware ethernet %(mac2)s; } lease %(ip)s { dynamic; binding state free; starts 0 1990/01/01 00:00:00; } """ % params)) # The lease was added and then removed, so we expect a no-op. self.assertEqual([], leases) class TestLeasesParserFunctions(MAASTestCase): def test_get_expiry_date_parses_expiry_date(self): lease = fake_parsed_lease(ends='0 2011/01/02 03:04:05') self.assertEqual( datetime( year=2011, month=01, day=02, hour=03, minute=04, second=05), get_expiry_date(lease)) def test_get_expiry_date_uses_start_date_for_free_lease(self): lease = fake_parsed_lease(starts='0 2011/01/02 03:04:05', state='free') self.assertEqual( datetime( year=2011, month=01, day=02, hour=03, minute=04, second=05), get_expiry_date(lease)) def test_get_expiry_date_returns_None_for_never(self): self.assertIsNone( get_expiry_date(fake_parsed_lease(ends='never'))) def test_get_expiry_date_returns_None_if_no_expiry_given(self): self.assertIsNone(get_expiry_date(fake_parsed_lease(ends=None))) def test_has_expired_returns_True_for_deleted_lease(self): now = datetime.utcnow() # Make a lease with no expiry, and mark it deleted. 
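        # A deleted entry carries no hardware attribute, just a 'deleted'
        # flag; has_expired() should report it as expired regardless of
        # any dates.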
lease = fake_parsed_lease(ends=None) del lease.hardware lease.deleted = True self.assertTrue(has_expired(lease, now)) def test_has_expired_returns_False_for_eternal_lease(self): now = datetime.utcnow() self.assertFalse(has_expired(fake_parsed_lease(ends=None), now)) def test_has_expired_returns_False_for_future_expiry_date(self): now = datetime.utcnow() later = '1 2035/12/31 23:59:59' self.assertFalse(has_expired(fake_parsed_lease(ends=later), now)) def test_has_expired_returns_True_for_past_expiry_date(self): now = datetime.utcnow() earlier = '1 2001/01/01 00:00:00' self.assertTrue( has_expired(fake_parsed_lease(ends=earlier), now)) def test_gather_leases_finds_current_leases(self): lease = fake_parsed_lease() self.assertEqual( [(getattr(lease, 'fixed-address'), lease.hardware.mac)], gather_leases([lease])) def test_gather_leases_ignores_expired_leases(self): earlier = '1 2001/01/01 00:00:00' lease = fake_parsed_lease(ends=earlier) self.assertEqual([], gather_leases([lease])) def test_gather_leases_combines_expired_and_current_leases(self): earlier = '1 2001/01/01 00:00:00' ip = factory.make_ipv4_address() old_owner = factory.make_mac_address() new_owner = factory.make_mac_address() leases = [ fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier), fake_parsed_lease(ip=ip, mac=new_owner), ] self.assertEqual([(ip, new_owner)], gather_leases(leases)) def test_ordering_is_important_to_gather_leases(self): earlier = '1 2001/01/01 00:00:00' ip = factory.make_ipv4_address() old_owner = factory.make_mac_address() new_owner = factory.make_mac_address() leases = [ fake_parsed_lease(ip=ip, mac=new_owner), fake_parsed_lease(ip=ip, mac=old_owner, ends=earlier), ] self.assertEqual([], gather_leases(leases)) def test_gather_leases_ignores_host_declarations(self): self.assertEqual([], gather_leases([fake_parsed_host()])) def test_gather_hosts_finds_hosts(self): host = fake_parsed_host() self.assertEqual( [(getattr(host, 'fixed-address'), host.hardware.mac)], gather_hosts([host])) def test_gather_hosts_ignores_unaccompanied_rubouts(self): self.assertEqual([], gather_hosts([fake_parsed_rubout()])) def test_gather_hosts_ignores_rubbed_out_entries(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() hosts = [ fake_parsed_host(ip=ip, mac=mac) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) self.assertEqual([], gather_hosts(hosts)) def test_gather_hosts_follows_reassigned_host(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() new_owner = factory.make_mac_address() hosts = [ fake_parsed_host(ip=ip, mac=mac) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) + [ fake_parsed_host(ip=ip, mac=new_owner) ] self.assertEqual([(ip, new_owner)], gather_hosts(hosts)) def test_is_lease_and_is_host_recognize_lease(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } [parsed_lease] = lease_parser.searchString(dedent("""\ lease %(ip)s { hardware ethernet %(mac)s; } """ % params)) self.assertEqual( (True, False), (is_lease(parsed_lease), is_host(parsed_lease))) def test_is_lease_and_is_host_recognize_host(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } [parsed_host] = lease_parser.searchString(dedent("""\ host %(ip)s { hardware ethernet %(mac)s; } """ % params)) self.assertEqual( (False, True), (is_lease(parsed_host), is_host(parsed_host))) def test_get_host_mac_returns_None_for_host(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } [parsed_host] = 
lease_parser.searchString(dedent("""\ host %(ip)s { hardware ethernet %(mac)s; } """ % params)) self.assertEqual(params['mac'], get_host_mac(parsed_host)) def test_get_host_mac_returns_mac_for_rubout(self): mac = factory.make_mac_address() [parsed_host] = lease_parser.searchString(dedent("""\ host %s { deleted; } """ % mac)) self.assertEqual(mac, get_host_mac(parsed_host)) def test_get_host_ip_returns_None_for_rubout(self): params = { 'ip': factory.make_ipv4_address(), 'mac': factory.make_mac_address(), } [parsed_host] = lease_parser.searchString(dedent("""\ host %(mac)s { deleted; } """ % params)) self.assertIsNone(get_host_ip(parsed_host)) def test_combine_entries_accepts_host_followed_by_expired_lease(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() earlier = '1 2001/01/01 00:00:00' entries = [ fake_parsed_host(ip=ip, mac=mac), fake_parsed_lease(ip=ip, ends=earlier), ] self.assertEqual([(ip, mac)], combine_entries(entries)) def test_combine_entries_accepts_expired_lease_followed_by_host(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() earlier = '1 2001/01/01 00:00:00' entries = [ fake_parsed_lease(ip=ip, ends=earlier), fake_parsed_host(ip=ip, mac=mac), ] self.assertEqual([(ip, mac)], combine_entries(entries)) def test_combine_entries_accepts_old_rubout_followed_by_lease(self): ip = factory.make_ipv4_address() old_mac = factory.make_mac_address() mac = factory.make_mac_address() entries = [ fake_parsed_host(ip=ip, mac=old_mac), # Create old-style individual IP-based rubout. fake_parsed_rubout(key=ip), fake_parsed_lease(ip=ip, mac=mac), ] self.assertEqual([(ip, mac)], combine_entries(entries)) def test_combine_entries_accepts_rubout_followed_by_current_lease(self): ip = factory.make_ipv4_address() old_mac = factory.make_mac_address() mac = factory.make_mac_address() entries = [ fake_parsed_host(ip=ip, mac=old_mac) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) + [ fake_parsed_lease(ip=ip, mac=mac), ] self.assertEqual([(ip, mac)], combine_entries(entries)) def test_combine_entries_ignores_rubout_followed_by_expired_lease(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() earlier = '1 2001/01/01 00:00:00' entries = [ fake_parsed_host(ip=ip, mac=mac) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) + [ fake_parsed_lease(ip=ip, mac=mac, ends=earlier), ] self.assertEqual([], combine_entries(entries)) def test_combine_entries_ignores_expired_lease_followed_by_rubout(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() earlier = '1 2001/01/01 00:00:00' entries = [ fake_parsed_host(ip=ip, mac=mac), fake_parsed_lease(ip=ip, mac=mac, ends=earlier) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) self.assertEqual([], combine_entries(entries)) def test_combine_entries_accepts_valid_lease_followed_by_rubout(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() entries = [ fake_parsed_host(ip=ip, mac=mac), fake_parsed_lease(ip=ip, mac=mac), ] + get_fake_parsed_rubouts(ip=ip, mac=mac) self.assertEqual([(ip, mac)], combine_entries(entries)) def test_combine_entries_accepts_reassigned_host(self): ip = factory.make_ipv4_address() mac = factory.make_mac_address() old_mac = factory.make_mac_address() entries = [ fake_parsed_host(ip=ip, mac=old_mac) ] + get_fake_parsed_rubouts(ip=ip, mac=mac) + [ fake_parsed_host(ip=ip, mac=mac), ] self.assertEqual([(ip, mac)], combine_entries(entries)) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/tests/test_omshell.py0000644000000000000000000003730213056115004024560 0ustar 
00000000000000# Copyright 2012-2015 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for the omshell.py file."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from itertools import product
import os
import subprocess
import tempfile
from textwrap import dedent

from maastesting.factory import factory
from maastesting.fakemethod import FakeMethod
from maastesting.fixtures import TempDirectory
from maastesting.matchers import MockCalledOnceWith
from maastesting.testcase import MAASTestCase
from mock import (
    ANY,
    Mock,
    )
from provisioningserver.dhcp import omshell
from provisioningserver.dhcp.omshell import (
    call_dnssec_keygen,
    generate_omapi_key,
    Omshell,
    )
from provisioningserver.utils.shell import ExternalProcessError
from testtools.matchers import (
    EndsWith,
    MatchesStructure,
    )


class TestOmshell(MAASTestCase):

    def test_initialisation(self):
        server_address = factory.make_string()
        shared_key = factory.make_string()
        shell = Omshell(server_address, shared_key)
        self.assertThat(
            shell, MatchesStructure.byEquality(
                server_address=server_address,
                shared_key=shared_key))

    def test_try_connection_calls_omshell_correctly(self):
        server_address = factory.make_string()
        shell = Omshell(server_address, "")

        # Instead of calling a real omshell, we'll just record the
        # parameters passed to Popen.
        recorder = FakeMethod(result=(0, "obj: <null>"))
        shell._run = recorder

        shell.try_connection()

        expected_script = dedent("""\
            server {server}
            connect
            """)
        expected_script = expected_script.format(server=server_address)

        # Check that the 'stdin' arg contains the correct set of
        # commands.
        self.assertEqual(
            [1, (expected_script,)],
            [recorder.call_count, recorder.extract_args()[0]])

    def test_try_connection_returns_True(self):
        server_address = factory.make_string()
        shell = Omshell(server_address, "")

        # Instead of calling a real omshell, we'll just record the
        # parameters passed to Popen.
        recorder = FakeMethod(result=(0, "obj: <null>"))
        shell._run = recorder

        self.assertTrue(shell.try_connection())

    def test_try_connection_returns_False(self):
        server_address = factory.make_string()
        shell = Omshell(server_address, "")

        # Instead of calling a real omshell, we'll just record the
        # parameters passed to Popen.
        recorder = FakeMethod(result=(0, factory.make_string()))
        shell._run = recorder

        self.assertFalse(shell.try_connection())

    def test_create_calls_omshell_correctly(self):
        server_address = factory.make_string()
        shared_key = factory.make_string()
        ip_address = factory.make_ipv4_address()
        mac_address = factory.make_mac_address()
        shell = Omshell(server_address, shared_key)

        # Instead of calling a real omshell, we'll just record the
        # parameters passed to Popen.
        recorder = FakeMethod(result=(0, "hardware-type"))
        shell._run = recorder

        shell.create(ip_address, mac_address)

        expected_script = dedent("""\
            server {server}
            key omapi_key {key}
            connect
            new host
            set ip-address = {ip}
            set hardware-address = {mac}
            set hardware-type = 1
            set name = "{name}"
            create
            """)
        expected_script = expected_script.format(
            server=server_address, key=shared_key, ip=ip_address,
            mac=mac_address, name=mac_address.replace(':', '-'))

        # Check that the 'stdin' arg contains the correct set of
        # commands.
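        # (FakeMethod records the positional arguments of each call;
        # extract_args() returns them as a list of tuples, hence the
        # (expected_script,) singleton below.)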
        self.assertEqual(
            [1, (expected_script,)],
            [recorder.call_count, recorder.extract_args()[0]])

    def test_create_raises_when_omshell_fails(self):
        # If the call to omshell doesn't result in output containing the
        # magic string 'hardware-type' it means the set of commands
        # failed.
        server_address = factory.make_string()
        shared_key = factory.make_string()
        ip_address = factory.make_ipv4_address()
        mac_address = factory.make_mac_address()
        shell = Omshell(server_address, shared_key)

        # Fake a call that results in a failure with random output.
        random_output = factory.make_string()
        recorder = FakeMethod(result=(0, random_output))
        shell._run = recorder

        exc = self.assertRaises(
            ExternalProcessError, shell.create, ip_address, mac_address)
        self.assertEqual(random_output, exc.output)

    def test_create_succeeds_when_host_map_already_exists(self):
        # To omshell, creating the same host map twice is an error.  But
        # Omshell.create swallows the error and makes it look like
        # success.
        params = {
            'ip': factory.make_ipv4_address(),
            'mac': factory.make_mac_address(),
            'hostname': factory.make_name('hostname')
        }
        shell = Omshell(factory.make_name('server'), factory.make_name('key'))

        # This is the kind of error output we get if a host map has
        # already been created.
        error_output = dedent("""\
            obj: host
            ip-address = %(ip)s
            hardware-address = %(mac)s
            name = "%(hostname)s"
            > can't open object: I/O error
            obj: host
            ip-address = %(ip)s
            hardware-address = %(mac)s
            name = "%(hostname)s"
            """) % params

        shell._run = Mock(return_value=(0, error_output))
        shell.create(params['ip'], params['mac'])
        # The test is that we get here without error.
        pass

    def test_remove_calls_omshell_correctly(self):
        server_address = factory.make_string()
        shared_key = factory.make_string()
        ip_address = factory.make_ipv4_address()
        shell = Omshell(server_address, shared_key)

        # Instead of calling a real omshell, we'll just record the
        # parameters passed to Popen.
        recorder = FakeMethod(result=(0, "thing1\nthing2\nobj: <null>"))
        shell._run = recorder

        shell.remove(ip_address)

        expected_script = dedent("""\
            server {server}
            key omapi_key {key}
            connect
            new host
            set name = "{ip}"
            open
            remove
            """)
        expected_script = expected_script.format(
            server=server_address, key=shared_key, ip=ip_address)

        # Check that the 'stdin' arg contains the correct set of
        # commands.
        self.assertEqual([(expected_script,)], recorder.extract_args())

    def test_remove_raises_when_omshell_fails(self):
        # If the call to omshell doesn't result in output ending in the
        # text 'obj: <null>' we can be fairly sure this operation
        # failed.
        server_address = factory.make_string()
        shared_key = factory.make_string()
        ip_address = factory.make_ipv4_address()
        shell = Omshell(server_address, shared_key)

        # Fake a call that results in a failure with random output.
        random_output = factory.make_string()
        recorder = FakeMethod(result=(0, random_output))
        shell._run = recorder

        exc = self.assertRaises(
            subprocess.CalledProcessError, shell.remove, ip_address)
        self.assertEqual(random_output, exc.output)

    def test_remove_works_when_extraneous_blank_last_lines(self):
        # Sometimes omshell puts blank lines after the 'obj: <null>' so
        # we need to test that the code still works if that's the case.
        server_address = factory.make_string()
        shared_key = factory.make_string()
        ip_address = factory.make_ipv4_address()
        shell = Omshell(server_address, shared_key)

        # Fake a call that results in something with our special output.
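        # remove() strips '\n', ' ' and '>' from both ends of the output
        # before examining the last line, so the noise surrounding
        # 'obj: <null>' in this fake output is ignored.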
output = "\n> obj: \n\n" self.patch(shell, '_run').return_value = (0, output) self.assertIsNone(shell.remove(ip_address)) def test_remove_works_when_extraneous_gt_char_present(self): # Sometimes omshell puts a leading '>' character in responses. # We need to test that the code still works if that's the case. server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) # Fake a call that results in a something with our special output. output = "\n>obj: \n>\n" self.patch(shell, '_run').return_value = (0, output) self.assertIsNone(shell.remove(ip_address)) def test_remove_works_when_object_already_removed(self): server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) output = "obj: \nobj: host\ncan't open object: not found\n" self.patch(shell, '_run').return_value = (0, output) self.assertIsNone(shell.remove(ip_address)) class Test_Omshell_nullify_lease(MAASTestCase): """Tests for Omshell.nullify_lease""" def test__calls_omshell_correctly(self): server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) # Instead of calling a real omshell, we'll just record the # parameters passed to Popen. run = self.patch(shell, '_run') run.return_value = (0, '\nends = 00:00:00:00') expected_script = dedent("""\ server {server} key omapi_key {key} connect new lease set ip-address = {ip} open set ends = 00:00:00:00 update """) expected_script = expected_script.format( server=server_address, key=shared_key, ip=ip_address) shell.nullify_lease(ip_address) self.assertThat(run, MockCalledOnceWith(expected_script)) def test__considers_nonexistent_lease_a_success(self): server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) output = ( "obj: \nobj: lease\nobj: lease\n" "can't open object: not found\nobj: lease\n") self.patch(shell, '_run').return_value = (0, output) shell.nullify_lease(ip_address) # No exception. self.assertThat(shell._run, MockCalledOnceWith(ANY)) def test__catches_invalid_error(self): server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) output = "obj: \nobj: lease\ninvalid value." self.patch(shell, '_run').return_value = (0, output) self.assertRaises( ExternalProcessError, shell.nullify_lease, ip_address) def test__catches_failed_update(self): server_address = factory.make_string() shared_key = factory.make_string() ip_address = factory.make_ipv4_address() shell = Omshell(server_address, shared_key) # make "ends" different to what we asked, so the post-run check # should fail. 
output = dedent("""\ obj: obj: lease obj: lease ip-address = 0a:00:00:72 state = 00:00:00:01 subnet = 00:00:00:03 pool = 00:00:00:04 hardware-address = 00:16:3e:06:45:5e hardware-type = 00:00:00:01 ends = 00:00:00:FF starts = "T@v'" tstp = 54:41:1e:e7 tsfp = 00:00:00:00 atsfp = 00:00:00:00 cltt = "T@v'" flags = 00 """) self.patch(shell, '_run').return_value = (0, output) self.assertRaises( ExternalProcessError, shell.nullify_lease, ip_address) class Test_generate_omapi_key(MAASTestCase): """Tests for omshell.generate_omapi_key""" def test_generate_omapi_key_returns_a_key(self): key = generate_omapi_key() # Could test for != None here, but the keys end in == for a 512 # bit length key, so that's a better check that the script was # actually run and produced output. self.assertThat(key, EndsWith("==")) def test_generate_omapi_key_leaves_no_temp_files(self): tmpdir = self.useFixture(TempDirectory()).path # Make mkdtemp() in omshell nest all directories within tmpdir. self.patch(tempfile, 'tempdir', tmpdir) generate_omapi_key() self.assertEqual([], os.listdir(tmpdir)) def test_generate_omapi_key_raises_assertionerror_on_no_output(self): self.patch(omshell, 'call_dnssec_keygen', FakeMethod()) self.assertRaises(AssertionError, generate_omapi_key) def test_generate_omapi_key_raises_assertionerror_on_bad_output(self): def returns_junk(tmpdir): key_name = factory.make_string() factory.make_file(tmpdir, "%s.private" % key_name) return key_name self.patch(omshell, 'call_dnssec_keygen', returns_junk) self.assertRaises(AssertionError, generate_omapi_key) def test_run_repeated_keygen(self): bad_patterns = { "+no", "/no", "no+", "no/", "+NO", "/NO", "NO+", "NO/", } bad_patterns_templates = { "foo%sbar", "one\ntwo\n%s\nthree\n", "%s", } # Test that a known bad key is ignored and we generate a new one # to replace it. bad_keys = { # This key is known to fail with omshell. "YXY5pr+No/8NZeodSd27wWbI8N6kIjMF/nrnFIlPwVLuByJKkQcBRtfDrD" "LLG2U9/ND7/bIlJxEGTUnyipffHQ==", } # Fabricate a range of keys containing the known-bad pattern. bad_keys.update( template % pattern for template, pattern in product( bad_patterns_templates, bad_patterns)) # An iterator that we can exhaust without mutating bad_keys. iter_bad_keys = iter(bad_keys) # Reference to the original parse_key_value_file, before we patch. parse_key_value_file = omshell.parse_key_value_file # Patch parse_key_value_file to return each of the known-bad keys # we've created, followed by reverting to its usual behaviour. def side_effect(*args, **kwargs): try: return {'Key': next(iter_bad_keys)} except StopIteration: return parse_key_value_file(*args, **kwargs) mock = self.patch(omshell, 'parse_key_value_file') mock.side_effect = side_effect # generate_omapi_key() does not return a key known to be bad. self.assertNotIn(generate_omapi_key(), bad_keys) class TestCallDnsSecKeygen(MAASTestCase): """Tests for omshell.call_dnssec_keygen.""" def test_runs_external_script(self): call_and_check = self.patch(omshell, 'call_and_check') target_dir = self.make_dir() path = os.environ.get("PATH", "").split(os.pathsep) path.append("/usr/sbin") call_dnssec_keygen(target_dir) call_and_check.assert_called_once_with( ['dnssec-keygen', '-r', '/dev/urandom', '-a', 'HMAC-MD5', '-b', '512', '-n', 'HOST', '-K', target_dir, '-q', 'omapi_key'], env=ANY) maas-1.9.5+bzr4599.orig/src/provisioningserver/dhcp/tests/test_writer.py0000644000000000000000000001237313056115004024432 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.dhcp.writer`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from argparse import ArgumentParser from io import BytesIO import os from subprocess import ( PIPE, Popen, ) import sys from maastesting import root from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import Mock from provisioningserver.dhcp import writer from provisioningserver.dhcp.testing.config import make_subnet_config from provisioningserver.utils.fs import read_text_file from testtools.matchers import ( ContainsAll, MatchesStructure, ) class TestScript(MAASTestCase): """Test the DHCP configuration writer.""" def make_args(self, network=None): """Create a fake parameter for `run`, based on `network`.""" settings = make_subnet_config(network) args = Mock() args.outfile = None args.omapi_key = factory.make_name('key') args.subnet = settings['subnet'] args.interface = settings['interface'] args.subnet_mask = settings['subnet_mask'] args.broadcast_ip = settings['broadcast_ip'] args.dns_servers = settings['dns_servers'] args.ntp_server = settings['ntp_server'] args.domain_name = settings['domain_name'] args.router_ip = settings['router_ip'] args.ip_range_low = settings['ip_range_low'] args.ip_range_high = settings['ip_range_high'] return args def test_script_executable(self): args = self.make_args() script = [ "%s/bin/maas-provision" % root, "generate-dhcp-config", '--subnet', args.subnet, '--interface', args.interface, '--subnet-mask', args.subnet_mask, '--broadcast-ip', args.broadcast_ip, '--dns-servers', args.dns_servers, '--ntp-server', args.ntp_server, '--domain-name', args.domain_name, '--router-ip', args.router_ip, '--ip-range-low', args.ip_range_low, '--ip-range-high', args.ip_range_high, '--omapi-key', args.omapi_key, ] cmd = Popen( script, stdout=PIPE, env=dict(PYTHONPATH=":".join(sys.path))) output, err = cmd.communicate() self.assertEqual(0, cmd.returncode, err) self.assertThat(output, ContainsAll([ args.subnet, args.subnet_mask, args.broadcast_ip, args.omapi_key, args.dns_servers, args.ntp_server, args.domain_name, args.router_ip, args.ip_range_low, args.ip_range_high, ])) def test_arg_setup(self): test_args = ( '--subnet', 'subnet', '--interface', 'eth0', '--subnet-mask', 'subnet-mask', '--broadcast-ip', 'broadcast-ip', '--dns-servers', 'dns-servers', '--ntp-server', 'ntp-server', '--domain-name', 'domain-name', '--router-ip', 'router-ip', '--ip-range-low', 'ip-range-low', '--ip-range-high', 'ip-range-high', '--omapi-key', 'omapi-key', ) parser = ArgumentParser() writer.add_arguments(parser) args = parser.parse_args(test_args) self.assertThat( args, MatchesStructure.byEquality( subnet='subnet', interface='eth0', subnet_mask='subnet-mask', broadcast_ip='broadcast-ip', dns_servers='dns-servers', ntp_server='ntp-server', domain_name='domain-name', router_ip='router-ip', omapi_key='omapi-key', ip_range_low='ip-range-low', ip_range_high='ip-range-high')) def test_run(self): self.patch(sys, "stdout", BytesIO()) args = self.make_args(factory.make_ipv4_network()) writer.run(args) output = sys.stdout.getvalue() contains_all_params = ContainsAll([ args.subnet, args.interface, args.subnet_mask, args.broadcast_ip, args.omapi_key, args.dns_servers, args.ntp_server, args.domain_name, args.router_ip, args.ip_range_low, args.ip_range_high, ]) self.assertThat(output, 
contains_all_params) def test_run_save_to_file(self): args = self.make_args() args.outfile = os.path.join(self.make_dir(), "outfile.txt") writer.run(args) self.assertThat( read_text_file(args.outfile), ContainsAll([ args.subnet, args.interface, args.subnet_mask, args.broadcast_ip, args.omapi_key, args.dns_servers, args.ntp_server, args.domain_name, args.router_ip, args.ip_range_low, args.ip_range_high, ])) maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/__init__.py0000644000000000000000000000000013056115004022302 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/actions.py0000644000000000000000000001051413056115004022216 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Low-level actions to manage the DNS service, like reloading zones.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "bind_reconfigure", "bind_reload", "bind_reload_zone", "bind_write_configuration", "bind_write_options", "bind_write_zones", ] import collections from subprocess import CalledProcessError from time import sleep from provisioningserver.dns.config import ( DNSConfig, execute_rndc_command, set_up_options_conf, ) from provisioningserver.logger import get_maas_logger from provisioningserver.utils.shell import ExternalProcessError maaslog = get_maas_logger("dns") def bind_reconfigure(): """Ask BIND to reload its configuration and *new* zone files. From rndc(8): Reload the configuration file and load new zones, but do not reload existing zone files even if they have changed. This is faster than a full reload when there is a large number of zones because it avoids the need to examine the modification times of the zones files. """ try: execute_rndc_command(("reconfig",)) except CalledProcessError as exc: maaslog.error("Reloading BIND configuration failed: %s", exc) # Log before upgrade so that the output does not go to maaslog. ExternalProcessError.upgrade(exc) raise def bind_reload(): """Ask BIND to reload its configuration and all zone files. This operation is 'best effort' (with logging) as the server may not be running, and there is often no context for reporting. :return: True if success, False otherwise. """ try: execute_rndc_command(("reload",)) return True except CalledProcessError as exc: maaslog.error("Reloading BIND failed (is it running?): %s", exc) return False def bind_reload_with_retries(attempts=10, interval=2): """Ask BIND to reload its configuration and all zone files. :param attempts: The number of attempts. :param interval: The time in seconds to sleep between each attempt. """ for countdown in xrange(attempts - 1, -1, -1): if bind_reload(): break if countdown == 0: break else: sleep(interval) def bind_reload_zone(zone_name): """Ask BIND to reload the zone file for the given zone. :param zone_name: The name of the zone to reload. :return: True if success, False otherwise. """ try: execute_rndc_command(("reload", zone_name)) return True except CalledProcessError as exc: maaslog.error( "Reloading BIND zone %r failed (is it running?): %s", zone_name, exc) return False def bind_write_configuration(zones, trusted_networks): """Write BIND's configuration. :param zones: Those zones to include in main config. :type zones: Sequence of :py:class:`DNSZoneData`. :param trusted_networks: A sequence of CIDR network specifications that are permitted to use the DNS server as a forwarder. 
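
    A typical call might look like this (the networks shown are
    illustrative):

        bind_write_configuration(zones, ["192.168.0.0/24", "fc00::/7"])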
""" # trusted_networks was formerly specified as a single IP address with # netmask. These assertions are here to prevent code that assumes that # slipping through. assert not isinstance(trusted_networks, (bytes, unicode)) assert isinstance(trusted_networks, collections.Sequence) dns_config = DNSConfig(zones=zones) dns_config.write_config(trusted_networks=trusted_networks) def bind_write_options(upstream_dns, dnssec_validation): """Write BIND options. :param upstream_dns: A sequence of upstream DNS servers. """ # upstream_dns was formerly specified as a single IP address. These # assertions are here to prevent code that assumes that slipping through. assert not isinstance(upstream_dns, (bytes, unicode)) assert isinstance(upstream_dns, collections.Sequence) set_up_options_conf( upstream_dns=upstream_dns, dnssec_validation=dnssec_validation) def bind_write_zones(zones): """Write out DNS zones. :param zones: Those zones to write. :type zones: Sequence of :py:class:`DNSZoneData`. """ for zone in zones: zone.write_config() maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/config.py0000644000000000000000000002463213056115004022031 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """DNS configuration.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'DNSConfig', 'MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME', 'set_up_rndc', 'set_up_options_conf', ] from collections import namedtuple from contextlib import contextmanager from datetime import datetime import errno import os import os.path import re import sys from provisioningserver.utils import locate_config from provisioningserver.utils.fs import atomic_write from provisioningserver.utils.isc import read_isc_file from provisioningserver.utils.shell import call_and_check import tempita NAMED_CONF_OPTIONS = 'named.conf.options' MAAS_NAMED_CONF_NAME = 'named.conf.maas' MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME = 'named.conf.options.inside.maas' MAAS_NAMED_RNDC_CONF_NAME = 'named.conf.rndc.maas' MAAS_RNDC_CONF_NAME = 'rndc.conf.maas' def get_dns_config_dir(): """Location of MAAS' bind configuration files.""" setting = os.getenv( "MAAS_DNS_CONFIG_DIR", locate_config(os.path.pardir, "bind", "maas")) if isinstance(setting, bytes): fsenc = sys.getfilesystemencoding() return setting.decode(fsenc) else: return setting def get_bind_config_dir(): """Location of bind configuration files.""" setting = os.getenv( "MAAS_BIND_CONFIG_DIR", locate_config(os.path.pardir, "bind")) if isinstance(setting, bytes): fsenc = sys.getfilesystemencoding() return setting.decode(fsenc) else: return setting def get_dns_rndc_port(): """RNDC port to be configured by MAAS to communicate with BIND.""" setting = os.getenv("MAAS_DNS_RNDC_PORT", "954") return int(setting) def get_dns_default_controls(): """Include the default RNDC controls (default RNDC key on port 953)?""" setting = os.getenv("MAAS_DNS_DEFAULT_CONTROLS", "1") return (setting == "1") class DNSConfigDirectoryMissing(Exception): """The directory where the config was about to be written is missing.""" class DNSConfigFail(Exception): """Raised if there's a problem with a DNS config.""" SRVRecord = namedtuple('SRVRecord', [ 'service', 'priority', 'weight', 'port', 'target' ]) # Default 'controls' stanza to be included in the Bind configuration, to # enable "remote" administration (well, only locally) for the init scripts, # so that they can control 
the DNS daemon over port 953. # This is in addition to a similar 'controls' stanza that allows MAAS itself # to control the daemon. That stanza is always present. DEFAULT_CONTROLS = """ controls { inet 127.0.0.1 port 953 allow { localhost; }; }; """ def extract_suggested_named_conf(rndc_content): """Extract 'named' configuration from the generated rndc configuration.""" start_marker = ( "# Use with the following in named.conf, adjusting the " "allow list as needed:\n") end_marker = '# End of named.conf' named_start = rndc_content.index(start_marker) + len(start_marker) named_end = rndc_content.index(end_marker) return rndc_content[named_start:named_end] def uncomment_named_conf(named_comment): """Return an uncommented version of the commented-out 'named' config.""" return re.sub('^# ', '', named_comment, flags=re.MULTILINE) def generate_rndc(port=953, key_name='rndc-maas-key', include_default_controls=True): """Use `rndc-confgen` (from bind9utils) to generate a rndc+named configuration. `rndc-confgen` generates the rndc configuration which also contains, in the form of a comment, the 'named' configuration we need. """ # Generate the configuration: # - 256 bits is the recommended size for the key nowadays. # - Use urandom to avoid blocking on the random generator. rndc_content = call_and_check( ['rndc-confgen', '-b', '256', '-r', '/dev/urandom', '-k', key_name, '-p', unicode(port).encode("ascii")]) named_comment = extract_suggested_named_conf(rndc_content) named_conf = uncomment_named_conf(named_comment) # The 'named' configuration contains a 'control' statement to enable # remote management by MAAS. If appropriate, add one to enable remote # management by the init scripts as well. if include_default_controls: named_conf += DEFAULT_CONTROLS # Return a tuple of the two configurations. return rndc_content, named_conf def get_named_rndc_conf_path(): return compose_config_path(MAAS_NAMED_RNDC_CONF_NAME) def get_rndc_conf_path(): return compose_config_path(MAAS_RNDC_CONF_NAME) def set_up_rndc(): """Writes out the two files needed to enable MAAS to use rndc commands: MAAS_RNDC_CONF_NAME and MAAS_NAMED_RNDC_CONF_NAME. """ rndc_content, named_content = generate_rndc( port=get_dns_rndc_port(), include_default_controls=get_dns_default_controls()) target_file = get_rndc_conf_path() with open(target_file, "wb") as f: f.write(rndc_content) target_file = get_named_rndc_conf_path() with open(target_file, "wb") as f: f.write(named_content) def execute_rndc_command(arguments): """Execute a rndc command.""" rndc_conf = get_rndc_conf_path() rndc_cmd = ['rndc', '-c', rndc_conf] rndc_cmd.extend(arguments) call_and_check(rndc_cmd) # Location of DNS templates, relative to the configuration directory. TEMPLATES_DIR = 'templates/dns' def set_up_options_conf(overwrite=True, **kwargs): """Write out the named.conf.options.inside.maas file. This file should be included by the top-level named.conf.options inside its 'options' block. MAAS cannot write the options file itself, so relies on either the DNSFixture in the test suite, or the packaging. Both should set that file up appropriately to include our file. """ template_path = os.path.join( locate_config(TEMPLATES_DIR), "named.conf.options.inside.maas.template") template = tempita.Template.from_filename(template_path) # Make sure "upstream_dns" is set at least to None. It's a special piece # of config and we don't want to require that every call site has to # specify it. If it's not set, the substitution will fail with the default # template that uses this value. 
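    # An illustrative call (argument values are examples, not defaults):
    #   set_up_options_conf(
    #       upstream_dns=["8.8.8.8"], dnssec_validation="no")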
kwargs.setdefault("upstream_dns") kwargs.setdefault("dnssec_validation", "auto") # Parse the options file and make sure MAAS doesn't define any options # that the user has already customized. allow_user_override_options = [ "allow-query", "allow-recursion", "allow-query-cache", ] try: parsed_options = read_isc_file( compose_bind_config_path(NAMED_CONF_OPTIONS)) except IOError: parsed_options = {} options = parsed_options.get('options', {}) for option in allow_user_override_options: kwargs['upstream_' + option.replace('-', '_')] = option in options try: rendered = template.substitute(kwargs) except NameError as error: raise DNSConfigFail(*error.args) target_path = compose_config_path(MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) atomic_write(rendered, target_path, overwrite=overwrite, mode=0644) def compose_config_path(filename): """Return the full path for a DNS config or zone file.""" return os.path.join(get_dns_config_dir(), filename) def compose_bind_config_path(filename): """Return the full path for a DNS config or zone file.""" return os.path.join(get_bind_config_dir(), filename) def render_dns_template(template_name, *parameters): """Generate contents for a DNS configuration or zone file. :param template_name: Name of the template file that should be rendered. It must be in `TEMPLATES_DIR`. :param parameters: One or more dicts of paramaters to be passed to the template. Each adds to (and may overwrite) the previous ones. """ template_path = locate_config(TEMPLATES_DIR, template_name) template = tempita.Template.from_filename(template_path) combined_params = {} for params_dict in parameters: combined_params.update(params_dict) try: return template.substitute(combined_params) except NameError as error: raise DNSConfigFail(*error.args) @contextmanager def report_missing_config_dir(): """Report missing DNS config dir as `DNSConfigDirectoryMissing`. Use this around code that writes a new DNS configuration or zone file. It catches a "no such file or directory" error and raises a more helpful `DNSConfigDirectoryMissing` in its place. """ try: yield except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise DNSConfigDirectoryMissing( "The directory where the DNS config files should be " "written does not exist. Make sure the 'maas-dns' " "package is installed on this region controller.") else: raise class DNSConfig: """A DNS configuration file. Encapsulation of DNS config templates and parameter substitution. """ template_file_name = 'named.conf.template' target_file_name = MAAS_NAMED_CONF_NAME def __init__(self, zones=None): if zones is None: zones = () self.zones = zones def write_config(self, overwrite=True, **kwargs): """Write out this DNS config file. :raises DNSConfigDirectoryMissing: if the DNS configuration directory does not exist. """ trusted_networks = kwargs.pop("trusted_networks", "") context = { 'zones': self.zones, 'DNS_CONFIG_DIR': get_dns_config_dir(), 'named_rndc_conf_path': get_named_rndc_conf_path(), 'trusted_networks': trusted_networks, 'modified': unicode(datetime.today()), } content = render_dns_template(self.template_file_name, kwargs, context) target_path = compose_config_path(self.target_file_name) with report_missing_config_dir(): atomic_write(content, target_path, overwrite=overwrite, mode=0644) @classmethod def get_include_snippet(cls): target_path = compose_config_path(cls.target_file_name) assert '"' not in target_path, ( "DNS config path contains quote: %s." 
% target_path) return 'include "%s";\n' % target_path maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/testing.py0000644000000000000000000000246013056115004022234 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test helpers for DNS.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "patch_dns_config_path", "patch_dns_default_controls", "patch_dns_rndc_port", ] import sys from fixtures import EnvironmentVariable def patch_dns_config_path(testcase, config_dir=None): """Set the DNS config dir to a temporary directory, and return its path.""" fsenc = sys.getfilesystemencoding() if config_dir is None: config_dir = testcase.make_dir() if isinstance(config_dir, unicode): config_dir = config_dir.encode(fsenc) testcase.useFixture( EnvironmentVariable(b"MAAS_DNS_CONFIG_DIR", config_dir)) testcase.useFixture( EnvironmentVariable(b"MAAS_BIND_CONFIG_DIR", config_dir)) return config_dir.decode(fsenc) def patch_dns_rndc_port(testcase, port): testcase.useFixture( EnvironmentVariable(b"MAAS_DNS_RNDC_PORT", b"%d" % port)) def patch_dns_default_controls(testcase, enable): testcase.useFixture( EnvironmentVariable( b"MAAS_DNS_DEFAULT_CONTROLS", b"1" if enable else b"0")) maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/tests/0000755000000000000000000000000013056115004021345 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/zoneconfig.py0000644000000000000000000003765613056115004022737 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Classes for generating BIND zone config files.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'DNSForwardZoneConfig', 'DNSReverseZoneConfig', ] from abc import ABCMeta from datetime import datetime from itertools import chain import math from netaddr import ( IPAddress, IPNetwork, spanning_cidr, ) from netaddr.core import AddrFormatError from provisioningserver.dns.config import ( compose_config_path, render_dns_template, report_missing_config_dir, ) from provisioningserver.utils.fs import incremental_write from provisioningserver.utils.network import ( intersect_iprange, ip_range_within_network, ) def get_fqdn_or_ip_address(target): """Returns the ip address is target is a valid ip address, otherwise returns the target with appended '.' if missing.""" try: return IPAddress(target).format() except AddrFormatError: return target.rstrip('.') + '.' def enumerate_mapping(mapping): """Generate `(hostname, ip)` tuples from `mapping`. :param mapping: A dict mapping host names to lists of IP addresses. """ for hostname, ips in mapping.viewitems(): for ip in ips: yield hostname, ip def get_details_for_ip_range(ip_range): """For a given IPRange, return all subnets, a useable prefix and the reverse DNS suffix calculated from that IP range. :return: A tuple of: All subnets of /24 (or smaller if there is no /24 subnet to be found) in `ip_range`. A prefix made from the first two octets in the range. A RDNS suffix calculated from the first two octets in the range. """ # Calculate a spanning network for the range above. 
There are # 256 /24 networks in a /16, so that's the most /24s we're going # to have to deal with; this matters later on when we iterate # through the /24s within this network. cidr = spanning_cidr(ip_range) subnets = cidr.subnet(max(24, cidr.prefixlen)) # Split the spanning network into /24 subnets, then see if they fall # entirely within the original network range, partially, or not at # all. intersecting_subnets = [] for subnet in subnets: intersect = intersect_iprange(subnet, ip_range) if intersect is None: # The subnet does not fall within the original network. pass else: # The subnet falls partially within the original network, so print # out a $GENERATE expression for a subset of the /24. intersecting_subnets.append(intersect) octet_one = (cidr.value & 0xff000000) >> 24 octet_two = (cidr.value & 0x00ff0000) >> 16 # The first two octets of the network range formatted in the # usual dotted-quad style. We can precalculate the start of any IP # address in the range because we're only ever dealing with /16 # networks and smaller. prefix = "%d.%d" % (octet_one, octet_two) # Similarly, we can calculate what the reverse DNS suffix is going # to look like. rdns_suffix = "%d.%d.in-addr.arpa." % (octet_two, octet_one) return intersecting_subnets, prefix, rdns_suffix class DNSZoneConfigBase: """Base class for zone writers.""" __metaclass__ = ABCMeta template_file_name = 'zone.template' def __init__(self, domain, zone_name, serial=None): """ :param domain: The domain name of the forward zone. :param zone_name: Fully-qualified zone name. :param serial: The serial to use in the zone file. This must increment on each change. """ self.domain = domain self.zone_name = zone_name self.serial = serial self.target_path = compose_config_path('zone.%s' % self.zone_name) def make_parameters(self): """Return a dict of the common template parameters.""" return { 'domain': self.domain, 'serial': self.serial, 'modified': unicode(datetime.today()), } @classmethod def write_zone_file(cls, output_file, *parameters): """Write a zone file based on the zone file template. There is a subtlety with zone files: their filesystem timestamp must increase with every rewrite. Some filesystems (ext3?) only seem to support a resolution of one second, and so this method may set an unexpected modification time in order to maintain that property. """ content = render_dns_template(cls.template_file_name, *parameters) with report_missing_config_dir(): incremental_write(content, output_file, mode=0644) class DNSForwardZoneConfig(DNSZoneConfigBase): """Writes forward zone files. A forward zone config contains two kinds of mappings: "A" records map all possible IP addresses within each of its networks to generated hostnames based on those addresses. "CNAME" records map configured hostnames to the matching generated IP hostnames. An additional "A" record maps the domain to the name server itself. """ def __init__(self, domain, **kwargs): """See `DNSZoneConfigBase.__init__`. :param domain: The domain name of the forward zone. :param serial: The serial to use in the zone file. This must increment on each change. :param dns_ip: The IP address of the DNS server authoritative for this zone. :param mapping: A hostname:ip-addresses mapping for all known hosts in the zone. They will be mapped as A records. :param srv_mapping: Set of SRVRecord mappings. 
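
        A construction sketch (all values illustrative)::

            DNSForwardZoneConfig(
                "example.com", serial=1, dns_ip="192.168.0.5",
                mapping={"node1": ["192.168.0.10"]})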
""" self._dns_ip = kwargs.pop('dns_ip', None) self._mapping = kwargs.pop('mapping', {}) self._network = kwargs.pop('network', None) self._dynamic_ranges = kwargs.pop('dynamic_ranges', []) self._srv_mapping = kwargs.pop('srv_mapping', []) super(DNSForwardZoneConfig, self).__init__( domain, zone_name=domain, **kwargs) @classmethod def get_mapping(cls, mapping, domain, dns_ip): """Return a generator mapping hostnames to IP addresses. This includes the record for the name server's IP. :param mapping: A dict mapping host names to lists of IP addresses. :param domain: Zone's domain name. :param dns_ip: IP address for the zone's authoritative DNS server. :return: A generator of tuples: (host name, IP address). """ return chain( [('%s.' % domain, dns_ip)], enumerate_mapping(mapping)) @classmethod def get_A_mapping(cls, mapping, domain, dns_ip): """Return a generator mapping hostnames to IP addresses for all the IPv4 addresses in `mapping`. The returned mapping is meant to be used to generate A records in the forward zone file. This includes the A record for the name server's IP. :param mapping: A dict mapping host names to lists of IP addresses. :param domain: Zone's domain name. :param dns_ip: IP address for the zone's authoritative DNS server. :return: A generator of tuples: (host name, IP address). """ mapping = cls.get_mapping(mapping, domain, dns_ip) return (item for item in mapping if IPAddress(item[1]).version == 4) @classmethod def get_AAAA_mapping(cls, mapping, domain, dns_ip): """Return a generator mapping hostnames to IP addresses for all the IPv6 addresses in `mapping`. The returned mapping is meant to be used to generate AAAA records in the forward zone file. :param mapping: A dict mapping host names to lists of IP addresses. :param domain: Zone's domain name. :param dns_ip: IP address for the zone's authoritative DNS server. :return: A generator of tuples: (host name, IP address). """ mapping = cls.get_mapping(mapping, domain, dns_ip) return (item for item in mapping if IPAddress(item[1]).version == 6) @classmethod def get_srv_mapping(cls, mappings): """Return a generator mapping srv entries to hostnames. :param mappings: Set of SRVRecord. :return: A generator of tuples: (service, 'priority weight port target'). """ for record in mappings: target = get_fqdn_or_ip_address(record.target) item = '%s %s %s %s' % ( record.priority, record.weight, record.port, target) yield (record.service, item) @classmethod def get_GENERATE_directives(cls, dynamic_range): """Return the GENERATE directives for the forward zone of a network. """ slash_16 = IPNetwork("%s/16" % IPAddress(dynamic_range.first)) if (dynamic_range.size > 256 ** 2 or not ip_range_within_network(dynamic_range, slash_16)): # We can't issue a sane set of $GENERATEs for any network # larger than a /16, or for one that spans two /16s, so we # don't try. return [] generate_directives = set() subnets, prefix, _ = get_details_for_ip_range(dynamic_range) for subnet in subnets: iterator = "%d-%d" % ( (subnet.first & 0x000000ff), (subnet.last & 0x000000ff)) hostname = "%s-%d-$" % ( prefix.replace('.', '-'), # Calculate what the third quad (i.e. 10.0.X.1) value should # be for this subnet. (subnet.first & 0x0000ff00) >> 8, ) ip_address = "%s.%d.$" % ( prefix, (subnet.first & 0x0000ff00) >> 8) generate_directives.add((iterator, hostname, ip_address)) return sorted( generate_directives, key=lambda directive: directive[2]) def write_config(self): """Write the zone file.""" # Create GENERATE directives for IPv4 ranges. 
generate_directives = list( chain.from_iterable( self.get_GENERATE_directives(dynamic_range) for dynamic_range in self._dynamic_ranges if dynamic_range.version == 4 )) self.write_zone_file( self.target_path, self.make_parameters(), { 'mappings': { 'SRV': self.get_srv_mapping( self._srv_mapping), 'A': self.get_A_mapping( self._mapping, self.domain, self._dns_ip), 'AAAA': self.get_AAAA_mapping( self._mapping, self.domain, self._dns_ip), }, 'generate_directives': { 'A': generate_directives, } }) class DNSReverseZoneConfig(DNSZoneConfigBase): """Writes reverse zone files. A reverse zone mapping contains "PTR" records, each mapping reverse-notation IP addresses within a network to the matching generated hostname. """ def __init__(self, domain, **kwargs): """See `DNSZoneConfigBase.__init__`. :param domain: The domain name of the forward zone. :param serial: The serial to use in the zone file. This must increment on each change. :param mapping: A hostname:ips mapping for all known hosts in the reverse zone. They will be mapped as PTR records. IP addresses not in `network` will be dropped. :param network: The network that the mapping exists within. :type network: :class:`netaddr.IPNetwork` """ self._mapping = kwargs.pop('mapping', {}) self._network = kwargs.pop("network", None) self._dynamic_ranges = kwargs.pop('dynamic_ranges', []) zone_name = self.compose_zone_name(self._network) super(DNSReverseZoneConfig, self).__init__( domain, zone_name=zone_name, **kwargs) @classmethod def compose_zone_name(cls, network): """Return the name of the reverse zone.""" # Generate the name of the reverse zone file: # Use netaddr's reverse_dns() to get the reverse IP name # of the first IP address in the network and then drop the first # octets of that name (i.e. drop the octets that will be specified in # the zone file). first = IPAddress(network.first) if first.version == 6: # IPv6. # Use float division and ceil to cope with network sizes that # are not divisible by 4. rest_limit = int(math.ceil((128 - network.prefixlen) / 4.)) else: # IPv4. # Use float division and ceil to cope with splits not done on # octets boundaries. rest_limit = int(math.ceil((32 - network.prefixlen) / 8.)) reverse_name = first.reverse_dns.split('.', rest_limit)[-1] # Strip off trailing '.'. return reverse_name[:-1] @classmethod def get_PTR_mapping(cls, mapping, domain, network): """Return reverse mapping: reverse IPs to hostnames. The reverse generated mapping is the mapping between the reverse IP addresses and the hostnames for all the IP addresses in the given `mapping`. The returned mapping is meant to be used to generate PTR records in the reverse zone file. :param mapping: A hostname:ip-addresses mapping for all known hosts in the reverse zone. :param domain: Zone's domain name. :param network: Zone's network. :type network: :class:`netaddr.IPNetwork` """ return ( ( IPAddress(ip).reverse_dns, '%s.%s.' % (hostname, domain), ) for hostname, ip in enumerate_mapping(mapping) # Filter out the IP addresses that are not in `network`. if IPAddress(ip) in network ) @classmethod def get_GENERATE_directives(cls, dynamic_range, domain): """Return the GENERATE directives for the reverse zone of a network.""" slash_16 = IPNetwork("%s/16" % IPAddress(dynamic_range.first)) if (dynamic_range.size > 256 ** 2 or not ip_range_within_network(dynamic_range, slash_16)): # We can't issue a sane set of $GENERATEs for any network # larger than a /16, or for one that spans two /16s, so we # don't try. 
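            # (The /16 ceiling also matches get_details_for_ip_range,
            # whose prefix and rdns_suffix assume the first two octets
            # stay constant across the whole range.)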
return [] generate_directives = set() subnets, prefix, rdns_suffix = get_details_for_ip_range(dynamic_range) for subnet in subnets: iterator = "%d-%d" % ( (subnet.first & 0x000000ff), (subnet.last & 0x000000ff)) hostname = "%s-%d-$" % ( prefix.replace('.', '-'), (subnet.first & 0x0000ff00) >> 8) rdns = "$.%d.%s" % ( (subnet.first & 0x0000ff00) >> 8, rdns_suffix) generate_directives.add( (iterator, rdns, "%s.%s." % (hostname, domain))) return sorted( generate_directives, key=lambda directive: directive[2]) def write_config(self): """Write the zone file.""" # Create GENERATE directives for IPv4 ranges. generate_directives = list( chain.from_iterable( self.get_GENERATE_directives(dynamic_range, self.domain) for dynamic_range in self._dynamic_ranges if dynamic_range.version == 4 )) self.write_zone_file( self.target_path, self.make_parameters(), { 'mappings': { 'PTR': self.get_PTR_mapping( self._mapping, self.domain, self._network), }, 'generate_directives': { 'PTR': generate_directives, } } ) maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/tests/__init__.py0000644000000000000000000000000013056115004023444 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/tests/test_actions.py0000644000000000000000000002335713056115004024430 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :py:module:`provisioningserver.dns.actions`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from os.path import join import random from random import randint from subprocess import CalledProcessError from textwrap import dedent from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import ( call, sentinel, ) from netaddr import IPNetwork from provisioningserver.dns import actions from provisioningserver.dns.config import ( MAAS_NAMED_CONF_NAME, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME, ) from provisioningserver.dns.testing import patch_dns_config_path from provisioningserver.dns.zoneconfig import ( DNSForwardZoneConfig, DNSReverseZoneConfig, ) from provisioningserver.testing.testcase import PservTestCase from provisioningserver.utils.shell import ExternalProcessError from testtools.matchers import ( AllMatch, Contains, FileContains, FileExists, ) class TestReconfigure(MAASTestCase): """Tests for :py:func:`actions.bind_reconfigure`.""" def test__executes_rndc_command(self): self.patch_autospec(actions, "execute_rndc_command") actions.bind_reconfigure() self.assertThat( actions.execute_rndc_command, MockCalledOnceWith(("reconfig",))) def test__logs_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() with FakeLogger("maas") as logger: self.assertRaises(CalledProcessError, actions.bind_reconfigure) self.assertDocTestMatches( "Reloading BIND configuration failed: " "Command ... 
returned non-zero exit status ...", logger.output) def test__upgrades_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() self.assertRaises(ExternalProcessError, actions.bind_reconfigure) class TestReload(MAASTestCase): """Tests for :py:func:`actions.bind_reload`.""" def test__executes_rndc_command(self): self.patch_autospec(actions, "execute_rndc_command") actions.bind_reload() self.assertThat( actions.execute_rndc_command, MockCalledOnceWith(("reload",))) def test__logs_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() with FakeLogger("maas") as logger: self.assertFalse(actions.bind_reload()) self.assertDocTestMatches( "Reloading BIND failed (is it running?): " "Command ... returned non-zero exit status ...", logger.output) def test__false_on_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() self.assertFalse(actions.bind_reload()) class TestReloadWithRetries(MAASTestCase): """Tests for :py:func:`actions.bind_reload_with_retries`.""" def test__calls_bind_reload_count_times(self): self.patch_autospec(actions, "sleep") # Disable. bind_reload = self.patch_autospec(actions, "bind_reload") bind_reload.return_value = False attempts = randint(3, 13) actions.bind_reload_with_retries(attempts=attempts) expected_calls = [call()] * attempts self.assertThat( actions.bind_reload, MockCallsMatch(*expected_calls)) def test__returns_on_success(self): self.patch_autospec(actions, "sleep") # Disable. bind_reload = self.patch(actions, "bind_reload") bind_reload_return_values = [False, False, True, ] bind_reload.side_effect = lambda: ( bind_reload_return_values.pop(0)) actions.bind_reload_with_retries(attempts=5) expected_calls = [call(), call(), call()] self.assertThat( actions.bind_reload, MockCallsMatch(*expected_calls)) def test__sleeps_interval_seconds_between_attempts(self): self.patch_autospec(actions, "sleep") # Disable. bind_reload = self.patch_autospec(actions, "bind_reload") bind_reload.return_value = False attempts = randint(3, 13) actions.bind_reload_with_retries( attempts=attempts, interval=sentinel.interval) expected_sleep_calls = [call(sentinel.interval)] * (attempts - 1) self.assertThat(actions.sleep, MockCallsMatch(*expected_sleep_calls)) class TestReloadZone(MAASTestCase): """Tests for :py:func:`actions.bind_reload_zone`.""" def test__executes_rndc_command(self): self.patch_autospec(actions, "execute_rndc_command") self.assertTrue(actions.bind_reload_zone(sentinel.zone)) self.assertThat( actions.execute_rndc_command, MockCalledOnceWith(("reload", sentinel.zone))) def test__logs_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() with FakeLogger("maas") as logger: self.assertFalse(actions.bind_reload_zone(sentinel.zone)) self.assertDocTestMatches( "Reloading BIND zone ... failed (is it running?): " "Command ... 
returned non-zero exit status ...", logger.output) def test__false_on_subprocess_error(self): erc = self.patch_autospec(actions, "execute_rndc_command") erc.side_effect = factory.make_CalledProcessError() self.assertFalse(actions.bind_reload_zone(sentinel.zone)) class TestConfiguration(PservTestCase): """Tests for the `bind_write_*` functions.""" def setUp(self): super(TestConfiguration, self).setUp() # Ensure that files are written to a temporary directory. self.dns_conf_dir = self.make_dir() patch_dns_config_path(self, self.dns_conf_dir) # Patch out calls to 'execute_rndc_command'. self.patch_autospec(actions, 'execute_rndc_command') def test_bind_write_configuration_writes_file(self): domain = factory.make_string() zones = [ DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=factory.make_ipv4_network()), DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=factory.make_ipv6_network()), ] actions.bind_write_configuration( zones=zones, trusted_networks=[]) self.assertThat( os.path.join(self.dns_conf_dir, MAAS_NAMED_CONF_NAME), FileExists()) def test_bind_write_configuration_writes_file_with_acl(self): trusted_networks = [ factory.make_ipv4_network(), factory.make_ipv6_network(), ] actions.bind_write_configuration( zones=[], trusted_networks=trusted_networks) expected_file = os.path.join(self.dns_conf_dir, MAAS_NAMED_CONF_NAME) self.assertThat(expected_file, FileExists()) expected_content = dedent("""\ acl "trusted" { %s; %s; localnets; localhost; }; """) expected_content %= tuple(trusted_networks) self.assertThat(expected_file, FileContains( matcher=Contains(expected_content))) def test_bind_write_zones_writes_file(self): domain = factory.make_string() network = IPNetwork('192.168.0.3/24') dns_ip = factory.pick_ip_in_network(network) ip = factory.pick_ip_in_network(network) forward_zone = DNSForwardZoneConfig( domain, serial=random.randint(1, 100), mapping={factory.make_string(): [ip]}, dns_ip=dns_ip) reverse_zone = DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=network) actions.bind_write_zones(zones=[forward_zone, reverse_zone]) forward_file_name = 'zone.%s' % domain reverse_file_name = 'zone.0.168.192.in-addr.arpa' expected_files = [ join(self.dns_conf_dir, forward_file_name), join(self.dns_conf_dir, reverse_file_name), ] self.assertThat(expected_files, AllMatch(FileExists())) def test_bind_write_options_sets_up_config(self): # bind_write_configuration_and_zones writes the config file, writes # the zone files, and reloads the dns service. upstream_dns = [ factory.make_ipv4_address(), factory.make_ipv4_address(), ] dnssec_validation = random.choice(["auto", "yes", "no"]) expected_dnssec_validation = dnssec_validation actions.bind_write_options( upstream_dns=upstream_dns, dnssec_validation=dnssec_validation) expected_options_file = join( self.dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) self.assertThat(expected_options_file, FileExists()) expected_options_content = dedent("""\ forwarders { %s; %s; }; dnssec-validation %s; allow-query { any; }; allow-recursion { trusted; }; allow-query-cache { trusted; }; """) expected_options_content %= ( tuple(upstream_dns) + (expected_dnssec_validation,)) self.assertThat( expected_options_file, FileContains(expected_options_content)) maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/tests/test_config.py0000644000000000000000000004471513056115004024236 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test cases for dns.config""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import errno import os.path import random from textwrap import dedent from fixtures import EnvironmentVariable from maastesting.factory import factory from maastesting.fakemethod import FakeMethod from maastesting.testcase import MAASTestCase from mock import Mock from netaddr import IPNetwork from provisioningserver.dns import config from provisioningserver.dns.config import ( compose_config_path, DEFAULT_CONTROLS, DNSConfig, DNSConfigDirectoryMissing, DNSConfigFail, execute_rndc_command, extract_suggested_named_conf, generate_rndc, MAAS_NAMED_CONF_NAME, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME, MAAS_NAMED_RNDC_CONF_NAME, MAAS_RNDC_CONF_NAME, NAMED_CONF_OPTIONS, render_dns_template, report_missing_config_dir, set_up_options_conf, set_up_rndc, uncomment_named_conf, ) from provisioningserver.dns.testing import ( patch_dns_config_path, patch_dns_default_controls, ) from provisioningserver.dns.zoneconfig import ( DNSForwardZoneConfig, DNSReverseZoneConfig, ) from provisioningserver.utils import locate_config from provisioningserver.utils.isc import read_isc_file from testtools.matchers import ( AllMatch, Contains, ContainsAll, EndsWith, Equals, FileContains, FileExists, Is, IsInstance, MatchesAll, Not, SamePath, StartsWith, ) from testtools.testcase import ExpectedException from twisted.python.filepath import FilePath NAMED_CONF_OPTIONS_CONTENTS = dedent("""\ options { forwarders { 8.8.8.8; 8.8.4.4; }; dnssec-validation auto; allow-query { any; }; allow-recursion { trusted; }; allow-query-cache { trusted; }; auth-nxdomain no; listen-on-v6 { any; }; }; """) NAMED_CONF_OPTIONS_WITH_ALLOW_QUERY_CONTENTS = dedent("""\ options { forwarders { 8.8.8.8; 8.8.4.4; }; dnssec-validation auto; allow-query { any; }; auth-nxdomain no; listen-on-v6 { any; }; }; """) NAMED_CONF_OPTIONS_NO_ALLOW_CONTENTS = dedent("""\ options { forwarders { 8.8.8.8; 8.8.4.4; }; dnssec-validation auto; auth-nxdomain no; listen-on-v6 { any; }; }; """) class TestHelpers(MAASTestCase): def test_get_dns_config_dir_defaults_to_etc_bind_maas(self): self.useFixture(EnvironmentVariable("MAAS_DNS_CONFIG_DIR")) self.assertThat( config.get_dns_config_dir(), MatchesAll( SamePath(locate_config("../bind/maas")), IsInstance(unicode), )) def test_get_dns_config_dir_checks_environ_first(self): directory = self.make_dir() self.useFixture(EnvironmentVariable( "MAAS_DNS_CONFIG_DIR", directory.encode("ascii"))) self.assertThat( config.get_dns_config_dir(), MatchesAll( SamePath(directory), IsInstance(unicode), )) def test_get_bind_config_dir_defaults_to_etc_bind_maas(self): self.useFixture(EnvironmentVariable("MAAS_BIND_CONFIG_DIR")) self.assertThat( config.get_bind_config_dir(), MatchesAll( SamePath(locate_config("../bind")), IsInstance(unicode), )) def test_get_bind_config_dir_checks_environ_first(self): directory = self.make_dir() self.useFixture(EnvironmentVariable( "MAAS_BIND_CONFIG_DIR", directory.encode("ascii"))) self.assertThat( config.get_bind_config_dir(), MatchesAll( SamePath(directory), IsInstance(unicode), )) def test_get_dns_root_port_defaults_to_954(self): self.useFixture(EnvironmentVariable("MAAS_DNS_RNDC_PORT")) self.assertEqual(954, config.get_dns_rndc_port()) def test_get_dns_root_port_checks_environ_first(self): port = factory.pick_port() self.useFixture(EnvironmentVariable( 
"MAAS_DNS_RNDC_PORT", b"%d" % port)) self.assertEqual(port, config.get_dns_rndc_port()) def test_get_dns_default_controls_defaults_to_affirmative(self): self.useFixture(EnvironmentVariable("MAAS_DNS_DEFAULT_CONTROLS")) self.assertTrue(config.get_dns_default_controls()) def test_get_dns_default_controls_checks_environ_first(self): self.useFixture( EnvironmentVariable("MAAS_DNS_DEFAULT_CONTROLS", "0")) self.assertFalse(config.get_dns_default_controls()) class TestRNDCUtilities(MAASTestCase): def test_generate_rndc_returns_configurations(self): rndc_content, named_content = generate_rndc() # rndc_content and named_content look right. self.assertIn('# Start of rndc.conf', rndc_content) self.assertIn('controls {', named_content) # named_content does not include any comment. self.assertNotIn('\n#', named_content) def test_set_up_rndc_writes_configurations(self): dns_conf_dir = patch_dns_config_path(self) set_up_rndc() expected = ( (MAAS_RNDC_CONF_NAME, '# Start of rndc.conf'), (MAAS_NAMED_RNDC_CONF_NAME, 'controls {')) for filename, content in expected: with open(os.path.join(dns_conf_dir, filename), "rb") as stream: conf_content = stream.read() self.assertIn(content, conf_content) def test_set_up_options_conf_writes_configuration(self): dns_conf_dir = patch_dns_config_path(self) fake_dns = [factory.make_ipv4_address(), factory.make_ipv4_address()] set_up_options_conf(upstream_dns=fake_dns) target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) self.assertThat( target_file, MatchesAll(*( FileContains(matcher=Contains(address)) for address in fake_dns))) def test_set_up_options_conf_write_config_assumes_no_overrides(self): dns_conf_dir = patch_dns_config_path(self) set_up_options_conf() target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) target = read_isc_file(target_file) self.assertThat([ target['allow-query']['any'], target['allow-recursion']['trusted'], target['allow-query-cache']['trusted'], ], AllMatch(Equals(True))) def test_set_up_options_conf_write_config_allows_overrides(self): dns_conf_dir = patch_dns_config_path(self) factory.make_file( location=dns_conf_dir, name=NAMED_CONF_OPTIONS, contents=NAMED_CONF_OPTIONS_CONTENTS) set_up_options_conf() target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) target = read_isc_file(target_file) self.assertThat([ target.get('allow-query'), target.get('allow-recursion'), target.get('allow-query-cache'), ], AllMatch(Is(None))) def test_set_up_options_conf_write_config_allows_zero_overrides(self): dns_conf_dir = patch_dns_config_path(self) factory.make_file( location=dns_conf_dir, name=NAMED_CONF_OPTIONS, contents=NAMED_CONF_OPTIONS_NO_ALLOW_CONTENTS) set_up_options_conf() target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) target = read_isc_file(target_file) self.assertThat([ target['allow-query']['any'], target['allow-recursion']['trusted'], target['allow-query-cache']['trusted'], ], AllMatch(Equals(True))) def test_set_up_options_conf_write_config_allows_single_override(self): dns_conf_dir = patch_dns_config_path(self) factory.make_file( location=dns_conf_dir, name=NAMED_CONF_OPTIONS, contents=NAMED_CONF_OPTIONS_WITH_ALLOW_QUERY_CONTENTS) set_up_options_conf() target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) target = read_isc_file(target_file) self.assertIsNone(target.get('allow-query')) def test_set_up_options_conf_handles_no_upstream_dns(self): dns_conf_dir = patch_dns_config_path(self) set_up_options_conf() 
target_file = os.path.join( dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME) self.assertThat(target_file, FileExists()) def test_set_up_options_conf_raises_on_bad_template(self): template = self.make_file( name="named.conf.options.inside.maas.template", contents=b"{{nonexistent}}") self.patch(config, "TEMPLATES_DIR", os.path.dirname(template)) exception = self.assertRaises(DNSConfigFail, set_up_options_conf) self.assertIn("name 'nonexistent' is not defined", repr(exception)) def test_rndc_config_includes_default_controls(self): dns_conf_dir = patch_dns_config_path(self) patch_dns_default_controls(self, enable=True) set_up_rndc() rndc_file = os.path.join(dns_conf_dir, MAAS_NAMED_RNDC_CONF_NAME) with open(rndc_file, "rb") as stream: conf_content = stream.read() self.assertIn(DEFAULT_CONTROLS, conf_content) def test_execute_rndc_command_executes_command(self): recorder = FakeMethod() fake_dir = patch_dns_config_path(self) self.patch(config, 'call_and_check', recorder) command = factory.make_string() execute_rndc_command([command]) rndc_conf_path = os.path.join(fake_dir, MAAS_RNDC_CONF_NAME) expected_command = ['rndc', '-c', rndc_conf_path, command] self.assertEqual((expected_command,), recorder.calls[0][0]) def test_extract_suggested_named_conf_extracts_section(self): named_part = factory.make_string() # Actual rndc-confgen output, mildly mangled for testing purposes. # Note the awkward line break. The code works by matching that exact # line, so there's no leeway with the spacing. rndc_config = dedent("""\ # Start of rndc.conf %(rndc_part)s # End of rndc.conf # %(start_marker)s %(named_part)s # End of named.conf """) % { 'start_marker': ( 'Use with the following in named.conf, ' 'adjusting the allow list as needed:'), 'rndc_part': factory.make_string(), 'named_part': named_part, } # What you get is just the suggested named.conf that's embedded in # the rndc-confgen output, not including its header and footer. self.assertEqual( named_part + '\n', extract_suggested_named_conf(rndc_config)) def test_extract_suggested_named_conf_notices_missing_boundary(self): # extract_suggested_named_conf raises an exception if it does not # find the expected boundary between the rndc and named parts of the # generated configuration. rndc_config = dedent("""\ # Start of rndc.conf %s %s # End of named.conf """) % (factory.make_string(), factory.make_string()) self.assertRaises( ValueError, extract_suggested_named_conf, rndc_config) def test_uncomment_named_conf_uncomments(self): rndc_conf = 'key "rndc_key" {}' self.assertEqual(rndc_conf, uncomment_named_conf("# %s" % rndc_conf)) def test_uncomment_named_conf_uncomments_multiple_lines(self): # named.conf section, extracted from actual rndc-confgen output. # Note the weird %s: the config has a line ending in a space. 
named_comment = dedent("""\ # key "rndc-key" { # \talgorithm hmac-md5; # \tsecret "FuvtYZbYYLLJQKtn3zembg=="; # }; # %s # controls { # \tinet 127.0.0.1 port 953 # \t\tallow { 127.0.0.1; } keys { "rndc-key"; }; # }; """) % "" self.assertThat(uncomment_named_conf(named_comment), Contains( 'key "rndc-key" {\n' '\talgorithm hmac-md5;\n')) class TestComposeConfigPath(MAASTestCase): """Tests for `compose_config_path`.""" def test_returns_filename_in_dns_config_dir(self): dns_dir = patch_dns_config_path(self) filename = factory.make_name('config') self.assertEqual( os.path.join(dns_dir, filename), compose_config_path(filename)) class TestRenderDNSTemplate(MAASTestCase): """Tests for `render_dns_template`.""" def test_renders_template(self): template_text = 'X %d Y' % random.randint(1, 10000) self.assertEqual( template_text, render_dns_template(self.make_file(contents=template_text))) def test_interpolates_parameters(self): param_name = factory.make_name('param', sep='_') param_value = factory.make_string() self.assertEqual( "X %s Y" % param_value, render_dns_template( self.make_file(contents="X {{%s}} Y" % param_name), {param_name: param_value})) def test_combines_parameter_dicts(self): self.assertEqual( "aaa bbb", render_dns_template( self.make_file(contents='{{one}} {{two}}'), {'one': 'aaa'}, {'two': 'bbb'})) def test_takes_latest_value_of_redefined_parameter(self): self.assertEqual( "last", render_dns_template( self.make_file(contents='{{var}}'), {'var': 'first'}, {'var': 'middle'}, {'var': 'last'})) def test_reports_missing_parameters(self): e = self.assertRaises( DNSConfigFail, render_dns_template, self.make_file(contents='{{x}}'), {'y': '?'}) self.assertIn("'x' is not defined", unicode(e)) class TestReportMissingConfigDir(MAASTestCase): """Tests for the `report_missing_config_dir` context manager.""" def test_specially_reports_missing_config_dir(self): with ExpectedException(DNSConfigDirectoryMissing): with report_missing_config_dir(): open(os.path.join(self.make_dir(), 'nonexistent-file.txt')) def test_succeeds_if_no_exceptions(self): with report_missing_config_dir(): pass # The real test is that we get here without error. pass def test_passes_on_other_similar_errors(self): with ExpectedException(OSError): with report_missing_config_dir(): raise OSError(errno.EACCES, "Deliberate error for testing.") def test_passes_on_dissimilar_errors(self): class DeliberateError(Exception): """Deliberately induced error for testing.""" with ExpectedException(DeliberateError): with report_missing_config_dir(): raise DeliberateError("This exception propagates unchanged.") class TestDNSConfig(MAASTestCase): """Tests for DNSConfig.""" def test_write_config_DNSConfigDirectoryMissing_if_dir_missing(self): dnsconfig = DNSConfig() dir_name = patch_dns_config_path(self) os.rmdir(dir_name) self.assertRaises(DNSConfigDirectoryMissing, dnsconfig.write_config) def test_write_config_errors_if_unexpected_exception(self): dnsconfig = DNSConfig() exception = IOError(errno.EBUSY, factory.make_string()) self.patch(config, 'atomic_write', Mock(side_effect=exception)) self.assertRaises(IOError, dnsconfig.write_config) def test_write_config_skips_writing_if_overwrite_false(self): # If DNSConfig is created with overwrite=False, it won't # overwrite an existing config file. 
target_dir = patch_dns_config_path(self) random_content = factory.make_string() factory.make_file( location=target_dir, name=MAAS_NAMED_CONF_NAME, contents=random_content) dnsconfig = DNSConfig() dnsconfig.write_config(overwrite=False) self.assertThat( os.path.join(target_dir, MAAS_NAMED_CONF_NAME), FileContains(random_content)) def test_write_config_writes_config_if_no_existing_file(self): # If DNSConfig is created with overwrite=False, the config file # will be written if no config file exists. target_dir = patch_dns_config_path(self) dnsconfig = DNSConfig() dnsconfig.write_config(overwrite=False) self.assertThat( os.path.join(target_dir, MAAS_NAMED_CONF_NAME), FileExists()) def test_write_config_writes_config(self): target_dir = patch_dns_config_path(self) domain = factory.make_string() network = IPNetwork('192.168.0.3/24') ip = factory.pick_ip_in_network(network) forward_zone = DNSForwardZoneConfig( domain, mapping={factory.make_string(): ip}) reverse_zone = DNSReverseZoneConfig(domain, network=network) dnsconfig = DNSConfig((forward_zone, reverse_zone)) dnsconfig.write_config() self.assertThat( os.path.join(target_dir, MAAS_NAMED_CONF_NAME), FileContains( matcher=ContainsAll( [ 'zone.%s' % domain, 'zone.0.168.192.in-addr.arpa', MAAS_NAMED_RNDC_CONF_NAME, ]))) def test_write_config_makes_config_world_readable(self): target_dir = patch_dns_config_path(self) DNSConfig().write_config() config_file = FilePath(os.path.join(target_dir, MAAS_NAMED_CONF_NAME)) self.assertTrue(config_file.getPermissions().other.read) def test_get_include_snippet_returns_snippet(self): target_dir = patch_dns_config_path(self) snippet = DNSConfig.get_include_snippet() self.assertThat( snippet, MatchesAll( Not(StartsWith('\n')), EndsWith('\n'), Contains(target_dir), Contains( 'include "%s/%s"' % ( config.get_dns_config_dir(), DNSConfig.target_file_name, )))) maas-1.9.5+bzr4599.orig/src/provisioningserver/dns/tests/test_zoneconfig.py0000644000000000000000000006614313056115004025131 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for BIND zone config generation.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from collections import ( Iterable, Sequence, ) import os.path import random from maastesting.factory import factory from maastesting.matchers import MockNotCalled from maastesting.testcase import MAASTestCase from netaddr import ( IPAddress, IPNetwork, IPRange, ) from provisioningserver.dns.config import ( get_dns_config_dir, SRVRecord, ) from provisioningserver.dns.testing import patch_dns_config_path from provisioningserver.dns.zoneconfig import ( DNSForwardZoneConfig, DNSReverseZoneConfig, ) from testtools.matchers import ( Contains, ContainsAll, Equals, FileContains, HasLength, IsInstance, MatchesAll, MatchesStructure, Not, ) from twisted.python.filepath import FilePath class TestDNSForwardZoneConfig(MAASTestCase): """Tests for DNSForwardZoneConfig.""" def make_srv_record(self, service=None, port=None, target=None, priority=None, weight=None): if service is None: service = '.'.join(factory.make_name('_') for _ in range(2)) if port is None: port = factory.pick_port() if target is None: target = factory.make_hostname() if priority is None: priority = factory.pick_port() if weight is None: weight = factory.pick_port() return SRVRecord( service=service, port=port, target=target, priority=priority, weight=weight) def get_srv_item_output(self, srv_record): return '%s %s %s %s.' % ( srv_record.priority, srv_record.weight, srv_record.port, srv_record.target, ) def test_fields(self): domain = factory.make_string() serial = random.randint(1, 200) hostname = factory.make_string() network = factory.make_ipv4_network() ip = factory.pick_ip_in_network(network) mapping = {hostname: [ip]} dns_zone_config = DNSForwardZoneConfig( domain, serial=serial, mapping=mapping) self.assertThat( dns_zone_config, MatchesStructure.byEquality( domain=domain, serial=serial, _mapping=mapping, ) ) def test_computes_dns_config_file_paths(self): domain = factory.make_name('zone') dns_zone_config = DNSForwardZoneConfig(domain) self.assertEqual( os.path.join(get_dns_config_dir(), 'zone.%s' % domain), dns_zone_config.target_path) def test_get_a_mapping_returns_ipv4_mapping(self): name = factory.make_string() network = IPNetwork('192.12.0.1/30') dns_ip = factory.pick_ip_in_network(network) ipv4_mapping = { factory.make_name('host'): factory.make_ipv4_address(), factory.make_name('host'): factory.make_ipv4_address(), } ipv6_mapping = { factory.make_name('host'): factory.make_ipv6_address(), factory.make_name('host'): factory.make_ipv6_address(), } combined_mapping = { hostname: [ip] for hostname, ip in (ipv4_mapping.items() + ipv6_mapping.items()) } expected = [('%s.' 
% name, dns_ip)] + ipv4_mapping.items() self.assertItemsEqual( expected, DNSForwardZoneConfig.get_A_mapping(combined_mapping, name, dns_ip)) def test_get_aaaa_mapping_returns_ipv6_mapping(self): name = factory.make_string() network = IPNetwork('192.12.0.1/30') dns_ip = factory.pick_ip_in_network(network) ipv4_mapping = { factory.make_name('host'): factory.make_ipv4_address(), factory.make_name('host'): factory.make_ipv4_address(), } ipv6_mapping = { factory.make_name('host'): factory.make_ipv6_address(), factory.make_name('host'): factory.make_ipv6_address(), } combined_mapping = { hostname: [ip] for hostname, ip in (ipv4_mapping.items() + ipv6_mapping.items()) } self.assertItemsEqual( ipv6_mapping.items(), DNSForwardZoneConfig.get_AAAA_mapping( combined_mapping, name, dns_ip)) def test_get_srv_mapping_returns_iterator(self): srv = self.make_srv_record() self.assertThat( DNSForwardZoneConfig.get_srv_mapping([srv]), MatchesAll( IsInstance(Iterable), Not(IsInstance(Sequence)))) def test_get_srv_mapping_returns_correct_format(self): srv = self.make_srv_record() self.assertItemsEqual([ (srv.service, self.get_srv_item_output(srv)), ], DNSForwardZoneConfig.get_srv_mapping([srv])) def test_get_srv_mapping_handles_ip_address_target(self): target = factory.make_ipv4_address() srv = self.make_srv_record(target=target) item = self.get_srv_item_output(srv) item = item.rstrip('.') self.assertItemsEqual([ (srv.service, item), ], DNSForwardZoneConfig.get_srv_mapping([srv])) def test_get_srv_mapping_returns_multiple(self): srvs = [self.make_srv_record() for _ in range(3)] entries = [] for srv in srvs: entries.append((srv.service, self.get_srv_item_output(srv))) self.assertItemsEqual( entries, DNSForwardZoneConfig.get_srv_mapping(srvs)) def test_writes_dns_zone_config(self): target_dir = patch_dns_config_path(self) domain = factory.make_string() network = factory.make_ipv4_network() dns_ip = factory.pick_ip_in_network(network) ipv4_hostname = factory.make_name('host') ipv4_ip = factory.pick_ip_in_network(network) ipv6_hostname = factory.make_name('host') ipv6_ip = factory.make_ipv6_address() mapping = { ipv4_hostname: [ipv4_ip], ipv6_hostname: [ipv6_ip], } expected_generate_directives = ( DNSForwardZoneConfig.get_GENERATE_directives(network)) srv = self.make_srv_record() dns_zone_config = DNSForwardZoneConfig( domain, serial=random.randint(1, 100), mapping=mapping, dns_ip=dns_ip, srv_mapping=[srv], dynamic_ranges=[IPRange(network.first, network.last)]) dns_zone_config.write_config() self.assertThat( os.path.join(target_dir, 'zone.%s' % domain), FileContains( matcher=ContainsAll( [ '%s IN SRV %s' % ( srv.service, self.get_srv_item_output(srv)), '%s IN A %s' % (ipv4_hostname, ipv4_ip), '%s IN AAAA %s' % (ipv6_hostname, ipv6_ip), ] + [ '$GENERATE %s %s IN A %s' % ( iterator_values, reverse_dns, hostname) for iterator_values, reverse_dns, hostname in expected_generate_directives ] ) ) ) def test_writes_dns_zone_config_with_NS_record(self): target_dir = patch_dns_config_path(self) dns_ip = factory.make_ipv4_address() dns_zone_config = DNSForwardZoneConfig( factory.make_string(), serial=random.randint(1, 100), dns_ip=dns_ip) dns_zone_config.write_config() self.assertThat( os.path.join(target_dir, 'zone.%s' % dns_zone_config.domain), FileContains( matcher=ContainsAll( [ 'IN NS %s.' % dns_zone_config.domain, '%s. 
IN A %s' % (dns_zone_config.domain, dns_ip), ]))) def test_ignores_generate_directives_for_v6_dynamic_ranges(self): patch_dns_config_path(self) domain = factory.make_string() network = factory.make_ipv4_network() dns_ip = factory.pick_ip_in_network(network) ipv4_hostname = factory.make_name('host') ipv4_ip = factory.pick_ip_in_network(network) ipv6_hostname = factory.make_name('host') ipv6_ip = factory.make_ipv6_address() ipv6_network = factory.make_ipv6_network() dynamic_range = IPRange(ipv6_network.first, ipv6_network.last) mapping = { ipv4_hostname: [ipv4_ip], ipv6_hostname: [ipv6_ip], } srv = self.make_srv_record() dns_zone_config = DNSForwardZoneConfig( domain, serial=random.randint(1, 100), mapping=mapping, dns_ip=dns_ip, srv_mapping=[srv], dynamic_ranges=[dynamic_range]) get_generate_directives = self.patch( dns_zone_config, 'get_GENERATE_directives') dns_zone_config.write_config() self.assertThat(get_generate_directives, MockNotCalled()) def test_config_file_is_world_readable(self): patch_dns_config_path(self) dns_zone_config = DNSForwardZoneConfig( factory.make_string(), serial=random.randint(1, 100), dns_ip=factory.make_ipv4_address()) dns_zone_config.write_config() filepath = FilePath(dns_zone_config.target_path) self.assertTrue(filepath.getPermissions().other.read) class TestDNSReverseZoneConfig(MAASTestCase): """Tests for DNSReverseZoneConfig.""" def test_fields(self): domain = factory.make_string() serial = random.randint(1, 200) network = factory.make_ipv4_network() dns_zone_config = DNSReverseZoneConfig( domain, serial=serial, network=network) self.assertThat( dns_zone_config, MatchesStructure.byEquality( domain=domain, serial=serial, _network=network, ) ) def test_computes_dns_config_file_paths(self): domain = factory.make_name('zone') reverse_file_name = 'zone.168.192.in-addr.arpa' dns_zone_config = DNSReverseZoneConfig( domain, network=IPNetwork("192.168.0.0/22")) self.assertEqual( os.path.join(get_dns_config_dir(), reverse_file_name), dns_zone_config.target_path) def test_reverse_zone_file(self): # DNSReverseZoneConfig calculates the reverse zone file name # correctly for IPv4 and IPv6 networks. expected = [ # IPv4 networks. (IPNetwork('192.168.0.1/22'), '168.192.in-addr.arpa'), (IPNetwork('192.168.0.1/24'), '0.168.192.in-addr.arpa'), # IPv6 networks. (IPNetwork('3ffe:801::/32'), '1.0.8.0.e.f.f.3.ip6.arpa'), (IPNetwork('2001:db8:0::/48'), '0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'), ( IPNetwork('2001:ba8:1f1:400::/56'), '4.0.1.f.1.0.8.a.b.0.1.0.0.2.ip6.arpa' ), ( IPNetwork('2610:8:6800:1::/64'), '1.0.0.0.0.0.8.6.8.0.0.0.0.1.6.2.ip6.arpa', ), ( IPNetwork('2001:ba8:1f1:400::/103'), '0.0.0.0.0.0.0.0.0.0.0.4.0.1.f.1.0.8.a.b.0.1.0.0.2.ip6.arpa', ), ] results = [] for network, _ in expected: domain = factory.make_name('zone') dns_zone_config = DNSReverseZoneConfig(domain, network=network) results.append((network, dns_zone_config.zone_name)) self.assertEqual(expected, results) def test_get_ptr_mapping(self): name = factory.make_string() network = IPNetwork('192.12.0.1/30') hosts = { factory.make_string(): factory.pick_ip_in_network(network), factory.make_string(): factory.pick_ip_in_network(network), } expected = [ (IPAddress(ip).reverse_dns, '%s.%s.' 
% (hostname, name)) for hostname, ip in hosts.items() ] mapping = { hostname: [ip] for hostname, ip in hosts.items() } self.assertItemsEqual( expected, DNSReverseZoneConfig.get_PTR_mapping(mapping, name, network)) def test_get_ptr_mapping_drops_IPs_not_in_network(self): name = factory.make_string() network = IPNetwork('192.12.0.1/30') in_network_mapping = { factory.make_string(): factory.pick_ip_in_network(network), factory.make_string(): factory.pick_ip_in_network(network), } expected = [ (IPAddress(ip).reverse_dns, '%s.%s.' % (hostname, name)) for hostname, ip in in_network_mapping.items() ] mapping = { hostname: [ip] for hostname, ip in in_network_mapping.items() } extra_mapping = { factory.make_string(): ['192.50.0.2'], factory.make_string(): ['192.70.0.2'], } mapping.update(extra_mapping) self.assertItemsEqual( expected, DNSReverseZoneConfig.get_PTR_mapping(mapping, name, network)) def test_writes_dns_zone_config_with_NS_record(self): target_dir = patch_dns_config_path(self) network = factory.make_ipv4_network() dns_zone_config = DNSReverseZoneConfig( factory.make_string(), serial=random.randint(1, 100), network=network) dns_zone_config.write_config() self.assertThat( os.path.join( target_dir, 'zone.%s' % dns_zone_config.zone_name), FileContains( matcher=Contains('IN NS %s.' % dns_zone_config.domain))) def test_writes_reverse_dns_zone_config(self): target_dir = patch_dns_config_path(self) domain = factory.make_string() network = IPNetwork('192.168.0.1/22') dynamic_network = IPNetwork('192.168.0.1/28') dns_zone_config = DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=network, dynamic_ranges=[ IPRange(dynamic_network.first, dynamic_network.last)]) dns_zone_config.write_config() reverse_file_name = 'zone.168.192.in-addr.arpa' expected_generate_directives = dns_zone_config.get_GENERATE_directives( dynamic_network, domain) expected = ContainsAll( [ 'IN NS %s' % domain ] + [ '$GENERATE %s %s IN PTR %s' % ( iterator_values, reverse_dns, hostname) for iterator_values, reverse_dns, hostname in expected_generate_directives ]) self.assertThat( os.path.join(target_dir, reverse_file_name), FileContains(matcher=expected)) def test_ignores_generate_directives_for_v6_dynamic_ranges(self): patch_dns_config_path(self) domain = factory.make_string() network = IPNetwork('192.168.0.1/22') dynamic_network = IPNetwork("%s/64" % factory.make_ipv6_address()) dns_zone_config = DNSReverseZoneConfig( domain, serial=random.randint(1, 100), network=network, dynamic_ranges=[ IPRange(dynamic_network.first, dynamic_network.last)]) get_generate_directives = self.patch( dns_zone_config, 'get_GENERATE_directives') dns_zone_config.write_config() self.assertThat(get_generate_directives, MockNotCalled()) def test_reverse_config_file_is_world_readable(self): patch_dns_config_path(self) dns_zone_config = DNSReverseZoneConfig( factory.make_string(), serial=random.randint(1, 100), network=factory.make_ipv4_network()) dns_zone_config.write_config() filepath = FilePath(dns_zone_config.target_path) self.assertTrue(filepath.getPermissions().other.read) class TestDNSReverseZoneConfig_GetGenerateDirectives(MAASTestCase): """Tests for `DNSReverseZoneConfig.get_GENERATE_directives()`.""" def test_excplicitly(self): # The other tests in this TestCase rely on # get_expected_generate_directives(), which is quite dense. Here # we test get_GENERATE_directives() explicitly. 
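        # Each directive is an (iterator, rdns-template, hostname-template)
        # tuple, rendered as "$GENERATE <iterator> <rdns> IN PTR <hostname>";
        # BIND substitutes $ with each value in the iterator range.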
ip_range = IPRange('192.168.0.55', '192.168.2.128') expected_directives = [ ("55-255", "$.0.168.192.in-addr.arpa.", "192-168-0-$.domain."), ("0-255", "$.1.168.192.in-addr.arpa.", "192-168-1-$.domain."), ("0-128", "$.2.168.192.in-addr.arpa.", "192-168-2-$.domain."), ] self.assertItemsEqual( expected_directives, DNSReverseZoneConfig.get_GENERATE_directives( ip_range, domain="domain")) def get_expected_generate_directives(self, network, domain): ip_parts = network.network.format().split('.') relevant_ip_parts = ip_parts[:-2] first_address = IPAddress(network.first).format() first_address_parts = first_address.split(".") if network.size < 256: last_address = IPAddress(network.last).format() iterator_low = int(first_address_parts[-1]) iterator_high = last_address.split('.')[-1] else: iterator_low = 0 iterator_high = 255 second_octet_offset = int(first_address_parts[-2]) expected_generate_directives = [] directives_needed = network.size / 256 if directives_needed == 0: directives_needed = 1 for num in range(directives_needed): expected_address_base = "%s-%s" % tuple(relevant_ip_parts) expected_address = "%s-%s-$" % ( expected_address_base, num + second_octet_offset) relevant_ip_parts.reverse() expected_rdns_base = ( "%s.%s.in-addr.arpa." % tuple(relevant_ip_parts)) expected_rdns_template = "$.%s.%s" % ( num + second_octet_offset, expected_rdns_base) expected_generate_directives.append( ( "%s-%s" % (iterator_low, iterator_high), expected_rdns_template, "%s.%s." % (expected_address, domain) )) relevant_ip_parts.reverse() return expected_generate_directives def test_returns_single_entry_for_slash_24_network(self): network = IPNetwork("%s/24" % factory.make_ipv4_address()) domain = factory.make_string() expected_generate_directives = self.get_expected_generate_directives( network, domain) directives = DNSReverseZoneConfig.get_GENERATE_directives( network, domain) self.expectThat(directives, HasLength(1)) self.assertItemsEqual(expected_generate_directives, directives) def test_returns_single_entry_for_tiny_network(self): network = IPNetwork("%s/28" % factory.make_ipv4_address()) domain = factory.make_string() expected_generate_directives = self.get_expected_generate_directives( network, domain) directives = DNSReverseZoneConfig.get_GENERATE_directives( network, domain) self.expectThat(directives, HasLength(1)) self.assertItemsEqual(expected_generate_directives, directives) def test_returns_single_entry_for_weird_small_range(self): ip_range = IPRange('10.0.0.1', '10.0.0.255') domain = factory.make_string() directives = DNSReverseZoneConfig.get_GENERATE_directives( ip_range, domain) self.expectThat(directives, HasLength(1)) def test_dtrt_for_larger_networks(self): # For every other network size that we're not explicitly # testing here, # DNSReverseZoneConfig.get_GENERATE_directives() will return # one GENERATE directive for every 255 addresses in the network. 
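        # Note on the per-/24 split below: in the generated templates "$"
        # stands in for the final octet of the address, so a single
        # $GENERATE directive can cover at most 256 addresses (one /24).
        # A /16 therefore already needs 256 directives, and (as the tests
        # further down assert) anything wider than a /16 is refused.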
        for prefixlen in range(17, 23):
            network = IPNetwork(
                "%s/%s" % (factory.make_ipv4_address(), prefixlen))
            domain = factory.make_string()
            directives = DNSReverseZoneConfig.get_GENERATE_directives(
                network, domain)
            self.expectThat(directives, HasLength(network.size / 256))

    def test_returns_two_entries_for_slash_23_network(self):
        network = IPNetwork(factory.make_ipv4_network(slash=23))
        domain = factory.make_string()
        expected_generate_directives = self.get_expected_generate_directives(
            network, domain)
        directives = DNSReverseZoneConfig.get_GENERATE_directives(
            network, domain)
        self.expectThat(directives, HasLength(2))
        self.assertItemsEqual(expected_generate_directives, directives)

    def test_ignores_network_larger_than_slash_16(self):
        network = IPNetwork("%s/15" % factory.make_ipv4_address())
        self.assertEqual(
            [],
            DNSReverseZoneConfig.get_GENERATE_directives(
                network, factory.make_string()))

    def test_ignores_networks_that_span_slash_16s(self):
        # If the upper and lower bounds of a range span two /16 networks
        # (but contain between them no more than 65536 addresses),
        # get_GENERATE_directives() will return early.
        ip_range = IPRange('10.0.0.55', '10.1.0.54')
        directives = DNSReverseZoneConfig.get_GENERATE_directives(
            ip_range, factory.make_string())
        self.assertEqual([], directives)

    def test_sorts_output_by_hostname(self):
        network = IPNetwork("10.0.0.1/23")
        domain = factory.make_string()
        expected_hostname = "10-0-%s-$." + domain + "."
        expected_rdns = "$.%s.0.10.in-addr.arpa."
        directives = list(DNSReverseZoneConfig.get_GENERATE_directives(
            network, domain))
        self.expectThat(
            directives[0],
            Equals(("0-255", expected_rdns % "0", expected_hostname % "0")))
        self.expectThat(
            directives[1],
            Equals(("0-255", expected_rdns % "1", expected_hostname % "1")))


class TestDNSForwardZoneConfig_GetGenerateDirectives(MAASTestCase):
    """Tests for `DNSForwardZoneConfig.get_GENERATE_directives()`."""

    def test_explicitly(self):
        # The other tests in this TestCase rely on
        # get_expected_generate_directives(), which is quite dense. Here
        # we test get_GENERATE_directives() explicitly.
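        # For reference, a forward directive tuple like
        #     ("55-255", "192-168-0-$", "192.168.0.$")
        # presumably renders in the zone file as:
        #     $GENERATE 55-255 192-168-0-$ IN A 192.168.0.$
        # i.e. one A record per address, with "$" expanded over the range.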
ip_range = IPRange('192.168.0.55', '192.168.2.128') expected_directives = [ ("55-255", "192-168-0-$", "192.168.0.$"), ("0-255", "192-168-1-$", "192.168.1.$"), ("0-128", "192-168-2-$", "192.168.2.$"), ] self.assertItemsEqual( expected_directives, DNSForwardZoneConfig.get_GENERATE_directives(ip_range)) def get_expected_generate_directives(self, network): ip_parts = network.network.format().split('.') ip_parts[-1] = "$" expected_hostname = "%s" % "-".join(ip_parts) expected_address = ".".join(ip_parts) first_address = IPAddress(network.first).format() first_address_parts = first_address.split(".") last_address = IPAddress(network.last).format() last_address_parts = last_address.split(".") if network.size < 256: iterator_low = int(first_address_parts[-1]) iterator_high = int(last_address_parts[-1]) else: iterator_low = 0 iterator_high = 255 expected_iterator_values = "%s-%s" % (iterator_low, iterator_high) directives_needed = network.size / 256 if directives_needed == 0: directives_needed = 1 expected_directives = [] for num in range(directives_needed): ip_parts[-2] = unicode(num + int(ip_parts[-2])) expected_address = ".".join(ip_parts) expected_hostname = "%s" % "-".join(ip_parts) expected_directives.append( ( expected_iterator_values, expected_hostname, expected_address )) return expected_directives def test_returns_single_entry_for_slash_24_network(self): network = IPNetwork("%s/24" % factory.make_ipv4_address()) expected_directives = self.get_expected_generate_directives(network) directives = DNSForwardZoneConfig.get_GENERATE_directives( network) self.expectThat(directives, HasLength(1)) self.assertItemsEqual(expected_directives, directives) def test_returns_single_entry_for_tiny_network(self): network = IPNetwork("%s/31" % factory.make_ipv4_address()) expected_directives = self.get_expected_generate_directives(network) directives = DNSForwardZoneConfig.get_GENERATE_directives( network) self.assertEqual(1, len(expected_directives)) self.assertItemsEqual(expected_directives, directives) def test_returns_two_entries_for_slash_23_network(self): network = IPNetwork("%s/23" % factory.make_ipv4_address()) expected_directives = self.get_expected_generate_directives(network) directives = DNSForwardZoneConfig.get_GENERATE_directives( network) self.assertEqual(2, len(expected_directives)) self.assertItemsEqual(expected_directives, directives) def test_dtrt_for_larger_networks(self): # For every other network size that we're not explicitly # testing here, # DNSForwardZoneConfig.get_GENERATE_directives() will return # one GENERATE directive for every 255 addresses in the network. 
        for prefixlen in range(17, 23):
            network = IPNetwork(
                "%s/%s" % (factory.make_ipv4_address(), prefixlen))
            directives = DNSForwardZoneConfig.get_GENERATE_directives(
                network)
            self.assertEqual(network.size / 256, len(directives))

    def test_ignores_network_larger_than_slash_16(self):
        network = IPNetwork("%s/15" % factory.make_ipv4_address())
        self.assertEqual(
            [],
            DNSForwardZoneConfig.get_GENERATE_directives(network))

    def test_ignores_networks_that_span_slash_16s(self):
        # If the upper and lower bounds of a range span two /16 networks
        # (but contain between them no more than 65536 addresses),
        # get_GENERATE_directives() will return early.
        ip_range = IPRange('10.0.0.55', '10.1.0.54')
        directives = DNSForwardZoneConfig.get_GENERATE_directives(
            ip_range)
        self.assertEqual([], directives)

    def test_sorts_output(self):
        network = IPNetwork("10.0.0.0/23")
        expected_hostname = "10-0-%s-$"
        expected_address = "10.0.%s.$"
        directives = list(DNSForwardZoneConfig.get_GENERATE_directives(
            network))
        self.expectThat(len(directives), Equals(2))
        self.expectThat(
            directives[0],
            Equals(("0-255", expected_hostname % "0", expected_address % "0")))
        self.expectThat(
            directives[1],
            Equals(("0-255", expected_hostname % "1", expected_address % "1")))
maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/__init__.py0000644000000000000000000002003113056115004023202 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Hardware Drivers."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "Architecture",
    "ArchitectureRegistry",
    "BootResource",
    ]

from abc import (
    ABCMeta,
    abstractmethod,
    )

from jsonschema import validate
from provisioningserver.power.schema import JSON_POWER_TYPE_PARAMETERS
from provisioningserver.utils.registry import Registry


# JSON schema representing the Django choices format as JSON; an array of
# 2-item arrays.
CHOICE_FIELD_SCHEMA = {
    'type': 'array',
    'items': {
        'title': "Setting parameter field choice",
        'type': 'array',
        'minItems': 2,
        'maxItems': 2,
        'uniqueItems': True,
        'items': {
            'type': 'string',
        }
    },
}

# JSON schema for what a settings field should look like.
SETTING_PARAMETER_FIELD_SCHEMA = {
    'title': "Setting parameter field",
    'type': 'object',
    'properties': {
        'name': {
            'type': 'string',
        },
        'field_type': {
            'type': 'string',
        },
        'label': {
            'type': 'string',
        },
        'required': {
            'type': 'boolean',
        },
        'choices': CHOICE_FIELD_SCHEMA,
        'default': {
            'type': 'string',
        },
    },
    'required': ['field_type', 'label', 'required'],
}

# JSON schema for what group of setting parameters should look like.
JSON_SETTING_SCHEMA = {
    'title': "Setting parameters set",
    'type': 'object',
    'properties': {
        'name': {
            'type': 'string',
        },
        'description': {
            'type': 'string',
        },
        'fields': {
            'type': 'array',
            'items': SETTING_PARAMETER_FIELD_SCHEMA,
        },
    },
    'required': ['name', 'description', 'fields'],
}


def make_setting_field(
        name, label, field_type=None, choices=None, default=None,
        required=False):
    """Helper function for building a JSON setting parameters field.

    :param name: The name of the field.
    :type name: string
    :param label: The label to be presented to the user for this field.
    :type label: string
    :param field_type: The type of field to create. Can be one of
        (string, choice, mac_address). Defaults to string.
    :type field_type: string
    :param choices: The collection of choices to present to the user.
        Needs to be structured as a list of lists, otherwise
        make_setting_field() will raise a ValidationError.
    :type choices: list
    :param default: The default value for the field.
    :type default: string
    :param required: Whether or not a value for the field is required.
    :type required: boolean
    """
    if field_type not in ('string', 'mac_address', 'choice'):
        field_type = 'string'
    if choices is None:
        choices = []
    validate(choices, CHOICE_FIELD_SCHEMA)
    if default is None:
        default = ""
    field = {
        'name': name,
        'label': label,
        'required': required,
        'field_type': field_type,
        'choices': choices,
        'default': default,
    }
    return field


def validate_settings(setting_fields):
    """Helper that validates that the fields adhere to the JSON schema."""
    validate(setting_fields, JSON_SETTING_SCHEMA)


def gen_power_types():
    from provisioningserver.drivers.power import power_drivers_by_name
    for power_type in JSON_POWER_TYPE_PARAMETERS:
        driver = power_drivers_by_name.get(power_type['name'])
        if driver is not None:
            power_type['missing_packages'] = driver.detect_missing_packages()
        yield power_type


class Architecture:

    def __init__(self, name, description, pxealiases=None,
                 kernel_options=None):
        """Represents an architecture in the driver context.

        :param name: The architecture name as used in MAAS.
            arch/subarch or just arch.
        :param description: The human-readable description for the
            architecture.
        :param pxealiases: The optional list of names used if the
            hardware uses a different name when requesting its bootloader.
        :param kernel_options: The optional list of kernel options for this
            architecture. Anything supplied here supplements the options
            provided by MAAS core.
        """
        if pxealiases is None:
            pxealiases = ()
        self.name = name
        self.description = description
        self.pxealiases = pxealiases
        self.kernel_options = kernel_options


class BootResource:
    """Abstraction of ephemerals and pxe resources required for a hardware
    driver.

    This resource is responsible for importing and reporting on
    what is potentially available in relation to a cluster controller.
    """

    __metaclass__ = ABCMeta

    def __init__(self, name):
        self.name = name

    @abstractmethod
    def import_resources(self, at_location, filter=None):
        """Import the specified resources.

        :param at_location: URL to a Simplestreams index or a local path
            to a directory containing boot resources.
        :param filter: A simplestreams filter.
            e.g. "release=trusty label=beta-2 arch=amd64"
            This is ignored if the location is a local path; all resources
            at the location will be imported.
        TBD: How to provide progress information.
        """

    @abstractmethod
    def describe_resources(self, at_location):
        """Enumerate all the boot resources.

        :param at_location: URL to a Simplestreams index or a local path
            to a directory containing boot resources.

        :return: a list of dictionaries describing the available resources,
            which will need to be imported so the driver can use them.
[ { "release": "trusty", "arch": "amd64", "label": "beta-2", "size": 12344556, } , ] """ class HardwareDiscoverContext: __metaclass__ = ABCMeta @abstractmethod def startDiscovery(self): """TBD""" @abstractmethod def stopDiscovery(self): """TBD""" class ArchitectureRegistry(Registry): """Registry for architecture classes.""" @classmethod def get_by_pxealias(cls, alias): for _, arch in cls: if alias in arch.pxealiases: return arch return None class BootResourceRegistry(Registry): """Registry for boot resource classes.""" builtin_architectures = [ Architecture(name="i386/generic", description="i386"), Architecture(name="amd64/generic", description="amd64"), Architecture( name="arm64/generic", description="arm64/generic", pxealiases=["arm"]), Architecture( name="arm64/xgene-uboot", description="arm64/xgene-uboot", pxealiases=["arm"]), Architecture( name="arm64/xgene-uboot-mustang", description="arm64/xgene-uboot-mustang", pxealiases=["arm"]), Architecture( name="armhf/highbank", description="armhf/highbank", pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), Architecture( name="armhf/generic", description="armhf/generic", pxealiases=["arm"], kernel_options=["console=ttyAMA0"]), Architecture( name="armhf/keystone", description="armhf/keystone", pxealiases=["arm"]), # PPC64EL needs a rootdelay for PowerNV. The disk controller # in the hardware, takes a little bit longer to come up then # the initrd wants to wait. Set this to 60 seconds, just to # give the booting machine enough time. This doesn't slow down # the booting process, it just increases the timeout. Architecture( name="ppc64el/generic", description="ppc64el", kernel_options=['rootdelay=60']), ] for arch in builtin_architectures: ArchitectureRegistry.register_item(arch.name, arch) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/diskless/0000755000000000000000000000000013056115004022716 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/0000755000000000000000000000000013056115004022672 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/0000755000000000000000000000000013056115004022600 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/0000755000000000000000000000000013056115004022231 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/0000755000000000000000000000000013056115004022535 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/tests/0000755000000000000000000000000013056115004022237 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/diskless/__init__.py0000644000000000000000000000567413056115004025043 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Base diskless driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DisklessDriver", "DisklessDriverError", "DisklessDriverRegistry", ] from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from jsonschema import validate from provisioningserver.drivers import ( JSON_SETTING_SCHEMA, validate_settings, ) from provisioningserver.utils.registry import Registry JSON_DISKLESS_DRIVERS_SCHEMA = { 'title': "Diskless drivers parameters set", 'type': 'array', 'items': JSON_SETTING_SCHEMA, } class DisklessDriverError: """Error when driver fails to complete the needed task.""" class DisklessDriver: """Skeleton for a diskless driver.""" __metaclass__ = ABCMeta def __init__(self): super(DisklessDriver, self).__init__() validate_settings(self.get_schema()) @abstractproperty def name(self): """Name of the diskless driver.""" @abstractproperty def description(self): """Description of the diskless driver.""" @abstractproperty def settings(self): """List of settings for the driver. Each setting in this list can be changed by the user. They are passed to the `create_disk` and `delete_disk` using the kwargs. It is up to the driver to read these options before performing the operation. """ @abstractmethod def create_disk(self, system_id, source_path, **kwargs): """Creates the disk for the `system_id` using the `source_path` as the data to place on the disk initially. :param system_id: `Node.system_id` :param source_path: Path to the source data :param kwargs: Settings user set from `get_settings`. :return: Path to the newly created disk. """ @abstractmethod def delete_disk(self, system_id, disk_path, **kwargs): """Deletes the disk for the `system_id`. :param system_id: `Node.system_id` :param disk_path: Path returned by `create_disk`. :param kwargs: Settings user set from `get_settings`. """ def get_schema(self): """Returns the JSON schema for the driver.""" return dict( name=self.name, description=self.description, fields=self.settings) class DisklessDriverRegistry(Registry): """Registry for diskless drivers.""" @classmethod def get_schema(cls): """Returns the full schema for the registry.""" schemas = [drivers.get_schema() for _, drivers in cls] validate(schemas, JSON_DISKLESS_DRIVERS_SCHEMA) return schemas builtin_diskless_drivers = [ ] for driver in builtin_diskless_drivers: DisklessDriverRegistry.register_item(driver.name, driver) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/diskless/tests/0000755000000000000000000000000013056115004024060 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/diskless/tests/__init__.py0000644000000000000000000000000013056115004026157 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/diskless/tests/test_base.py0000644000000000000000000001273613056115004026414 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.drivers.diskless`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.drivers import ( make_setting_field, validate_settings, ) from provisioningserver.drivers.diskless import ( DisklessDriver, DisklessDriverRegistry, ) from provisioningserver.utils.testing import RegistryFixture class FakeDisklessDriver(DisklessDriver): name = "" description = "" settings = [] def __init__(self, name, description, settings): self.name = name self.description = description self.settings = settings super(FakeDisklessDriver, self).__init__() def create_disk(self, system_id, source_path, **kwargs): raise NotImplementedError() def delete_disk(self, system_id, disk_path, **kwargs): raise NotImplementedError() def make_diskless_driver(name=None, description=None, settings=None): if name is None: name = factory.make_name('diskless') if description is None: description = factory.make_name('description') if settings is None: settings = [] return FakeDisklessDriver(name, description, settings) class TestFakeDisklessDriver(MAASTestCase): def test_attributes(self): fake_name = factory.make_name('name') fake_description = factory.make_name('description') fake_setting = factory.make_name('setting') fake_settings = [ make_setting_field( fake_setting, fake_setting.title()), ] attributes = { 'name': fake_name, 'description': fake_description, 'settings': fake_settings, } fake_driver = FakeDisklessDriver( fake_name, fake_description, fake_settings) self.assertAttributes(fake_driver, attributes) def test_make_diskless_driver(self): fake_name = factory.make_name('name') fake_description = factory.make_name('description') fake_setting = factory.make_name('setting') fake_settings = [ make_setting_field( fake_setting, fake_setting.title()), ] attributes = { 'name': fake_name, 'description': fake_description, 'settings': fake_settings, } fake_driver = make_diskless_driver( name=fake_name, description=fake_description, settings=fake_settings) self.assertAttributes(fake_driver, attributes) def test_make_diskless_driver_makes_name_and_description(self): fake_driver = make_diskless_driver() self.assertNotEqual("", fake_driver.name) self.assertNotEqual("", fake_driver.description) def test_create_disk_raises_not_implemented(self): fake_driver = make_diskless_driver() self.assertRaises( NotImplementedError, fake_driver.create_disk, sentinel.system_id, sentinel.source_path) def test_delete_disk_raises_not_implemented(self): fake_driver = make_diskless_driver() self.assertRaises( NotImplementedError, fake_driver.delete_disk, sentinel.system_id, sentinel.disk_path) class TestDisklessDriver(MAASTestCase): def test_get_schema(self): fake_name = factory.make_name('name') fake_description = factory.make_name('description') fake_setting = factory.make_name('setting') fake_settings = [ make_setting_field( fake_setting, fake_setting.title()), ] fake_driver = make_diskless_driver() self.assertItemsEqual({ 'name': fake_name, 'description': fake_description, 'fields': fake_settings, }, fake_driver.get_schema()) def test_get_schema_returns_valid_schema(self): fake_driver = make_diskless_driver() #: doesn't raise ValidationError validate_settings(fake_driver.get_schema()) class TestDisklessDriverRegistry(MAASTestCase): def setUp(self): super(TestDisklessDriverRegistry, self).setUp() # Ensure the global registry 
is empty for each test run.
        self.useFixture(RegistryFixture())

    def test_registry(self):
        self.assertItemsEqual([], DisklessDriverRegistry)
        DisklessDriverRegistry.register_item("driver", sentinel.driver)
        self.assertIn(
            sentinel.driver,
            (item for name, item in DisklessDriverRegistry))

    def test_get_schema(self):
        fake_driver_one = make_diskless_driver()
        fake_driver_two = make_diskless_driver()
        DisklessDriverRegistry.register_item(
            fake_driver_one.name, fake_driver_one)
        DisklessDriverRegistry.register_item(
            fake_driver_two.name, fake_driver_two)
        self.assertItemsEqual([
            {
                'name': fake_driver_one.name,
                'description': fake_driver_one.description,
                'fields': [],
            },
            {
                'name': fake_driver_two.name,
                'description': fake_driver_two.description,
                'fields': [],
            }],
            DisklessDriverRegistry.get_schema())
maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/__init__.py0000644000000000000000000000000013056115004024771 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/apc.py0000644000000000000000000000541713056115004024016 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Support for managing American Power Conversion (APC) PDU outlets via SNMP.

APC Network Management Card AOS and Rack PDU APP firmware versions supported:
    v3.7.3
    v3.7.4
"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'power_control_apc',
    'power_state_apc',
    ]

from subprocess import (
    PIPE,
    Popen,
    )
from time import sleep

from provisioningserver.utils.shell import ExternalProcessError


COMMON_ARGS = '-c private -v1 %s .1.3.6.1.4.1.318.1.1.12.3.3.1.1.4.%s'


class APCState(object):
    ON = '1'
    OFF = '2'


class APCException(Exception):
    """Failure communicating to the APC PDU."""


class APCSNMP:

    def run_process(self, command):
        proc = Popen(command.split(), stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProcessError(
                proc.returncode, command.split(), stderr)
        return stdout.split(": ")[1].rstrip('\n')

    def power_off_outlet(self, ip, outlet):
        """Power off outlet."""
        command = 'snmpset ' + COMMON_ARGS % (ip, outlet) + ' i 2'
        return self.run_process(command)

    def power_on_outlet(self, ip, outlet, power_on_delay):
        """Power on outlet.

        This forces the outlet OFF first, then sleeps for
        `power_on_delay` seconds, before turning it ON.
        """
        command = 'snmpset ' + COMMON_ARGS % (ip, outlet) + ' i 1'
        self.power_off_outlet(ip, outlet)
        sleep(power_on_delay)
        return self.run_process(command)

    def get_power_state_of_outlet(self, ip, outlet):
        """Get power state of outlet (ON/OFF)."""
        command = 'snmpget ' + COMMON_ARGS % (ip, outlet)
        return self.run_process(command)


def power_control_apc(ip, outlet, power_change, power_on_delay):
    """Handle calls from the power template for outlets with a power type
    of 'apc'.
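
    For example (hypothetical values), powering outlet 3 off on a PDU at
    10.0.0.5 shells out to:

        snmpset -c private -v1 10.0.0.5 .1.3.6.1.4.1.318.1.1.12.3.3.1.1.4.3 i 2

    and powering it on sends 'i 1' to the same OID, after the forced
    off/sleep cycle in APCSNMP.power_on_outlet().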
""" apc = APCSNMP() if power_change == 'off': apc.power_off_outlet(ip, outlet) elif power_change == 'on': apc.power_on_outlet(ip, outlet, float(power_on_delay)) else: raise AssertionError( "Unrecognised power change: %r" % (power_change,)) def power_state_apc(ip, outlet): """Return the power state for the APC PDU outlet.""" apc = APCSNMP() power_state = apc.get_power_state_of_outlet(ip, outlet) if power_state == APCState.OFF: return 'off' elif power_state == APCState.ON: return 'on' raise APCException('Unknown power state: %r' % power_state) def required_package(): return ['snmpset', 'snmp'] maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/hmc.py0000644000000000000000000000652613056115004024024 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Support for managing lpars via the IBM Hardware Management Console (HMC). This module provides support for interacting with IBM's HMC via SSH. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'power_control_hmc', 'power_state_hmc', ] from paramiko import ( AutoAddPolicy, SSHClient, SSHException, ) class HMCState: OFF = ('Shutting Down', 'Not Activated') ON = ('Starting', 'Running', 'Open Firmware') class HMCException(Exception): """Failure communicating to HMC.""" class HMC: """An API for interacting with the HMC via SSH.""" def __init__(self, ip, username, password): self.ip = ip self.username = username self.password = password self._ssh = SSHClient() self._ssh.set_missing_host_key_policy(AutoAddPolicy()) def _run_cli_command(self, command): """Run a single command and return unparsed text from stdout.""" self._ssh.connect( self.ip, username=self.username, password=self.password) try: _, stdout, _ = self._ssh.exec_command(command) output = stdout.read() finally: self._ssh.close() return output def get_lpar_power_state(self, server_name, lpar): """Get power state of lpar.""" power_state = self._run_cli_command( "lssyscfg -m %s -r lpar -F name:state" % server_name) return power_state.split('%s:' % lpar)[1].split('\n')[0] def power_lpar_on(self, server_name, lpar): """Power lpar on. Set bootstring flag to boot via network by default. This will set the default boot order to try and boot from the first five network interfaces it enumerates over. """ return self._run_cli_command( "chsysstate -r lpar -m %s -o on -n %s --bootstring network-all" % (server_name, lpar)) def power_lpar_off(self, server_name, lpar): """Power lpar off.""" return self._run_cli_command( "chsysstate -r lpar -m %s -o shutdown -n %s --immed" % (server_name, lpar)) def power_control_hmc(ip, username, password, server_name, lpar, power_change): """Handle calls from the power template for nodes with a power type of 'hmc'. 
""" hmc = HMC(ip, username, password) if power_change == 'off': hmc.power_lpar_off(server_name, lpar) elif power_change == 'on': if hmc.get_lpar_power_state(server_name, lpar) in HMCState.ON: hmc.power_lpar_off(server_name, lpar) hmc.power_lpar_on(server_name, lpar) else: raise HMCException("Unexpected maas power mode.") def power_state_hmc(ip, username, password, server_name, lpar): """Return the power state for the hmc machine.""" hmc = HMC(ip, username, password) try: power_state = hmc.get_lpar_power_state(server_name, lpar) except SSHException as e: raise HMCException("Failed to retrieve power state: %s" % e) if power_state in HMCState.OFF: return 'off' elif power_state in HMCState.ON: return 'on' raise HMCException('Unknown power state: %s' % power_state) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/mscm.py0000644000000000000000000001715613056115004024215 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Support for managing nodes via the Moonshot HP iLO Chassis Manager CLI. This module provides support for interacting with HP Moonshot iLO Chassis Management (MSCM) CLI via SSH, and for using that support to allow MAAS to manage systems via iLO. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'power_control_mscm', 'power_state_mscm', 'probe_and_enlist_mscm', ] import re from socket import error as SOCKETError from paramiko import ( AutoAddPolicy, SSHClient, SSHException, ) from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous cartridge_mapping = { 'ProLiant Moonshot Cartridge': 'amd64/generic', 'ProLiant m300 Server Cartridge': 'amd64/generic', 'ProLiant m350 Server Cartridge': 'amd64/generic', 'ProLiant m400 Server Cartridge': 'arm64/xgene-uboot', 'ProLiant m500 Server Cartridge': 'amd64/generic', 'ProLiant m700 Server Cartridge': 'amd64/generic', 'ProLiant m710 Server Cartridge': 'amd64/generic', 'ProLiant m800 Server Cartridge': 'armhf/keystone', 'Default': 'amd64/generic', } class MSCMState: OFF = "Off" ON = "On" class MSCMError(Exception): """Failure communicating to MSCM. """ class MSCM: """An API for interacting with the Moonshot iLO CM CLI via SSH.""" def __init__(self, host, username, password): self.host = host self.username = username self.password = password self._ssh = SSHClient() self._ssh.set_missing_host_key_policy(AutoAddPolicy()) def _run_cli_command(self, command): """Run a single command and return unparsed text from stdout.""" try: self._ssh.connect( self.host, username=self.username, password=self.password) _, stdout, _ = self._ssh.exec_command(command) output = stdout.read() except (SSHException, EOFError, SOCKETError) as e: raise MSCMError( "Could not make SSH connection to MSCM for " "%s on %s - %s" % (self.username, self.host, e)) finally: self._ssh.close() return output def discover_nodes(self): """Discover all available nodes. 
Example of stdout from running "show node list": 'show node list\r\r\nSlot ID Proc Manufacturer Architecture Memory Power Health\r\n---- ----- ---------------------- -------------------- ------ ----- ------\r\n 01 c1n1 Intel Corporation x86 Architecture 32 GB On OK \r\n 02 c2n1 N/A No Asset Information \r\n\r\n' The regex 'c\d+n\d' is finding the node_id's c1-45n1-8 """ node_list = self._run_cli_command("show node list") return re.findall(r'c\d+n\d', node_list) def get_node_macaddr(self, node_id): """Get node MAC address(es). Example of stdout from running "show node macaddr ": 'show node macaddr c1n1\r\r\nSlot ID NIC 1 (Switch A) NIC 2 (Switch B) NIC 3 (Switch A) NIC 4 (Switch B)\r\n ---- ----- ----------------- ----------------- ----------------- -----------------\r\n 1 c1n1 a0:1d:48:b5:04:34 a0:1d:48:b5:04:35 a0:1d:48:b5:04:36 a0:1d:48:b5:04:37\r\n\r\n\r\n' The regex '[\:]'.join(['[0-9A-F]{1,2}'] * 6) is finding the MAC Addresses for the given node_id. """ macs = self._run_cli_command("show node macaddr %s" % node_id) return re.findall(r':'.join(['[0-9a-f]{2}'] * 6), macs) def get_node_arch(self, node_id): """Get node architecture. Example of stdout from running "show node info ": 'show node info c1n1\r\r\n\r\nCartridge #1 \r\n Type: Compute\r\n Manufacturer: HP\r\n Product Name: ProLiant m500 Server Cartridge\r\n' Parsing this retrieves 'ProLiant m500 Server Cartridge' """ node_detail = self._run_cli_command("show node info %s" % node_id) cartridge = node_detail.split('Product Name: ')[1].splitlines()[0] if cartridge in cartridge_mapping: return cartridge_mapping[cartridge] return cartridge_mapping['Default'] def get_node_power_state(self, node_id): """Get power state of node (on/off). Example of stdout from running "show node power ": 'show node power c1n1\r\r\n\r\nCartridge #1\r\n Node #1\r\n Power State: On\r\n' Parsing this retrieves 'On' """ power_state = self._run_cli_command("show node power %s" % node_id) return power_state.split('Power State: ')[1].splitlines()[0] def power_node_on(self, node_id): """Power node on.""" return self._run_cli_command("set node power on %s" % node_id) def power_node_off(self, node_id): """Power node off.""" return self._run_cli_command("set node power off force %s" % node_id) def configure_node_boot_m2(self, node_id): """Configure HDD boot for node.""" return self._run_cli_command("set node boot M.2 %s" % node_id) def configure_node_bootonce_pxe(self, node_id): """Configure PXE boot for node once.""" return self._run_cli_command("set node bootonce pxe %s" % node_id) def power_control_mscm(host, username, password, node_id, power_change): """Handle calls from the power template for nodes with a power type of 'mscm'. 
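
    For example (hypothetical node id), turning node 'c1n1' on issues,
    over SSH (forcing the node off first if it is already on):

        set node bootonce pxe c1n1
        set node power on c1n1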
""" mscm = MSCM(host, username, password) if power_change == 'off': mscm.power_node_off(node_id) elif power_change == 'on': if mscm.get_node_power_state(node_id) == MSCMState.ON: mscm.power_node_off(node_id) mscm.configure_node_bootonce_pxe(node_id) mscm.power_node_on(node_id) else: raise MSCMError("Unexpected maas power mode.") def power_state_mscm(host, username, password, node_id): """Return the power state for the mscm machine.""" mscm = MSCM(host, username, password) try: power_state = mscm.get_node_power_state(node_id) except SSHException as e: raise MSCMError( "Failed to retrieve power state: %s" % e) if power_state == MSCMState.OFF: return 'off' elif power_state == MSCMState.ON: return 'on' raise MSCMError('Unknown power state: %s' % power_state) @synchronous def probe_and_enlist_mscm(user, host, username, password, accept_all=False): """ Extracts all of nodes from mscm, sets all of them to boot via M.2 by, default, sets them to bootonce via PXE, and then enlists them into MAAS. """ mscm = MSCM(host, username, password) try: # if discover_nodes works, we have access to the system nodes = mscm.discover_nodes() except SSHException as e: raise MSCMError( "Failed to probe nodes for mscm with host=%s, " "username=%s, password=%s: %s" % (host, username, password, e)) for node_id in nodes: # Set default boot to M.2 mscm.configure_node_boot_m2(node_id) params = { 'power_address': host, 'power_user': username, 'power_pass': password, 'node_id': node_id, } arch = mscm.get_node_arch(node_id) macs = mscm.get_node_macaddr(node_id) system_id = create_node(macs, arch, 'mscm', params).wait(30) if accept_all: commission_node(system_id, user).wait(30) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/msftocs.py0000644000000000000000000002050313056115004024722 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'power_control_msftocs', 'power_state_msftocs', 'probe_and_enlist_msftocs', ] import urllib2 import urlparse from lxml.etree import fromstring from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous class MicrosoftOCSState(object): ON = "ON" OFF = "OFF" class MicrosoftOCSError(Exception): """Failure talking to a MicrosoftOCS chassis controller. """ class MicrosoftOCSAPI(object): """API to communicate with the Microsoft OCS Chassis Manager.""" def __init__(self, ip, port, username, password): """ :param ip: The IP address of the MicrosoftOCS chassis, e.g.: "192.168.0.1" :type ip: string :param port: The http port to connect to the MicrosoftOCS chassis, e.g.: "8000" :type port: string :param username: The username for authentication to the MicrosoftOCS chassis, e.g.: "admin" :type username: string :param password: The password for authentication to the MicrosoftOCS chassis, e.g.: "password" :type password: string """ self.ip = ip self.port = port self.username = username self.password = password def build_url(self, command, params=[]): url = 'http://%s:%d/' % (self.ip, self.port) params = filter(None, params) return urlparse.urljoin(url, command) + '?' 
+ '&'.join(params) def extract_from_response(self, response, element_tag): """Extract text from first element with element_tag in response.""" root = fromstring(response) return root.findtext( './/ns:%s' % element_tag, namespaces={'ns': root.nsmap[None]}) def get(self, command, params=None): """Dispatch a GET request to a Microsoft OCS chassis.""" url = self.build_url(command, params) authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm() authinfo.add_password(None, url, self.username, self.password) proxy_handler = urllib2.ProxyHandler({}) auth_handler = urllib2.HTTPBasicAuthHandler(authinfo) opener = urllib2.build_opener(proxy_handler, auth_handler) urllib2.install_opener(opener) response = urllib2.urlopen(url) return response.read() def get_blade_power_state(self, bladeid): """Gets the ON/OFF State of Blade.""" params = ["bladeid=%s" % bladeid] return self.extract_from_response( self.get('GetBladeState', params), 'bladeState') def _set_power(self, bladeid, element_tag): """Set AC Outlet Power for Blade.""" params = ["bladeid=%s" % bladeid] return self.extract_from_response( self.get(element_tag, params), 'completionCode') def set_power_off_blade(self, bladeid): """Turns AC Outlet Power OFF for Blade.""" return self._set_power(bladeid, 'SetBladeOff') def set_power_on_blade(self, bladeid): """Turns AC Outlet Power ON for Blade.""" return self._set_power(bladeid, 'SetBladeOn') def set_next_boot_device(self, bladeid, pxe=False, uefi=False, persistent=False): """Set Next Boot Device.""" boot_pxe = '2' if pxe else '3' boot_uefi = 'true' if uefi else 'false' boot_persistent = 'true' if persistent else 'false' params = [ "bladeid=%s" % bladeid, "bootType=%s" % boot_pxe, "uefi=%s" % boot_uefi, "persistent=%s" % boot_persistent ] return self.extract_from_response( self.get('SetNextBoot', params), 'nextBoot') def get_blades(self): """Gets available Blades. Returns dictionary of blade numbers and their corresponding MAC Addresses. """ blades = {} root = fromstring(self.get('GetChassisInfo')) namespace = {'ns': root.nsmap[None]} blade_collections = root.find( './/ns:bladeCollections', namespaces=namespace) # Iterate over all BladeInfo Elements for blade_info in blade_collections: blade_mac_address = blade_info.find( './/ns:bladeMacAddress', namespaces=namespace) macs = [] # Iterate over all NicInfo Elements and add MAC Addresses for nic_info in blade_mac_address: macs.append( nic_info.findtext( './/ns:macAddress', namespaces=namespace)) macs = filter(None, macs) if macs: # Retrive Blade id number bladeid = blade_info.findtext( './/ns:bladeNumber', namespaces=namespace) # Add MAC Addresses for Blade blades[bladeid] = macs return blades def power_state_msftocs(ip, port, username, password, blade_id): """Return the power state for the given Blade.""" port = int(port) or 8000 # Default Port for MicrosoftOCS Chassis is 8000 api = MicrosoftOCSAPI(ip, port, username, password) try: power_state = api.get_blade_power_state(blade_id) except urllib2.HTTPError as e: raise MicrosoftOCSError( "Failed to retrieve power state. HTTP error code: %s" % e.code) except urllib2.URLError as e: raise MicrosoftOCSError( "Failed to retrieve power state. 
Server could not be reached: %s" % e.reason) if power_state == MicrosoftOCSState.OFF: return 'off' elif power_state == MicrosoftOCSState.ON: return 'on' raise MicrosoftOCSError('Unknown power state: %s' % power_state) def power_control_msftocs( ip, port, username, password, blade_id, power_change): """Control the power state for the given Blade.""" port = int(port) or 8000 # Default Port for MicrosoftOCS Chassis is 8000 api = MicrosoftOCSAPI(ip, port, username, password) if power_change == 'on': power_state = api.get_blade_power_state(blade_id) if power_state == MicrosoftOCSState.ON: api.set_power_off_blade(blade_id) # Set default (persistent) boot to HDD api.set_next_boot_device(blade_id, persistent=True) # Set next boot to PXE api.set_next_boot_device(blade_id, pxe=True) api.set_power_on_blade(blade_id) elif power_change == 'off': api.set_power_off_blade(blade_id) else: raise MicrosoftOCSError( "Unexpected MAAS power mode: %s" % power_change) @synchronous def probe_and_enlist_msftocs( user, ip, port, username, password, accept_all=False): """ Extracts all of nodes from msftocs, sets all of them to boot via HDD by, default, sets them to bootonce via PXE, and then enlists them into MAAS. """ port = int(port) or 8000 # Default Port for MicrosoftOCS Chassis is 8000 api = MicrosoftOCSAPI(ip, port, username, password) try: # if get_blades works, we have access to the system blades = api.get_blades() except urllib2.HTTPError as e: raise MicrosoftOCSError( "Failed to probe nodes for Microsoft OCS with ip=%s " "port=%d, username=%s, password=%s. HTTP error code: %s" % (ip, port, username, password, e.code)) except urllib2.URLError as e: raise MicrosoftOCSError( "Failed to probe nodes for Microsoft OCS with ip=%s " "port=%d, username=%s, password=%s. " "Server could not be reached: %s" % (ip, port, username, password, e.reason)) for blade_id, macs in blades.iteritems(): # Set default (persistent) boot to HDD api.set_next_boot_device(blade_id, persistent=True) # Set next boot to PXE api.set_next_boot_device(blade_id, pxe=True) params = { 'power_address': ip, 'power_port': port, 'power_user': username, 'power_pass': password, 'blade_id': blade_id, } system_id = create_node(macs, 'amd64', 'msftocs', params).wait(30) if accept_all: commission_node(system_id, user).wait(30) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/seamicro.py0000644000000000000000000002666213056115004025062 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'power_control_seamicro15k_v09', 'power_control_seamicro15k_v2', 'probe_seamicro15k_and_enlist', ] import httplib import json import time import urllib2 import urlparse from provisioningserver.logger import get_maas_logger from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous from provisioningserver.utils.url import compose_URL from seamicroclient import exceptions as seamicro_exceptions from seamicroclient.v2 import client as seamicro_client maaslog = get_maas_logger("drivers.seamicro") class POWER_STATUS: ON = 'Power-On' OFF = 'Power-Off' RESET = 'Reset' class SeaMicroError(Exception): """Failure talking to a SeaMicro chassis controller. """ pass class SeaMicroAPIV09Error(SeaMicroError): """Failure talking to a SeaMicro API v0.9. 
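
    Carries an optional ``response_code`` attribute holding the HTTP or
    JSON-RPC error code that triggered the failure, when one is available.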
""" def __init__(self, msg, response_code=None): super(SeaMicroAPIV09Error, self).__init__(msg) self.response_code = response_code class SeaMicroAPIV09: allowed_codes = [httplib.OK, httplib.ACCEPTED, httplib.NOT_MODIFIED] def __init__(self, url): """ :param url: The URL of the seamicro chassis, e.g.: http://seamciro/v0.9 :type url: string """ self.url = url self.token = None def build_url(self, location, params=None): """Builds an order-dependent url, as the SeaMicro chassis requires order-dependent parameters. """ if params is None: params = [] params = filter(None, params) return urlparse.urljoin(self.url, location) + '?' + '&'.join(params) def parse_response(self, url, response): """Parses the HTTP response, checking for errors from the SeaMicro chassis. """ if response.getcode() not in self.allowed_codes: raise SeaMicroAPIV09Error( "got response code %s" % response.getcode(), response_code=response.getcode()) text = response.read() # Decode the response, it should be json. If not # handle that case and set json_data to None, so # a SeaMicroAPIV09Error can be raised. try: json_data = json.loads(text) except ValueError: json_data = None if not json_data: raise SeaMicroAPIV09Error( 'No JSON data found from %s: got %s' % (url, text)) json_rpc_code = int(json_data['error']['code']) if json_rpc_code not in self.allowed_codes: raise SeaMicroAPIV09Error( 'Got JSON RPC error code %d: %s for %s' % ( json_rpc_code, httplib.responses.get(json_rpc_code, 'Unknown!'), url), response_code=json_rpc_code) return json_data def get(self, location, params=None): """Dispatch a GET request to a SeaMicro chassis. The seamicro box has order-dependent HTTP parameters, so we build our own get URL, and use a list vs. a dict for data, as the order is implicit. """ url = self.build_url(location, params) response = urllib2.urlopen(url) json_data = self.parse_response(url, response) return json_data['result'] def put(self, location, params=None): """Dispatch a PUT request to a SeaMicro chassis. The seamicro box has order-dependent HTTP parameters, so we build our own get URL, and use a list vs. a dict for data, as the order is implicit. """ opener = urllib2.build_opener(urllib2.HTTPHandler) url = self.build_url(location, params) request = urllib2.Request(url) request.get_method = lambda: 'PUT' request.add_header('content-type', 'text/json') response = opener.open(request) json_data = self.parse_response(url, response) return json_data['result'] def is_logged_in(self): return self.token is not None def login(self, username, password): if not self.is_logged_in(): self.token = self.get("login", [username, password]) def logout(self): if self.is_logged_in(): self.get("logout") self.token = None def servers_all(self): return self.get("servers/all", [self.token]) def servers(self): return self.get("servers", [self.token]) def server_index(self, server_id): """API v0.9 uses arbitrary indexing, this function converts a server id to an index that can be used for detailed outputs & commands. 
""" servers = self.servers()['serverId'] for idx, name in servers.items(): if name == server_id: return idx return None def power_server(self, server_id, new_status, do_pxe=False, force=False): idx = self.server_index(server_id) if idx is None: raise SeaMicroAPIV09Error( 'Failed to retrieve server index, ' 'invalid server_id: %s' % server_id) location = 'servers/%s' % idx params = ['action=%s' % new_status] if new_status in [POWER_STATUS.ON, POWER_STATUS.RESET]: if do_pxe: params.append("using-pxe=true") else: params.append("using-pxe=false") elif new_status in [POWER_STATUS.OFF]: if force: params.append("force=true") else: params.append("force=false") else: raise SeaMicroAPIV09Error('Invalid power action: %s' % new_status) params.append(self.token) self.put(location, params=params) return True def power_on(self, server_id, do_pxe=False): return self.power_server(server_id, POWER_STATUS.ON, do_pxe=do_pxe) def power_off(self, server_id, force=False): return self.power_server(server_id, POWER_STATUS.OFF, force=force) def reset(self, server_id, do_pxe=False): return self.power_server(server_id, POWER_STATUS.RESET, do_pxe=do_pxe) def get_seamicro15k_api(version, ip, username, password): """Gets the api client depending on the version. Supports v0.9 and v2.0. :return: api for version, None if version not supported """ if version == 'v0.9': api = SeaMicroAPIV09(compose_URL('http:///v0.9/', ip)) try: api.login(username, password) except urllib2.URLError: # Cannot reach using v0.9, might not be supported return None return api elif version == 'v2.0': url = compose_URL('http:///v2.0', ip) try: api = seamicro_client.Client( auth_url=url, username=username, password=password) except seamicro_exceptions.ConnectionRefused: # Cannot reach using v2.0, might no be supported return None return api def get_seamicro15k_servers(version, ip, username, password): """Gets a list of tuples containing (server_id, mac_address) from the sm15k api version. Supports v0.9 and v2.0. :return: list of (server_id, mac_address), None if version not supported """ api = get_seamicro15k_api(version, ip, username, password) if api: if version == 'v0.9': return ( (server['serverId'].split('/')[0], server['serverMacAddr']) for server in api.servers_all().values() # There are 8 network cards attached to these boxes, we only # use NIC 0 for PXE booting. if server['serverNIC'] == '0' ) elif version == 'v2.0': servers = [] for server in api.servers.list(): id = server.id.split('/')[0] macs = [nic['macAddr'] for nic in server.nic.values()] servers.append((id, macs)) return servers return None def select_seamicro15k_api_version(power_control): """Returns the lastest api version to use.""" if power_control == 'ipmi': return ['v2.0', 'v0.9'] if power_control == 'restapi': return ['v0.9'] if power_control == 'restapi2': return ['v2.0'] raise SeaMicroError( 'Unsupported power control method: %s.' 
% power_control) def find_seamicro15k_servers(ip, username, password, power_control): """Returns the list of servers, using the latest supported api version.""" api_versions = select_seamicro15k_api_version(power_control) for version in api_versions: servers = get_seamicro15k_servers(version, ip, username, password) if servers is not None: return servers raise SeaMicroError('Failure to retrieve servers.') @synchronous def probe_seamicro15k_and_enlist(user, ip, username, password, power_control=None, accept_all=False): power_control = power_control or 'ipmi' maaslog.info("Probing for seamicro15k servers as %s@%s", username, ip) servers = find_seamicro15k_servers(ip, username, password, power_control) for system_id, macs in servers: params = { 'power_address': ip, 'power_user': username, 'power_pass': password, 'power_control': power_control, 'system_id': system_id } maaslog.info("Creating seamicro15k node with MACs: %s", macs) system_id = create_node(macs, 'amd64', 'sm15k', params).wait(30) if accept_all: commission_node(system_id, user).wait(30) def power_control_seamicro15k_v09(ip, username, password, server_id, power_change, retry_count=5, retry_wait=1): server_id = '%s/0' % server_id api = SeaMicroAPIV09(compose_URL('http:///v0.9/', ip)) while retry_count > 0: api.login(username, password) try: if power_change == "on": api.power_on(server_id, do_pxe=True) elif power_change == "off": api.power_off(server_id, force=True) except SeaMicroAPIV09Error as e: # Chance that multiple login's are at once, the api # only supports one at a time. So lets try again after # a second, up to max retry count. if e.response_code == 401: retry_count -= 1 time.sleep(retry_wait) continue else: raise break def power_control_seamicro15k_v2(ip, username, password, server_id, power_change): server_id = '%s/0' % server_id api = get_seamicro15k_api('v2.0', ip, username, password) if api is None: raise SeaMicroError('Unable to contact BMC controller.') server = api.servers.get(server_id) if power_change == "on": server.power_on(using_pxe=True) elif power_change == "off": server.power_off(force=True) def power_query_seamicro15k_v2(ip, username, password, server_id): server_id = '%s/0' % server_id api = get_seamicro15k_api('v2.0', ip, username, password) if api is None: raise SeaMicroError('Unable to contact BMC controller.') server = api.servers.get(server_id) if server.active: return "on" return "off" maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/0000755000000000000000000000000013056115004024034 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/ucsm.py0000644000000000000000000004112613056115004024217 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Support for managing nodes via Cisco UCS Manager's HTTP-XML API. It's useful to have a cursory understanding of how UCS Manager XML API works. Cisco has a proprietary document that describes all of this in more detail, and I would suggest you get a copy of that if you want more information than is provided here. The Cisco DevNet website for UCS Manager has a link to the document, which is behind a login wall, and links to example UCS queries: https://developer.cisco.com/web/unifiedcomputing/home UCS Manager is a tool for managing servers. It provides an XML API for external applications to use to interact with UCS Manager to manage servers. 
The API is available via HTTP, and requests and responses are made of XML strings. MAAS's code for interacting with a UCS Manager is concerned with building these requests, sending them to UCS Manager, and processing the responses. UCS Manager stores information in a hierarchical structure known as the management information tree. This structure is exposed via the XML API, where we can manipulate objects in the tree by finding them, reading them, and writing them. Some definitions for terms that are used in this code: Boot Policy - Controls the boot order for a server. Each service profile is associated with a boot policy. Distinguished Name (DN) - Each object in UCS has a unique DN, which describes its position in the tree. This is like a fully qualified path, and provides a way for objects to reference other objects at other places in the tree, or for API users to look up specific objects in the tree. Class - Classes define the properties and states of objects. An object's class is given in its tag name. Managed Object (MO) - An object in the management information tree. Objects are recursive, and may have children of multiple types. With the exception of the root object, all objects have parents. In the XML API, objects are represented as XML elements. Method - Actions performed by the API on managed objects. These can change state, or read the current state, or both. Server - A physical server managed by UCS Manager. Servers must be associated with service profiles in order to be used. Service Profile - A set of configuration options for a server. Service profiles define the server's personality, and can be migrated from server to server. Service profiles describe boot policy, MAC addresses, network connectivity, IPMI configuration, and more. MAAS requires servers to be associated with service profiles. UUID - The UUID for a server. MAAS persists the UUID of each UCS managed server it enlists, and uses it as a key for looking the server up later. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) import contextlib import urllib2 import urlparse from lxml.etree import ( Element, tostring, XML, ) from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous str = None __metaclass__ = type __all__ = [ 'power_control_ucsm', 'power_state_ucsm', 'probe_and_enlist_ucsm', ] class UCSMState: DOWN = "down" UP = "up" class UCSM_XML_API_Error(Exception): """Failure talking to a Cisco UCS Manager.""" def __init__(self, msg, code): super(UCSM_XML_API_Error, self).__init__(msg) self.code = code def make_request_data(name, fields=None, children=None): """Build a request string for an API method.""" root = Element(name, fields) if children is not None: root.extend(children) return tostring(root) def parse_response(response_string): """Parse the response from an API method.""" doc = XML(response_string) error_code = doc.get('errorCode') if error_code is not None: raise UCSM_XML_API_Error(doc.get('errorDescr'), error_code) return doc class UCSM_XML_API: """Provides access to a Cisco UCS Manager's XML API. Public methods on this class correspond to UCS Manager XML API methods. Each request uses a new connection. The server supports keep-alive, so this client could be optimized to use it too. 
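
    A typical session (hypothetical URL and credentials) might look like:

        api = UCSM_XML_API('http://ucsm.example.com', 'admin', 'secret')
        api.login()
        try:
            servers = get_servers(api)
        finally:
            api.logout()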
""" def __init__(self, url, username, password): self.url = url self.api_url = urlparse.urljoin(self.url, 'nuova') self.username = username self.password = password self.cookie = None def _send_request(self, request_data): """Issue a request via HTTP and parse the response.""" request = urllib2.Request(self.api_url, request_data) response = urllib2.urlopen(request) response_text = response.read() response_doc = parse_response(response_text) return response_doc def _call(self, name, fields=None, children=None): request_data = make_request_data(name, fields, children) response = self._send_request(request_data) return response def login(self): """Login to the API and get a cookie. Logging into the API gives a new cookie in response. The cookie will become inactive after it has been inactive for some amount of time (10 minutes is the default.) UCS Manager allows a limited number of active cookies at any point in time, so it's important to free the cookie up when finished by logging out via the ``logout`` method. """ fields = {'inName': self.username, 'inPassword': self.password} response = self._call('aaaLogin', fields) self.cookie = response.get('outCookie') def logout(self): """Logout from the API and free the cookie.""" fields = {'inCookie': self.cookie} self._call('aaaLogout', fields) self.cookie = None def config_resolve_class(self, class_id, filters=None): """Issue a configResolveClass request. This returns all of the objects of class ``class_id`` from the UCS Manager. Filters provide a way of limiting the classes returned according to their attributes. There are a number of filters available - Cisco's XML API documentation has a full chapter on filters. All we care about here is that filters are described with XML elements. """ fields = {'cookie': self.cookie, 'classId': class_id} in_filters = Element('inFilter') if filters: in_filters.extend(filters) return self._call('configResolveClass', fields, [in_filters]) def config_resolve_children(self, dn, class_id=None): """Issue a configResolveChildren request. This returns all of the children of the object named by ``dn``, or if ``class_id`` is not None, all of the children of type ``class_id``. """ fields = {'cookie': self.cookie, 'inDn': dn} if class_id is not None: fields['classId'] = class_id return self._call('configResolveChildren', fields) def config_resolve_dn(self, dn): """Retrieve a single object by name. This returns the object named by ``dn``, but not its children. """ fields = {'cookie': self.cookie, 'dn': dn} return self._call('configResolveDn', fields) def config_conf_mo(self, dn, config_items): """Issue a configConfMo request. This makes a configuration change on an object (MO). """ fields = {'cookie': self.cookie, 'dn': dn} in_configs = Element('inConfig') in_configs.extend(config_items) self._call('configConfMo', fields, [in_configs]) def get_servers(api, uuid=None): """Retrieve a list of servers from the UCS Manager.""" if uuid: attrs = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} filters = [Element('eq', attrs)] else: filters = None resolved = api.config_resolve_class('computeItem', filters) return resolved.xpath('//outConfigs/*') def get_children(api, element, class_id): """Retrieve a list of child elements from the UCS Manager.""" resolved = api.config_resolve_children(element.get('dn'), class_id) return resolved.xpath('//outConfigs/%s' % class_id) def get_macs(api, server): """Retrieve the list of MAC addresses assigned to a server. 
Network interfaces are represented by 'adaptorUnit' objects, and are stored as children of servers. """ adaptors = get_children(api, server, 'adaptorUnit') macs = [] for adaptor in adaptors: host_eth_ifs = get_children(api, adaptor, 'adaptorHostEthIf') macs.extend([h.get('mac') for h in host_eth_ifs]) return macs def probe_lan_boot_options(api, server): """Probe for LAN boot options available on a server.""" service_profile = get_service_profile(api, server) boot_profile_dn = service_profile.get('operBootPolicyName') response = api.config_resolve_children(boot_profile_dn) return response.xpath('//outConfigs/lsbootLan') def probe_servers(api): """Retrieve the UUID and MAC addresses for servers from the UCS Manager.""" servers = get_servers(api) server_list = [] for s in servers: # If the server does not have any MAC, then we don't add it. if not get_macs(api, s): continue # If the server does not have a LAN boot option (it can't boot from # LAN), then we don't add it. if not probe_lan_boot_options(api, s): continue server_list.append((s, get_macs(api, s))) return server_list def get_server_power_control(api, server): """Retrieve the power control object for a server.""" service_profile_dn = server.get('assignedToDn') resolved = api.config_resolve_children(service_profile_dn, 'lsPower') power_controls = resolved.xpath('//outConfigs/lsPower') return power_controls[0] def set_server_power_control(api, power_control, command): """Issue a power command to a server's power control.""" attrs = {'state': command, 'dn': power_control.get('dn')} power_change = Element('lsPower', attrs) api.config_conf_mo(power_control.get('dn'), [power_change]) def get_service_profile(api, server): """Get the server's assigned service profile.""" service_profile_dn = server.get('assignedToDn') result = api.config_resolve_dn(service_profile_dn) service_profile = result.xpath('//outConfig/lsServer')[0] return service_profile def get_first_booter(boot_profile_response): """Find the device currently set to boot by default.""" # The 'order' attribute is a positive integer. The device with the # lowest order gets booted first. orders = boot_profile_response.xpath('//outConfigs/*/@order') ordinals = map(int, orders) top_boot_order = min(ordinals) first_query = '//outConfigs/*[@order=%s]' % top_boot_order current_first = boot_profile_response.xpath(first_query)[0] return current_first RO_KEYS = ['access', 'type'] def strip_ro_keys(elements): """Remove read-only keys from configuration elements. These are keys for attributes that aren't allowed to be changed via a configConfMo request. They are included in MO's that we read from the API; stripping these attributes lets us reuse the elements for those MO's rather than building new ones from scratch. """ for ro_key in RO_KEYS: for element in elements: del(element.attrib[ro_key]) def make_policy_change(boot_profile_response): """Build the policy change tree required to make LAN boot first priority. The original top priority will be swapped with LAN boot's original priority.
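For example, given a hypothetical policy in which a local disk boots at order 1 and LAN boots at order 3, the change built here swaps the two 'order' values, so LAN moves to order 1 and the disk to order 3; devices at other priorities are left untouched.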
""" current_first = get_first_booter(boot_profile_response) lan_boot = boot_profile_response.xpath('//outConfigs/lsbootLan')[0] if current_first == lan_boot: return top_boot_order = current_first.get('order') current_first.set('order', lan_boot.get('order')) lan_boot.set('order', top_boot_order) elements = [current_first, lan_boot] strip_ro_keys(elements) policy_change = Element('lsbootPolicy') policy_change.extend(elements) return policy_change def set_lan_boot_default(api, server): """Set a server to boot via LAN by default. If LAN boot is already the top priority, no change will be made. This command changes the server's boot profile, which will affect any other servers also using that boot profile. This is ok, because probe and enlist enlists all the servers in the chassis. """ service_profile = get_service_profile(api, server) boot_profile_dn = service_profile.get('operBootPolicyName') response = api.config_resolve_children(boot_profile_dn) policy_change = make_policy_change(response) if policy_change is None: return api.config_conf_mo(boot_profile_dn, [policy_change]) @contextlib.contextmanager def logged_in(url, username, password): """Context manager that ensures the logout from the API occurs.""" api = UCSM_XML_API(url, username, password) api.login() try: yield api finally: api.logout() def get_power_command(maas_power_mode, current_state): """Translate a MAAS on/off state into a UCSM power command. If the node is up already and receives a request to power on, power cycle the node. """ if maas_power_mode == 'on': if current_state == 'up': return 'cycle-immediate' return 'admin-up' elif maas_power_mode == 'off': return 'admin-down' else: raise UCSM_XML_API_Error( 'Unexpected maas power mode: %s' % (maas_power_mode), None) def power_control_ucsm(url, username, password, uuid, maas_power_mode): """Handle calls from the power template for nodes with a power type of 'ucsm'. """ with logged_in(url, username, password) as api: # UUIDs are unique per server, so we get either one or zero # servers for a given UUID. [server] = get_servers(api, uuid) power_control = get_server_power_control(api, server) command = get_power_command(maas_power_mode, power_control.get('state')) set_server_power_control(api, power_control, command) def power_state_ucsm(url, username, password, uuid): """Return the power state for the ucsm machine.""" with logged_in(url, username, password) as api: # UUIDs are unique per server, so we get either one or zero # servers for a given UUID. [server] = get_servers(api, uuid) power_control = get_server_power_control(api, server) power_state = power_control.get('state') if power_state == UCSMState.DOWN: return 'off' elif power_state == UCSMState.UP: return 'on' raise UCSM_XML_API_Error( 'Unknown power state: %s' % power_state, None) @synchronous def probe_and_enlist_ucsm(user, url, username, password, accept_all=False): """Probe a UCS Manager and enlist all its servers. Here's what happens here: 1. Get a list of servers from the UCS Manager, along with their MAC addresses. 2. Configure each server to boot from LAN first. 3. Add each server to MAAS as a new node, with a power control method of 'ucsm'. The URL and credentials supplied are persisted with each node so MAAS knows how to access UCSM to manage the node in the future. This code expects each server in the system to have already been associated with a service profile. The servers must have networking configured, and their boot profiles must include a boot from LAN option. 
During enlistment, the boot profile for each service profile used by a server will be modified to move LAN boot to the highest priority boot option. Also, if any node fails to enlist, this enlistment process will stop and won't attempt to enlist any additional nodes. If a node is already known to MAAS, it will fail to enlist, so all nodes must be added at once. There is also room for optimization during enlistment. While our client deals with a single server at a time, the API is capable of reading/writing the settings of multiple servers in the same request. """ with logged_in(url, username, password) as api: servers = probe_servers(api) for server, _ in servers: set_lan_boot_default(api, server) for server, macs in servers: params = { 'power_address': url, 'power_user': username, 'power_pass': password, 'uuid': server.get('uuid'), } system_id = create_node(macs, 'amd64', 'ucsm', params).wait(30) if accept_all: commission_node(system_id, user).wait(30) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/virsh.py0000644000000000000000000002620513056115004024404 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'probe_virsh_and_enlist', ] from tempfile import NamedTemporaryFile from lxml import etree import pexpect from provisioningserver.logger import get_maas_logger from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous maaslog = get_maas_logger("drivers.virsh") XPATH_ARCH = "/domain/os/type/@arch" XPATH_BOOT = "/domain/os/boot" XPATH_OS = "/domain/os" # Virsh stores the architecture with a different # label than MAAS. This maps virsh architecture to # MAAS architecture. ARCH_FIX = { 'x86_64': 'amd64', 'ppc64': 'ppc64el', 'ppc64le': 'ppc64el', 'i686': 'i386', } class VirshVMState: OFF = "shut off" ON = "running" NO_STATE = "no state" IDLE = "idle" PAUSED = "paused" IN_SHUTDOWN = "in shutdown" CRASHED = "crashed" PM_SUSPENDED = "pmsuspended" VM_STATE_TO_POWER_STATE = { VirshVMState.OFF: "off", VirshVMState.ON: "on", VirshVMState.NO_STATE: "off", VirshVMState.IDLE: "off", VirshVMState.PAUSED: "off", VirshVMState.IN_SHUTDOWN: "on", VirshVMState.CRASHED: "off", VirshVMState.PM_SUSPENDED: "off", } class VirshError(Exception): """Failure communicating with virsh. """ class VirshSSH(pexpect.spawn): PROMPT = r"virsh \#" PROMPT_SSHKEY = "(?i)are you sure you want to continue connecting" PROMPT_PASSWORD = "(?i)(?:password)|(?:passphrase for key)" PROMPT_DENIED = "(?i)permission denied" PROMPT_CLOSED = "(?i)connection closed by remote host" PROMPTS = [ PROMPT_SSHKEY, PROMPT_PASSWORD, PROMPT, PROMPT_DENIED, PROMPT_CLOSED, pexpect.TIMEOUT, pexpect.EOF, ] I_PROMPT = PROMPTS.index(PROMPT) I_PROMPT_SSHKEY = PROMPTS.index(PROMPT_SSHKEY) I_PROMPT_PASSWORD = PROMPTS.index(PROMPT_PASSWORD) def __init__(self, timeout=30, maxread=2000, dom_prefix=None): super(VirshSSH, self).__init__( None, timeout=timeout, maxread=maxread) self.name = '<virssh>' if dom_prefix is None: self.dom_prefix = '' else: self.dom_prefix = dom_prefix # Store a mapping of { machine_name: xml }. self.xml = {} def _execute(self, poweraddr): """Spawns the pexpect command.""" cmd = 'virsh --connect %s' % poweraddr self._spawn(cmd) def get_machine_xml(self, machine): # Check if we have a cached version of the XML.
# This is a short-lived object, so we don't need to worry about # expiring objects in the cache. if machine in self.xml: return self.xml[machine] # Grab the XML from virsh if we don't have it already. output = self.run(['dumpxml', machine]).strip() if output.startswith("error:"): maaslog.error("%s: Failed to get XML for machine", machine) return None # Cache the XML, since we'll need it later to reconfigure the VM. self.xml[machine] = output return output def login(self, poweraddr, password=None): """Starts connection to virsh.""" self._execute(poweraddr) i = self.expect(self.PROMPTS, timeout=self.timeout) if i == self.I_PROMPT_SSHKEY: # New certificate; let's always accept it, but if it # changes later, the login will fail. self.sendline("yes") i = self.expect(self.PROMPTS) if i == self.I_PROMPT_PASSWORD: # Requesting password, give it if available. if password is None: self.close() return False self.sendline(password) i = self.expect(self.PROMPTS) if i != self.I_PROMPT: # Something bad happened: either a disconnect, a timeout, or a # wrong password. self.close() return False return True def logout(self): """Quits the virsh session.""" self.sendline("quit") self.close() def prompt(self, timeout=None): """Waits for virsh prompt.""" if timeout is None: timeout = self.timeout i = self.expect([self.PROMPT, pexpect.TIMEOUT], timeout=timeout) if i == 1: return False return True def run(self, args): cmd = ' '.join(args) self.sendline(cmd) self.prompt() result = self.before.splitlines() return '\n'.join(result[1:]) def list(self): """Lists all VMs by name.""" machines = self.run(['list', '--all', '--name']) machines = machines.strip().splitlines() return [m for m in machines if m.startswith(self.dom_prefix)] def get_state(self, machine): """Gets the VM state.""" state = self.run(['domstate', machine]) state = state.strip() if state.startswith('error:'): return None return state def get_mac_addresses(self, machine): """Gets the list of MAC addresses assigned to the VM.""" output = self.run(['domiflist', machine]).strip() if output.startswith("error:"): maaslog.error("%s: Failed to get node MAC addresses", machine) return None output = output.splitlines()[2:] # Only return the last item of the line, as it is ensured that the # last item is the MAC Address. return [line.split()[-1] for line in output] def get_arch(self, machine): """Gets the VM architecture.""" output = self.get_machine_xml(machine) if output is None: maaslog.error("%s: Failed to get VM architecture", machine) return None doc = etree.XML(output) evaluator = etree.XPathEvaluator(doc) arch = evaluator(XPATH_ARCH)[0] # Fix architectures that need to be referenced by a different # name that MAAS understands. return ARCH_FIX.get(arch, arch) def configure_pxe_boot(self, machine): """Given the specified machine, reads the XML dump and determines if the boot order needs to be changed. The boot order needs to be changed if it isn't (network, hd), and will be changed to that if it is found to be set to anything else. """ xml = self.get_machine_xml(machine) if xml is None: return False doc = etree.XML(xml) evaluator = etree.XPathEvaluator(doc) # Remove any existing <boot/> elements under <os/>. boot_elements = evaluator(XPATH_BOOT) # Skip this if the boot order is already set up how we want it to be. if (len(boot_elements) == 2 and boot_elements[0].attrib['dev'] == 'network' and boot_elements[1].attrib['dev'] == 'hd'): return True for element in boot_elements: element.getparent().remove(element) # Grab the <os> element and put the <boot> elements we want in.
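# After the rewrite, the <os> section of the domain XML should look
# roughly like this (a sketch; the <type> line is whatever the domain
# already had):
#   <os>
#     <type arch='x86_64'>hvm</type>
#     <boot dev='network'/>
#     <boot dev='hd'/>
#   </os>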
os = evaluator(XPATH_OS)[0] os.append(etree.XML("<boot dev='network'/>")) os.append(etree.XML("<boot dev='hd'/>")) # Rewrite the XML in a temporary file to use with 'virsh define'. with NamedTemporaryFile() as f: f.write(etree.tostring(doc)) f.write('\n') f.flush() output = self.run(['define', f.name]) if output.startswith('error:'): maaslog.error("%s: Failed to set network boot order", machine) return False maaslog.info("%s: Successfully set network boot order", machine) return True def poweron(self, machine): """Power on a VM.""" output = self.run(['start', machine]).strip() if output.startswith("error:"): return False return True def poweroff(self, machine): """Power off a VM.""" output = self.run(['destroy', machine]).strip() if output.startswith("error:"): return False return True @synchronous def probe_virsh_and_enlist(user, poweraddr, password=None, prefix_filter=None, accept_all=False): """Extracts all of the VMs from virsh and enlists them into MAAS. :param user: user for the nodes. :param poweraddr: virsh connection string. :param password: password for the connection. :param prefix_filter: only enlist nodes that have the prefix. :param accept_all: if True, commission enlisted nodes. """ conn = VirshSSH(dom_prefix=prefix_filter) if not conn.login(poweraddr, password): raise VirshError('Failed to login to virsh console.') for machine in conn.list(): arch = conn.get_arch(machine) state = conn.get_state(machine) macs = conn.get_mac_addresses(machine) # Force the machine off, as MAAS will control the machine # and it needs to be in a known state of off. if state == VirshVMState.ON: conn.poweroff(machine) params = { 'power_address': poweraddr, 'power_id': machine, } if password is not None: params['power_pass'] = password system_id = create_node( macs, arch, 'virsh', params, hostname=machine).wait(30) if system_id is not None: conn.configure_pxe_boot(machine) if accept_all: commission_node(system_id, user).wait(30) conn.logout() def power_control_virsh(poweraddr, machine, power_change, password=None): """Control a VM's power using virsh.""" # Force password to None if blank, as the power control # script will send a blank password if one is not set. if password == '': password = None conn = VirshSSH() if not conn.login(poweraddr, password): raise VirshError('Failed to login to virsh console.') state = conn.get_state(machine) if state is None: raise VirshError('%s: Failed to get power state' % machine) if state == VirshVMState.OFF: if power_change == 'on': if conn.poweron(machine) is False: raise VirshError('%s: Failed to power on VM' % machine) elif state == VirshVMState.ON: if power_change == 'off': if conn.poweroff(machine) is False: raise VirshError('%s: Failed to power off VM' % machine) def power_state_virsh(poweraddr, machine, password=None): """Return the power state for the VM using virsh.""" # Force password to None if blank, as the power control # script will send a blank password if one is not set. if password == '': password = None conn = VirshSSH() if not conn.login(poweraddr, password): raise VirshError('Failed to login to virsh console.') state = conn.get_state(machine) if state is None: raise VirshError('Failed to get domain: %s' % machine) try: return VM_STATE_TO_POWER_STATE[state] except KeyError: raise VirshError('Unknown state: %s' % state) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/vmware.py0000644000000000000000000003744513056115004024556 0ustar 00000000000000# Copyright 2015 Canonical Ltd.
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'power_control_vmware', 'power_query_vmware', 'probe_vmware_and_enlist', ] from abc import abstractmethod from collections import OrderedDict from importlib import import_module import traceback from urllib import unquote from provisioningserver.logger import get_maas_logger from provisioningserver.utils import ( commission_node, create_node, ) from provisioningserver.utils.twisted import synchronous vmomi_api = None vim = None maaslog = get_maas_logger("drivers.vmware") def try_pyvmomi_import(): """Attempt to import the pyVmomi API. This API is provided by the python-pyvmomi package; if it doesn't work out, we need to notify the user so they can install it. """ global vim global vmomi_api try: if vim is None: vim_module = import_module('pyVmomi') vim = getattr(vim_module, 'vim') if vmomi_api is None: vmomi_api = import_module('pyVim.connect') except ImportError: return False else: return True class VMwareAPIException(Exception): """Failure talking to the VMware API.""" class VMwareVMNotFound(VMwareAPIException): """The specified virtual machine was not found.""" class VMwareClientNotFound(VMwareAPIException): """A usable VMware API client was not found.""" class VMwareAPIConnectionFailed(VMwareAPIException): """The VMware API endpoint could not be contacted.""" class VMwareAPI(object): """Abstract base class to represent a MAAS-capable VMware API. The API must be capable of: - Gathering names, UUID, and MAC addresses of each virtual machine - Powering on/off VMs - Checking the power status of VMs """ def __init__(self, host, username, password, port=None, protocol=None): """ :param host: The VMware host to connect to :type host: string :param port: The port on the VMware host to connect to :type port: integer :param username: A username authorized for the specified VMware host :type username: string :param password: The password corresponding to the supplied username :type password: string :param protocol: The protocol to use (default: 'https') :type protocol: string """ self.host = host self.port = port self.username = username self.password = password self.protocol = protocol @abstractmethod def connect(self): """Connects to the VMware API""" raise NotImplementedError @abstractmethod def is_connected(self): """Returns True if the VMware API is thought to be connected""" raise NotImplementedError def disconnect(self): """Disconnects from the VMware API""" raise NotImplementedError @abstractmethod def find_vm_by_uuid(self, uuid): """ Searches for a VM that matches the specified UUID. The UUID can be either an instance UUID, or a BIOS UUID. If found, returns an object to represent the VM. Otherwise, returns None. :return: an opaque object representing the VM """ raise NotImplementedError @staticmethod @abstractmethod def get_maas_power_state(vm): """ Returns the MAAS representation of the power status for the specified virtual machine. :return: string ('on', 'off', or 'error') """ raise NotImplementedError @staticmethod @abstractmethod def set_power_state(vm, power_change): """ Sets the power state for the specified VM to the specified value. 
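(In the pyvmomi-backed implementation below, this maps onto the VM's PowerOn()/PowerOff() tasks.)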
:param power_change: the new desired state ('on' or 'off') :raises VMwareAPIException: if the power status could not be changed """ raise NotImplementedError @abstractmethod def get_all_vm_properties(self): """ Creates a dictionary that catalogs every virtual machine present on the VMware server. Each key is a machine name, and each value is a dictionary containing the following keys: - uuid: a UUID for the VM (to be used for power management) - macs: a list of MAC addresses associated with this VM - architecture: amd64 or i386 (depending on the guest ID) - power_state: the current power state of the VM ("on" or "off") :return: a dictionary as specified above """ raise NotImplementedError class VMwarePyvmomiAPI(VMwareAPI): def __init__( self, host, username, password, port=None, protocol=None): super(VMwarePyvmomiAPI, self).__init__( host, username, password, port=port, protocol=protocol) self.service_instance = None def connect(self): # Place optional arguments in a dictionary to pass to the # VMware API call; otherwise the API will see 'None' and fail. extra_args = {} if self.port is not None: extra_args['port'] = self.port if self.protocol is not None: extra_args['protocol'] = self.protocol self.service_instance = vmomi_api.SmartConnect(host=self.host, user=self.username, pwd=self.password, **extra_args) if not self.service_instance: raise VMwareAPIConnectionFailed( "Could not connect to VMware service API") return self.service_instance is not None def is_connected(self): return self.service_instance is not None def disconnect(self): vmomi_api.Disconnect(self.service_instance) self.service_instance = None @staticmethod def _probe_network_cards(vm): """Returns a list of MAC addresses for this VM, followed by a list of unique keys that VMware uses to uniquely identify the NICs. The MAC addresses are used to create the node. If the node is created successfully, the keys will be used to set the boot order on the virtual machine.""" mac_addresses = [] nic_keys = [] for device in vm.config.hardware.device: if hasattr(device, 'macAddress'): mac = device.macAddress if mac is not None and mac != "": mac_addresses.append(mac) nic_keys.append(device.key) return mac_addresses, nic_keys @staticmethod def _get_uuid(vm): # In vCenter environments, using the BIOS UUID (uuid) is deprecated. # But we can use it as a fallback, since the API supports both. if hasattr(vm.summary.config, 'instanceUuid') \ and vm.summary.config.instanceUuid is not None: return vm.summary.config.instanceUuid elif hasattr(vm.summary.config, 'uuid') \ and vm.summary.config.uuid is not None: return vm.summary.config.uuid return None def _get_vm_list(self): vms = [] content = self.service_instance.RetrieveContent() for child in content.rootFolder.childEntity: if hasattr(child, 'vmFolder'): datacenter = child vm_folder = datacenter.vmFolder vm_list = vm_folder.childEntity vms = vms + vm_list return vms def find_vm_by_name(self, vm_name): vm_list = self._get_vm_list() for vm in vm_list: if vm_name == vm.summary.config.name: return vm return None def find_vm_by_uuid(self, uuid): content = self.service_instance.RetrieveContent() # First search using the instance UUID vm = content.searchIndex.FindByUuid(None, uuid, True, True) if vm is None: # ...
otherwise, try using the BIOS UUID vm = content.searchIndex.FindByUuid(None, uuid, True, False) return vm @staticmethod def _get_power_state(vm): return vm.runtime.powerState @staticmethod def pyvmomi_to_maas_powerstate(power_state): """Returns a MAAS power state given the specified pyvmomi state""" if power_state == 'poweredOn': return "on" elif power_state == 'poweredOff': return "off" elif power_state == 'suspended': return "on" # TODO: model this in MAAS else: return "error" @staticmethod def get_maas_power_state(vm): return VMwarePyvmomiAPI.pyvmomi_to_maas_powerstate( vm.runtime.powerState) @staticmethod def set_power_state(vm, power_change): if vm is not None: if power_change == 'on': vm.PowerOn() elif power_change == 'off': vm.PowerOff() else: raise ValueError( "set_power_state: Invalid power_change state: {state}" .format(state=power_change)) @staticmethod def set_pxe_boot(vm_properties): boot_devices = [] for nic in vm_properties['nics']: boot_nic = vim.vm.BootOptions.BootableEthernetDevice() boot_nic.deviceKey = nic boot_devices.append(boot_nic) if len(boot_devices) > 0: vmconf = vim.vm.ConfigSpec() vmconf.bootOptions = vim.vm.BootOptions(bootOrder=boot_devices) # use the reference to the VM we stashed away in the properties vm_properties['this'].ReconfigVM_Task(vmconf) def _get_vm_properties(self, vm): """Gathers the properties for the specified VM, for inclusion into the dictionary containing the properties of all VMs.""" properties = {} properties['this'] = vm properties['uuid'] = self._get_uuid(vm) if "64" in vm.summary.config.guestId: properties['architecture'] = "amd64" else: properties['architecture'] = "i386" properties['power_state'] = self.pyvmomi_to_maas_powerstate( self._get_power_state(vm)) properties['macs'], properties['nics'] = self._probe_network_cards(vm) # These aren't needed now, but we might want them one day... # properties['cpus'] = vm.summary.config.numCpu # properties['ram'] = vm.summary.config.memorySizeMB return properties def get_all_vm_properties(self): # Using an OrderedDict() in case the order that virtual machines # are returned in is important to the user. virtual_machines = OrderedDict() vm_list = self._get_vm_list() for vm in vm_list: vm_name = vm.summary.config.name vm_properties = self._get_vm_properties(vm) virtual_machines[vm_name] = vm_properties return virtual_machines def _get_vmware_api( host, username, password, port=None, protocol=None): if try_pyvmomi_import(): # Attempt to detect the best available VMware API return VMwarePyvmomiAPI( host, username, password, port=port, protocol=protocol) else: raise VMwareClientNotFound( "Could not find a suitable VMware API (install python-pyvmomi)") def get_vmware_servers( host, username, password, port=None, protocol=None): servers = {} api = _get_vmware_api( host, username, password, port=port, protocol=protocol) if api.connect(): try: servers = api.get_all_vm_properties() finally: api.disconnect() return servers @synchronous def probe_vmware_and_enlist( user, host, username, password, port=None, protocol=None, prefix_filter=None, accept_all=False): # Both '' and None mean the same thing, so normalize it.
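# For example, a prefix_filter of 'maas-' (a made-up value) would limit
# enlistment to VMs whose names start with 'maas-'.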
if prefix_filter is None: prefix_filter = '' api = _get_vmware_api( host, username, password, port=port, protocol=protocol) if api.connect(): try: servers = api.get_all_vm_properties() _probe_and_enlist_vmware_servers( api, accept_all, host, password, port, prefix_filter, protocol, servers, user, username) finally: api.disconnect() def _probe_and_enlist_vmware_servers( api, accept_all, host, password, port, prefix_filter, protocol, servers, user, username): maaslog.info("Found %d VMware servers", len(servers)) for system_name in servers: if not system_name.startswith(prefix_filter): maaslog.info( "Skipping node named '%s'; does not match prefix filter '%s'", system_name, prefix_filter) continue properties = servers[system_name] params = { 'power_vm_name': system_name, 'power_uuid': properties['uuid'], 'power_address': host, 'power_port': port, 'power_protocol': protocol, 'power_user': username, 'power_pass': password, } # Note: the system name is URL encoded, so before we go to log # and/or create the node, we need to unquote it. # Otherwise we might pass in names like "Ubuntu%20(64-bit)" system_name = unquote(system_name).decode('utf8') maaslog.info( "Creating VMware node with MACs: %s (%s)", properties['macs'], system_name) system_id = create_node( properties['macs'], properties['architecture'], 'vmware', params, hostname=system_name).wait(30) if system_id is not None: api.set_pxe_boot(properties) if accept_all and system_id is not None: commission_node(system_id, user).wait(30) def _find_vm_by_uuid_or_name(api, uuid, vm_name): if uuid: vm = api.find_vm_by_uuid(uuid) elif vm_name: vm = api.find_vm_by_name(vm_name) else: raise VMwareVMNotFound( "Failed to find VM; need a UUID or a VM name for power control") return vm def power_control_vmware( host, username, password, vm_name, uuid, power_change, port=None, protocol=None): api = _get_vmware_api( host, username, password, port=port, protocol=protocol) if api.connect(): try: vm = _find_vm_by_uuid_or_name(api, uuid, vm_name) if vm is None: raise VMwareVMNotFound( "Failed to find VM; uuid={uuid}, name={name}" .format(uuid=uuid, name=vm_name)) api.set_power_state(vm, power_change) except VMwareAPIException: raise except: # This is to cover what might go wrong in set_power_state(), if # an exception occurs while powering on or off. raise VMwareAPIException( "Failed to set power state to {state} for uuid={uuid}" .format(state=power_change, uuid=uuid), traceback.format_exc()) finally: api.disconnect() def power_query_vmware( host, username, password, vm_name, uuid, port=None, protocol=None): """Return the power state for the VM with the specified UUID, using the VMware API.""" api = _get_vmware_api( host, username, password, port=port, protocol=protocol) if api.connect(): try: vm = _find_vm_by_uuid_or_name(api, uuid, vm_name) if vm is not None: return api.get_maas_power_state(vm) except VMwareAPIException: raise except: raise VMwareAPIException( "Failed to get power state for uuid={uuid}" .format(uuid=uuid), traceback.format_exc()) finally: api.disconnect() maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/__init__.py0000644000000000000000000000000013056115004026133 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_apc.py0000644000000000000000000001446013056115004026215 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for ``provisioningserver.drivers.hardware.apc``.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from subprocess import PIPE from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import Mock from provisioningserver.drivers.hardware import apc as apc_module from provisioningserver.drivers.hardware.apc import ( APCException, APCSNMP, power_control_apc, power_state_apc, ) from provisioningserver.utils.shell import ExternalProcessError from testtools.matchers import Equals COMMON_ARGS = '-c private -v1 %s .1.3.6.1.4.1.318.1.1.12.3.3.1.1.4.%s' COMMON_OUTPUT = 'iso.3.6.1.4.1.318.1.1.12.3.3.1.1.4.%s = INTEGER: 1\n' class TestAPCSNMP(MAASTestCase): """Test for `APCSNMP`.""" def patch_popen(self, return_value=(None, None), returncode=0): process = Mock() process.returncode = returncode process.communicate = Mock(return_value=return_value) self.patch(apc_module, 'Popen', Mock(return_value=process)) return process def test_run_process_calls_command(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) command = 'snmpget ' + COMMON_ARGS % (ip, outlet) return_value = ((COMMON_OUTPUT % outlet), 'error_output') self.patch_popen(return_value) apc = APCSNMP() apc.run_process(command) self.assertThat( apc_module.Popen, MockCalledOnceWith( command.split(), stdout=PIPE, stderr=PIPE)) def test_run_process_returns_result(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) command = 'snmpget ' + COMMON_ARGS % (ip, outlet) return_value = ((COMMON_OUTPUT % outlet), 'error_output') self.patch_popen(return_value) apc = APCSNMP() result = apc.run_process(command) self.assertEqual(result, '1') def test_run_process_catches_failures(self): apc = APCSNMP() self.patch_popen(returncode=1) self.assertRaises( ExternalProcessError, apc.run_process, factory.make_name('command')) def test_power_off_outlet_calls_run_process(self): apc = APCSNMP() ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) command = 'snmpset ' + COMMON_ARGS % (ip, outlet) + ' i 2' run_process = self.patch(apc, 'run_process') apc.power_off_outlet(ip, outlet) self.assertThat(run_process, MockCalledOnceWith(command)) def test_power_on_outlet_calls_run_process(self): apc = APCSNMP() ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) power_on_delay = 0 command = 'snmpset ' + COMMON_ARGS % (ip, outlet) + ' i 1' power_off_outlet = self.patch(apc, 'power_off_outlet') run_process = self.patch(apc, 'run_process') apc.power_on_outlet(ip, outlet, power_on_delay) self.expectThat(power_off_outlet, MockCalledOnceWith(ip, outlet)) self.expectThat(run_process, MockCalledOnceWith(command)) def test_get_power_state_of_outlet_calls_run_process(self): apc = APCSNMP() ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) command = 'snmpget ' + COMMON_ARGS % (ip, outlet) run_process = self.patch(apc, 'run_process') apc.get_power_state_of_outlet(ip, outlet) self.assertThat(run_process, MockCalledOnceWith(command)) class TestAPCPowerControl(MAASTestCase): """Tests for `power_control_apc`.""" def test__errors_on_unknown_power_change(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) power_change = factory.make_name('error') power_on_delay = 0 self.assertRaises( AssertionError, power_control_apc, ip, outlet, power_change, power_on_delay) def test___power_change_on(self): ip = 
factory.make_ipv4_address() outlet = '%d' % randint(1, 16) power_change = 'on' power_on_delay = 0 power_on_outlet = self.patch(APCSNMP, 'power_on_outlet') power_control_apc(ip, outlet, power_change, power_on_delay) self.assertThat( power_on_outlet, MockCalledOnceWith( ip, outlet, float(power_on_delay))) def test___power_change_off(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) power_change = 'off' power_on_delay = 0 power_off_outlet = self.patch(APCSNMP, 'power_off_outlet') power_control_apc(ip, outlet, power_change, power_on_delay) self.assertThat(power_off_outlet, MockCalledOnceWith(ip, outlet)) class TestAPCPowerState(MAASTestCase): """Tests for `power_control_state`.""" def test__gets_power_off_state(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) get_power_state_of_outlet = self.patch( APCSNMP, 'get_power_state_of_outlet', Mock(return_value='2')) power_state = power_state_apc(ip, outlet) self.expectThat( get_power_state_of_outlet, MockCalledOnceWith(ip, outlet)) self.expectThat(power_state, Equals('off')) def test__gets_power_on_state(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) get_power_state_of_outlet = self.patch( APCSNMP, 'get_power_state_of_outlet', Mock(return_value='1')) power_state = power_state_apc(ip, outlet) self.expectThat( get_power_state_of_outlet, MockCalledOnceWith(ip, outlet)) self.expectThat(power_state, Equals('on')) def test__errors_on_unknown_state(self): ip = factory.make_ipv4_address() outlet = '%d' % randint(1, 16) get_power_state_of_outlet = self.patch( APCSNMP, 'get_power_state_of_outlet', Mock(return_value='error')) self.assertRaises(APCException, power_state_apc, ip, outlet) self.expectThat( get_power_state_of_outlet, MockCalledOnceWith(ip, outlet)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_hmc.py0000644000000000000000000002130413056115004026214 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for ``provisioningserver.drivers.hardware.hmc``.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import choice from StringIO import StringIO from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import Mock from provisioningserver.drivers.hardware.hmc import ( HMC, HMCException, HMCState, power_control_hmc, power_state_hmc, ) from testtools.matchers import Equals def make_hmc_params(): """Make parameters for HMC.""" ip = factory.make_ipv4_address() username = factory.make_name('user') password = factory.make_name('password') server_name = factory.make_name('server_name') lpar = factory.make_name('lpar') return ip, username, password, server_name, lpar def make_hmc_api(): """Make a HMC object with randomized parameters.""" ip, username, password, _, _ = make_hmc_params() return HMC(ip, username, password) class TestHMC(MAASTestCase): """Tests for `HMC`.""" def test_run_cli_command_returns_output(self): api = make_hmc_api() command = factory.make_name('command') ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = api._run_cli_command(command) self.expectThat(expected, Equals(output)) self.expectThat(ssh_mock.exec_command, MockCalledOnceWith(command)) def test_run_cli_command_connects_and_closes_ssh_client(self): api = make_hmc_api() ssh_mock = self.patch(api, '_ssh') ssh_mock.exec_command = Mock(return_value=factory.make_streams()) api._run_cli_command(factory.make_name('command')) self.expectThat( ssh_mock.connect, MockCalledOnceWith( api.ip, username=api.username, password=api.password)) self.expectThat(ssh_mock.close, MockCalledOnceWith()) def test_run_cli_command_closes_when_exception_raised(self): api = make_hmc_api() ssh_mock = self.patch(api, '_ssh') exception_type = factory.make_exception_type() ssh_mock.exec_command = Mock(side_effect=exception_type) command = factory.make_name('command') self.assertRaises(exception_type, api._run_cli_command, command) self.expectThat(ssh_mock.close, MockCalledOnceWith()) def test_get_lpar_power_state_gets_power_state(self): api = make_hmc_api() server_name = factory.make_name('server_name') lpar = factory.make_name('lpar') state = factory.make_name('state') expected = '%s:%s\n' % (lpar, state) cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected output = api.get_lpar_power_state(server_name, lpar) command = "lssyscfg -m %s -r lpar -F name:state" % server_name self.expectThat( expected.split('%s:' % lpar)[1].split('\n')[0], Equals(output)) self.expectThat(cli_mock, MockCalledOnceWith(command)) def test_power_lpar_on_returns_expected_output(self): api = make_hmc_api() server_name = factory.make_name('server_name') lpar = factory.make_name('lpar') ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = api.power_lpar_on(server_name, lpar) command = ("chsysstate -r lpar -m %s -o on -n %s " "--bootstring network-all" % (server_name, lpar)) self.expectThat(expected, Equals(output)) self.expectThat( ssh_mock.exec_command, MockCalledOnceWith(command)) def test_power_lpar_off_returns_expected_output(self): api = make_hmc_api() server_name 
= factory.make_name('server_name') lpar = factory.make_name('lpar') ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = api.power_lpar_off(server_name, lpar) command = ("chsysstate -r lpar -m %s -o shutdown -n %s --immed" % (server_name, lpar)) self.expectThat(expected, Equals(output)) self.expectThat( ssh_mock.exec_command, MockCalledOnceWith(command)) class TestHMCPowerControl(MAASTestCase): """Tests for `power_control_hmc`.""" def test_power_control_error_on_unknown_power_change(self): ip, username, password, server_name, lpar = make_hmc_params() power_change = factory.make_name('error') self.assertRaises( HMCException, power_control_hmc, ip, username, password, server_name, lpar, power_change) def test_power_control_power_change_on_power_state_on(self): # power_change and current power_state are both 'on' ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.return_value = choice(HMCState.ON) power_lpar_off_mock = self.patch(HMC, 'power_lpar_off') power_lpar_on_mock = self.patch(HMC, 'power_lpar_on') power_control_hmc(ip, username, password, server_name, lpar, power_change='on') self.expectThat( power_state_mock, MockCalledOnceWith(server_name, lpar)) self.expectThat( power_lpar_off_mock, MockCalledOnceWith(server_name, lpar)) self.expectThat( power_lpar_on_mock, MockCalledOnceWith(server_name, lpar)) def test_power_control_power_change_on_power_state_off(self): # power_change is 'on' and current power_state is 'off' ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.return_value = HMCState.OFF power_lpar_on_mock = self.patch(HMC, 'power_lpar_on') power_control_hmc(ip, username, password, server_name, lpar, power_change='on') self.expectThat( power_state_mock, MockCalledOnceWith(server_name, lpar)) self.expectThat( power_lpar_on_mock, MockCalledOnceWith(server_name, lpar)) def test_power_control_power_change_off_power_state_on(self): # power_change is 'off' and current power_state is 'on' ip, username, password, server_name, lpar = make_hmc_params() power_lpar_off_mock = self.patch(HMC, 'power_lpar_off') power_control_hmc(ip, username, password, server_name, lpar, power_change='off') self.expectThat( power_lpar_off_mock, MockCalledOnceWith(server_name, lpar)) class TestHMCPowerState(MAASTestCase): """Tests for `power_state_hmc`.""" def test_power_state_failed_to_get_state(self): ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.side_effect = HMCException('error') self.assertRaises( HMCException, power_state_hmc, ip, username, password, server_name, lpar) def test_power_state_get_off(self): ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.return_value = choice(HMCState.OFF) self.assertThat( power_state_hmc(ip, username, password, server_name, lpar), Equals('off')) def test_power_state_get_on(self): ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.return_value = choice(HMCState.ON) self.assertThat( power_state_hmc(ip, username, password, server_name, lpar), Equals('on')) def 
test_power_state_error_on_unknown_state(self): ip, username, password, server_name, lpar = make_hmc_params() power_state_mock = self.patch(HMC, 'get_lpar_power_state') power_state_mock.return_value = factory.make_name('error') self.assertRaises( HMCException, power_state_hmc, ip, username, password, server_name, lpar) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_mscm.py0000644000000000000000000003544113056115004026413 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for ``provisioningserver.drivers.hardware.mscm``.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint import re from socket import error as SOCKETError from StringIO import StringIO from hypothesis import given from hypothesis.strategies import sampled_from from maastesting.factory import factory from maastesting.matchers import ( MockAnyCall, MockCalledOnceWith, MockCalledWith, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import Mock from paramiko import SSHException from provisioningserver.drivers.hardware import mscm from provisioningserver.drivers.hardware.mscm import ( cartridge_mapping, MSCM, MSCMError, MSCMState, power_control_mscm, power_state_mscm, probe_and_enlist_mscm, ) from provisioningserver.utils.twisted import asynchronous from testtools.matchers import Equals from testtools.testcase import ExpectedException from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread def make_mscm_api(): """Make a MSCM object with randomized parameters.""" host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') return MSCM(host, username, password) def make_node_id(): """Make a node_id.""" return 'c%sn%s' % (randint(1, 45), randint(1, 8)) def make_show_node_list(length=10): """Make a fake return value for discover_nodes.""" return re.findall(r'c\d+n\d', ''.join(make_node_id() for _ in xrange(length))) def make_show_node_macaddr(length=10): """Make a fake return value for get_node_macaddr.""" return ''.join((factory.make_mac_address() + ' ') for _ in xrange(length)) class TestMSCMCliApi(MAASTestCase): """Tests for `MSCM`.""" scenarios = [ ('power_node_on', dict(method='power_node_on')), ('power_node_off', dict(method='power_node_off')), ('configure_node_bootonce_pxe', dict(method='configure_node_bootonce_pxe')), ('configure_node_boot_m2', dict(method='configure_node_boot_m2')), ] def test_run_cli_command_returns_output(self): api = make_mscm_api() command = factory.make_name('command') ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = api._run_cli_command(command) self.expectThat(expected, Equals(output)) self.expectThat(ssh_mock.exec_command, MockCalledOnceWith(command)) def test_run_cli_command_connects_and_closes_ssh_client(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') ssh_mock.exec_command = Mock(return_value=factory.make_streams()) api._run_cli_command(factory.make_name('command')) self.expectThat( ssh_mock.connect, MockCalledOnceWith( api.host, username=api.username, password=api.password)) self.expectThat(ssh_mock.close, MockCalledOnceWith()) def 
test_run_cli_command_closes_when_exception_raised(self): api = make_mscm_api() exception_type = factory.make_exception_type() ssh_mock = self.patch(api, '_ssh') ssh_mock.exec_command = Mock(side_effect=exception_type) command = factory.make_name('command') self.assertRaises(exception_type, api._run_cli_command, command) self.expectThat(ssh_mock.close, MockCalledOnceWith()) @given(sampled_from([EOFError, SSHException, SOCKETError])) def test_run_cli_command_crashes_for_ssh_error(self, error): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') ssh_mock.exec_command = Mock(side_effect=error) command = factory.make_name('command') self.assertRaises(MSCMError, api._run_cli_command, command) self.expectThat(ssh_mock.close, MockCalledOnceWith()) def test_discover_nodes(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') expected = make_show_node_list() stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = api.discover_nodes() command = "show node list" self.expectThat(expected, Equals(output)) self.expectThat(ssh_mock.exec_command, MockCalledOnceWith(command)) def test_get_node_macaddr(self): api = make_mscm_api() expected = make_show_node_macaddr() cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected node_id = make_node_id() output = api.get_node_macaddr(node_id) command = "show node macaddr %s" % node_id self.expectThat(re.findall(r':'.join(['[0-9a-f]{2}'] * 6), expected), Equals(output)) self.expectThat(cli_mock, MockCalledOnceWith(command)) def test_get_node_arch_returns_cartridge_mapping(self): api = make_mscm_api() expected = '\r\n Product Name: ProLiant Moonshot Cartridge\r\n' cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected node_id = make_node_id() output = api.get_node_arch(node_id) command = "show node info %s" % node_id key = expected.split('Product Name: ')[1].splitlines()[0] self.expectThat(cartridge_mapping[key], Equals(output)) self.expectThat(cli_mock, MockCalledOnceWith(command)) def test_get_node_arch_returns_default_cartridge_mapping(self): api = make_mscm_api() expected = '\r\n Product Name: Testing\r\n' cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected node_id = make_node_id() output = api.get_node_arch(node_id) command = "show node info %s" % node_id key = 'Default' self.expectThat(cartridge_mapping[key], Equals(output)) self.expectThat(cli_mock, MockCalledOnceWith(command)) def test_get_node_power_state(self): api = make_mscm_api() expected = '\r\n Node #1\r\n Power State: On\r\n' cli_mock = self.patch(api, '_run_cli_command') cli_mock.return_value = expected node_id = make_node_id() output = api.get_node_power_state(node_id) command = "show node power %s" % node_id self.expectThat(expected.split('Power State: ')[1].splitlines()[0], Equals(output)) self.expectThat(cli_mock, MockCalledOnceWith(command)) def test_power_and_configure_node_returns_expected_outout(self): api = make_mscm_api() ssh_mock = self.patch(api, '_ssh') expected = factory.make_name('output') stdout = StringIO(expected) streams = factory.make_streams(stdout=stdout) ssh_mock.exec_command = Mock(return_value=streams) output = getattr(api, self.method)(make_node_id()) self.assertThat(expected, Equals(output)) class TestMSCMProbeAndEnlist(MAASTestCase): """Tests for `probe_and_enlist_mscm`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @inlineCallbacks def test_probe_and_enlist(self): user = 
factory.make_name('user') host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') system_id = factory.make_name('system_id') node_id = make_node_id() macs = make_show_node_macaddr(4) arch = 'arm64/xgene-uboot' discover_nodes_mock = self.patch(MSCM, 'discover_nodes') discover_nodes_mock.return_value = [node_id] boot_m2_mock = self.patch(MSCM, 'configure_node_boot_m2') node_arch_mock = self.patch(MSCM, 'get_node_arch') node_arch_mock.return_value = arch node_macs_mock = self.patch(MSCM, 'get_node_macaddr') node_macs_mock.return_value = macs create_node_mock = self.patch(mscm, 'create_node') create_node_mock.side_effect = asynchronous(lambda *args: system_id) commission_node_mock = self.patch(mscm, 'commission_node') params = { 'power_address': host, 'power_user': username, 'power_pass': password, 'node_id': node_id, } yield deferToThread( probe_and_enlist_mscm, user, host, username, password, accept_all=True) self.expectThat(discover_nodes_mock, MockAnyCall()) self.expectThat(boot_m2_mock, MockCalledWith(node_id)) self.expectThat(node_arch_mock, MockCalledOnceWith(node_id)) self.expectThat(node_macs_mock, MockCalledOnceWith(node_id)) self.expectThat( create_node_mock, MockCalledOnceWith(macs, arch, 'mscm', params)) self.expectThat( commission_node_mock, MockCalledOnceWith(system_id, user)) @inlineCallbacks def test_probe_and_enlist_discover_nodes_failure(self): user = factory.make_name('user') host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') discover_nodes_mock = self.patch(MSCM, 'discover_nodes') discover_nodes_mock.side_effect = MSCMError('error') with ExpectedException(MSCMError): yield deferToThread( probe_and_enlist_mscm, user, host, username, password) class TestMSCMPowerControl(MAASTestCase): """Tests for `power_control_mscm`.""" def test_power_control_error_on_unknown_power_change(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_change = factory.make_name('error') self.assertRaises( MSCMError, power_control_mscm, host, username, password, node_id, power_change) def test_power_control_power_change_on_power_state_on(self): # power_change and current power_state are both 'on' host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.return_value = MSCMState.ON power_node_off_mock = self.patch(MSCM, 'power_node_off') bootonce_mock = self.patch(MSCM, 'configure_node_bootonce_pxe') power_node_on_mock = self.patch(MSCM, 'power_node_on') power_control_mscm(host, username, password, node_id, power_change='on') self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) self.expectThat(power_node_off_mock, MockCalledOnceWith(node_id)) self.expectThat(bootonce_mock, MockCalledOnceWith(node_id)) self.expectThat(power_node_on_mock, MockCalledOnceWith(node_id)) def test_power_control_power_change_on_power_state_off(self): # power_change is 'on' and current power_state is 'off' host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.return_value = MSCMState.OFF bootonce_mock = self.patch(MSCM, 'configure_node_bootonce_pxe') power_node_on_mock = 
self.patch(MSCM, 'power_node_on') power_control_mscm(host, username, password, node_id, power_change='on') self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) self.expectThat(bootonce_mock, MockCalledOnceWith(node_id)) self.expectThat(power_node_on_mock, MockCalledOnceWith(node_id)) def test_power_control_power_change_off_power_state_on(self): # power_change is 'off' and current power_state is 'on' host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_node_off_mock = self.patch(MSCM, 'power_node_off') power_control_mscm(host, username, password, node_id, power_change='off') self.assertThat(power_node_off_mock, MockCalledOnceWith(node_id)) class TestMSCMPowerState(MAASTestCase): """Tests for `power_state_mscm`.""" def test_power_state_failed_to_get_state(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.side_effect = MSCMError('error') self.assertRaises( MSCMError, power_state_mscm, host, username, password, node_id) self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) def test_power_state_get_off(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.return_value = MSCMState.OFF self.expectThat( power_state_mscm(host, username, password, node_id), Equals('off')) self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) def test_power_state_get_on(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.return_value = MSCMState.ON self.expectThat( power_state_mscm(host, username, password, node_id), Equals('on')) self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) def test_power_state_error_on_unknown_state(self): host = factory.make_hostname('mscm') username = factory.make_name('user') password = factory.make_name('password') node_id = make_node_id() power_state_mock = self.patch(MSCM, 'get_node_power_state') power_state_mock.return_value = factory.make_name('error') self.assertRaises( MSCMError, power_state_mscm, host, username, password, node_id) self.expectThat(power_state_mock, MockCalledOnceWith(node_id)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_msftocs.py0000644000000000000000000004244013056115004027127 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
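# Editor's sketch (not part of the original module): the tests below pin
# down MicrosoftOCSAPI's URL building and XML-response parsing. Assuming
# only the stdlib and lxml, the request/parse cycle looks roughly like
# this; both function bodies here are illustrative, not the driver source.
from lxml import etree

def build_chassis_url_sketch(ip, port, command, params):
    # e.g. http://10.0.0.1:8000/GetBladeState?bladeid=11
    return 'http://%s:%d/%s?%s' % (ip, port, command, '&'.join(params))

def extract_from_response_sketch(response, element_tag):
    # Chassis-manager replies are namespaced XML; matching by local name
    # saves callers from repeating the xmlns on every lookup.
    root = etree.fromstring(response)
    for element in root.iter():
        if etree.QName(element).localname == element_tag:
            return element.text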
"""Tests for ``provisioningserver.drivers.hardware.msftocs``.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import randint from StringIO import StringIO from textwrap import dedent import urllib2 as urllib2 import urlparse from maastesting.factory import factory from maastesting.matchers import ( MockAnyCall, MockCalledOnceWith, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import Mock from provisioningserver.drivers.hardware import msftocs from provisioningserver.drivers.hardware.msftocs import ( MicrosoftOCSAPI, MicrosoftOCSError, MicrosoftOCSState, power_control_msftocs, power_state_msftocs, probe_and_enlist_msftocs, ) from provisioningserver.utils.twisted import asynchronous from testtools.matchers import Equals from testtools.testcase import ExpectedException from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread XMLNS = "http://schemas.datacontract.org/2004/07/Microsoft.GFS.WCS.Contracts" XMLNS_I = "http://www.w3.org/2001/XMLSchema-instance" def make_msftocs_api(): """Make a MicrosoftOCSAPI object with randomized parameters.""" ip = factory.make_ipv4_address() port = randint(2000, 4000) username = factory.make_name('user') password = factory.make_name('password') return MicrosoftOCSAPI(ip, port, username, password) def make_msftocs_params(): """Make and return the parameters used for power control/state.""" ip = factory.make_ipv4_address() port = randint(2000, 4000) username = factory.make_name('username') password = factory.make_name('password') bladeid = randint(1, 24) return ip, port, username, password, bladeid class Test_MicrosoftOCSAPI(MAASTestCase): """Tests for `MicrosoftOCSAPI`.""" def test_build_url_builds_url(self): api = make_msftocs_api() params = [factory.make_string() for _ in range(3)] command = factory.make_string() output = api.build_url(command, params) parsed = urlparse.urlparse(output) url = '%s:%d' % (api.ip, api.port) self.expectThat(url, Equals(parsed.netloc)) self.expectThat(command, Equals(parsed.path.split('/')[1])) self.expectThat(params, Equals(parsed.query.split('&'))) def test_extract_from_response_finds_element_content(self): api = make_msftocs_api() response = dedent(""" Test """ % (XMLNS, XMLNS_I)) element_tag = 'd' expected = 'Test' output = api.extract_from_response(response, element_tag) self.assertThat(output, Equals(expected)) def test_get_gets_response(self): api = make_msftocs_api() params = [factory.make_string() for _ in range(3)] command = factory.make_string() expected = dedent(""" """ % (XMLNS, XMLNS_I)) response = StringIO(expected) mock_urlopen = self.patch( urllib2, 'urlopen', Mock(return_value=response)) url = api.build_url(command, params) output = api.get(command, params) self.expectThat(output, Equals(expected)) self.expectThat( mock_urlopen, MockCalledOnceWith(url)) def test_get_blade_power_state_gets_power_state(self): api = make_msftocs_api() bladeid = randint(1, 24) params = ["bladeid=%s" % bladeid] response = dedent(""" Success 1 %s ON """ % (XMLNS, XMLNS_I, bladeid)) mock_response = self.patch( MicrosoftOCSAPI, 'get', Mock(return_value=response)) expected = 'ON' output = api.get_blade_power_state(bladeid) self.expectThat(output, Equals(expected)) self.expectThat( mock_response, MockCalledOnceWith('GetBladeState', params)) def test_set_power_off_blade_powers_off_blade(self): api = make_msftocs_api() bladeid = randint(1, 24) params = ["bladeid=%s" % 
bladeid] response = dedent(""" Success 1 %s """ % (XMLNS, XMLNS_I, bladeid)) mock_response = self.patch( MicrosoftOCSAPI, 'get', Mock(return_value=response)) expected = 'Success' output = api.set_power_off_blade(bladeid) self.expectThat(output, Equals(expected)) self.expectThat( mock_response, MockCalledOnceWith('SetBladeOff', params)) def test_set_power_on_blade_powers_on_blade(self): api = make_msftocs_api() bladeid = randint(1, 24) params = ["bladeid=%s" % bladeid] response = dedent(""" Success 1 %s """ % (XMLNS, XMLNS_I, bladeid)) mock_response = self.patch( MicrosoftOCSAPI, 'get', Mock(return_value=response)) expected = 'Success' output = api.set_power_on_blade(bladeid) self.expectThat(output, Equals(expected)) self.expectThat( mock_response, MockCalledOnceWith('SetBladeOn', params)) def test_set_next_boot_device_sets_device(self): api = make_msftocs_api() bladeid = randint(1, 24) bootType = '2' boot_uefi = 'false' boot_persistent = 'false' params = [ "bladeid=%s" % bladeid, "bootType=%s" % bootType, "uefi=%s" % boot_uefi, "persistent=%s" % boot_persistent ] response = dedent(""" Success 1 Success %s ForcePxe """ % (XMLNS, XMLNS_I, bladeid)) mock_response = self.patch( MicrosoftOCSAPI, 'get', Mock(return_value=response)) expected = 'ForcePxe' output = api.set_next_boot_device(bladeid, pxe=True) self.expectThat(output, Equals(expected)) self.expectThat( mock_response, MockCalledOnceWith('SetNextBoot', params)) def test_get_blades_gets_blades(self): api = make_msftocs_api() response = dedent(""" Success 1 11 BLADE11 ON Success 1 1 F4:52:14:D6:70:98 Success 1 2 """ % (XMLNS, XMLNS_I)) mock_response = self.patch( MicrosoftOCSAPI, 'get', Mock(return_value=response)) expected = {'11': [u'F4:52:14:D6:70:98']} output = api.get_blades() self.expectThat(output, Equals(expected)) self.expectThat( mock_response, MockCalledOnceWith('GetChassisInfo')) class Test_MicrosoftOCSPowerState(MAASTestCase): """Tests for `power_state_msftocs`.""" def test_power_state_msftocs_failed_to_get_state_server_error(self): ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.side_effect = urllib2.URLError('error') self.assertRaises( MicrosoftOCSError, power_state_msftocs, ip, port, username, password, bladeid) self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) def test_power_state_msftocs_failed_to_get_state_http_error(self): ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.side_effect = urllib2.HTTPError( None, None, None, None, None) self.assertRaises( MicrosoftOCSError, power_state_msftocs, ip, port, username, password, bladeid) self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) def test_power_state_msftocs_gets_off_state(self): ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.return_value = MicrosoftOCSState.OFF self.expectThat( power_state_msftocs(ip, port, username, password, bladeid), Equals('off')) self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) def test_power_state_msftocs_gets_on_state(self): ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.return_value = MicrosoftOCSState.ON self.expectThat( power_state_msftocs(ip, port, username, password, bladeid), Equals('on')) 
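# Editor's aside (a sketch, not the driver source): the URLError/HTTPError
# tests in this class rely on every chassis call being wrapped so transport
# failures surface as MicrosoftOCSError. Because urllib2.HTTPError is a
# subclass of urllib2.URLError, a single except clause covers both cases;
# the helper name below is hypothetical.
def checked_blade_power_state_sketch(api, bladeid):
    try:
        return api.get_blade_power_state(bladeid)
    except urllib2.URLError as error:
        raise MicrosoftOCSError('Failed to get blade power state: %s' % error)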
self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) def test_power_state_msftocs_errors_on_unknown_state(self): ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.return_value = factory.make_name('error') self.assertRaises( MicrosoftOCSError, power_state_msftocs, ip, port, username, password, bladeid) self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) class Test_MicrosoftOCSPowerControl(MAASTestCase): """Tests for `power_control_msftocs`.""" def test_power_control_msftocs_errors_on_unknown_power_change(self): ip, port, username, password, bladeid = make_msftocs_params() power_change = factory.make_name('error') self.assertRaises( MicrosoftOCSError, power_control_msftocs, ip, port, username, password, bladeid, power_change) def test_power_control_msftocs_power_change_on_power_state_on(self): # power_change and current power_state are both 'on' ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.return_value = MicrosoftOCSState.ON power_node_off_mock = self.patch( MicrosoftOCSAPI, 'set_power_off_blade') next_boot_mock = self.patch(MicrosoftOCSAPI, 'set_next_boot_device') power_node_on_mock = self.patch(MicrosoftOCSAPI, 'set_power_on_blade') power_control_msftocs( ip, port, username, password, bladeid, power_change='on') self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) self.expectThat(power_node_off_mock, MockCalledOnceWith(bladeid)) self.expectThat(next_boot_mock.call_count, Equals(2)) self.expectThat(power_node_on_mock, MockCalledOnceWith(bladeid)) def test_power_control_msftocs_power_change_on_power_state_off(self): # power_change is 'on' and current power_state is 'off' ip, port, username, password, bladeid = make_msftocs_params() power_state_mock = self.patch(MicrosoftOCSAPI, 'get_blade_power_state') power_state_mock.return_value = MicrosoftOCSState.OFF next_boot_mock = self.patch(MicrosoftOCSAPI, 'set_next_boot_device') power_node_on_mock = self.patch(MicrosoftOCSAPI, 'set_power_on_blade') power_control_msftocs( ip, port, username, password, bladeid, power_change='on') self.expectThat(power_state_mock, MockCalledOnceWith(bladeid)) self.expectThat(next_boot_mock.call_count, Equals(2)) self.expectThat(power_node_on_mock, MockCalledOnceWith(bladeid)) def test_power_control_msftocs_power_change_off_power_state_on(self): # power_change is 'off' and current power_state is 'on' ip, port, username, password, bladeid = make_msftocs_params() power_node_off_mock = self.patch( MicrosoftOCSAPI, 'set_power_off_blade') power_control_msftocs( ip, port, username, password, bladeid, power_change='off') self.assertThat(power_node_off_mock, MockCalledOnceWith(bladeid)) class TestMicrosoftOCSProbeAndEnlist(MAASTestCase): """Tests for `probe_and_enlist_msftocs`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @inlineCallbacks def test_probe_and_enlist_msftocs_probes_and_enlists(self): user = factory.make_name('user') ip = factory.make_ipv4_address() port = randint(2000, 4000) username = factory.make_name('username') password = factory.make_name('password') system_id = factory.make_name('system_id') blade_id = randint(1, 24) macs = [u'F4:52:14:D6:70:98', u'F4:52:14:D6:70:99'] blades_mock = self.patch(MicrosoftOCSAPI, 'get_blades') blades_mock.return_value = {'%s' % blade_id: macs} next_boot_device_mock = self.patch( MicrosoftOCSAPI, 'set_next_boot_device') 
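# Editor's sketch of the flow the expectations below encode: enumerate the
# chassis blade map, set the next boot device twice (the test only asserts
# call_count == 2; the exact argument pairs here are an assumption), then
# create and optionally commission a MAAS node per blade. Illustrative only;
# create_node/commission_node are asynchronous in MAAS and are treated as
# blocking here purely for readability.
def probe_and_enlist_msftocs_sketch(user, ip, port, username, password,
                                    accept_all=False):
    api = MicrosoftOCSAPI(ip, port, username, password)
    for blade_id, macs in api.get_blades().items():
        # Plausibly one persistent default plus a one-shot PXE boot
        # (an assumption), which accounts for the two expected calls.
        api.set_next_boot_device(blade_id, pxe=True)
        api.set_next_boot_device(blade_id, pxe=True, persistent=True)
        params = {
            'power_address': ip,
            'power_port': port,
            'power_user': username,
            'power_pass': password,
            'blade_id': blade_id,
        }
        system_id = msftocs.create_node(macs, 'amd64', 'msftocs', params)
        if accept_all:
            msftocs.commission_node(system_id, user)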
create_node_mock = self.patch(msftocs, 'create_node') create_node_mock.side_effect = asynchronous(lambda *args: system_id) commission_node_mock = self.patch(msftocs, 'commission_node') params = { 'power_address': ip, 'power_port': port, 'power_user': username, 'power_pass': password, 'blade_id': blade_id, } yield deferToThread( probe_and_enlist_msftocs, user, ip, port, username, password, accept_all=True) self.expectThat(blades_mock, MockAnyCall()) self.expectThat(next_boot_device_mock.call_count, Equals(2)) self.expectThat( create_node_mock, MockCalledOnceWith(macs, 'amd64', 'msftocs', params)) self.expectThat( commission_node_mock, MockCalledOnceWith(system_id, user)) @inlineCallbacks def test_probe_and_enlist_msftocs_get_blades_failure_server_error(self): user = factory.make_name('user') ip = factory.make_ipv4_address() port = randint(2000, 4000) username = factory.make_name('username') password = factory.make_name('password') blades_mock = self.patch(MicrosoftOCSAPI, 'get_blades') blades_mock.side_effect = urllib2.URLError('error') with ExpectedException(MicrosoftOCSError): yield deferToThread( probe_and_enlist_msftocs, user, ip, port, username, password) self.expectThat(blades_mock, MockCalledOnceWith()) @inlineCallbacks def test_probe_and_enlist_msftocs_get_blades_failure_http_error(self): user = factory.make_name('user') ip = factory.make_ipv4_address() port = randint(2000, 4000) username = factory.make_name('username') password = factory.make_name('password') blades_mock = self.patch(MicrosoftOCSAPI, 'get_blades') blades_mock.side_effect = urllib2.HTTPError( None, None, None, None, None) with ExpectedException(MicrosoftOCSError): yield deferToThread( probe_and_enlist_msftocs, user, ip, port, username, password) self.expectThat(blades_mock, MockCalledOnceWith()) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_seamicro.py0000644000000000000000000004477013056115004027263 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.hardware.seamicro`. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import json import urlparse from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCalledWith, MockCallsMatch, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import ( call, Mock, ) from provisioningserver.drivers.hardware import seamicro from provisioningserver.drivers.hardware.seamicro import ( find_seamicro15k_servers, power_control_seamicro15k_v09, power_control_seamicro15k_v2, power_query_seamicro15k_v2, POWER_STATUS, probe_seamicro15k_and_enlist, SeaMicroAPIV09, SeaMicroAPIV09Error, SeaMicroError, select_seamicro15k_api_version, ) from provisioningserver.utils.twisted import asynchronous from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread class FakeResponse: def __init__(self, response_code, response, is_json=False): self.response_code = response_code self.response = response if is_json: self.response = json.dumps(response) def getcode(self): return self.response_code def read(self): return self.response class FakeServer(object): def __init__(self, id): self.id = id self.nic = {} def add_fake_nic(self, id): self.nic[id] = {'macAddr': factory.make_mac_address()} def get_fake_macs(self): return [nic['macAddr'] for nic in self.nic.values()] class FakeSeaMicroServerManager(object): def __init__(self): self.servers = [] def get(self, server_id): for server in self.servers: if server_id == server.id: return server return None def list(self): return self.servers class FakeSeaMicroClient(object): pass class TestSeaMicroAPIV09(MAASTestCase): """Tests for SeaMicroAPIV09.""" def test_build_url(self): url = factory.make_string() api = SeaMicroAPIV09('http://%s/' % url) location = factory.make_string() params = [factory.make_string() for _ in range(3)] output = api.build_url(location, params) parsed = urlparse.urlparse(output) self.assertEqual(url, parsed.netloc) self.assertEqual(location, parsed.path.split('/')[1]) self.assertEqual(params, parsed.query.split('&')) def test_invalid_reponse_code(self): url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) response = FakeResponse(401, 'Unauthorized') self.assertRaises( SeaMicroAPIV09Error, api.parse_response, url, response) def test_invalid_json_response(self): url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) response = FakeResponse(200, factory.make_string()) self.assertRaises( SeaMicroAPIV09Error, api.parse_response, url, response) def test_json_error_response(self): url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) data = { 'error': { 'code': 401 } } response = FakeResponse(200, data, is_json=True) self.assertRaises( SeaMicroAPIV09Error, api.parse_response, url, response) def test_json_valid_response(self): url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) output = factory.make_string() data = { 'error': { 'code': 200 }, 'result': { 'data': output }, } response = FakeResponse(200, data, is_json=True) result = api.parse_response(url, response) self.assertEqual(output, result['result']['data']) def configure_get_result(self, result=None): self.patch( SeaMicroAPIV09, 'get', Mock(return_value=result)) def test_login_and_logout(self): token = factory.make_string() self.configure_get_result(token) url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) api.login('username', 'password') self.assertEqual(token, 
api.token) api.logout() self.assertIsNone(api.token) def test_get_server_index(self): result = { 'serverId': { 0: '0/0', 1: '1/0', 2: '2/0', } } self.configure_get_result(result) url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) self.assertEqual(0, api.server_index('0/0')) self.assertEqual(1, api.server_index('1/0')) self.assertEqual(2, api.server_index('2/0')) self.assertIsNone(api.server_index('3/0')) def configure_put_server_power(self, token=None): result = { 'serverId': { 0: '0/0', } } self.configure_get_result(result) mock = self.patch( SeaMicroAPIV09, 'put') url = 'http://%s/' % factory.make_string() api = SeaMicroAPIV09(url) api.token = token return mock, api def assert_put_power_called(self, mock, idx, new_status, *params): location = 'servers/%d' % idx params = ['action=%s' % new_status] + list(params) self.assertThat(mock, MockCalledOnceWith(location, params=params)) def test_put_server_power_on_using_pxe(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.power_on('0/0', do_pxe=True) self.assert_put_power_called( mock, 0, POWER_STATUS.ON, 'using-pxe=true', token) def test_put_server_power_on_not_using_pxe(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.power_on('0/0', do_pxe=False) self.assert_put_power_called( mock, 0, POWER_STATUS.ON, 'using-pxe=false', token) def test_put_server_power_reset_using_pxe(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.reset('0/0', do_pxe=True) self.assert_put_power_called( mock, 0, POWER_STATUS.RESET, 'using-pxe=true', token) def test_put_server_power_reset_not_using_pxe(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.reset('0/0', do_pxe=False) self.assert_put_power_called( mock, 0, POWER_STATUS.RESET, 'using-pxe=false', token) def test_put_server_power_off(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.power_off('0/0', force=False) self.assert_put_power_called( mock, 0, POWER_STATUS.OFF, 'force=false', token) def test_put_server_power_off_force(self): token = factory.make_string() mock, api = self.configure_put_server_power(token) api.power_off('0/0', force=True) self.assert_put_power_called( mock, 0, POWER_STATUS.OFF, 'force=true', token) class TestSeaMicro(MAASTestCase): """Tests for SeaMicro custom hardware.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_select_seamicro15k_api_version_ipmi(self): versions = select_seamicro15k_api_version('ipmi') self.assertEqual(['v2.0', 'v0.9'], versions) def test_select_seamicro15k_api_version_restapi(self): versions = select_seamicro15k_api_version('restapi') self.assertEqual(['v0.9'], versions) def test_select_seamicro15k_api_version_restapi2(self): versions = select_seamicro15k_api_version('restapi2') self.assertEqual(['v2.0'], versions) def configure_get_seamicro15k_api(self, return_value=None): ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock = self.patch( seamicro, 'get_seamicro15k_api') mock.return_value = return_value return mock, ip, username, password def test_find_seamicro15k_servers_ipmi(self): mock, ip, username, password = self.configure_get_seamicro15k_api() self.assertRaises( SeaMicroError, find_seamicro15k_servers, ip, username, password, 'ipmi') self.assertThat( mock, MockCallsMatch( call('v2.0', ip, username, password), call('v0.9', ip, username, password))) def
test_find_seamicro15k_servers_restapi(self): mock, ip, username, password = self.configure_get_seamicro15k_api() self.assertRaises( SeaMicroError, find_seamicro15k_servers, ip, username, password, 'restapi') self.assertThat( mock, MockCalledOnceWith('v0.9', ip, username, password)) def test_find_seamicro15k_servers_restapi2(self): mock, ip, username, password = self.configure_get_seamicro15k_api() self.assertRaises( SeaMicroError, find_seamicro15k_servers, ip, username, password, 'restapi2') self.assertThat( mock, MockCalledOnceWith('v2.0', ip, username, password)) def configure_api_v09_login(self, token=None): token = token or factory.make_string() mock = self.patch( SeaMicroAPIV09, 'login') mock.return_value = token return mock @inlineCallbacks def test_probe_seamicro15k_and_enlist_v09(self): self.configure_api_v09_login() user = factory.make_name('user') ip = factory.make_ipv4_address() username = factory.make_name('username') password = factory.make_name('password') system_id = factory.make_name('system_id') result = { 0: { 'serverId': '0/0', 'serverNIC': '0', 'serverMacAddr': factory.make_mac_address(), }, 1: { 'serverId': '1/0', 'serverNIC': '0', 'serverMacAddr': factory.make_mac_address(), }, 2: { 'serverId': '2/0', 'serverNIC': '0', 'serverMacAddr': factory.make_mac_address(), }, 3: { 'serverId': '3/1', 'serverNIC': '1', 'serverMacAddr': factory.make_mac_address(), }, } self.patch( SeaMicroAPIV09, 'get', Mock(return_value=result)) mock_create_node = self.patch(seamicro, 'create_node') mock_create_node.side_effect = asynchronous(lambda *args: system_id) mock_commission_node = self.patch(seamicro, 'commission_node') yield deferToThread( probe_seamicro15k_and_enlist, user, ip, username, password, power_control='restapi', accept_all=True) self.assertEqual(3, mock_create_node.call_count) last = result[2] power_params = { 'power_control': 'restapi', 'system_id': last['serverId'].split('/')[0], 'power_address': ip, 'power_pass': password, 'power_user': username } self.expectThat( mock_create_node, MockCalledWith( last['serverMacAddr'], 'amd64', 'sm15k', power_params)) self.expectThat( mock_commission_node, MockCalledWith(system_id, user)) def test_power_control_seamicro15k_v09(self): self.configure_api_v09_login() ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock = self.patch( SeaMicroAPIV09, 'power_server') power_control_seamicro15k_v09(ip, username, password, '25', 'on') self.assertThat( mock, MockCalledOnceWith('25/0', POWER_STATUS.ON, do_pxe=True)) def test_power_control_seamicro15k_v09_retry_failure(self): self.configure_api_v09_login() ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock = self.patch( SeaMicroAPIV09, 'power_server') mock.side_effect = SeaMicroAPIV09Error("mock error", response_code=401) power_control_seamicro15k_v09( ip, username, password, '25', 'on', retry_count=5, retry_wait=0) self.assertEqual(5, mock.call_count) def test_power_control_seamicro15k_v09_exception_failure(self): self.configure_api_v09_login() ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock = self.patch( SeaMicroAPIV09, 'power_server') mock.side_effect = SeaMicroAPIV09Error("mock error") self.assertRaises( SeaMicroAPIV09Error, power_control_seamicro15k_v09, ip, username, password, '25', 'on') @inlineCallbacks def test_probe_seamicro15k_and_enlist_v2(self): user = factory.make_name('user') ip = factory.make_ipv4_address() username = 
factory.make_name('username') password = factory.make_name('password') system_id = factory.make_name('system_id') fake_server_0 = FakeServer('0/0') fake_server_0.add_fake_nic('0') fake_server_0.add_fake_nic('1') fake_server_1 = FakeServer('1/0') fake_server_1.add_fake_nic('0') fake_server_1.add_fake_nic('1') fake_client = FakeSeaMicroClient() fake_client.servers = FakeSeaMicroServerManager() fake_client.servers.servers.append(fake_server_0) fake_client.servers.servers.append(fake_server_1) mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = fake_client mock_create_node = self.patch(seamicro, 'create_node') mock_create_node.side_effect = asynchronous(lambda *args: system_id) mock_commission_node = self.patch(seamicro, 'commission_node') yield deferToThread( probe_seamicro15k_and_enlist, user, ip, username, password, power_control='restapi2', accept_all=True) self.assertEqual(2, mock_create_node.call_count) self.expectThat( mock_create_node, MockCallsMatch( call( fake_server_0.get_fake_macs(), 'amd64', 'sm15k', { 'power_control': 'restapi2', 'system_id': '0', 'power_address': ip, 'power_pass': password, 'power_user': username }), call( fake_server_1.get_fake_macs(), 'amd64', 'sm15k', { 'power_control': 'restapi2', 'system_id': '1', 'power_address': ip, 'power_pass': password, 'power_user': username }))) self.expectThat( mock_commission_node, MockCalledWith(system_id, user)) def test_power_control_seamicro15k_v2(self): ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() fake_server = FakeServer('0/0') fake_client = FakeSeaMicroClient() fake_client.servers = FakeSeaMicroServerManager() fake_client.servers.servers.append(fake_server) mock_power_on = self.patch(fake_server, 'power_on') mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = fake_client power_control_seamicro15k_v2(ip, username, password, '0', 'on') self.assertThat(mock_power_on, MockCalledOnceWith(using_pxe=True)) def test_power_control_seamicro15k_v2_raises_error_when_api_None(self): ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = None self.assertRaises( SeaMicroError, power_control_seamicro15k_v2, ip, username, password, '0', 'on') def test_power_query_seamicro15k_v2_power_on(self): ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() fake_server = FakeServer('0/0') self.patch(fake_server, 'active', True) fake_client = FakeSeaMicroClient() fake_client.servers = FakeSeaMicroServerManager() fake_client.servers.servers.append(fake_server) mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = fake_client self.assertEqual( "on", power_query_seamicro15k_v2(ip, username, password, '0')) def test_power_query_seamicro15k_v2_power_off(self): ip = factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() fake_server = FakeServer('0/0') self.patch(fake_server, 'active', False) fake_client = FakeSeaMicroClient() fake_client.servers = FakeSeaMicroServerManager() fake_client.servers.servers.append(fake_server) mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = fake_client self.assertEqual( "off", power_query_seamicro15k_v2(ip, username, password, '0')) def test_power_query_seamicro15k_v2_raises_error_when_api_None(self): ip = 
factory.make_ipv4_address() username = factory.make_string() password = factory.make_string() mock_get_api = self.patch( seamicro, 'get_seamicro15k_api') mock_get_api.return_value = None self.assertRaises( SeaMicroError, power_query_seamicro15k_v2, ip, username, password, '0') maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_ucsm.py0000644000000000000000000006611013056115004026420 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for ``provisioningserver.drivers.hardware.ucsm``.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import permutations import random from StringIO import StringIO import urllib2 from lxml.etree import ( Element, SubElement, XML, ) from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import ( ANY, call, Mock, sentinel, ) from provisioningserver.drivers.hardware import ucsm from provisioningserver.drivers.hardware.ucsm import ( get_children, get_first_booter, get_macs, get_power_command, get_server_power_control, get_servers, get_service_profile, logged_in, make_policy_change, make_request_data, parse_response, power_control_ucsm, power_state_ucsm, probe_and_enlist_ucsm, probe_lan_boot_options, probe_servers, RO_KEYS, set_lan_boot_default, set_server_power_control, strip_ro_keys, UCSM_XML_API, UCSM_XML_API_Error, ) from provisioningserver.utils.twisted import asynchronous from testtools.matchers import Equals from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread def make_api(url='http://url', user='u', password='p', cookie='foo', mock_call=True): api = UCSM_XML_API(url, user, password) api.cookie = cookie return api def make_api_patch_call(testcase, *args, **kwargs): api = make_api(*args, **kwargs) mock = testcase.patch(api, '_call') return api, mock def make_fake_result(root_class, child_tag, container='outConfigs'): fake_result = Element(root_class) outConfigs = SubElement(fake_result, container) outConfigs.append(Element(child_tag)) return outConfigs def make_class(): return factory.make_name('class') def make_dn(): return factory.make_name('dn') def make_server(): return factory.make_name('server') class TestUCSMXMLAPIError(MAASTestCase): """Tests for ``UCSM_XML_API_Error``.""" def test_includes_code_and_msg(self): def raise_error(): raise UCSM_XML_API_Error('bad', 4224) error = self.assertRaises(UCSM_XML_API_Error, raise_error) self.assertEqual('bad', error.args[0]) self.assertEqual(4224, error.code) class TestMakeRequestData(MAASTestCase): """Tests for ``make_request_data``.""" def test_no_children(self): fields = {'hello': 'there'} request_data = make_request_data('foo', fields) root = XML(request_data) self.assertEqual('foo', root.tag) self.assertEqual('there', root.get('hello')) def test_with_children(self): fields = {'hello': 'there'} children_tags = ['bar', 'baz'] children = [Element(child_tag) for child_tag in children_tags] request_data = make_request_data('foo', fields, children) root = XML(request_data) self.assertEqual('foo', root.tag) self.assertItemsEqual(children_tags, (e.tag for e in root)) def test_no_fields(self): request_data = make_request_data('foo') root = XML(request_data) self.assertEqual('foo', 
root.tag) class TestParseResponse(MAASTestCase): """Tests for ``parse_response``.""" def test_no_error(self): xml = '<foo/>' response = parse_response(xml) self.assertEqual('foo', response.tag) def test_error(self): xml = '' self.assertRaises(UCSM_XML_API_Error, parse_response, xml) class TestLogin(MAASTestCase): """Tests for ``UCSM_XML_API.login``.""" def test_login_assigns_cookie(self): cookie = 'chocolate chip' api, mock = make_api_patch_call(self) mock.return_value = Element('aaaLogin', {'outCookie': cookie}) api.login() self.assertEqual(cookie, api.cookie) def test_login_call_parameters(self): user = 'user' password = 'pass' api, mock = make_api_patch_call(self, user=user, password=password) api.login() fields = {'inName': user, 'inPassword': password} self.assertThat(mock, MockCalledOnceWith('aaaLogin', fields)) class TestLogout(MAASTestCase): """Tests for ``UCSM_XML_API.logout``.""" def test_logout_clears_cookie(self): api = make_api() self.patch(api, '_call') api.logout() self.assertIsNone(api.cookie) def test_logout_uses_cookie(self): api, mock = make_api_patch_call(self) cookie = api.cookie api.logout() fields = {'inCookie': cookie} self.assertThat(mock, MockCalledOnceWith('aaaLogout', fields)) class TestConfigResolveClass(MAASTestCase): """Tests for ``UCSM_XML_API.config_resolve_class``.""" def test_no_filters(self): class_id = make_class() api, mock = make_api_patch_call(self) api.config_resolve_class(class_id) fields = {'cookie': api.cookie, 'classId': class_id} self.assertThat(mock, MockCalledOnceWith('configResolveClass', fields, ANY)) def test_with_filters(self): class_id = make_class() filter_element = Element('hi') api, mock = make_api_patch_call(self) api.config_resolve_class(class_id, [filter_element]) in_filters = mock.call_args[0][2] self.assertEqual([filter_element], in_filters[0][:]) def test_return_response(self): api, mock = make_api_patch_call(self) mock.return_value = Element('test') result = api.config_resolve_class('c') self.assertEqual(mock.return_value, result) class TestConfigResolveChildren(MAASTestCase): """Tests for ``UCSM_XML_API.config_resolve_children``.""" def test_parameters(self): dn = make_dn() class_id = make_class() api, mock = make_api_patch_call(self) api.config_resolve_children(dn, class_id) fields = {'inDn': dn, 'classId': class_id, 'cookie': api.cookie} self.assertThat(mock, MockCalledOnceWith('configResolveChildren', fields)) def test_no_class_id(self): dn = make_dn() api, mock = make_api_patch_call(self) api.config_resolve_children(dn) fields = {'inDn': dn, 'cookie': api.cookie} self.assertThat(mock, MockCalledOnceWith('configResolveChildren', fields)) def test_return_response(self): api, mock = make_api_patch_call(self) mock.return_value = Element('test') result = api.config_resolve_children('d', 'c') self.assertEqual(mock.return_value, result) class TestConfigConfMo(MAASTestCase): """Tests for ``UCSM_XML_API.config_conf_mo``.""" def test_parameters(self): dn = make_dn() config_items = [Element('hi')] api, mock = make_api_patch_call(self) api.config_conf_mo(dn, config_items) fields = {'dn': dn, 'cookie': api.cookie} self.assertThat(mock, MockCalledOnceWith('configConfMo', fields, ANY)) in_configs = mock.call_args[0][2] self.assertEqual(config_items, in_configs[0][:]) class TestCall(MAASTestCase): """Tests for ``UCSM_XML_API._call``.""" def test_call(self): name = 'method' fields = {1: 2} children = [3, 4] request = '' response = Element('good') api = make_api() mock_make_request_data = self.patch(ucsm, 'make_request_data')
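# Editor's sketch: TestCall treats make_request_data as a black box, but
# TestMakeRequestData earlier pins down its contract -- serialise a root
# element with attributes and optional children, e.g.
# make_request_data('foo', {'hello': 'there'}) -> '<foo hello="there"/>'.
# An illustrative reimplementation (tostring is not imported at the top of
# this module, hence the local import):
from lxml.etree import tostring

def make_request_data_sketch(name, fields=None, children=None):
    # lxml attribute values must be strings; fields are passed through as-is.
    root = Element(name, dict(fields or {}))
    root.extend(children or [])
    return tostring(root)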
mock_make_request_data.return_value = request mock_send_request = self.patch(api, '_send_request') mock_send_request.return_value = response api._call(name, fields, children) self.assertThat(mock_make_request_data, MockCalledOnceWith(name, fields, children)) self.assertThat(mock_send_request, MockCalledOnceWith(request)) class TestSendRequest(MAASTestCase): """Tests for ``UCSM_XML_API._send_request``.""" def test_send_request(self): request_data = 'foo' api = make_api() self.patch(api, '_call') stream = StringIO('<hi/>') mock = self.patch(urllib2, 'urlopen') mock.return_value = stream response = api._send_request(request_data) self.assertEqual('hi', response.tag) urllib_request = mock.call_args[0][0] self.assertEqual(request_data, urllib_request.data) class TestConfigResolveDn(MAASTestCase): """Tests for ``UCSM_XML_API.config_resolve_dn``.""" def test_parameters(self): api, mock = make_api_patch_call(self) test_dn = make_dn() fields = {'cookie': api.cookie, 'dn': test_dn} api.config_resolve_dn(test_dn) self.assertThat(mock, MockCalledOnceWith('configResolveDn', fields)) class TestGetServers(MAASTestCase): """Tests for ``get_servers``.""" def test_uses_uuid(self): uuid = factory.make_UUID() api = make_api() mock = self.patch(api, 'config_resolve_class') get_servers(api, uuid) filters = mock.call_args[0][1] attrib = {'class': 'computeItem', 'property': 'uuid', 'value': uuid} self.assertEqual(attrib, filters[0].attrib) def test_returns_result(self): uuid = factory.make_UUID() api = make_api() fake_result = make_fake_result('configResolveClass', 'found') self.patch(api, 'config_resolve_class').return_value = fake_result result = get_servers(api, uuid) self.assertEqual('found', result[0].tag) def test_class_id(self): uuid = factory.make_UUID() api = make_api() mock = self.patch(api, 'config_resolve_class') get_servers(api, uuid) self.assertThat(mock, MockCalledOnceWith('computeItem', ANY)) class TestProbeLanBootOptions(MAASTestCase): """Tests for ``probe_lan_boot_options``.""" def test_returns_result(self): api = make_api() server = sentinel.server mock_service_profile = Mock() mock_get_service_profile = self.patch(ucsm, 'get_service_profile') mock_get_service_profile.return_value = mock_service_profile mock_service_profile.get.return_value = sentinel.profile_get fake_result = make_fake_result('tag', 'lsbootLan') mock_config_resolve_children = self.patch( api, 'config_resolve_children') mock_config_resolve_children.return_value = fake_result self.assertEqual(1, len(probe_lan_boot_options(api, server))) self.assertThat( mock_config_resolve_children, MockCalledOnceWith(sentinel.profile_get)) self.assertThat( mock_service_profile.get, MockCalledOnceWith('operBootPolicyName')) self.assertThat( mock_get_service_profile, MockCalledOnceWith(api, server)) class TestGetChildren(MAASTestCase): """Tests for ``get_children``.""" def test_returns_result(self): search_class = make_class() api = make_api() fake_result = make_fake_result('configResolveChildren', search_class) self.patch(api, 'config_resolve_children').return_value = fake_result in_element = Element('test', {'dn': make_dn()}) class_id = search_class result = get_children(api, in_element, class_id) self.assertEqual(search_class, result[0].tag) def test_parameters(self): search_class = make_class() parent_dn = make_dn() api = make_api() mock = self.patch(api, 'config_resolve_children') in_element = Element('test', {'dn': parent_dn}) class_id = search_class get_children(api, in_element, class_id) self.assertThat(mock, MockCalledOnceWith(parent_dn,
search_class)) class TestGetMacs(MAASTestCase): """Tests for ``get_macs``.""" def test_gets_adaptors(self): adaptor = 'adaptor' server = make_server() mac = 'xx' api = make_api() mock = self.patch(ucsm, 'get_children') def fake_get_children(api, element, class_id): if class_id == 'adaptorUnit': return [adaptor] elif class_id == 'adaptorHostEthIf': return [Element('ethif', {'mac': mac})] mock.side_effect = fake_get_children macs = get_macs(api, server) self.assertThat(mock, MockCallsMatch( call(api, server, 'adaptorUnit'), call(api, adaptor, 'adaptorHostEthIf'))) self.assertEqual([mac], macs) class TestProbeServers(MAASTestCase): """Tests for ``probe_servers``.""" def test_uses_api(self): api = make_api() mock = self.patch(ucsm, 'get_servers') probe_servers(api) self.assertThat(mock, MockCalledOnceWith(api)) def test_returns_results(self): servers = [{'uuid': factory.make_UUID()}] mac = 'mac' api = make_api() self.patch(ucsm, 'get_servers').return_value = servers self.patch(ucsm, 'get_macs').return_value = [mac] self.patch(ucsm, 'probe_lan_boot_options').return_value = ['option'] server_list = probe_servers(api) self.assertEqual([(servers[0], [mac])], server_list) def test_no_results_with_no_server_macs(self): servers = [{'uuid': factory.make_UUID()}] api = make_api() self.patch(ucsm, 'get_servers').return_value = servers self.patch(ucsm, 'get_macs').return_value = [] self.patch(ucsm, 'probe_lan_boot_options').return_value = ['option'] server_list = probe_servers(api) self.assertEqual([], server_list) def test_no_results_with_no_boot_options(self): servers = [{'uuid': factory.make_UUID()}] mac = 'mac' api = make_api() self.patch(ucsm, 'get_servers').return_value = servers self.patch(ucsm, 'get_macs').return_value = mac self.patch(ucsm, 'probe_lan_boot_options').return_value = [] server_list = probe_servers(api) self.assertEqual([], server_list) class TestGetServerPowerControl(MAASTestCase): """Tests for ``get_server_power_control``.""" def test_get_server_power_control(self): api = make_api() mock = self.patch(api, 'config_resolve_children') fake_result = make_fake_result('configResolveChildren', 'lsPower') mock.return_value = fake_result dn = make_dn() server = Element('computeItem', {'assignedToDn': dn}) power_control = get_server_power_control(api, server) self.assertThat(mock, MockCalledOnceWith(dn, 'lsPower')) self.assertEqual('lsPower', power_control.tag) class TestSetServerPowerControl(MAASTestCase): """Tests for ``set_server_power_control``.""" def test_set_server_power_control(self): api = make_api() power_dn = make_dn() power_control = Element('lsPower', {'dn': power_dn}) config_conf_mo_mock = self.patch(api, 'config_conf_mo') state = 'state' set_server_power_control(api, power_control, state) self.assertThat(config_conf_mo_mock, MockCalledOnceWith(power_dn, ANY)) power_change = config_conf_mo_mock.call_args[0][1][0] self.assertEqual(power_change.tag, 'lsPower') self.assertEqual({'state': state, 'dn': power_dn}, power_change.attrib) class TestLoggedIn(MAASTestCase): """Tests for ``logged_in``.""" def test_logged_in(self): mock = self.patch(ucsm, 'UCSM_XML_API') url = 'url' username = 'username' password = 'password' mock.return_value = Mock() with logged_in(url, username, password) as api: self.assertEqual(mock.return_value, api) self.assertThat(api.login, MockCalledOnceWith()) self.assertThat(mock.return_value.logout, MockCalledOnceWith()) class TestValidGetPowerCommand(MAASTestCase): scenarios = [ ('Power On', dict( power_mode='on', current_state='down', command='admin-up')), 
('Power Cycle', dict( power_mode='on', current_state='up', command='cycle-immediate')), ('Power Off', dict( power_mode='off', current_state='up', command='admin-down')), ] def test_get_power_command(self): command = get_power_command(self.power_mode, self.current_state) self.assertEqual(self.command, command) class TestInvalidGetPowerCommand(MAASTestCase): def test_get_power_command_raises_error_on_bad_power_mode(self): bad_power_mode = factory.make_name('unlikely') error = self.assertRaises(UCSM_XML_API_Error, get_power_command, bad_power_mode, None) self.assertIn(bad_power_mode, error.args[0]) class TestPowerControlUCSM(MAASTestCase): """Tests for ``power_control_ucsm``.""" def test_power_control_ucsm(self): uuid = factory.make_UUID() api = Mock() self.patch(ucsm, 'UCSM_XML_API').return_value = api get_servers_mock = self.patch(ucsm, 'get_servers') server = make_server() state = 'admin-down' power_control = Element('lsPower', {'state': state}) get_servers_mock.return_value = [server] get_server_power_control_mock = self.patch(ucsm, 'get_server_power_control') get_server_power_control_mock.return_value = power_control set_server_power_control_mock = self.patch(ucsm, 'set_server_power_control') power_control_ucsm('url', 'username', 'password', uuid, 'off') self.assertThat(get_servers_mock, MockCalledOnceWith(api, uuid)) self.assertThat(set_server_power_control_mock, MockCalledOnceWith(api, power_control, state)) class TestUCSMPowerState(MAASTestCase): """Tests for `power_state_ucsm`.""" def test_power_state_get_off(self): url = factory.make_name('url') username = factory.make_name('username') password = factory.make_name('password') uuid = factory.make_UUID() api = Mock() self.patch(ucsm, 'UCSM_XML_API').return_value = api get_servers_mock = self.patch(ucsm, 'get_servers') server = make_server() current_state = 'down' power_control = Element('lsPower', {'state': current_state}) get_servers_mock.return_value = [server] get_server_power_control_mock = self.patch( ucsm, 'get_server_power_control') get_server_power_control_mock.return_value = power_control power_state = power_state_ucsm(url, username, password, uuid) self.expectThat(get_servers_mock, MockCalledOnceWith(api, uuid)) self.expectThat( get_server_power_control_mock, MockCalledOnceWith(api, server)) self.expectThat(power_state, Equals('off')) def test_power_state_get_on(self): url = factory.make_name('url') username = factory.make_name('username') password = factory.make_name('password') uuid = factory.make_UUID() api = Mock() self.patch(ucsm, 'UCSM_XML_API').return_value = api get_servers_mock = self.patch(ucsm, 'get_servers') server = make_server() current_state = 'up' power_control = Element('lsPower', {'state': current_state}) get_servers_mock.return_value = [server] get_server_power_control_mock = self.patch( ucsm, 'get_server_power_control') get_server_power_control_mock.return_value = power_control power_state = power_state_ucsm(url, username, password, uuid) self.expectThat(get_servers_mock, MockCalledOnceWith(api, uuid)) self.expectThat( get_server_power_control_mock, MockCalledOnceWith(api, server)) self.expectThat(power_state, Equals('on')) def test_power_state_error_on_unknown_state(self): url = factory.make_name('url') username = factory.make_name('username') password = factory.make_name('password') uuid = factory.make_UUID() api = Mock() self.patch(ucsm, 'UCSM_XML_API').return_value = api get_servers_mock = self.patch(ucsm, 'get_servers') server = make_server() current_state = factory.make_name('error')
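# Editor's sketch of the decision table the scenarios above pin down: power
# on from 'down' means bring the server up, power on while already 'up'
# means cycle it (so it PXE boots afresh), and power off maps to admin-down.
# The function body is illustrative, not the driver source:
def get_power_command_sketch(power_mode, current_state):
    if power_mode == 'on':
        return 'cycle-immediate' if current_state == 'up' else 'admin-up'
    elif power_mode == 'off':
        return 'admin-down'
    raise UCSM_XML_API_Error(
        'Unexpected power mode: %s' % power_mode, None)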
power_control = Element('lsPower', {'state': current_state}) get_servers_mock.return_value = [server] get_server_power_control_mock = self.patch( ucsm, 'get_server_power_control') get_server_power_control_mock.return_value = power_control self.assertRaises( UCSM_XML_API_Error, power_state_ucsm, url, username, password, uuid) class TestProbeAndEnlistUCSM(MAASTestCase): """Tests for ``probe_and_enlist_ucsm``.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @inlineCallbacks def test_probe_and_enlist(self): user = factory.make_name('user') url = factory.make_name('url') username = factory.make_name('username') password = factory.make_name('password') system_id = factory.make_name('system_id') api = Mock() self.patch(ucsm, 'UCSM_XML_API').return_value = api server_element = {'uuid': 'uuid'} server = (server_element, ['mac'],) probe_servers_mock = self.patch(ucsm, 'probe_servers') probe_servers_mock.return_value = [server] set_lan_boot_default_mock = self.patch(ucsm, 'set_lan_boot_default') create_node_mock = self.patch(ucsm, 'create_node') create_node_mock.side_effect = asynchronous(lambda *args: system_id) commission_node_mock = self.patch(ucsm, 'commission_node') yield deferToThread( probe_and_enlist_ucsm, user, url, username, password, accept_all=True) self.expectThat( set_lan_boot_default_mock, MockCalledOnceWith(api, server_element)) self.expectThat(probe_servers_mock, MockCalledOnceWith(api)) params = { 'power_address': url, 'power_user': username, 'power_pass': password, 'uuid': server[0]['uuid'] } self.expectThat( create_node_mock, MockCalledOnceWith(server[1], 'amd64', 'ucsm', params)) self.expectThat( commission_node_mock, MockCalledOnceWith(system_id, user)) class TestGetServiceProfile(MAASTestCase): """Tests for ``get_service_profile.``""" def test_get_service_profile(self): test_dn = make_dn() server = Element('computeBlade', {'assignedToDn': test_dn}) api = make_api() mock = self.patch(api, 'config_resolve_dn') mock.return_value = make_fake_result('configResolveDn', 'lsServer', 'outConfig') service_profile = get_service_profile(api, server) self.assertThat(mock, MockCalledOnceWith(test_dn)) self.assertEqual(mock.return_value[0], service_profile) def make_boot_order_scenarios(size): """Produce test scenarios for testing get_first_booter. Each scenario is one of the permutations of a set of ``size`` elements, where each element has an integer 'order' attribute that get_first_booter will use to determine which device boots first. 
""" minimum = random.randint(0, 500) ordinals = xrange(minimum, minimum + size) elements = [ Element('Entry%d' % i, {'order': '%d' % i}) for i in ordinals ] orders = permutations(elements) orders = [{'order': order} for order in orders] scenarios = [('%d' % i, order) for i, order in enumerate(orders)] return scenarios, minimum class TestGetFirstBooter(MAASTestCase): """Tests for ``get_first_booter.``""" scenarios, minimum = make_boot_order_scenarios(3) def test_first_booter(self): """Ensure the boot device is picked according to the order attribute, not the order of elements in the list of devices.""" root = Element('outConfigs') root.extend(self.order) picked = get_first_booter(root) self.assertEqual(self.minimum, int(picked.get('order'))) class TestsForStripRoKeys(MAASTestCase): """Tests for ``strip_ro_keys.``""" def test_strip_ro_keys(self): attributes = {key: 'DC' for key in RO_KEYS} elements = [ Element('Element%d' % i, attributes) for i in xrange(random.randint(0, 10)) ] strip_ro_keys(elements) for key in RO_KEYS: values = [element.get(key) for element in elements] for value in values: self.assertIsNone(value) class TestMakePolicyChange(MAASTestCase): """Tests for ``make_policy_change``.""" def test_lan_already_top_priority(self): boot_profile_response = make_fake_result('configResolveChildren', 'lsbootLan') mock = self.patch(ucsm, 'get_first_booter') mock.return_value = boot_profile_response[0] change = make_policy_change(boot_profile_response) self.assertIsNone(change) self.assertThat(mock, MockCalledOnceWith(boot_profile_response)) def test_change_lan_to_top_priority(self): boot_profile_response = Element('outConfigs') lan_boot = Element('lsbootLan', {'order': 'second'}) storage_boot = Element('lsbootStorage', {'order': 'first'}) boot_profile_response.extend([lan_boot, storage_boot]) self.patch(ucsm, 'get_first_booter').return_value = storage_boot self.patch(ucsm, 'strip_ro_keys') change = make_policy_change(boot_profile_response) lan_boot_order = change.xpath('//lsbootPolicy/lsbootLan/@order') storage_boot_order = \ change.xpath('//lsbootPolicy/lsbootStorage/@order') self.assertEqual(['first'], lan_boot_order) self.assertEqual(['second'], storage_boot_order) class TestSetLanBootDefault(MAASTestCase): """Tets for ``set_lan_boot_default.``""" def test_no_change(self): api = make_api() server = make_server() self.patch(ucsm, 'get_service_profile') self.patch(api, 'config_resolve_children') self.patch(ucsm, 'make_policy_change').return_value = None config_conf_mo = self.patch(api, 'config_conf_mo') set_lan_boot_default(api, server) self.assertThat(config_conf_mo, MockNotCalled()) def test_with_change(self): api = make_api() server = make_server() test_dn = make_dn() test_change = 'change' service_profile = Element('test', {'operBootPolicyName': test_dn}) self.patch(ucsm, 'get_service_profile').return_value = service_profile self.patch(api, 'config_resolve_children') self.patch(ucsm, 'make_policy_change').return_value = test_change config_conf_mo = self.patch(api, 'config_conf_mo') set_lan_boot_default(api, server) self.assertThat(config_conf_mo, MockCalledOnceWith(test_dn, [test_change])) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_virsh.py0000644000000000000000000004512413056115004026606 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.hardware.virsh`. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from textwrap import dedent from lxml import etree from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCalledWith, MockCallsMatch, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import call from provisioningserver.drivers.hardware import virsh from provisioningserver.utils.twisted import asynchronous from testtools.testcase import ExpectedException from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread SAMPLE_IFLIST = dedent(""" Interface Type Source Model MAC ------------------------------------------------------- - bridge br0 e1000 %s - bridge br1 e1000 %s """) SAMPLE_DUMPXML = dedent(""" test 4096576 4096576 1 hvm """) SAMPLE_DUMPXML_2 = dedent(""" test 4096576 4096576 1 hvm """) SAMPLE_DUMPXML_3 = dedent(""" test 4096576 4096576 1 hvm """) SAMPLE_DUMPXML_4 = dedent(""" test 4096576 4096576 1 hvm """) class TestVirshSSH(MAASTestCase): """Tests for `VirshSSH`.""" def configure_virshssh_pexpect(self, inputs=None, dom_prefix=None): """Configures the VirshSSH class to use 'cat' process for testing instead of the actual virsh.""" conn = virsh.VirshSSH(timeout=0.1, dom_prefix=dom_prefix) self.addCleanup(conn.close) self.patch(conn, '_execute') conn._spawn('cat') if inputs is not None: for line in inputs: conn.sendline(line) return conn def configure_virshssh(self, output, dom_prefix=None): self.patch(virsh.VirshSSH, 'run').return_value = output return virsh.VirshSSH(dom_prefix=dom_prefix) def test_login_prompt(self): virsh_outputs = [ 'virsh # ' ] conn = self.configure_virshssh_pexpect(virsh_outputs) self.assertTrue(conn.login(poweraddr=None)) def test_login_with_sshkey(self): virsh_outputs = [ "The authenticity of host '127.0.0.1' can't be established.", "ECDSA key fingerprint is " "00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff.", "Are you sure you want to continue connecting (yes/no)? 
", ] conn = self.configure_virshssh_pexpect(virsh_outputs) mock_sendline = self.patch(conn, 'sendline') conn.login(poweraddr=None) self.assertThat(mock_sendline, MockCalledOnceWith('yes')) def test_login_with_password(self): virsh_outputs = [ "ubuntu@%s's password: " % factory.make_ipv4_address(), ] conn = self.configure_virshssh_pexpect(virsh_outputs) fake_password = factory.make_name('password') mock_sendline = self.patch(conn, 'sendline') conn.login(poweraddr=None, password=fake_password) self.assertThat(mock_sendline, MockCalledOnceWith(fake_password)) def test_login_missing_password(self): virsh_outputs = [ "ubuntu@%s's password: " % factory.make_ipv4_address(), ] conn = self.configure_virshssh_pexpect(virsh_outputs) mock_close = self.patch(conn, 'close') self.assertFalse(conn.login(poweraddr=None, password=None)) self.assertThat(mock_close, MockCalledOnceWith()) def test_login_invalid(self): virsh_outputs = [ factory.make_string(), ] conn = self.configure_virshssh_pexpect(virsh_outputs) mock_close = self.patch(conn, 'close') self.assertFalse(conn.login(poweraddr=None)) self.assertThat(mock_close, MockCalledOnceWith()) def test_logout(self): conn = self.configure_virshssh_pexpect() mock_sendline = self.patch(conn, 'sendline') mock_close = self.patch(conn, 'close') conn.logout() self.assertThat(mock_sendline, MockCalledOnceWith('quit')) self.assertThat(mock_close, MockCalledOnceWith()) def test_prompt(self): virsh_outputs = [ 'virsh # ' ] conn = self.configure_virshssh_pexpect(virsh_outputs) self.assertTrue(conn.prompt()) def test_invalid_prompt(self): virsh_outputs = [ factory.make_string() ] conn = self.configure_virshssh_pexpect(virsh_outputs) self.assertFalse(conn.prompt()) def test_run(self): cmd = ['list', '--all', '--name'] expected = ' '.join(cmd) names = [factory.make_name('machine') for _ in range(3)] conn = self.configure_virshssh_pexpect() conn.before = '\n'.join([expected] + names) mock_sendline = self.patch(conn, 'sendline') mock_prompt = self.patch(conn, 'prompt') output = conn.run(cmd) self.assertThat(mock_sendline, MockCalledOnceWith(expected)) self.assertThat(mock_prompt, MockCalledOnceWith()) self.assertEqual('\n'.join(names), output) def test_list(self): names = [factory.make_name('machine') for _ in range(3)] conn = self.configure_virshssh('\n'.join(names)) expected = conn.list() self.assertItemsEqual(names, expected) def test_list_dom_prefix(self): prefix = 'dom_prefix' names = [prefix + factory.make_name('machine') for _ in range(3)] conn = self.configure_virshssh('\n'.join(names), dom_prefix=prefix) expected = conn.list() self.assertItemsEqual(names, expected) def test_get_state(self): state = factory.make_name('state') conn = self.configure_virshssh(state) expected = conn.get_state('') self.assertEqual(state, expected) def test_get_state_error(self): conn = self.configure_virshssh('error:') expected = conn.get_state('') self.assertEqual(None, expected) def test_mac_addresses_returns_list(self): macs = [factory.make_mac_address() for _ in range(2)] output = SAMPLE_IFLIST % (macs[0], macs[1]) conn = self.configure_virshssh(output) expected = conn.get_mac_addresses('') self.assertEqual(macs, expected) def test_get_arch_returns_valid(self): arch = factory.make_name('arch') output = SAMPLE_DUMPXML % arch conn = self.configure_virshssh(output) expected = conn.get_arch('') self.assertEqual(arch, expected) def test_get_arch_returns_valid_fixed(self): arch = random.choice(virsh.ARCH_FIX.keys()) fixed_arch = virsh.ARCH_FIX[arch] output = SAMPLE_DUMPXML % arch conn = 
self.configure_virshssh(output) expected = conn.get_arch('') self.assertEqual(fixed_arch, expected) class TestVirsh(MAASTestCase): """Tests for `probe_virsh_and_enlist`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def _probe_and_enlist_mock_run(self, *args): args = args[0] # if the argument is "define", we want to ensure that the boot # order has been set up correctly. if args[0] == "define": xml_file = args[1] with open(xml_file) as f: xml = f.read() doc = etree.XML(xml) evaluator = etree.XPathEvaluator(doc) boot_elements = evaluator(virsh.XPATH_BOOT) self.assertEqual(2, len(boot_elements)) # make sure we set the network to come first, then the HD self.assertEqual('network', boot_elements[0].attrib['dev']) self.assertEqual('hd', boot_elements[1].attrib['dev']) return "" @inlineCallbacks def test_probe_and_enlist(self): # Patch VirshSSH list so that some machines are returned # with some fake architectures. user = factory.make_name('user') system_id = factory.make_name('system_id') machines = [factory.make_name('machine') for _ in range(4)] self.patch(virsh.VirshSSH, 'list').return_value = machines fake_arch = factory.make_name('arch') mock_arch = self.patch(virsh.VirshSSH, 'get_arch') mock_arch.return_value = fake_arch # Patch get_state so that two of the machines are on, so we # can check that they will be forced off. fake_states = [ virsh.VirshVMState.ON, virsh.VirshVMState.OFF, virsh.VirshVMState.OFF, virsh.VirshVMState.ON, ] mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.side_effect = fake_states # Set up the power parameters that we should expect to be # the output of the probe_and_enlist fake_password = factory.make_string() poweraddr = factory.make_name('poweraddr') called_params = [] fake_macs = [] for machine in machines: macs = [factory.make_mac_address() for _ in range(4)] fake_macs.append(macs) called_params.append({ 'power_address': poweraddr, 'power_id': machine, 'power_pass': fake_password, }) # Patch the get_mac_addresses so we get a known list of # mac addresses for each machine. mock_macs = self.patch(virsh.VirshSSH, 'get_mac_addresses') mock_macs.side_effect = fake_macs # Patch the poweroff and create as we really don't want these # actions to occur, but we do want to check that they are called. mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') mock_create_node = self.patch(virsh, 'create_node') mock_create_node.side_effect = asynchronous( lambda *args, **kwargs: system_id) mock_commission_node = self.patch(virsh, 'commission_node') # Patch login and logout so that we don't really contact # a server at the fake poweraddr mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_logout = self.patch(virsh.VirshSSH, 'logout') mock_get_machine_xml = self.patch(virsh.VirshSSH, 'get_machine_xml') mock_get_machine_xml.side_effect = [ SAMPLE_DUMPXML, SAMPLE_DUMPXML_2, SAMPLE_DUMPXML_3, SAMPLE_DUMPXML_4, ] mock_run = self.patch(virsh.VirshSSH, 'run') mock_run.side_effect = self._probe_and_enlist_mock_run # Perform the probe and enlist yield deferToThread( virsh.probe_virsh_and_enlist, user, poweraddr, password=fake_password, accept_all=True) # Check that login was called with the provided poweraddr and # password. self.expectThat( mock_login, MockCalledOnceWith(poweraddr, fake_password)) # The first and last machines should have poweroff called on them, # as they were initially in the on state. 
self.expectThat( mock_poweroff, MockCalledWith(machines[0])) self.expectThat( mock_poweroff, MockCalledWith(machines[3])) # Check that the create command had the correct parameters for # each machine. self.expectThat( mock_create_node, MockCallsMatch( call( fake_macs[0], fake_arch, 'virsh', called_params[0], machines[0]), call( fake_macs[1], fake_arch, 'virsh', called_params[1], machines[1]), call( fake_macs[2], fake_arch, 'virsh', called_params[2], machines[2]), call( fake_macs[3], fake_arch, 'virsh', called_params[3], machines[3]), )) self.assertThat(mock_logout, MockCalledOnceWith()) self.expectThat( mock_commission_node, MockCalledWith(system_id, user)) @inlineCallbacks def test_probe_and_enlist_login_failure(self): user = factory.make_name('user') poweraddr = factory.make_name('poweraddr') mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = False with ExpectedException(virsh.VirshError): yield deferToThread( virsh.probe_virsh_and_enlist, user, poweraddr, password=factory.make_string()) class TestVirshPowerControl(MAASTestCase): """Tests for `power_control_virsh`.""" def test_power_control_login_failure(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = False self.assertRaises( virsh.VirshError, virsh.power_control_virsh, factory.make_name('poweraddr'), factory.make_name('machine'), 'on', password=factory.make_string()) def test_power_control_on(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = virsh.VirshVMState.OFF mock_poweron = self.patch(virsh.VirshSSH, 'poweron') poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') virsh.power_control_virsh(poweraddr, machine, 'on') self.assertThat( mock_login, MockCalledOnceWith(poweraddr, None)) self.assertThat( mock_state, MockCalledOnceWith(machine)) self.assertThat( mock_poweron, MockCalledOnceWith(machine)) def test_power_control_off(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = virsh.VirshVMState.ON mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') virsh.power_control_virsh(poweraddr, machine, 'off') self.assertThat( mock_login, MockCalledOnceWith(poweraddr, None)) self.assertThat( mock_state, MockCalledOnceWith(machine)) self.assertThat( mock_poweroff, MockCalledOnceWith(machine)) def test_power_control_bad_domain(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = None poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertRaises( virsh.VirshError, virsh.power_control_virsh, poweraddr, machine, 'on') def test_power_control_power_failure(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = virsh.VirshVMState.ON mock_poweroff = self.patch(virsh.VirshSSH, 'poweroff') mock_poweroff.return_value = False poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertRaises( virsh.VirshError, virsh.power_control_virsh, poweraddr, machine, 'off') class TestVirshPowerState(MAASTestCase): """Tests for `power_state_virsh`.""" def 
test_power_state_login_failure(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = False self.assertRaises( virsh.VirshError, virsh.power_state_virsh, factory.make_name('poweraddr'), factory.make_name('machine'), password=factory.make_string()) def test_power_state_get_on(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = virsh.VirshVMState.ON poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertEqual( 'on', virsh.power_state_virsh(poweraddr, machine)) def test_power_state_get_off(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = virsh.VirshVMState.OFF poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertEqual( 'off', virsh.power_state_virsh(poweraddr, machine)) def test_power_state_bad_domain(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = None poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertRaises( virsh.VirshError, virsh.power_state_virsh, poweraddr, machine) def test_power_state_error_on_unknown_state(self): mock_login = self.patch(virsh.VirshSSH, 'login') mock_login.return_value = True mock_state = self.patch(virsh.VirshSSH, 'get_state') mock_state.return_value = 'unknown' poweraddr = factory.make_name('poweraddr') machine = factory.make_name('machine') self.assertRaises( virsh.VirshError, virsh.power_state_virsh, poweraddr, machine) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/hardware/tests/test_vmware.py0000644000000000000000000003773013056115004026750 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.hardware.vmware`. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maastesting.factory import factory from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from provisioningserver.drivers.hardware import vmware from provisioningserver.drivers.hardware.vmware import ( VMwarePyvmomiAPI, VMwareVMNotFound, ) from provisioningserver.utils.twisted import asynchronous from testtools import ExpectedException from testtools.matchers import ( Equals, Is, IsInstance, Not, ) from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread class FakeVmomiVMSummaryConfig(object): def __init__(self, name, has_instance_uuid=None, has_uuid=None): self.name = name self.guestId = random.choice(["otherLinux64Guest", "otherLinuxGuest"]) if has_instance_uuid is None: has_instance_uuid = random.choice([True, False]) if has_instance_uuid: self.instanceUuid = factory.make_UUID() if has_uuid is None: has_uuid = random.choice([True, False]) if has_uuid: self.uuid = factory.make_UUID() class FakeVmomiVMSummary(object): def __init__(self, name, has_instance_uuid=None, has_uuid=None): self.config = FakeVmomiVMSummaryConfig( name, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) class FakeVmomiVMRuntime(object): def __init__(self): # add an invalid power state into the mix self.powerState = random.choice( ["poweredOn", "poweredOff", "suspended", "warp9"]) class FakeVmomiVMConfigHardwareDevice(object): def __init__(self): pass class FakeVmomiNic(FakeVmomiVMConfigHardwareDevice): def __init__(self): super(FakeVmomiNic, self).__init__() self.macAddress = factory.make_mac_address() @property def key(self): return id(self) class FakeVmomiVMConfigHardware(object): def __init__(self, nics=None): self.device = [] if nics is None: nics = random.choice([1, 1, 1, 2, 2, 3]) for i in range(0, nics): self.device.append(FakeVmomiNic()) # add a few random non-NICs into the mix for i in range(0, random.choice([0, 1, 3, 5, 15])): self.device.append(FakeVmomiVMConfigHardwareDevice()) random.shuffle(self.device) class FakeVmomiVMConfig(object): def __init__(self, nics=None): self.hardware = FakeVmomiVMConfigHardware(nics=nics) class FakeVmomiVM(object): def __init__( self, name=None, nics=None, has_instance_uuid=None, has_uuid=None): if name is None: self._name = factory.make_hostname() else: self._name = name self.summary = FakeVmomiVMSummary( self._name, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) self.runtime = FakeVmomiVMRuntime() self.config = FakeVmomiVMConfig(nics=nics) def PowerOn(self): self.runtime.powerState = "poweredOn" def PowerOff(self): self.runtime.powerState = "poweredOff" def ReconfigVM_Task(self, vmconf): pass class FakeVmomiVmFolder(object): def __init__(self, servers=0, has_instance_uuid=None, has_uuid=None): self.childEntity = [] for i in range(0, servers): vm = FakeVmomiVM( has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) self.childEntity.append(vm) class FakeVmomiDatacenter(object): def __init__(self, servers=0, has_instance_uuid=None, has_uuid=None): self.vmFolder = FakeVmomiVmFolder( servers=servers, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) class FakeVmomiRootFolder(object): def __init__(self, servers=0, has_instance_uuid=None, has_uuid=None): self.childEntity = [FakeVmomiDatacenter( servers=servers, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid)] class FakeVmomiSearchIndex(object): def __init__(self, content): 
self.vms_by_instance_uuid = {} self.vms_by_uuid = {} for child in content.rootFolder.childEntity: if hasattr(child, 'vmFolder'): datacenter = child vm_folder = datacenter.vmFolder vm_list = vm_folder.childEntity for vm in vm_list: if hasattr(vm.summary.config, 'instanceUuid') \ and vm.summary.config.instanceUuid is not None: self.vms_by_instance_uuid[ vm.summary.config.instanceUuid] = vm if hasattr(vm.summary.config, 'uuid')\ and vm.summary.config.uuid is not None: self.vms_by_uuid[vm.summary.config.uuid] = vm def FindByUuid(self, datacenter, uuid, search_vms, search_by_instance_uuid): assert datacenter is None assert uuid is not None assert search_vms is True if search_by_instance_uuid: if uuid not in self.vms_by_instance_uuid: return None return self.vms_by_instance_uuid[uuid] else: if uuid not in self.vms_by_uuid: return None return self.vms_by_uuid[uuid] class FakeVmomiContent(object): def __init__(self, servers=0, has_instance_uuid=None, has_uuid=None): self.rootFolder = FakeVmomiRootFolder( servers=servers, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) self.searchIndex = FakeVmomiSearchIndex(self) class FakeVmomiServiceInstance(object): def __init__(self, servers=0, has_instance_uuid=None, has_uuid=None): self.content = FakeVmomiContent( servers=servers, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) def RetrieveContent(self): return self.content class TestVMwarePyvmomi(MAASTestCase): """Tests for VMware probe-and-enlist, and power query/control using the python-pyvmomi API.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def configure_vmomi_api( self, servers=10, has_instance_uuid=None, has_uuid=None): mock_vmomi_api = self.patch(vmware, 'vmomi_api') mock_vmomi_api.SmartConnect.return_value = FakeVmomiServiceInstance( servers=servers, has_instance_uuid=has_instance_uuid, has_uuid=has_uuid) return mock_vmomi_api def setUp(self): super(TestVMwarePyvmomi, self).setUp() if vmware.try_pyvmomi_import() is False: self.skipTest('cannot test VMware without python-pyvmomi') def test_api_connection(self): mock_vmomi_api = self.configure_vmomi_api(servers=0) api = VMwarePyvmomiAPI( factory.make_hostname(), factory.make_username(), factory.make_username()) api.connect() self.expectThat( api.service_instance, IsInstance(FakeVmomiServiceInstance)) self.expectThat(api.is_connected(), Equals(True)) api.disconnect() self.expectThat(mock_vmomi_api.SmartConnect.called, Equals(True)) self.expectThat(mock_vmomi_api.Disconnect.called, Equals(True)) def test_api_failed_connection(self): mock_vmomi_api = self.patch(vmware, 'vmomi_api') mock_vmomi_api.SmartConnect.return_value = None api = VMwarePyvmomiAPI( factory.make_hostname(), factory.make_username(), factory.make_username()) with ExpectedException(vmware.VMwareAPIConnectionFailed): api.connect() self.expectThat(api.service_instance, Is(None)) self.expectThat(api.is_connected(), Equals(False)) api.disconnect() self.expectThat(mock_vmomi_api.SmartConnect.called, Equals(True)) self.expectThat(mock_vmomi_api.Disconnect.called, Equals(True)) def test_get_vmware_servers_empty(self): self.configure_vmomi_api(servers=0) servers = vmware.get_vmware_servers( factory.make_hostname(), factory.make_username(), factory.make_username(), port=8443, protocol='https') self.expectThat(servers, Equals({})) def test_get_vmware_servers(self): self.configure_vmomi_api(servers=10) servers = vmware.get_vmware_servers( factory.make_hostname(), factory.make_username(), factory.make_username()) self.expectThat(servers, Not(Equals({}))) def 
test_get_server_by_instance_uuid(self): mock_vmomi_api = self.configure_vmomi_api( servers=1, has_instance_uuid=True, has_uuid=False) search_index = \ mock_vmomi_api.SmartConnect.return_value.content.searchIndex instance_uuids = search_index.vms_by_instance_uuid.keys() for uuid in instance_uuids: vm = vmware._find_vm_by_uuid_or_name(mock_vmomi_api, uuid, None) self.assertIsNotNone(vm) def test_get_server_by_uuid(self): mock_vmomi_api = self.configure_vmomi_api( servers=1, has_instance_uuid=False, has_uuid=True) search_index = \ mock_vmomi_api.SmartConnect.return_value.content.searchIndex uuids = search_index.vms_by_uuid.keys() for uuid in uuids: vm = vmware._find_vm_by_uuid_or_name(mock_vmomi_api, uuid, None) self.assertIsNotNone(vm) def test_get_server_by_name(self): mock_vmomi_api = self.configure_vmomi_api( servers=1, has_instance_uuid=False, has_uuid=True) host = factory.make_hostname() username = factory.make_username() password = factory.make_username() servers = vmware.get_vmware_servers(host, username, password) for vm_name in servers.keys(): vm = vmware._find_vm_by_uuid_or_name( mock_vmomi_api, None, vm_name) self.assertIsNotNone(vm) def test_get_missing_server_raises_VMwareVMNotFound(self): mock_vmomi_api = self.configure_vmomi_api( servers=1, has_instance_uuid=True, has_uuid=True) with ExpectedException(VMwareVMNotFound): vmware._find_vm_by_uuid_or_name(mock_vmomi_api, None, None) def test_power_control_missing_server_raises_VMwareVMNotFound(self): self.configure_vmomi_api( servers=1, has_instance_uuid=True, has_uuid=True) host = factory.make_hostname() username = factory.make_username() password = factory.make_username() with ExpectedException(VMwareVMNotFound): vmware.power_control_vmware( host, username, password, None, None, "on") def test_power_query_missing_server_raises_VMwareVMNotFound(self): self.configure_vmomi_api( servers=1, has_instance_uuid=True, has_uuid=True) host = factory.make_hostname() username = factory.make_username() password = factory.make_username() with ExpectedException(VMwareVMNotFound): vmware.power_query_vmware(host, username, password, None, None) def test_power_control(self): mock_vmomi_api = self.configure_vmomi_api(servers=100) host = factory.make_hostname() username = factory.make_username() password = factory.make_username() servers = vmware.get_vmware_servers(host, username, password) # Here we're grabbing indexes that are only available on the private # mock object. search_index = \ mock_vmomi_api.SmartConnect.return_value.content.searchIndex bios_uuids = search_index.vms_by_uuid.keys() instance_uuids = search_index.vms_by_instance_uuid.keys() # At least one VM should have a randomly-invalid state (just checking # for coverage; since it's random, we don't assert on it). vm_name = None for uuid in bios_uuids: vmware.power_query_vmware( host, username, password, vm_name, uuid) for uuid in instance_uuids: vmware.power_query_vmware( host, username, password, vm_name, uuid) for vm_name in servers.keys(): vmware.power_query_vmware( host, username, password, vm_name, None) # Turn on a set of VMs, then verify they are on. for uuid in bios_uuids: vmware.power_control_vmware( host, username, password, vm_name, uuid, "on") for uuid in bios_uuids: state = vmware.power_query_vmware( host, username, password, vm_name, uuid) self.expectThat(state, Equals("on")) # Turn off a set of VMs, then verify they are off. for uuid in instance_uuids: vmware.power_control_vmware( host, username, password, vm_name, uuid, "off") for uuid in instance_uuids: state = 
vmware.power_query_vmware( host, username, password, vm_name, uuid) self.expectThat(state, Equals("off")) self.expectThat(servers, Not(Equals({}))) @inlineCallbacks def test_probe_and_enlist(self): num_servers = 100 self.configure_vmomi_api(servers=num_servers) mock_create_node = self.patch(vmware, 'create_node') system_id = factory.make_name('system_id') mock_create_node.side_effect = asynchronous( lambda *args, **kwargs: system_id) mock_commission_node = self.patch(vmware, 'commission_node') host = factory.make_hostname() username = factory.make_username() password = factory.make_username() yield deferToThread( vmware.probe_vmware_and_enlist, factory.make_username(), host, username, password, accept_all=True) self.assertEqual(mock_create_node.call_count, num_servers) self.assertEqual(mock_commission_node.call_count, num_servers) @inlineCallbacks def test_probe_and_enlist_reconfigures_boot_order_if_create_node_ok(self): num_servers = 1 self.configure_vmomi_api(servers=num_servers) mock_create_node = self.patch(vmware, 'create_node') system_id = factory.make_name('system_id') mock_create_node.side_effect = asynchronous( lambda *args, **kwargs: system_id) mock_reconfigure_vm = self.patch(FakeVmomiVM, 'ReconfigVM_Task') # We need to not actually try to commission any nodes... self.patch(vmware, 'commission_node') host = factory.make_hostname() username = factory.make_username() password = factory.make_username() yield deferToThread( vmware.probe_vmware_and_enlist, factory.make_username(), host, username, password, accept_all=True) self.assertEqual(mock_reconfigure_vm.call_count, num_servers) @inlineCallbacks def test_probe_and_enlist_skips_pxe_config_if_create_node_failed(self): num_servers = 1 self.configure_vmomi_api(servers=num_servers) mock_create_node = self.patch(vmware, 'create_node') mock_create_node.side_effect = asynchronous( lambda *args, **kwargs: None) mock_reconfigure_vm = self.patch(FakeVmomiVM, 'ReconfigVM_Task') # We need to not actually try to commission any nodes... self.patch(vmware, 'commission_node') host = factory.make_hostname() username = factory.make_username() password = factory.make_username() yield deferToThread( vmware.probe_vmware_and_enlist, factory.make_username(), host, username, password, accept_all=True) self.assertEqual(mock_reconfigure_vm.call_count, 0) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/__init__.py0000644000000000000000000001514213056115004024714 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Osystem Drivers.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "Node", "OperatingSystem", "OperatingSystemRegistry", "Token", ] from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from collections import namedtuple from provisioningserver.utils.registry import Registry class BOOT_IMAGE_PURPOSE: """The vocabulary of a `BootImage`'s purpose.""" #: Usable for commissioning COMMISSIONING = 'commissioning' #: Usable for install INSTALL = 'install' #: Usable for fast-path install XINSTALL = 'xinstall' #: Usable for diskless boot DISKLESS = 'diskless' # A cluster-side representation of a Node, relevant to the osystem code, # with only minimal fields. Node = namedtuple("Node", ("system_id", "hostname")) # A cluster-side representation of a Token, relevant to the osystem code, # with only minimal fields. 
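# For illustration (hypothetical values), the osystem code only ever sees
# plain tuples such as:
#   Token(consumer_key='ck', token_key='tk', token_secret='ts')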
Token = namedtuple("Token", ("consumer_key", "token_key", "token_secret")) def list_boot_images_for(osystem): """List all boot images for the given osystem.""" # Circular import from provisioningserver.rpc.boot_images import list_boot_images return [ image for image in list_boot_images() if image['osystem'] == osystem.name ] class OperatingSystem: """Skeleton for an operating system.""" __metaclass__ = ABCMeta @abstractproperty def name(self): """Name of the operating system.""" @abstractproperty def title(self): """Title of the operating system.""" @abstractmethod def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" @abstractmethod def get_default_release(self): """Return the default release to use when none is specified. :return: default release to use """ @abstractmethod def get_release_title(self, release): """Return the given release's title. :type release: unicode :return: unicode """ @abstractmethod def get_boot_image_purposes(self, arch, subarch, release, label): """Return a boot image's supported purposes. :param arch: Architecture of boot image. :param subarch: Sub-architecture of boot image. :param release: Release of boot image. :param label: Label of boot image. :return: list of supported purposes """ def format_release_choices(self, releases): """Format the release choices that are presented to the user. :param releases: list of installed boot image releases :return: a Django "choices" list """ choices = [] releases = sorted(releases, reverse=True) for release in releases: title = self.get_release_title(release) if title is not None: choices.append((release, title)) return choices def gen_supported_releases(self): """List operating system's supported releases. This is based on the boot images that the cluster currently has for this operating system. """ for image in list_boot_images_for(self): release = image['release'] if self.is_release_supported(release): yield release def get_supported_releases(self): """Return operating system's supported releases as a set. This is based on the boot images that the cluster currently has for this operating system. :return: set of supported releases """ return set(self.gen_supported_releases()) def get_supported_commissioning_releases(self): """List operating system's supported commissioning releases. Typically this will only return something for Ubuntu, because that is the only operating system on which we commission. :return: list of releases. """ return [] def get_default_commissioning_release(self): """Return operating system's default commissioning release. Typically this will only return something for Ubuntu, because that is the only operating system on which we commission. :return: a release name, or ``None``. """ return None def requires_license_key(self, release): """Return whether the given release requires a license key. :param release: Release :return: True if requires license key, false otherwise. """ return False def validate_license_key(self, release, key): """Validate a license key for a release. This is only called if the release requires a license key. :param release: Release :param key: License key :return: True if valid, false otherwise """ raise NotImplementedError() def compose_preseed(self, preseed_type, node, token, metadata_url): """Compose preseed for the given node. :param preseed_type: Preseed type to compose. :param node: Node preseed needs generating for. :type node: :py:class:`Node` :param token: OAuth token for URL. 
:type token: :py:class:`Token` :param metadata_url: Metadata URL for node. :return: Preseed data for node. :raise NotImplementedError: when the OS doesn't implement a custom preseed. """ raise NotImplementedError() def get_xinstall_parameters(self, arch, subarch, release, label): """Return the xinstall image name and type for this operating system. :param arch: Architecture of boot image. :param subarch: Sub-architecture of boot image. :param release: Release of boot image. :param label: Label of boot image. :return: tuple with name of root image and image type """ return "root-tgz", "tgz" class OperatingSystemRegistry(Registry): """Registry for operating system classes.""" from provisioningserver.drivers.osystem.ubuntu import UbuntuOS from provisioningserver.drivers.osystem.centos import CentOS from provisioningserver.drivers.osystem.custom import CustomOS from provisioningserver.drivers.osystem.windows import WindowsOS from provisioningserver.drivers.osystem.suse import SUSEOS builtin_osystems = [ UbuntuOS(), CentOS(), CustomOS(), WindowsOS(), SUSEOS(), ] for osystem in builtin_osystems: OperatingSystemRegistry.register_item(osystem.name, osystem) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/centos.py0000644000000000000000000000341513056115004024450 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """CentOS Operating System.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "CentOS", ] import re from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystem, ) DISTRO_SERIES_DEFAULT = 'centos65' # Regex matcher that is used to check if the release is supported. # It needs to match the name "centosXY", where "X" is the major version # and "Y" is the minor version. DISTRO_MATCHER = re.compile("centos(?P<major>[0-9])(?P<minor>[0-9])?\Z") class CentOS(OperatingSystem): """CentOS operating system.""" name = "centos" title = "CentOS" def get_boot_image_purposes(self, arch, subarch, release, label): """Gets the purpose of each boot image.""" return [ BOOT_IMAGE_PURPOSE.XINSTALL ] def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" matched = DISTRO_MATCHER.match(release) return matched is not None def get_default_release(self): """Gets the default release to use when a release is not explicit.""" return DISTRO_SERIES_DEFAULT def get_release_title(self, release): """Return the title for the given release.""" matched = DISTRO_MATCHER.match(release) if matched is None: return None matched_dict = matched.groupdict() major = matched_dict['major'] minor = matched_dict['minor'] if minor is None: minor = '0' return "CentOS %s.%s" % (major, minor) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/custom.py0000644000000000000000000000365713056115004024475 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
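# Usage sketch (hypothetical values): the CustomOS driver defined below picks
# the image type by probing the TFTP tree, e.g.
#   CustomOS().get_xinstall_parameters('amd64', 'generic', 'img', 'local')
# returns ('root-dd', 'dd-tgz') if custom/amd64/generic/img/local/root-dd
# exists under the configured tftp_root, and ('root-tgz', 'tgz') otherwise.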
"""Operating System class used for custom images.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "CustomOS", ] import os from provisioningserver.config import ClusterConfiguration from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystem, ) class CustomOS(OperatingSystem): """Custom operating system.""" name = "custom" title = "Custom" def get_boot_image_purposes(self, arch, subarch, release, label): """Gets the purpose of each boot image.""" # Custom images can only be used with XINSTALL. return [BOOT_IMAGE_PURPOSE.XINSTALL] def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" # All release are supported, since the user uploaded it. return True def get_default_release(self): """Gets the default release to use when a release is not explicit.""" # No default for this OS. return "" def get_release_title(self, release): """Return the title for the given release.""" # Return the same name, since the cluster does not know about the # title of the image. The region will fix the title for the UI. return release def get_xinstall_parameters(self, arch, subarch, release, label): """Returns the xinstall image name and type for given image.""" with ClusterConfiguration.open() as config: dd_path = os.path.join( config.tftp_root, 'custom', arch, subarch, release, label, 'root-dd') if os.path.exists(dd_path): return "root-dd", "dd-tgz" else: return "root-tgz", "tgz" maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/suse.py0000644000000000000000000000252013056115004024130 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """SUSE Operating System.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "SUSEOS", ] from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystem, ) DISTRO_SERIES_CHOICES = { 'opensuse13': 'openSUSE 13.1', } DISTRO_SERIES_DEFAULT = 'opensuse13' assert DISTRO_SERIES_DEFAULT in DISTRO_SERIES_CHOICES class SUSEOS(OperatingSystem): """SUSE operating system.""" name = "suse" title = "SUSE" def get_boot_image_purposes(self, arch, subarch, release, label): """Gets the purpose of each boot image.""" return [ BOOT_IMAGE_PURPOSE.XINSTALL ] def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" return release in DISTRO_SERIES_CHOICES def get_default_release(self): """Gets the default release to use when a release is not explicit.""" return DISTRO_SERIES_DEFAULT def get_release_title(self, release): """Return the title for the given release.""" return DISTRO_SERIES_CHOICES.get(release) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/0000755000000000000000000000000013056115004023742 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/ubuntu.py0000644000000000000000000000521613056115004024500 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Ubuntu Operating System.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "UbuntuOS", ] from distro_info import UbuntuDistroInfo from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystem, ) class UbuntuOS(OperatingSystem): """Ubuntu operating system.""" name = "ubuntu" title = "Ubuntu" def get_boot_image_purposes(self, arch, subarch, release, label): """Gets the purpose of each boot image.""" return [ BOOT_IMAGE_PURPOSE.COMMISSIONING, BOOT_IMAGE_PURPOSE.INSTALL, BOOT_IMAGE_PURPOSE.XINSTALL, BOOT_IMAGE_PURPOSE.DISKLESS, ] def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" row = self.get_distro_series_info_row(release) return row is not None def get_lts_release(self): """Return the latest Ubuntu LTS release.""" return UbuntuDistroInfo().lts() def get_default_release(self): """Gets the default release to use when a release is not explicit.""" return self.get_lts_release() def get_supported_commissioning_releases(self): """Gets the supported commissioning releases for Ubuntu. This only exists on Ubuntu, because that is the only operating system that supports commissioning. """ info = UbuntuDistroInfo() unsupported_releases = ['precise', 'xenial'] return [name for name in info.supported() if name not in unsupported_releases if info.is_lts(name)] def get_default_commissioning_release(self): """Gets the default commissioning release for Ubuntu. This only exists on Ubuntu, because that is the only operating system that supports commissioning. """ return 'trusty' def get_distro_series_info_row(self, release): """Returns the distro series row information from python-distro-info. """ info = UbuntuDistroInfo() for row in info._avail(info._date): if row['series'] == release: return row return None def get_release_title(self, release): """Return the title for the given release.""" row = self.get_distro_series_info_row(release) if row is None: return None return UbuntuDistroInfo()._format("fullname", row) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/windows.py0000644000000000000000000000707713056115004024657 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Windows Operating System.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "WindowsOS", ] import os import re from provisioningserver.config import ClusterConfiguration from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystem, ) WINDOWS_CHOICES = { 'win2012': 'Windows "Server 2012"', 'win2012r2': 'Windows "Server 2012 R2"', 'win2012hv': 'Windows "Hyper-V Server 2012"', 'win2012hvr2': 'Windows "Hyper-V Server 2012 R2"', } WINDOWS_DEFAULT = 'win2012hvr2' REQUIRE_LICENSE_KEY = ['win2012', 'win2012r2'] class WindowsOS(OperatingSystem): """Windows operating system.""" name = "windows" title = "Windows" def get_boot_image_purposes(self, arch, subarch, release, label): """Gets the purpose of each boot image. Windows only allows install.""" # Windows can support both xinstall and install, but the correct files # need to be available before it is enabled. This way if only xinstall # is available the node will boot correctly, even if fast-path # installer is not selected. 
purposes = [] with ClusterConfiguration.open() as config: resources = config.tftp_root path = os.path.join( resources, 'windows', arch, subarch, release, label) if os.path.exists(os.path.join(path, 'root-dd')): purposes.append(BOOT_IMAGE_PURPOSE.XINSTALL) if os.path.exists(os.path.join(path, 'pxeboot.0')): purposes.append(BOOT_IMAGE_PURPOSE.INSTALL) return purposes def is_release_supported(self, release): """Return True when the release is supported, False otherwise.""" return release in WINDOWS_CHOICES def get_default_release(self): """Gets the default release to use when a release is not explicit.""" return WINDOWS_DEFAULT def get_release_title(self, release): """Return the title for the given release.""" return WINDOWS_CHOICES.get(release) def requires_license_key(self, release): return release in REQUIRE_LICENSE_KEY def validate_license_key(self, release, key): r = re.compile('^([A-Za-z0-9]{5}-){4}[A-Za-z0-9]{5}$') return r.match(key) def compose_preseed(self, preseed_type, node, token, metadata_url): """Since this method exists in the WindowsOS class, it will be called to provide preseed to all booting Windows nodes. """ # Don't override the curtin preseed. if preseed_type == 'curtin': raise NotImplementedError() # Sets the hostname in the preseed. Using just the hostname # not the FQDN. hostname = node.hostname.split(".", 1)[0] # Windows max hostname length is 15 characters. if len(hostname) > 15: hostname = hostname[:15] credentials = { 'maas_metadata_url': metadata_url, 'maas_oauth_consumer_secret': '', 'maas_oauth_consumer_key': token.consumer_key, 'maas_oauth_token_key': token.token_key, 'maas_oauth_token_secret': token.token_secret, 'hostname': hostname, } return credentials def get_xinstall_parameters(self, arch, subarch, release, label): """Returns the xinstall image name and type for Windows.""" return "root-dd", "dd-tgz" maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/__init__.py0000644000000000000000000000000013056115004026041 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_base.py0000644000000000000000000000555613056115004026300 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
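# These tests drive the OperatingSystem base class through a throwaway
# osystem built with provisioningserver.testing.os.make_osystem (imported
# below), so no real boot images or registered drivers are required.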
"""Tests for `provisioningserver.drivers.osystem`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.drivers import osystem as osystem_module from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystemRegistry, ) from provisioningserver.testing.os import make_osystem from provisioningserver.utils.testing import RegistryFixture class TestOperatingSystem(MAASTestCase): def make_usable_osystem(self): return make_osystem(self, factory.make_name('os'), [ BOOT_IMAGE_PURPOSE.COMMISSIONING, BOOT_IMAGE_PURPOSE.INSTALL, BOOT_IMAGE_PURPOSE.XINSTALL, ]) def make_boot_image_for(self, osystem, release): return dict( osystem=osystem, release=release, ) def configure_list_boot_images_for(self, osystem): images = [ self.make_boot_image_for(osystem.name, release) for release in osystem.get_supported_releases() ] self.patch_autospec( osystem_module, 'list_boot_images_for').return_value = images return images def test_format_release_choices(self): osystem = self.make_usable_osystem() releases = osystem.get_supported_releases() self.assertItemsEqual( [(release, release) for release in releases], osystem.format_release_choices(releases)) def test_format_release_choices_sorts(self): osystem = self.make_usable_osystem() releases = osystem.get_supported_releases() self.assertEqual( [(release, release) for release in sorted(releases, reverse=True)], osystem.format_release_choices(releases)) def test_gen_supported_releases(self): osystem = self.make_usable_osystem() images = self.configure_list_boot_images_for(osystem) releases = {image['release'] for image in images} self.assertItemsEqual( releases, osystem.gen_supported_releases()) class TestOperatingSystemRegistry(MAASTestCase): def setUp(self): super(TestOperatingSystemRegistry, self).setUp() # Ensure the global registry is empty for each test run. self.useFixture(RegistryFixture()) def test_operating_system_registry(self): self.assertItemsEqual([], OperatingSystemRegistry) OperatingSystemRegistry.register_item("resource", sentinel.resource) self.assertIn( sentinel.resource, (item for name, item in OperatingSystemRegistry)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_centos.py0000644000000000000000000000463213056115004026653 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the CentOS module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import product from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.drivers.osystem.centos import ( BOOT_IMAGE_PURPOSE, CentOS, DISTRO_SERIES_DEFAULT, ) from testtools.matchers import Equals class TestCentOS(MAASTestCase): def test_get_boot_image_purposes(self): osystem = CentOS() archs = [factory.make_name('arch') for _ in range(2)] subarchs = [factory.make_name('subarch') for _ in range(2)] releases = [factory.make_name('release') for _ in range(2)] labels = [factory.make_name('label') for _ in range(2)] for arch, subarch, release, label in product( archs, subarchs, releases, labels): expected = osystem.get_boot_image_purposes( arch, subarchs, release, label) self.assertIsInstance(expected, list) self.assertEqual(expected, [ BOOT_IMAGE_PURPOSE.XINSTALL, ]) def test_is_release_supported(self): name_supported = { "centos6": True, "centos65": True, "centos7": True, "centos71": True, "cent65": False, "cent": False, "centos711": False, } osystem = CentOS() for name, supported in name_supported.items(): self.expectThat( osystem.is_release_supported(name), Equals(supported)) def test_get_default_release(self): osystem = CentOS() expected = osystem.get_default_release() self.assertEqual(expected, DISTRO_SERIES_DEFAULT) def test_get_release_title(self): name_titles = { "centos6": "CentOS 6.0", "centos65": "CentOS 6.5", "centos7": "CentOS 7.0", "centos71": "CentOS 7.1", "cent65": None, "cent": None, "centos711": None, } osystem = CentOS() for name, title in name_titles.items(): self.expectThat( osystem.get_release_title(name), Equals(title)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_custom.py0000644000000000000000000000644713056115004026700 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the CentOS module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import product import os from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.config import ClusterConfiguration from provisioningserver.drivers.osystem.custom import ( BOOT_IMAGE_PURPOSE, CustomOS, ) from provisioningserver.testing.config import ClusterConfigurationFixture class TestCustomOS(MAASTestCase): def make_resource_path(self, filename): self.useFixture(ClusterConfigurationFixture()) tmpdir = self.make_dir() arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') current_dir = os.path.join(tmpdir, 'current') + '/' dirpath = os.path.join( current_dir, 'custom', arch, subarch, release, label) os.makedirs(dirpath) factory.make_file(dirpath, filename) with ClusterConfiguration.open_for_update() as config: config.tftp_root = current_dir return arch, subarch, release, label def test_get_boot_image_purposes(self): osystem = CustomOS() archs = [factory.make_name('arch') for _ in range(2)] subarchs = [factory.make_name('subarch') for _ in range(2)] releases = [factory.make_name('release') for _ in range(2)] labels = [factory.make_name('label') for _ in range(2)] for arch, subarch, release, label in product( archs, subarchs, releases, labels): expected = osystem.get_boot_image_purposes( arch, subarchs, release, label) self.assertIsInstance(expected, list) self.assertEqual(expected, [ BOOT_IMAGE_PURPOSE.XINSTALL, ]) def test_is_release_supported(self): osystem = CustomOS() releases = [factory.make_name('release') for _ in range(3)] supported = [ osystem.is_release_supported(release) for release in releases ] self.assertEqual([True, True, True], supported) def test_get_default_release(self): osystem = CustomOS() self.assertEqual("", osystem.get_default_release()) def test_get_release_title(self): osystem = CustomOS() release = factory.make_name('release') self.assertEqual(release, osystem.get_release_title(release)) def test_get_xinstall_parameters_returns_root_tgz_tgz(self): osystem = CustomOS() arch, subarch, release, label = self.make_resource_path('root-tgz') self.assertItemsEqual( ('root-tgz', 'tgz'), osystem.get_xinstall_parameters(arch, subarch, release, label)) def test_get_xinstall_parameters_returns_root_dd_dd_tgz(self): osystem = CustomOS() arch, subarch, release, label = self.make_resource_path('root-dd') self.assertItemsEqual( ('root-dd', 'dd-tgz'), osystem.get_xinstall_parameters(arch, subarch, release, label)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_suse.py0000644000000000000000000000333213056115004026333 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the SUSEOS module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import product import random from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.drivers.osystem.suse import ( BOOT_IMAGE_PURPOSE, DISTRO_SERIES_CHOICES, DISTRO_SERIES_DEFAULT, SUSEOS, ) class TestSUSEOS(MAASTestCase): def test_get_boot_image_purposes(self): osystem = SUSEOS() archs = [factory.make_name('arch') for _ in range(2)] subarchs = [factory.make_name('subarch') for _ in range(2)] releases = [factory.make_name('release') for _ in range(2)] labels = [factory.make_name('label') for _ in range(2)] for arch, subarch, release, label in product( archs, subarchs, releases, labels): expected = osystem.get_boot_image_purposes( arch, subarchs, release, label) self.assertIsInstance(expected, list) self.assertEqual(expected, [ BOOT_IMAGE_PURPOSE.XINSTALL, ]) def test_get_default_release(self): osystem = SUSEOS() expected = osystem.get_default_release() self.assertEqual(expected, DISTRO_SERIES_DEFAULT) def test_get_release_title(self): osystem = SUSEOS() release = random.choice(DISTRO_SERIES_CHOICES.keys()) self.assertEqual( DISTRO_SERIES_CHOICES[release], osystem.get_release_title(release)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_ubuntu.py0000644000000000000000000000564113056115004026703 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the UbuntuOS module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import product import random from distro_info import UbuntuDistroInfo from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.drivers.osystem import BOOT_IMAGE_PURPOSE from provisioningserver.drivers.osystem.ubuntu import UbuntuOS class TestUbuntuOS(MAASTestCase): def get_lts_release(self): return UbuntuDistroInfo().lts() def get_release_title(self, release): info = UbuntuDistroInfo() for row in info._avail(info._date): if row['series'] == release: return info._format("fullname", row) return None def get_supported_lts_releases(self): info = UbuntuDistroInfo() unsupported_releases = ['precise', 'xenial'] return [name for name in info.supported() if name not in unsupported_releases if info.is_lts(name)] def test_get_boot_image_purposes(self): osystem = UbuntuOS() archs = [factory.make_name('arch') for _ in range(2)] subarchs = [factory.make_name('subarch') for _ in range(2)] releases = [factory.make_name('release') for _ in range(2)] labels = [factory.make_name('label') for _ in range(2)] for arch, subarch, release, label in product( archs, subarchs, releases, labels): expected = osystem.get_boot_image_purposes( arch, subarchs, release, label) self.assertIsInstance(expected, list) self.assertEqual(expected, [ BOOT_IMAGE_PURPOSE.COMMISSIONING, BOOT_IMAGE_PURPOSE.INSTALL, BOOT_IMAGE_PURPOSE.XINSTALL, BOOT_IMAGE_PURPOSE.DISKLESS, ]) def test_get_default_release(self): osystem = UbuntuOS() expected = osystem.get_default_release() self.assertEqual(expected, self.get_lts_release()) def test_get_supported_commissioning_releases(self): osystem = UbuntuOS() expected = osystem.get_supported_commissioning_releases() self.assertIsInstance(expected, list) 
self.assertEqual(expected, self.get_supported_lts_releases()) def test_default_commissioning_release(self): osystem = UbuntuOS() expected = osystem.get_default_commissioning_release() self.assertEqual(expected, 'trusty') def test_get_release_title(self): osystem = UbuntuOS() info = UbuntuDistroInfo() release = random.choice(info.all) self.assertEqual( osystem.get_release_title(release), self.get_release_title(release)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/osystem/tests/test_windows.py0000644000000000000000000001767313056115004027063 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the WindowsOS module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os import random from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.drivers.osystem import ( Node, Token, ) from provisioningserver.drivers.osystem.windows import ( BOOT_IMAGE_PURPOSE, REQUIRE_LICENSE_KEY, WINDOWS_CHOICES, WINDOWS_DEFAULT, WindowsOS, ) from provisioningserver.testing.config import ClusterConfigurationFixture class TestWindowsOS(MAASTestCase): def make_resource_path(self, files=[]): tmpdir = self.make_dir() arch = factory.make_name('arch') subarch = factory.make_name('subarch') release = factory.make_name('release') label = factory.make_name('label') dirpath = os.path.join( tmpdir, 'windows', arch, subarch, release, label) os.makedirs(dirpath) for fname in files: factory.make_file(dirpath, fname) self.useFixture(ClusterConfigurationFixture(tftp_root=tmpdir)) return arch, subarch, release, label def test_get_boot_image_purposes_neither(self): osystem = WindowsOS() arch, subarch, release, label = self.make_resource_path() self.assertItemsEqual( [], osystem.get_boot_image_purposes(arch, subarch, release, label)) def test_get_boot_image_purposes_both(self): osystem = WindowsOS() arch, subarch, release, label = self.make_resource_path( files=['root-dd', 'pxeboot.0']) self.assertItemsEqual( [BOOT_IMAGE_PURPOSE.XINSTALL, BOOT_IMAGE_PURPOSE.INSTALL], osystem.get_boot_image_purposes(arch, subarch, release, label)) def test_get_boot_image_purposes_xinstall_only(self): osystem = WindowsOS() arch, subarch, release, label = self.make_resource_path( files=['root-dd']) self.assertItemsEqual( [BOOT_IMAGE_PURPOSE.XINSTALL], osystem.get_boot_image_purposes(arch, subarch, release, label)) def test_get_boot_image_purposes_install_only(self): osystem = WindowsOS() arch, subarch, release, label = self.make_resource_path( files=['pxeboot.0']) self.assertItemsEqual( [BOOT_IMAGE_PURPOSE.INSTALL], osystem.get_boot_image_purposes(arch, subarch, release, label)) def test_get_default_release(self): osystem = WindowsOS() expected = osystem.get_default_release() self.assertEqual(expected, WINDOWS_DEFAULT) def test_get_release_title(self): osystem = WindowsOS() release = random.choice(WINDOWS_CHOICES.keys()) self.assertEqual( WINDOWS_CHOICES[release], osystem.get_release_title(release)) def test_requires_license_key_True(self): osystem = WindowsOS() for release in REQUIRE_LICENSE_KEY: self.assertTrue(osystem.requires_license_key(release)) def test_requires_license_key_False(self): osystem = WindowsOS() not_required = set( WINDOWS_CHOICES.keys()).difference(REQUIRE_LICENSE_KEY) for release in not_required: self.assertFalse(osystem.requires_license_key(release)) def 
test_validate_license_key(self): osystem = WindowsOS() parts = [factory.make_string(size=5) for _ in range(5)] key = '-'.join(parts) self.assertTrue( osystem.validate_license_key(REQUIRE_LICENSE_KEY[0], key)) def test_validate_license_key_invalid(self): osystem = WindowsOS() keys = [factory.make_string() for _ in range(3)] for key in keys: self.assertFalse( osystem.validate_license_key(REQUIRE_LICENSE_KEY[0], key)) def make_node(self, hostname=None): if hostname is None: machine = factory.make_name('hostname') dns = factory.make_name('dns') hostname = '%s.%s' % (machine, dns) return Node( system_id=factory.make_name("system_id"), hostname=hostname, ) def make_token(self, consumer_key=None, token_key=None, token_secret=None): if consumer_key is None: consumer_key = factory.make_name('consumer_key') if token_key is None: token_key = factory.make_name('token_key') if token_secret is None: token_secret = factory.make_name('secret_key') return Token( consumer_key=consumer_key, token_key=token_key, token_secret=token_secret, ) def test_compose_preseed_not_implemented_for_curtin(self): osystem = WindowsOS() node = self.make_node() token = self.make_token() url = factory.make_name('url') self.assertRaises( NotImplementedError, osystem.compose_preseed, 'curtin', node, token, url) def test_compose_preseed_has_required_keys(self): osystem = WindowsOS() node = self.make_node() token = self.make_token() url = factory.make_name('url') required_keys = [ 'maas_metadata_url', 'maas_oauth_consumer_secret', 'maas_oauth_consumer_key', 'maas_oauth_token_key', 'maas_oauth_token_secret', 'hostname', ] preseed = osystem.compose_preseed('default', node, token, url) self.assertItemsEqual(required_keys, preseed.keys()) def test_compose_preseed_uses_only_hostname(self): osystem = WindowsOS() machine = factory.make_name('hostname') dns = factory.make_name('dns') hostname = '%s.%s' % (machine, dns) node = self.make_node(hostname=hostname) token = self.make_token() url = factory.make_name('url') preseed = osystem.compose_preseed('default', node, token, url) self.assertEqual(machine, preseed['hostname']) def test_compose_preseed_truncates_hostname(self): osystem = WindowsOS() machine = factory.make_name('hostname', size=20) dns = factory.make_name('dns') hostname = '%s.%s' % (machine, dns) node = self.make_node(hostname=hostname) token = self.make_token() url = factory.make_name('url') preseed = osystem.compose_preseed('default', node, token, url) self.assertEqual(15, len(preseed['hostname'])) def test_compose_preseed_includes_oauth(self): osystem = WindowsOS() node = self.make_node() consumer_key = factory.make_name('consumer_key') token_key = factory.make_name('token_key') token_secret = factory.make_name('secret_key') token = self.make_token( consumer_key=consumer_key, token_key=token_key, token_secret=token_secret) url = factory.make_name('url') preseed = osystem.compose_preseed('default', node, token, url) self.assertEqual('', preseed['maas_oauth_consumer_secret']) self.assertEqual(consumer_key, preseed['maas_oauth_consumer_key']) self.assertEqual(token_key, preseed['maas_oauth_token_key']) self.assertEqual(token_secret, preseed['maas_oauth_token_secret']) def test_compose_preseed_includes_metadata_url(self): osystem = WindowsOS() node = self.make_node() token = self.make_token() url = factory.make_name('url') preseed = osystem.compose_preseed('default', node, token, url) self.assertEqual(url, preseed['maas_metadata_url']) def test_get_xinstall_parameters(self): osystem = WindowsOS() image, image_type = 
osystem.get_xinstall_parameters( None, None, None, None) self.assertEqual('root-dd', image) self.assertEqual('dd-tgz', image_type) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/__init__.py0000644000000000000000000002664113056115004024353 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Base power driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "POWER_QUERY_TIMEOUT", "PowerActionError", "PowerAuthError", "PowerConnError", "PowerDriver", "PowerDriverBase", "PowerError", "PowerFatalError", "PowerSettingError", "PowerToolError", ] from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from datetime import timedelta import sys from jsonschema import validate from provisioningserver.drivers import ( JSON_SETTING_SCHEMA, validate_settings, ) from provisioningserver.utils import pause from provisioningserver.utils.registry import Registry from twisted.internet import reactor from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.internet.threads import deferToThread # A policy used when waiting between retries of power changes. DEFAULT_WAITING_POLICY = (1, 2, 2, 4, 6, 8, 12) JSON_POWER_DRIVERS_SCHEMA = { 'title': "Power drivers parameters set", 'type': 'array', 'items': JSON_SETTING_SCHEMA, } # Timeout for the power query action. We might be holding up a thread for that # long but some BMCs (notably seamicro) can take a long time to respond to # a power query request. # This should be configurable per-BMC. POWER_QUERY_TIMEOUT = timedelta(seconds=45).total_seconds() class PowerError(Exception): """Base error for all power driver failure commands.""" class PowerFatalError(PowerError): """Error that is raised when the power action should not continue to retry at all. This exception will cause the power action to fail instantly, without retrying. """ class PowerSettingError(PowerFatalError): """Error that is raised when the power type is missing argument that is required to control the BMC. This exception will cause the power action to fail instantly, without retrying. """ class PowerToolError(PowerFatalError): """Error that is raised when the power tool is missing completely for use. This exception will cause the power action to fail instantly, without retrying. """ class PowerAuthError(PowerError): """Error raised when power driver fails to authenticate to BMC.""" class PowerConnError(PowerError): """Error raised when power driver fails to communicate to BMC.""" class PowerActionError(PowerError): """Error when actually performing an action on the BMC, like `on` or `off`.""" class PowerDriverBase: """Base driver for a power driver.""" __metaclass__ = ABCMeta def __init__(self): super(PowerDriverBase, self).__init__() validate_settings(self.get_schema()) @abstractproperty def name(self): """Name of the power driver.""" @abstractproperty def description(self): """Description of the power driver.""" @abstractproperty def settings(self): """List of settings for the driver. Each setting in this list will be different per user. They are passed to the `on`, `off`, and `query` using the context. It is up to the driver to read these options before performing the operation. """ @abstractmethod def on(self, system_id, context): """Perform the power on action for `system_id`. :param system_id: `Node.system_id` :param context: Power settings for the node. 
""" @abstractmethod def off(self, system_id, context): """Perform the power off action for `system_id`. :param system_id: `Node.system_id` :param context: Power settings for the node. """ @abstractmethod def query(self, system_id, context): """Perform the query action for `system_id`. :param system_id: `Node.system_id` :param context: Power settings for the node. :return: status of power on BMC. `on` or `off`. :raises PowerError: states unable to get status from BMC. It is up to this method to report the actual issue to the Region. The calling function should ignore this error, and continue on. """ def get_schema(self): """Returns the JSON schema for the driver.""" return dict( name=self.name, description=self.description, fields=self.settings) def get_error_message(err): """Returns the proper error message based on error.""" if isinstance(err, PowerAuthError): return "Could not authenticate to node's BMC: %s" % err elif isinstance(err, PowerConnError): return "Could not contact node's BMC: %s" % err elif isinstance(err, PowerSettingError): return "Missing or invalid power setting: %s" % err elif isinstance(err, PowerToolError): return "Missing power tool: %s" % err elif isinstance(err, PowerActionError): return "Failed to complete power action: %s" % err else: return "Failed talking to node's BMC for an unknown reason." class PowerDriver(PowerDriverBase): """Default power driver logic.""" wait_time = DEFAULT_WAITING_POLICY def __init__(self, clock=reactor): self.clock = reactor @abstractmethod def detect_missing_packages(self): """Implement this method for the actual implementation of the check for the driver's missing support packages. """ @abstractmethod def power_on(self, system_id, context): """Implement this method for the actual implementation of the power on command. """ @abstractmethod def power_off(self, system_id, context): """Implement this method for the actual implementation of the power off command. """ @abstractmethod def power_query(self, system_id, context): """Implement this method for the actual implementation of the power query command.""" def on(self, system_id, context): """Performs the power on action for `system_id`. Do not override `on` method unless you want to provide custom logic on how retries and error detection is handled. Override `power_on` for just the power on action, and `on` will handle the retrying. """ return self.perform_power(self.power_on, "on", system_id, context) def off(self, system_id, context): """Performs the power off action for `system_id`. Do not override `off` method unless you want to provide custom logic on how retries and error detection is handled. Override `power_off` for just the power off action, and `off` will handle the retrying and error reporting. """ return self.perform_power(self.power_off, "off", system_id, context) @inlineCallbacks def query(self, system_id, context): """Performs the power query action for `system_id`.""" exc_info = None, None, None for waiting_time in self.wait_time: try: state = yield deferToThread( self.power_query, system_id, context) except PowerFatalError: raise # Don't retry. except PowerError: exc_info = sys.exc_info() # Wait before retrying. yield pause(waiting_time, self.clock) else: returnValue(state) else: raise exc_info[0], exc_info[1], exc_info[2] @inlineCallbacks def perform_power(self, power_func, state_desired, system_id, context): """Provides the logic to perform the power actions. :param power_func: Function used to change the power state of the node. 
Typically this will be `self.power_on` or `self.power_off`. :param state_desired: The desired state for this node to be in, typically "on" or "off". :param system_id: The node's system ID. """ state = "unknown" exc_info = None, None, None for waiting_time in self.wait_time: # Try to change state. try: yield deferToThread( power_func, system_id, context) except PowerFatalError: raise # Don't retry. except PowerError: exc_info = sys.exc_info() # Wait before retrying. yield pause(waiting_time, self.clock) else: # Wait before checking state. yield pause(waiting_time, self.clock) # Try to get power state. try: state = yield deferToThread( self.power_query, system_id, context) except PowerFatalError: raise # Don't retry. except PowerError: exc_info = sys.exc_info() else: # If state is now the correct state, done. if state == state_desired: return if exc_info == (None, None, None): # No error found, so communication to the BMC is good, state must # have not changed in the elapsed time. That is the only reason we # should make it this far. raise PowerError( "Failed to power %s. BMC never transitioned from %s to %s." % (system_id, state, state_desired)) else: # Report the last error. raise exc_info[0], exc_info[1], exc_info[2] class PowerDriverRegistry(Registry): """Registry for power drivers.""" @classmethod def get_schema(cls): """Returns the full schema for the registry.""" schemas = [drivers.get_schema() for _, drivers in cls] validate(schemas, JSON_POWER_DRIVERS_SCHEMA) return schemas from provisioningserver.drivers.power.amt import AMTPowerDriver from provisioningserver.drivers.power.apc import APCPowerDriver from provisioningserver.drivers.power.dli import DLIPowerDriver from provisioningserver.drivers.power.ether_wake import EtherWakePowerDriver from provisioningserver.drivers.power.fence_cdu import FenceCDUPowerDriver from provisioningserver.drivers.power.hmc import HMCPowerDriver from provisioningserver.drivers.power.ipmi import IPMIPowerDriver from provisioningserver.drivers.power.msftocs import MicrosoftOCSPowerDriver from provisioningserver.drivers.power.moonshot import MoonshotIPMIPowerDriver from provisioningserver.drivers.power.mscm import MSCMPowerDriver from provisioningserver.drivers.power.seamicro import SeaMicroPowerDriver from provisioningserver.drivers.power.ucsm import UCSMPowerDriver from provisioningserver.drivers.power.virsh import VirshPowerDriver from provisioningserver.drivers.power.vmware import VMwarePowerDriver registered_power_drivers = [ APCPowerDriver(), DLIPowerDriver(), HMCPowerDriver(), IPMIPowerDriver(), MicrosoftOCSPowerDriver(), MoonshotIPMIPowerDriver(), MSCMPowerDriver(), SeaMicroPowerDriver(), UCSMPowerDriver(), VirshPowerDriver(), VMwarePowerDriver(), ] for driver in registered_power_drivers: PowerDriverRegistry.register_item(driver.name, driver) unregistered_power_drivers = [ AMTPowerDriver(), EtherWakePowerDriver(), FenceCDUPowerDriver(), ] power_drivers_by_name = { d.name: d for d in registered_power_drivers + unregistered_power_drivers } maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/amt.py0000644000000000000000000000232213056115004023363 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
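# Illustration (not part of MAAS): the PowerDriver base class above defines
# the whole contract a driver must honour, so a minimal hypothetical driver
# reduces to the sketch below. The 'example' name and the method bodies are
# assumptions for demonstration; real drivers fill them in with BMC calls.
#
#     from provisioningserver.drivers.power import (
#         PowerDriver,
#         PowerDriverRegistry,
#     )
#
#     class ExamplePowerDriver(PowerDriver):
#
#         name = 'example'
#         description = "Example Power Driver."
#         settings = []
#
#         def detect_missing_packages(self):
#             return []  # no external command-line tools needed
#
#         def power_on(self, system_id, context):
#             pass  # issue the BMC power-on request here
#
#         def power_off(self, system_id, context):
#             pass  # issue the BMC power-off request here
#
#         def power_query(self, system_id, context):
#             return 'off'  # ask the BMC; must return 'on' or 'off'
#
#     PowerDriverRegistry.register_item('example', ExamplePowerDriver())
#
# Subclasses normally override only the power_* methods; the inherited
# on/off/query then retry per DEFAULT_WAITING_POLICY and verify that the
# node reached the desired state, as the drivers registered above do.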
"""Template-based AMT Power Driver.""" str = None __metaclass__ = type __all__ = [] from provisioningserver.drivers.power import PowerDriver from provisioningserver.utils import shell REQUIRED_PACKAGES = [["amttool", "amtterm"], ["wsman", "wsmancli"]] class AMTPowerDriver(PowerDriver): name = 'amt' description = "AMT Power Driver." settings = [] def detect_missing_packages(self): missing_packages = [] # when this becomes a non-templated, registered power driver, we can # detect what version of AMT is on the Node to find out if wsman is # required (see amt.template). For now, we assume wsman is required for binary, package in REQUIRED_PACKAGES: if not shell.has_command_available(binary): missing_packages.append(package) return missing_packages def power_on(self, system_id, context): raise NotImplementedError def power_off(self, system_id, context): raise NotImplementedError def power_query(self, system_id, context): raise NotImplementedError maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/apc.py0000644000000000000000000000334213056115004023350 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """American Power Conversion (APC) Power Driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from provisioningserver.drivers.hardware.apc import ( power_control_apc, power_state_apc, required_package, ) from provisioningserver.drivers.power import PowerDriver from provisioningserver.utils import shell def extract_apc_parameters(context): ip = context.get('power_address') outlet = context.get('node_outlet') power_on_delay = context.get('power_on_delay') return ip, outlet, power_on_delay class APCPowerDriver(PowerDriver): name = 'apc' description = "APC Power Driver." settings = [] def detect_missing_packages(self): binary, package = required_package() if not shell.has_command_available(binary): return [package] return [] def power_on(self, system_id, context): """Power on Apc outlet.""" power_change = 'on' ip, outlet, power_on_delay = extract_apc_parameters(context) power_control_apc( ip, outlet, power_change, power_on_delay) def power_off(self, system_id, context): """Power off APC outlet.""" power_change = 'off' ip, outlet, power_on_delay = extract_apc_parameters(context) power_control_apc( ip, outlet, power_change, power_on_delay) def power_query(self, system_id, context): """Power query APC outlet.""" ip, outlet, _ = extract_apc_parameters(context) return power_state_apc(ip, outlet) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/dli.py0000644000000000000000000001012413056115004023351 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """DLI Power Driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import re from time import sleep from provisioningserver.drivers.power import ( PowerDriver, PowerError, PowerFatalError, ) from provisioningserver.utils import shell from provisioningserver.utils.shell import ( call_and_check, ExternalProcessError, ) class DLIPowerDriver(PowerDriver): name = 'dli' description = "DLI Power Driver." 
settings = [] def detect_missing_packages(self): if not shell.has_command_available('wget'): return ['wget'] return [] def _set_outlet_state( self, power_change, outlet_id=None, power_user=None, power_pass=None, power_address=None, **extra): """Power DLI outlet ON/OFF.""" try: url = 'http://%s:%s@%s/outlet?%s=%s' % ( power_user, power_pass, power_address, outlet_id, power_change) # --auth-no-challenge: send Basic HTTP authentication # information without first waiting for the server's challenge. call_and_check([ 'wget', '--auth-no-challenge', '-O', '/dev/null', url]) except ExternalProcessError as e: raise PowerFatalError( "Failed to power %s outlet %s: %s" % ( power_change, outlet_id, e.output_as_unicode)) def _query_outlet_state( self, outlet_id=None, power_user=None, power_pass=None, power_address=None, **extra): """Query DLI outlet power state. Sample snippet of query output from DLI: ... ... """ try: url = 'http://%s:%s@%s/index.htm' % ( power_user, power_pass, power_address) # --auth-no-challenge: send Basic HTTP authentication # information without first waiting for the server's challenge. wget_output = call_and_check([ 'wget', '--auth-no-challenge', '-qO-', url]) match = re.search("<!-- state=([0-9a-fA-F]+) -->", wget_output) if match is None: raise PowerError( "Unable to extract power state for outlet %s from wget output: %s" % (outlet_id, wget_output)) else: state = match.group(1) # state is a hexadecimal bitmap of the outlet states: bit 0 is # outlet 1, bit 1 is outlet 2, and so on. if (int(state, 16) & (1 << (int(outlet_id) - 1))) > 0: return 'on' else: return 'off' except ExternalProcessError as e: raise PowerFatalError( "Failed to power query outlet %s: %s" % ( outlet_id, e.output_as_unicode)) def power_on(self, system_id, context): """Power on DLI outlet.""" # If the outlet is already on, power it off first so the node is # power-cycled rather than left untouched. if self._query_outlet_state(**context) == 'on': self._set_outlet_state('OFF', **context) sleep(1) if self._query_outlet_state(**context) != 'off': raise PowerError( "Unable to power off outlet %s that is already on." % context['outlet_id']) self._set_outlet_state('ON', **context) def power_off(self, system_id, context): """Power off DLI outlet.""" self._set_outlet_state('OFF', **context) def power_query(self, system_id, context): """Power query DLI outlet.""" return self._query_outlet_state(**context) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_dli.py0000644000000000000000000...0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.power.dli`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import choice from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import ( call, sentinel, ) from provisioningserver.drivers.power import ( dli as dli_module, PowerError, PowerFatalError, ) from provisioningserver.utils.shell import ( ExternalProcessError, has_command_available, ) from testtools.matchers import Equals DLI_QUERY_OUTPUT = """ ... <!-- state=%s --> ... """ class TestDLIPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = dli_module.DLIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(["wget"], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = dli_module.DLIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def test__set_outlet_state_calls_wget(self): driver = dli_module.DLIPowerDriver() power_change = factory.make_name('power_change') outlet_id = choice(['1', '2', '3', '4', '5', '6', '7', '8']) power_user = factory.make_name('power_user') power_pass = factory.make_name('power_pass') power_address = factory.make_name('power_address') url = 'http://%s:%s@%s/outlet?%s=%s' % ( power_user, power_pass, power_address, outlet_id, power_change) call_and_check_mock = self.patch(dli_module, 'call_and_check') driver._set_outlet_state( power_change, outlet_id, power_user, power_pass, power_address) self.assertThat( call_and_check_mock, MockCalledOnceWith( ['wget', '--auth-no-challenge', '-O', '/dev/null', url])) def test__set_outlet_state_crashes_when_wget_exits_nonzero(self): driver = dli_module.DLIPowerDriver() call_and_check_mock = self.patch(dli_module, 'call_and_check') call_and_check_mock.side_effect = ( ExternalProcessError(1, "dli something")) self.assertRaises( PowerFatalError, driver._set_outlet_state, sentinel.power_change, sentinel.outlet_id, sentinel.power_use, sentinel.power_pass, sentinel.power_address) def test__query_outlet_state_queries_on(self): driver = dli_module.DLIPowerDriver() outlet_id = choice(['1', '2', '3', '4', '5', '6', '7', '8']) power_user = factory.make_name('power_user') power_pass = factory.make_name('power_pass') power_address = factory.make_name('power_address') url = 'http://%s:%s@%s/index.htm' % ( power_user, power_pass, power_address) call_and_check_mock = self.patch(dli_module, 'call_and_check') call_and_check_mock.return_value = DLI_QUERY_OUTPUT % 'ff' result = driver._query_outlet_state( outlet_id, power_user, power_pass, power_address) self.expectThat( call_and_check_mock, MockCalledOnceWith( ['wget', '--auth-no-challenge', '-qO-', url])) self.expectThat(result, Equals('on')) def 
test__query_outlet_state_queries_off(self): driver = dli_module.DLIPowerDriver() outlet_id = choice(['1', '2', '3', '4', '5', '6', '7', '8']) power_user = factory.make_name('power_user') power_pass = factory.make_name('power_pass') power_address = factory.make_name('power_address') url = 'http://%s:%s@%s/index.htm' % ( power_user, power_pass, power_address) call_and_check_mock = self.patch(dli_module, 'call_and_check') call_and_check_mock.return_value = DLI_QUERY_OUTPUT % '00' result = driver._query_outlet_state( outlet_id, power_user, power_pass, power_address) self.expectThat( call_and_check_mock, MockCalledOnceWith( ['wget', '--auth-no-challenge', '-qO-', url])) self.expectThat(result, Equals('off')) def test__query_outlet_state_crashes_when_state_not_found(self): driver = dli_module.DLIPowerDriver() call_and_check_mock = self.patch(dli_module, 'call_and_check') call_and_check_mock.return_value = "Rubbish" self.assertRaises( PowerError, driver._query_outlet_state, sentinel.outlet_id, sentinel.power_user, sentinel.power_pass, sentinel.power_address) def test__query_outlet_state_crashes_when_wget_exits_nonzero(self): driver = dli_module.DLIPowerDriver() call_and_check_mock = self.patch(dli_module, 'call_and_check') call_and_check_mock.side_effect = ( ExternalProcessError(1, "dli something")) self.assertRaises( PowerFatalError, driver._query_outlet_state, sentinel.outlet_id, sentinel.power_user, sentinel.power_pass, sentinel.power_address) def test_power_on(self): driver = dli_module.DLIPowerDriver() system_id = factory.make_name('system_id') context = {'context': factory.make_name('context')} _query_outlet_state_mock = self.patch(driver, '_query_outlet_state') _query_outlet_state_mock.side_effect = ('on', 'off') _set_outlet_state_mock = self.patch(driver, '_set_outlet_state') self.patch(dli_module, 'sleep') driver.power_on(system_id, context) self.expectThat( _query_outlet_state_mock, MockCallsMatch( call(**context), call(**context))) self.expectThat( _set_outlet_state_mock, MockCallsMatch( call('OFF', **context), call('ON', **context))) def test_power_on_raises_power_error(self): driver = dli_module.DLIPowerDriver() system_id = factory.make_name('system_id') context = {'outlet_id': factory.make_name('outlet_id')} _query_outlet_state_mock = self.patch(driver, '_query_outlet_state') _query_outlet_state_mock.side_effect = ('on', 'not-off') _set_outlet_state_mock = self.patch(driver, '_set_outlet_state') self.patch(dli_module, 'sleep') self.assertRaises(PowerError, driver.power_on, system_id, context) self.expectThat( _query_outlet_state_mock, MockCallsMatch( call(**context), call(**context))) self.expectThat( _set_outlet_state_mock, MockCalledOnceWith('OFF', **context)) def test_power_off(self): driver = dli_module.DLIPowerDriver() system_id = factory.make_name('system_id') context = {'context': factory.make_name('context')} _set_outlet_state_mock = self.patch(driver, '_set_outlet_state') driver.power_off(system_id, context) self.assertThat( _set_outlet_state_mock, MockCalledOnceWith('OFF', **context)) def test_power_query(self): driver = dli_module.DLIPowerDriver() system_id = factory.make_name('system_id') context = {'context': factory.make_name('context')} _query_outlet_state_mock = self.patch(driver, '_query_outlet_state') driver.power_query(system_id, context) self.assertThat( _query_outlet_state_mock, MockCalledOnceWith(**context)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_ether_wake.py0000644000000000000000000000324613056115004027127 0ustar 
00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.power.ether_wake`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import ether_wake as ether_wake_module from provisioningserver.utils.shell import has_command_available class TestEtherWakePowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = ether_wake_module.EtherWakePowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(["wakeonlan or etherwake"], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = ether_wake_module.EtherWakePowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def test_power_on(self): driver = ether_wake_module.EtherWakePowerDriver() self.assertRaises( NotImplementedError, driver.power_on, "fake_id", {}) def test_power_off(self): driver = ether_wake_module.EtherWakePowerDriver() self.assertRaises( NotImplementedError, driver.power_off, "fake_id", {}) def test_power_query(self): driver = ether_wake_module.EtherWakePowerDriver() self.assertRaises( NotImplementedError, driver.power_query, "fake_id", {}) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_fence_cdu.py0000644000000000000000000000321613056115004026721 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.power.fence_cdu`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import fence_cdu as fence_cdu_module from provisioningserver.utils.shell import has_command_available class TestFenceCDUPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = fence_cdu_module.FenceCDUPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(["fence-agents"], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = fence_cdu_module.FenceCDUPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def test_power_on(self): driver = fence_cdu_module.FenceCDUPowerDriver() self.assertRaises( NotImplementedError, driver.power_on, "fake_id", {}) def test_power_off(self): driver = fence_cdu_module.FenceCDUPowerDriver() self.assertRaises( NotImplementedError, driver.power_off, "fake_id", {}) def test_power_query(self): driver = fence_cdu_module.FenceCDUPowerDriver() self.assertRaises( NotImplementedError, driver.power_query, "fake_id", {}) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_hmc.py0000644000000000000000000000670513056115004025563 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
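# Note on the two test modules above: ether_wake and fence_cdu, like the AMT
# driver earlier, are still template-based, so only detect_missing_packages()
# has a real implementation and every power method raises NotImplementedError.
# A sketch of that shape (the binary name below is a stand-in for
# illustration, not the command the real driver checks):
#
#     def detect_missing_packages(self):
#         if not shell.has_command_available('some_fence_binary'):
#             return ['fence-agents']
#         return []
#
#     def power_on(self, system_id, context):
#         raise NotImplementedError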
"""Tests for `provisioningserver.drivers.power.hmc`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import hmc as hmc_module from provisioningserver.drivers.power.hmc import ( extract_hmc_parameters, HMCPowerDriver, ) from testtools.matchers import Equals class TestHMCPowerDriver(MAASTestCase): def test_missing_packages(self): # there's nothing to check for, just confirm it returns [] driver = hmc_module.HMCPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self): system_id = factory.make_name('system_id') ip = factory.make_name('power_address') username = factory.make_name('power_user') password = factory.make_name('power_pass') server_name = factory.make_name('server_name') lpar = factory.make_name('lpar') context = { 'system_id': system_id, 'power_address': ip, 'power_user': username, 'power_pass': password, 'server_name': server_name, 'lpar': lpar, } return system_id, ip, username, password, server_name, lpar, context def test_extract_hmc_parameters_extracts_parameters(self): system_id, ip, username, password, server_name, lpar, context = ( self.make_parameters()) self.assertItemsEqual( (ip, username, password, server_name, lpar), extract_hmc_parameters(context)) def test_power_on_calls_power_control_hmc(self): system_id, ip, username, password, server_name, lpar, context = ( self.make_parameters()) hmc_power_driver = HMCPowerDriver() power_control_hmc = self.patch( hmc_module, 'power_control_hmc') hmc_power_driver.power_on(system_id, context) self.assertThat( power_control_hmc, MockCalledOnceWith( ip, username, password, server_name, lpar, power_change='on')) def test_power_off_calls_power_control_hmc(self): system_id, ip, username, password, server_name, lpar, context = ( self.make_parameters()) hmc_power_driver = HMCPowerDriver() power_control_hmc = self.patch( hmc_module, 'power_control_hmc') hmc_power_driver.power_off(system_id, context) self.assertThat( power_control_hmc, MockCalledOnceWith( ip, username, password, server_name, lpar, power_change='off')) def test_power_query_calls_power_state_hmc(self): system_id, ip, username, password, server_name, lpar, context = ( self.make_parameters()) hmc_power_driver = HMCPowerDriver() power_state_hmc = self.patch( hmc_module, 'power_state_hmc') power_state_hmc.return_value = 'off' expected_result = hmc_power_driver.power_query(system_id, context) self.expectThat( power_state_hmc, MockCalledOnceWith( ip, username, password, server_name, lpar)) self.expectThat(expected_result, Equals('off')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_ipmi.py0000644000000000000000000003452213056115004025750 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.drivers.power.ipmi`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from subprocess import PIPE from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, sentinel, ) from provisioningserver.drivers.power import ( ipmi as ipmi_module, PowerAuthError, PowerFatalError, ) from provisioningserver.drivers.power.ipmi import ( IPMI_CONFIG, IPMIPowerDriver, ) from provisioningserver.utils.shell import ( ExternalProcessError, has_command_available, ) from testtools.matchers import ( Contains, Equals, ) def make_parameters(): power_address = factory.make_name('power_address') power_user = factory.make_name('power_user') power_pass = factory.make_name('power_pass') power_driver = factory.make_name('power_driver') power_off_mode = factory.make_name('power_off_mode') ipmipower = factory.make_name('ipmipower') ipmi_chassis_config = factory.make_name('ipmi_chassis_config') context = { 'power_address': power_address, 'power_user': power_user, 'power_pass': power_pass, 'power_driver': power_driver, 'power_off_mode': power_off_mode, 'ipmipower': ipmipower, 'ipmi_chassis_config': ipmi_chassis_config, } return ( power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context ) def make_ipmi_chassis_config_command( ipmi_chassis_config, power_address, power_pass, power_driver, power_user, tmp_config_name): print(tmp_config_name) return ( ipmi_chassis_config, '-W', 'opensesspriv', "--driver-type", power_driver, '-h', power_address, '-u', power_user, '-p', power_pass, '--commit', '--filename', tmp_config_name ) def make_ipmipower_command( ipmipower, power_address, power_pass, power_driver, power_user): return ( ipmipower, '-W', 'opensesspriv', "--driver-type", power_driver, '-h', power_address, '-u', power_user, '-p', power_pass ) class TestIPMIPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = ipmi_module.IPMIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(['freeipmi-tools'], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = ipmi_module.IPMIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def test__finds_power_address_from_mac_address(self): (power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context) = make_parameters() driver = IPMIPowerDriver() ip_address = factory.make_ipv4_address() find_ip_via_arp = self.patch(ipmi_module, 'find_ip_via_arp') find_ip_via_arp.return_value = ip_address power_change = random.choice(("on", "off")) env = driver.get_c_environment() context['mac_address'] = factory.make_mac_address() context['power_address'] = random.choice((None, "", " ")) self.patch_autospec(driver, "_issue_ipmi_chassis_config_command") self.patch_autospec(driver, "_issue_ipmi_power_command") driver._issue_ipmi_command(power_change, **context) # The IP address is passed to _issue_ipmi_chassis_config_command. self.assertThat( driver._issue_ipmi_chassis_config_command, MockCalledOnceWith(ANY, power_change, ip_address, env)) # The IP address is also within the command passed to # _issue_ipmi_chassis_config_command. 
self.assertThat( driver._issue_ipmi_chassis_config_command.call_args[0], Contains(ip_address)) # The IP address is passed to _issue_ipmi_power_command. self.assertThat( driver._issue_ipmi_power_command, MockCalledOnceWith(ANY, power_change, ip_address, env)) # The IP address is also within the command passed to # _issue_ipmi_power_command. self.assertThat( driver._issue_ipmi_power_command.call_args[0], Contains(ip_address)) def test__chassis_config_written_to_temporary_file(self): NamedTemporaryFile = self.patch(ipmi_module, "NamedTemporaryFile") tmpfile = NamedTemporaryFile.return_value tmpfile.__enter__.return_value = tmpfile tmpfile.name = factory.make_name("filename") IPMIPowerDriver._issue_ipmi_chassis_config_command( ["true"], sentinel.change, sentinel.addr, None) self.assertThat(NamedTemporaryFile, MockCalledOnceWith()) self.assertThat(tmpfile.__enter__, MockCalledOnceWith()) self.assertThat(tmpfile.write, MockCalledOnceWith(IPMI_CONFIG)) self.assertThat(tmpfile.flush, MockCalledOnceWith()) self.assertThat(tmpfile.__exit__, MockCalledOnceWith(None, None, None)) def test__issue_ipmi_command_issues_power_on(self): (power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context) = make_parameters() ipmi_chassis_config_command = make_ipmi_chassis_config_command( ipmi_chassis_config, power_address, power_pass, power_driver, power_user, ANY) ipmipower_command = make_ipmipower_command( ipmipower, power_address, power_pass, power_driver, power_user) ipmipower_command += ('--cycle', '--on-if-off') ipmi_power_driver = IPMIPowerDriver() env = ipmi_power_driver.get_c_environment() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.return_value = "host: on" result = ipmi_power_driver._issue_ipmi_command('on', **context) self.expectThat( popen_mock, MockCalledOnceWith( ipmi_chassis_config_command, stdout=PIPE, stderr=PIPE, env=env)) self.expectThat( call_and_check_mock, MockCalledOnceWith( ipmipower_command, env=env)) self.expectThat(result, Equals('on')) def test__issue_ipmi_command_issues_power_off(self): (power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context) = make_parameters() ipmi_chassis_config_command = make_ipmi_chassis_config_command( ipmi_chassis_config, power_address, power_pass, power_driver, power_user, ANY) ipmipower_command = make_ipmipower_command( ipmipower, power_address, power_pass, power_driver, power_user) ipmipower_command += ('--off', ) ipmi_power_driver = IPMIPowerDriver() env = ipmi_power_driver.get_c_environment() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.return_value = "host: off" result = ipmi_power_driver._issue_ipmi_command('off', **context) self.expectThat( popen_mock, MockCalledOnceWith( ipmi_chassis_config_command, stdout=PIPE, stderr=PIPE, env=env)) self.expectThat( call_and_check_mock, MockCalledOnceWith( ipmipower_command, env=env)) self.expectThat(result, Equals('off')) def test__issue_ipmi_command_issues_power_off_soft_mode(self): (power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context) = make_parameters() context['power_off_mode'] = 'soft' 
ipmi_chassis_config_command = make_ipmi_chassis_config_command( ipmi_chassis_config, power_address, power_pass, power_driver, power_user, ANY) ipmipower_command = make_ipmipower_command( ipmipower, power_address, power_pass, power_driver, power_user) ipmipower_command += ('--soft', ) ipmi_power_driver = IPMIPowerDriver() env = ipmi_power_driver.get_c_environment() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.return_value = "host: off" result = ipmi_power_driver._issue_ipmi_command('off', **context) self.expectThat( popen_mock, MockCalledOnceWith( ipmi_chassis_config_command, stdout=PIPE, stderr=PIPE, env=env)) self.expectThat( call_and_check_mock, MockCalledOnceWith( ipmipower_command, env=env)) self.expectThat(result, Equals('off')) def test__issue_ipmi_command_issues_power_query(self): (power_address, power_user, power_pass, power_driver, power_off_mode, ipmipower, ipmi_chassis_config, context) = make_parameters() ipmipower_command = make_ipmipower_command( ipmipower, power_address, power_pass, power_driver, power_user) ipmipower_command += ('--stat', ) ipmi_power_driver = IPMIPowerDriver() env = ipmi_power_driver.get_c_environment() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.return_value = "host: off" result = ipmi_power_driver._issue_ipmi_command('query', **context) self.expectThat(popen_mock, MockNotCalled()) self.expectThat( call_and_check_mock, MockCalledOnceWith( ipmipower_command, env=env)) self.expectThat(result, Equals('off')) def test__issue_ipmi_command_raises_power_fatal_error(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, 'password invalid') process.returncode = 0 self.assertRaises( PowerAuthError, ipmi_power_driver._issue_ipmi_command, 'on', **context) def test__issue_ipmi_command_logs_maaslog_warning(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, 'maaslog error') process.returncode = -1 maaslog = self.patch(ipmi_module, 'maaslog') self.patch(ipmi_power_driver, '_issue_ipmi_power_command') ipmi_power_driver._issue_ipmi_command('on', **context) self.assertThat( maaslog.warning, MockCalledOnceWith( 'Failed to change the boot order to PXE %s: %s' % ( context['power_address'], 'maaslog error'))) def test__issue_ipmi_command_catches_external_process_error(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.side_effect = ( ExternalProcessError(1, "ipmipower something")) self.assertRaises( PowerFatalError, ipmi_power_driver._issue_ipmi_command, 'on', **context) def test__issue_ipmi_command_returns_output_when_no_regex_match(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = 
IPMIPowerDriver() popen_mock = self.patch(ipmi_module, 'Popen') process = popen_mock.return_value process.communicate.return_value = (None, '') process.returncode = 0 call_and_check_mock = self.patch(ipmi_module, 'call_and_check') call_and_check_mock.return_value = "Rubbish" result = ipmi_power_driver._issue_ipmi_command('on', **context) self.assertEqual(result, "Rubbish") def test_power_on_calls__issue_ipmi_command(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() _issue_ipmi_command_mock = self.patch( ipmi_power_driver, '_issue_ipmi_command') system_id = factory.make_name('system_id') ipmi_power_driver.power_on(system_id, context) self.assertThat( _issue_ipmi_command_mock, MockCalledOnceWith('on', **context)) def test_power_off_calls__issue_ipmi_command(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() _issue_ipmi_command_mock = self.patch( ipmi_power_driver, '_issue_ipmi_command') system_id = factory.make_name('system_id') ipmi_power_driver.power_off(system_id, context) self.assertThat( _issue_ipmi_command_mock, MockCalledOnceWith('off', **context)) def test_power_query_calls__issue_ipmi_command(self): _, _, _, _, _, _, _, context = make_parameters() ipmi_power_driver = IPMIPowerDriver() _issue_ipmi_command_mock = self.patch( ipmi_power_driver, '_issue_ipmi_command') system_id = factory.make_name('system_id') ipmi_power_driver.power_query(system_id, context) self.assertThat( _issue_ipmi_command_mock, MockCalledOnceWith('query', **context)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_moonshot.py0000644000000000000000000001252613056115004026660 0ustar 00000000000000# Copyright 2015-2016 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
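# For reference, the command lines the IPMI tests above pin down, with the
# executables and credentials taken from the power context (angle brackets
# mark placeholders, reconstructed from make_ipmi_chassis_config_command,
# make_ipmipower_command and the per-action suffixes the tests assert):
#
#     <ipmi_chassis_config> -W opensesspriv --driver-type <driver> \
#         -h <address> -u <user> -p <pass> --commit --filename <tmpfile>
#     <ipmipower> <common flags> --cycle --on-if-off   # power 'on'
#     <ipmipower> <common flags> --off                 # power 'off' (hard)
#     <ipmipower> <common flags> --soft                # power_off_mode='soft'
#     <ipmipower> <common flags> --stat                # power 'query'
#
# where <common flags> is the same -W/--driver-type/-h/-u/-p prefix. As
# test__issue_ipmi_command_issues_power_query asserts via MockNotCalled, the
# chassis-config step is skipped for queries.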
"""Tests for `provisioningserver.drivers.power.ipmi`.""" __all__ = [] import os from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import call from provisioningserver.drivers.power import ( moonshot as moonshot_module, PowerActionError, ) from provisioningserver.drivers.power.moonshot import MoonshotIPMIPowerDriver from provisioningserver.utils.shell import ( ExternalProcessError, has_command_available, ) from testtools.matchers import Equals def make_context(): return { 'ipmitool': factory.make_name('ipmitool'), 'power_address': factory.make_name('power_address'), 'power_user': factory.make_name('power_user'), 'power_pass': factory.make_name('power_pass'), 'power_hwaddress': factory.make_string(spaces=True), } def make_command( ipmitool, power_address, power_user, power_pass, power_hwaddress): return ( ipmitool, '-I', 'lanplus', '-H', power_address, '-U', power_user, '-P', power_pass ) + tuple(power_hwaddress.split()) def make_pxe_command(context): return make_command( context['ipmitool'], context['power_address'], context['power_user'], context['power_pass'], context['power_hwaddress'] ) + ('chassis', 'bootdev', 'pxe') def make_ipmitool_command(power_change, context): return make_command( context['ipmitool'], context['power_address'], context['power_user'], context['power_pass'], context['power_hwaddress'] ) + ('power', power_change) class TestMoonshotIPMIPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = moonshot_module.MoonshotIPMIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(['ipmitool'], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = moonshot_module.MoonshotIPMIPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def test__issue_ipmitool_command_sets_pxe_boot(self): context = make_context() env = os.environ.copy() env['LC_ALL'] = 'C' pxe_command = make_pxe_command(context) moonshot_driver = MoonshotIPMIPowerDriver() call_and_check_mock = self.patch(moonshot_module, 'call_and_check') moonshot_driver._issue_ipmitool_command('pxe', **context) self.assertThat( call_and_check_mock, MockCalledOnceWith(pxe_command, env=env)) def test__issue_ipmitool_command_returns_stdout_if_no_match(self): context = make_context() env = os.environ.copy() env['LC_ALL'] = 'C' ipmitool_command = make_ipmitool_command('status', context) moonshot_driver = MoonshotIPMIPowerDriver() call_and_check_mock = self.patch(moonshot_module, 'call_and_check') call_and_check_mock.return_value = b'other' result = moonshot_driver._issue_ipmitool_command('status', **context) self.expectThat( call_and_check_mock, MockCalledOnceWith(ipmitool_command, env=env)) self.expectThat(result, Equals('other')) def test__issue_ipmitool_raises_power_action_error(self): context = make_context() moonshot_driver = MoonshotIPMIPowerDriver() call_and_check_mock = self.patch(moonshot_module, 'call_and_check') call_and_check_mock.side_effect = ( ExternalProcessError(1, "ipmitool something")) self.assertRaises( PowerActionError, moonshot_driver._issue_ipmitool_command, 'status', **context) def test_power_on_calls__issue_ipmitool_command(self): context = make_context() moonshot_driver = MoonshotIPMIPowerDriver() _issue_ipmitool_command_mock = self.patch( moonshot_driver, '_issue_ipmitool_command') system_id 
= factory.make_name('system_id') moonshot_driver.power_on(system_id, context) self.assertThat( _issue_ipmitool_command_mock, MockCallsMatch( call('pxe', **context), call('on', **context))) def test_power_off_calls__issue_ipmitool_command(self): context = make_context() moonshot_driver = MoonshotIPMIPowerDriver() _issue_ipmitool_command_mock = self.patch( moonshot_driver, '_issue_ipmitool_command') system_id = factory.make_name('system_id') moonshot_driver.power_off(system_id, context) self.assertThat( _issue_ipmitool_command_mock, MockCalledOnceWith('off', **context)) def test_power_query_calls__issue_ipmitool_command(self): context = make_context() moonshot_driver = MoonshotIPMIPowerDriver() _issue_ipmitool_command_mock = self.patch( moonshot_driver, '_issue_ipmitool_command') system_id = factory.make_name('system_id') moonshot_driver.power_query(system_id, context) self.assertThat( _issue_ipmitool_command_mock, MockCalledOnceWith( 'status', **context)) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_mscm.py0000644000000000000000000001134213056115004025744 0ustar 00000000000000# Copyright 2015-2016 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers.power.mscm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.hardware.mscm import MSCMError from provisioningserver.drivers.hardware.tests.test_mscm import make_node_id from provisioningserver.drivers.power import ( mscm as mscm_module, PowerError, ) from provisioningserver.drivers.power.mscm import ( extract_mscm_parameters, MSCMPowerDriver, ) from testtools.matchers import Equals class TestMSCMPowerDriver(MAASTestCase): def test_missing_packages(self): # there's nothing to check for, just confirm it returns [] driver = mscm_module.MSCMPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self): system_id = factory.make_name('system_id') host = factory.make_name('power_address') username = factory.make_name('power_user') password = factory.make_name('power_pass') node_id = make_node_id() context = { 'system_id': system_id, 'power_address': host, 'power_user': username, 'power_pass': password, 'node_id': node_id, } return system_id, host, username, password, node_id, context def test_extract_mscm_parameters_extracts_parameters(self): system_id, host, username, password, node_id, context = ( self.make_parameters()) self.assertItemsEqual( (host, username, password, node_id), extract_mscm_parameters(context)) def test_power_on_calls_power_control_mscm(self): system_id, host, username, password, node_id, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_control_mscm = self.patch( mscm_module, 'power_control_mscm') mscm_power_driver.power_on(system_id, context) self.assertThat( power_control_mscm, MockCalledOnceWith( host, username, password, node_id, power_change='on')) def test_power_on_raises_power_error(self): system_id, _, _, _, _, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_control_mscm = self.patch( mscm_module, 'power_control_mscm') power_control_mscm.side_effect = MSCMError("Error") self.assertRaises( PowerError, mscm_power_driver.power_on, 
system_id, context) def test_power_off_calls_power_control_mscm(self): system_id, host, username, password, node_id, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_control_mscm = self.patch( mscm_module, 'power_control_mscm') mscm_power_driver.power_off(system_id, context) self.assertThat( power_control_mscm, MockCalledOnceWith( host, username, password, node_id, power_change='off')) def test_power_off_raises_power_error(self): system_id, _, _, _, _, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_control_mscm = self.patch( mscm_module, 'power_control_mscm') power_control_mscm.side_effect = MSCMError("Error") self.assertRaises( PowerError, mscm_power_driver.power_off, system_id, context) def test_power_query_calls_power_state_mscm(self): system_id, host, username, password, node_id, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_state_mscm = self.patch( mscm_module, 'power_state_mscm') power_state_mscm.return_value = 'off' expected_result = mscm_power_driver.power_query(system_id, context) self.expectThat( power_state_mscm, MockCalledOnceWith( host, username, password, node_id)) self.expectThat(expected_result, Equals('off')) def test_power_query_raises_power_error(self): system_id, _, _, _, _, context = ( self.make_parameters()) mscm_power_driver = MSCMPowerDriver() power_state_mscm = self.patch( mscm_module, 'power_state_mscm') power_state_mscm.side_effect = MSCMError("Error") self.assertRaises( PowerError, mscm_power_driver.power_query, system_id, context) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_msftocs.py0000644000000000000000000000716313056115004026471 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
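# The MSCM tests above check error translation as well as delegation: an
# MSCMError raised by the hardware layer must surface as a PowerError. A
# sketch of the wrapping this implies (illustrative only; the real message
# text is not shown by these tests):
#
#     def power_on(self, system_id, context):
#         host, username, password, node_id = extract_mscm_parameters(context)
#         try:
#             power_control_mscm(
#                 host, username, password, node_id, power_change='on')
#         except MSCMError as e:
#             raise PowerError("Failed to power on node: %s" % e)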
"""Tests for `provisioningserver.drivers.power.msftocs`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import msftocs as msftocs_module from provisioningserver.drivers.power.msftocs import ( extract_msftocs_parameters, MicrosoftOCSPowerDriver, ) from testtools.matchers import Equals class TestMicrosoftOCSPowerDriver(MAASTestCase): def test_missing_packages(self): # there's nothing to check for, just confirm it returns [] driver = msftocs_module.MicrosoftOCSPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self): system_id = factory.make_name('system_id') ip = factory.make_name('power_address') port = factory.make_name('power_port') username = factory.make_name('power_user') password = factory.make_name('power_pass') blade_id = factory.make_name('blade_id') context = { 'system_id': system_id, 'power_address': ip, 'power_port': port, 'power_user': username, 'power_pass': password, 'blade_id': blade_id, } return system_id, ip, port, username, password, blade_id, context def test_extract_msftocs_parameters_extracts_parameters(self): system_id, ip, port, username, password, blade_id, context = ( self.make_parameters()) self.assertItemsEqual( (ip, port, username, password, blade_id), extract_msftocs_parameters(context)) def test_power_on_calls_power_control_msftocs(self): power_change = 'on' system_id, ip, port, username, password, blade_id, context = ( self.make_parameters()) msftocs_power_driver = MicrosoftOCSPowerDriver() power_control_msftocs = self.patch( msftocs_module, 'power_control_msftocs') msftocs_power_driver.power_on(system_id, context) self.assertThat( power_control_msftocs, MockCalledOnceWith( ip, port, username, password, power_change)) def test_power_off_calls_power_control_msftocs(self): power_change = 'off' system_id, ip, port, username, password, blade_id, context = ( self.make_parameters()) msftocs_power_driver = MicrosoftOCSPowerDriver() power_control_msftocs = self.patch( msftocs_module, 'power_control_msftocs') msftocs_power_driver.power_off(system_id, context) self.assertThat( power_control_msftocs, MockCalledOnceWith( ip, port, username, password, power_change)) def test_power_query_calls_power_state_msftocs(self): system_id, ip, port, username, password, blade_id, context = ( self.make_parameters()) msftocs_power_driver = MicrosoftOCSPowerDriver() power_state_msftocs = self.patch( msftocs_module, 'power_state_msftocs') power_state_msftocs.return_value = 'off' expected_result = msftocs_power_driver.power_query(system_id, context) self.expectThat( power_state_msftocs, MockCalledOnceWith( ip, port, username, password, blade_id)) self.expectThat(expected_result, Equals('off')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_seamicro.py0000644000000000000000000001670313056115004026615 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.drivers.power.seamicro`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from random import choice from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import ( PowerFatalError, seamicro as seamicro_module, ) from provisioningserver.drivers.power.seamicro import ( extract_seamicro_parameters, SeaMicroPowerDriver, ) from provisioningserver.utils.shell import ( ExternalProcessError, has_command_available, ) from testtools.matchers import Equals class TestSeaMicroPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = seamicro_module.SeaMicroPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(['ipmitool'], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = seamicro_module.SeaMicroPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_context(self): ip = factory.make_name('power_address') username = factory.make_name('power_user') password = factory.make_name('power_pass') server_id = factory.make_name('system_id') context = { 'power_address': ip, 'power_user': username, 'power_pass': password, 'system_id': server_id, } return ip, username, password, server_id, context def test_extract_seamicro_parameters_extracts_parameters(self): ip, username, password, server_id, context = self.make_context() power_control = choice(['ipmi', 'restapi', 'restapi2']) context['power_control'] = power_control self.assertItemsEqual( (ip, username, password, server_id, power_control), extract_seamicro_parameters(context)) def test__power_control_seamicro15k_ipmi_calls_call_and_check(self): ip, username, password, server_id, _ = self.make_context() power_change = choice(['on', 'off']) seamicro_power_driver = SeaMicroPowerDriver() call_and_check_mock = self.patch(seamicro_module, 'call_and_check') seamicro_power_driver._power_control_seamicro15k_ipmi( ip, username, password, server_id, power_change) power_mode = 1 if power_change == 'on' else 6 self.assertThat( call_and_check_mock, MockCalledOnceWith([ 'ipmitool', '-I', 'lanplus', '-H', ip, '-U', username, '-P', password, 'raw', '0x2E', '1', '0x00', '0x7d', '0xab', power_mode, '0', server_id, ])) def test__power_control_seamicro15k_ipmi_raises_PowerFatalError(self): ip, username, password, server_id, _ = self.make_context() power_change = choice(['on', 'off']) seamicro_power_driver = SeaMicroPowerDriver() call_and_check_mock = self.patch(seamicro_module, 'call_and_check') call_and_check_mock.side_effect = ( ExternalProcessError(1, "ipmitool something")) self.assertRaises( PowerFatalError, seamicro_power_driver._power_control_seamicro15k_ipmi, ip, username, password, server_id, power_change) def test__power_calls__power_control_seamicro15k_ipmi(self): ip, username, password, server_id, context = self.make_context() context['power_control'] = 'ipmi' power_change = choice(['on', 'off']) seamicro_power_driver = SeaMicroPowerDriver() _power_control_seamicro15k_ipmi_mock = self.patch( seamicro_power_driver, '_power_control_seamicro15k_ipmi') seamicro_power_driver._power(power_change, context) self.assertThat( _power_control_seamicro15k_ipmi_mock, MockCalledOnceWith( ip, username, password, server_id, 
power_change=power_change)) def test__power_calls_power_control_seamicro15k_v09(self): ip, username, password, server_id, context = self.make_context() context['power_control'] = 'restapi' power_change = choice(['on', 'off']) seamicro_power_driver = SeaMicroPowerDriver() power_control_seamicro15k_v09_mock = self.patch( seamicro_module, 'power_control_seamicro15k_v09') seamicro_power_driver._power(power_change, context) self.assertThat( power_control_seamicro15k_v09_mock, MockCalledOnceWith( ip, username, password, server_id, power_change=power_change)) def test__power_calls_power_control_seamicro15k_v2(self): ip, username, password, server_id, context = self.make_context() context['power_control'] = 'restapi2' power_change = choice(['on', 'off']) seamicro_power_driver = SeaMicroPowerDriver() power_control_seamicro15k_v2_mock = self.patch( seamicro_module, 'power_control_seamicro15k_v2') seamicro_power_driver._power(power_change, context) self.assertThat( power_control_seamicro15k_v2_mock, MockCalledOnceWith( ip, username, password, server_id, power_change=power_change)) def test_power_on_calls_power(self): _, _, _, _, context = self.make_context() context['power_control'] = factory.make_name('power_control') seamicro_power_driver = SeaMicroPowerDriver() power_mock = self.patch(seamicro_power_driver, '_power') seamicro_power_driver.power_on(context['system_id'], context) self.assertThat( power_mock, MockCalledOnceWith('on', context)) def test_power_off_calls_power(self): _, _, _, _, context = self.make_context() context['power_control'] = factory.make_name('power_control') seamicro_power_driver = SeaMicroPowerDriver() power_mock = self.patch(seamicro_power_driver, '_power') seamicro_power_driver.power_off(context['system_id'], context) self.assertThat( power_mock, MockCalledOnceWith('off', context)) def test_power_query_calls_power_query_seamicro15k_v2(self): ip, username, password, server_id, context = self.make_context() context['power_control'] = 'restapi2' seamicro_power_driver = SeaMicroPowerDriver() power_query_seamicro15k_v2_mock = self.patch( seamicro_module, 'power_query_seamicro15k_v2') power_query_seamicro15k_v2_mock.return_value = 'on' power_state = seamicro_power_driver.power_query( context['system_id'], context) self.expectThat( power_query_seamicro15k_v2_mock, MockCalledOnceWith( ip, username, password, server_id)) self.expectThat(power_state, Equals('on')) def test_power_query_returns_unknown_if_not_restapi2(self): ip, username, password, server_id, context = self.make_context() context['power_control'] = factory.make_name('power_control') seamicro_power_driver = SeaMicroPowerDriver() power_state = seamicro_power_driver.power_query( context['system_id'], context) self.assertThat(power_state, Equals('unknown')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_ucsm.py0000644000000000000000000000644113056115004025760 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
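# The SeaMicro tests above fix the exact raw IPMI request used when
# power_control='ipmi' (reconstructed from the MockCalledOnceWith assertion):
#
#     ipmitool -I lanplus -H <address> -U <user> -P <pass> \
#         raw 0x2E 1 0x00 0x7d 0xab <mode> 0 <server_id>
#
# with <mode> 1 for 'on' and 6 for 'off'. For power_control='restapi' or
# 'restapi2' the driver instead delegates to the v0.9/v2 REST helpers, and
# only 'restapi2' supports querying -- any other power_control reports
# 'unknown'.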
"""Tests for `provisioningserver.drivers.power.ucsm`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import ucsm as ucsm_module from provisioningserver.drivers.power.ucsm import ( extract_ucsm_parameters, UCSMPowerDriver, ) from testtools.matchers import Equals class TestUCSMPowerDriver(MAASTestCase): def test_missing_packages(self): # there's nothing to check for, just confirm it returns [] driver = ucsm_module.UCSMPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self): system_id = factory.make_name('system_id') url = factory.make_name('power_address') username = factory.make_name('power_user') password = factory.make_name('power_pass') uuid = factory.make_UUID() context = { 'system_id': system_id, 'power_address': url, 'power_user': username, 'power_pass': password, 'uuid': uuid, } return system_id, url, username, password, uuid, context def test_extract_ucsm_parameters_extracts_parameters(self): system_id, url, username, password, uuid, context = ( self.make_parameters()) self.assertItemsEqual( (url, username, password, uuid), extract_ucsm_parameters(context)) def test_power_on_calls_power_control_ucsm(self): system_id, url, username, password, uuid, context = ( self.make_parameters()) ucsm_power_driver = UCSMPowerDriver() power_control_ucsm = self.patch( ucsm_module, 'power_control_ucsm') ucsm_power_driver.power_on(system_id, context) self.assertThat( power_control_ucsm, MockCalledOnceWith( url, username, password, uuid, maas_power_mode='on')) def test_power_off_calls_power_control_ucsm(self): system_id, url, username, password, uuid, context = ( self.make_parameters()) ucsm_power_driver = UCSMPowerDriver() power_control_ucsm = self.patch( ucsm_module, 'power_control_ucsm') ucsm_power_driver.power_off(system_id, context) self.assertThat( power_control_ucsm, MockCalledOnceWith( url, username, password, uuid, maas_power_mode='off')) def test_power_query_calls_power_state_ucsm(self): system_id, url, username, password, uuid, context = ( self.make_parameters()) ucsm_power_driver = UCSMPowerDriver() power_state_ucsm = self.patch( ucsm_module, 'power_state_ucsm') power_state_ucsm.return_value = 'off' expected_result = ucsm_power_driver.power_query(system_id, context) self.expectThat( power_state_ucsm, MockCalledOnceWith( url, username, password, uuid)) self.expectThat(expected_result, Equals('off')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_virsh.py0000644000000000000000000000722513056115004026145 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.drivers.power.virsh`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.power import virsh as virsh_module from provisioningserver.drivers.power.virsh import ( extract_virsh_parameters, VirshPowerDriver, ) from provisioningserver.utils.shell import has_command_available from testtools.matchers import Equals class TestVirshPowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = False driver = virsh_module.VirshPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(['libvirt-bin'], missing) def test_no_missing_packages(self): mock = self.patch(has_command_available) mock.return_value = True driver = virsh_module.VirshPowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self): system_id = factory.make_name('system_id') poweraddr = factory.make_name('power_address') machine = factory.make_name('power_id') password = factory.make_name('power_pass') context = { 'system_id': system_id, 'power_address': poweraddr, 'power_id': machine, 'power_pass': password, } return system_id, poweraddr, machine, password, context def test_extract_virsh_parameters_extracts_parameters(self): system_id, poweraddr, machine, password, context = ( self.make_parameters()) self.assertItemsEqual( (poweraddr, machine, password), extract_virsh_parameters(context)) def test_power_on_calls_power_control_virsh(self): power_change = 'on' system_id, poweraddr, machine, password, context = ( self.make_parameters()) virsh_power_driver = VirshPowerDriver() power_control_virsh = self.patch( virsh_module, 'power_control_virsh') virsh_power_driver.power_on(system_id, context) self.assertThat( power_control_virsh, MockCalledOnceWith( poweraddr, machine, power_change, password)) def test_power_off_calls_power_control_virsh(self): power_change = 'off' system_id, poweraddr, machine, password, context = ( self.make_parameters()) virsh_power_driver = VirshPowerDriver() power_control_virsh = self.patch( virsh_module, 'power_control_virsh') virsh_power_driver.power_off(system_id, context) self.assertThat( power_control_virsh, MockCalledOnceWith( poweraddr, machine, power_change, password)) def test_power_query_calls_power_state_virsh(self): system_id, poweraddr, machine, password, context = ( self.make_parameters()) virsh_power_driver = VirshPowerDriver() power_state_virsh = self.patch( virsh_module, 'power_state_virsh') power_state_virsh.return_value = 'off' expected_result = virsh_power_driver.power_query(system_id, context) self.expectThat( power_state_virsh, MockCalledOnceWith( poweraddr, machine, password)) self.expectThat(expected_result, Equals('off')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/power/tests/test_vmware.py0000644000000000000000000001133513056115004026310 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # Gnu Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.drivers.power.vmware`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from provisioningserver.drivers.hardware.vmware import try_pyvmomi_import from provisioningserver.drivers.power import vmware as vmware_module from provisioningserver.drivers.power.vmware import ( extract_vmware_parameters, VMwarePowerDriver, ) from testtools.matchers import Equals class TestVMwarePowerDriver(MAASTestCase): def test_missing_packages(self): mock = self.patch(try_pyvmomi_import) mock.return_value = False driver = vmware_module.VMwarePowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual(["python-pyvmomi"], missing) def test_no_missing_packages(self): mock = self.patch(try_pyvmomi_import) mock.return_value = True driver = vmware_module.VMwarePowerDriver() missing = driver.detect_missing_packages() self.assertItemsEqual([], missing) def make_parameters(self, has_optional=True): system_id = factory.make_name('system_id') host = factory.make_name('power_address') username = factory.make_name('power_user') password = factory.make_name('power_pass') vm_name = factory.make_name('power_vm_name') uuid = factory.make_name('power_uuid') port = protocol = None context = { 'system_id': system_id, 'power_address': host, 'power_user': username, 'power_pass': password, 'power_vm_name': vm_name, 'power_uuid': uuid, 'power_port': port, 'power_protocol': protocol, } if not has_optional: context['power_port'] = "" context['power_protocol'] = "" return (system_id, host, username, password, vm_name, uuid, port, protocol, context) def test_extract_vmware_parameters_extracts_parameters(self): (system_id, host, username, password, vm_name, uuid, port, protocol, context) = self.make_parameters() self.assertItemsEqual( (host, username, password, vm_name, uuid, None, None), extract_vmware_parameters(context)) def test_extract_vmware_parameters_treats_optional_params_as_none(self): (system_id, host, username, password, vm_name, uuid, port, protocol, context) = self.make_parameters( has_optional=False) self.assertItemsEqual( (host, username, password, vm_name, uuid, port, protocol), extract_vmware_parameters(context)) def test_power_on_calls_power_control_vmware(self): power_change = 'on' (system_id, host, username, password, vm_name, uuid, port, protocol, context) = self.make_parameters() vmware_power_driver = VMwarePowerDriver() power_control_vmware = self.patch( vmware_module, 'power_control_vmware') vmware_power_driver.power_on(system_id, context) self.assertThat( power_control_vmware, MockCalledOnceWith( host, username, password, vm_name, uuid, power_change, port, protocol)) def test_power_off_calls_power_control_vmware(self): power_change = 'off' (system_id, host, username, password, vm_name, uuid, port, protocol, context) = self.make_parameters() vmware_power_driver = VMwarePowerDriver() power_control_vmware = self.patch( vmware_module, 'power_control_vmware') vmware_power_driver.power_off(system_id, context) self.assertThat( power_control_vmware, MockCalledOnceWith( host, username, password, vm_name, uuid, power_change, port, protocol)) def test_power_query_calls_power_query_vmware(self): (system_id, host, username, password, vm_name, uuid, port, protocol, context) = self.make_parameters() vmware_power_driver = VMwarePowerDriver() power_query_vmware = 
self.patch( vmware_module, 'power_query_vmware') power_query_vmware.return_value = 'off' expected_result = vmware_power_driver.power_query(system_id, context) self.expectThat( power_query_vmware, MockCalledOnceWith( host, username, password, vm_name, uuid, port, protocol)) self.expectThat(expected_result, Equals('off')) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/__init__.py0000644000000000000000000000271513056115004024653 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Monitored service driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "SERVICE_STATE", "Service", "ServiceRegistry", ] from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from provisioningserver.utils.registry import Registry class SERVICE_STATE: """The vocabulary of a service's expected state.""" #: Service should be on ON = 'on' #: Service should be off OFF = 'off' class Service: """Skeleton for a monitored service.""" __metaclass__ = ABCMeta @abstractproperty def name(self): """Nice name of the service.""" @abstractproperty def service_name(self): """Name of the service for upstart or systemd.""" @abstractmethod def get_expected_state(self): """Return the expected state for the service.""" class ServiceRegistry(Registry): """Registry for service classes.""" from provisioningserver.drivers.service.tgt import TGTService from provisioningserver.drivers.service.dhcp import ( DHCPv4Service, DHCPv6Service, ) builtin_services = [ TGTService(), DHCPv4Service(), DHCPv6Service(), ] for service in builtin_services: ServiceRegistry.register_item(service.name, service) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/dhcp.py0000644000000000000000000000452013056115004024026 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Service class for the monitored dhcp services.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DHCPv4Service", "DHCPv6Service", ] from abc import abstractproperty import os from provisioningserver.dhcp import ( DHCPv4_CONFIG_FILE, DHCPv6_CONFIG_FILE, DISABLED_DHCP_SERVER, ) from provisioningserver.drivers.service import ( Service, SERVICE_STATE, ) from provisioningserver.path import get_path from provisioningserver.utils.fs import read_text_file class DHCPService(Service): """Abstract monitored dhcp service.""" config_file = abstractproperty() def __init__(self): super(DHCPService, self).__init__() self.expected_state = self._get_starting_expected_state() def get_expected_state(self): """Return the expected state for the dhcp service. The starting state is determined by `_get_starting_expected_state` when the service object is created; `expected_state` is then adjusted as the dhcp service is enabled or disabled.
""" return self.expected_state def is_on(self): """Return true if the service should be on.""" return self.expected_state == SERVICE_STATE.ON def on(self): """Set the expected state of the service to `ON`.""" self.expected_state = SERVICE_STATE.ON def off(self): """Set the expected state of the service to `OFF`.""" self.expected_state = SERVICE_STATE.OFF def _get_starting_expected_state(self): """Return the starting `expected_state` for this service.""" if not os.path.exists(self.config_file): return SERVICE_STATE.OFF else: config_contents = read_text_file(self.config_file) if config_contents == DISABLED_DHCP_SERVER: return SERVICE_STATE.OFF else: return SERVICE_STATE.ON class DHCPv4Service(DHCPService): name = "dhcp4" service_name = "maas-dhcpd" config_file = get_path(DHCPv4_CONFIG_FILE) class DHCPv6Service(DHCPService): name = "dhcp6" service_name = "maas-dhcpd6" config_file = get_path(DHCPv6_CONFIG_FILE) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/tests/0000755000000000000000000000000013056115004023677 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/tgt.py0000644000000000000000000000141613056115004023707 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Service class for the monitored tgt service.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "TGTService", ] from provisioningserver.drivers.service import ( Service, SERVICE_STATE, ) class TGTService(Service): """Monitored tgt service.""" name = "tgt" service_name = "tgt" def get_expected_state(self): """Return a the expected state for the tgt service. The tgt service should always be on. No condition exists where it should be off. """ return SERVICE_STATE.ON maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/tests/test_dhcp.py0000644000000000000000000000742013056115004026231 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the DHCPv4 and DHCPv6 service driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.dhcp import ( DHCPv4_CONFIG_FILE, DHCPv6_CONFIG_FILE, DISABLED_DHCP_SERVER, ) from provisioningserver.drivers.service import SERVICE_STATE from provisioningserver.drivers.service.dhcp import ( DHCPService, DHCPv4Service, DHCPv6Service, ) from provisioningserver.path import get_path class TestDHCPService(MAASTestCase): def make_dhcp_service(self, fake_config_file=None): if fake_config_file is None: fake_config_file = factory.make_name("config_file") class FakeDHCPService(DHCPService): name = factory.make_name("name") service_name = factory.make_name("service") config_file = fake_config_file return FakeDHCPService() def test_get_expected_state_returns_from_expected_state(self): service = self.make_dhcp_service() service.expected_state = sentinel.state self.assertEqual(sentinel.state, service.get_expected_state()) def test_is_on_returns_True_when_expected_state_on(self): service = self.make_dhcp_service() service.expected_state = SERVICE_STATE.ON self.assertTrue( service.is_on(), "Did not return true when expected_state was on.") def test_is_on_returns_False_when_expected_state_off(self): service = self.make_dhcp_service() service.expected_state = SERVICE_STATE.OFF self.assertFalse( service.is_on(), "Did not return false when expected_state was off.") def test_on_sets_expected_state_to_on(self): service = self.make_dhcp_service() service.expected_state = SERVICE_STATE.OFF service.on() self.assertEqual(SERVICE_STATE.ON, service.expected_state) def test_off_sets_expected_state_to_off(self): service = self.make_dhcp_service() service.expected_state = SERVICE_STATE.ON service.off() self.assertEqual(SERVICE_STATE.OFF, service.expected_state) def test__get_starting_expected_state_returns_off_if_doesnt_exist(self): service = self.make_dhcp_service() self.assertEqual( SERVICE_STATE.OFF, service._get_starting_expected_state()) def test__get_starting_expected_state_returns_on_if_not_disabled_cfg(self): service = self.make_dhcp_service() service.config_file = self.make_file() self.assertEqual( SERVICE_STATE.ON, service._get_starting_expected_state()) def test__get_starting_expected_state_returns_off_if_disabled_cfg(self): service = self.make_dhcp_service() service.config_file = self.make_file(contents=DISABLED_DHCP_SERVER) self.assertEqual( SERVICE_STATE.OFF, service._get_starting_expected_state()) class TestDHCPv4Service(MAASTestCase): def test_service_name(self): service = DHCPv4Service() self.assertEqual("maas-dhcpd", service.service_name) def test_config_file(self): service = DHCPv4Service() self.assertEqual(get_path(DHCPv4_CONFIG_FILE), service.config_file) class TestDHCPv6Service(MAASTestCase): def test_service_name(self): service = DHCPv6Service() self.assertEqual("maas-dhcpd6", service.service_name) def test_config_file(self): service = DHCPv6Service() self.assertEqual(get_path(DHCPv6_CONFIG_FILE), service.config_file) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/service/tests/test_tgt.py0000644000000000000000000000141113056115004026103 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the tgt service driver.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.testcase import MAASTestCase from provisioningserver.drivers.service import SERVICE_STATE from provisioningserver.drivers.service.tgt import TGTService class TestTGTService(MAASTestCase): def test_service_name(self): tgt = TGTService() self.assertEqual("tgt", tgt.service_name) def test_get_expected_state(self): tgt = TGTService() self.assertEqual(SERVICE_STATE.ON, tgt.get_expected_state()) maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/tests/__init__.py0000644000000000000000000000000013056115004024336 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/drivers/tests/test_base.py0000644000000000000000000001472213056115004024570 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.drivers`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from jsonschema import ( validate, ValidationError, ) from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver import drivers from provisioningserver.drivers import ( Architecture, ArchitectureRegistry, BootResourceRegistry, JSON_SETTING_SCHEMA, make_setting_field, SETTING_PARAMETER_FIELD_SCHEMA, validate_settings, ) from provisioningserver.utils.testing import RegistryFixture from testtools.matchers import ContainsAll class TestMakeSettingField(MAASTestCase): def test_returns_valid_schema(self): setting = make_setting_field( factory.make_name('name'), factory.make_name('label')) #: doesn't raise ValidationError validate(setting, SETTING_PARAMETER_FIELD_SCHEMA) def test_returns_dict_with_required_fields(self): setting = make_setting_field( factory.make_name('name'), factory.make_name('label')) self.assertThat( setting, ContainsAll([ 'name', 'label', 'required', 'field_type', 'choices', 'default'])) def test_defaults_field_type_to_string(self): setting = make_setting_field( factory.make_name('name'), factory.make_name('label')) self.assertEqual('string', setting['field_type']) def test_defaults_choices_to_empty_list(self): setting = make_setting_field( factory.make_name('name'), factory.make_name('label')) self.assertEqual([], setting['choices']) def test_defaults_default_to_empty_string(self): setting = make_setting_field( factory.make_name('name'), factory.make_name('label')) self.assertEqual("", setting['default']) def test_validates_choices(self): choices = [('invalid')] self.assertRaises( ValidationError, make_setting_field, factory.make_name('name'), factory.make_name('label'), field_type='choice', choices=choices) def test_returns_dict_with_correct_values(self): name = factory.make_name('name') label = factory.make_name('label') field_type = random.choice(['string', 'mac_address', 'choice']) choices = [ [factory.make_name('key'), factory.make_name('value')] for _ in range(3) ] default = factory.make_name('default') setting = make_setting_field( name, label, field_type=field_type, choices=choices, default=default, required=True) self.assertItemsEqual({ 'name': name, 'label': label, 'field_type': field_type, 'choices': choices, 'default': default, 'required': True }, setting) class 
TestValidateSettings(MAASTestCase): def test_calls_validate(self): mock_validate = self.patch(drivers, 'validate') validate_settings(sentinel.settings) self.assertThat( mock_validate, MockCalledOnceWith(sentinel.settings, JSON_SETTING_SCHEMA)) class TestRegistries(MAASTestCase): def setUp(self): super(TestRegistries, self).setUp() # Ensure the global registry is empty for each test run. self.useFixture(RegistryFixture()) def test_bootresource_registry(self): self.assertItemsEqual([], BootResourceRegistry) BootResourceRegistry.register_item("resource", sentinel.resource) self.assertIn( sentinel.resource, (item for name, item in BootResourceRegistry)) def test_architecture_registry(self): self.assertItemsEqual([], ArchitectureRegistry) ArchitectureRegistry.register_item("resource", sentinel.resource) self.assertIn( sentinel.resource, (item for name, item in ArchitectureRegistry)) def test_get_by_pxealias_returns_valid_arch(self): arch1 = Architecture( name="arch1", description="arch1", pxealiases=["archibald", "reginald"]) arch2 = Architecture( name="arch2", description="arch2", pxealiases=["fake", "foo"]) ArchitectureRegistry.register_item("arch1", arch1) ArchitectureRegistry.register_item("arch2", arch2) self.assertEqual( arch1, ArchitectureRegistry.get_by_pxealias("archibald")) def test_get_by_pxealias_returns_None_if_none_matching(self): arch1 = Architecture( name="arch1", description="arch1", pxealiases=["archibald", "reginald"]) arch2 = Architecture(name="arch2", description="arch2") ArchitectureRegistry.register_item("arch1", arch1) ArchitectureRegistry.register_item("arch2", arch2) self.assertEqual( None, ArchitectureRegistry.get_by_pxealias("stinkywinky")) def test_gen_power_types(self): from provisioningserver.drivers import power from provisioningserver.power import schema class TestGenPowerTypesPowerDriver(power.PowerDriver): name = 'test_gen_power_types' description = "test_gen_power_types Power Driver." settings = [] def detect_missing_packages(self): # these packages are forever missing return ['fake-package-one', 'fake-package-two'] def power_on(self, system_id, **kwargs): raise NotImplementedError def power_off(self, system_id, **kwargs): raise NotImplementedError def power_query(self, system_id, **kwargs): raise NotImplementedError # add my fake driver driver = TestGenPowerTypesPowerDriver() power.power_drivers_by_name[driver.name] = driver schema.JSON_POWER_TYPE_PARAMETERS += [{'name': "test_gen_power_types"}] # make sure fake packages are reported missing power_types = list(drivers.gen_power_types()) self.assertEqual(15, len(power_types)) self.assertItemsEqual( ['fake-package-one', 'fake-package-two'], power_types[-1].get('missing_packages')) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/__init__.py0000644000000000000000000000000013056115004024355 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/boot_image_mapping.py0000644000000000000000000000761113056115004026455 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
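# The JSON produced by BootImageMapping.dump_json() nests dicts in the
# order os -> arch -> subarch -> release -> label, with the compact
# Simplestreams resource dict as the leaf. An illustrative (not real)
# document:
#
#     {"ubuntu": {"amd64": {"generic": {"trusty": {"release": {
#         "content_id": "...", "product_name": "...",
#         "version_name": "...", "path": "...", "subarches": "..."}}}}}
#
# load_json() accepts both this six-level form and the older five-level
# form that omits the operating system, which is then assumed to be
# "ubuntu".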
"""The `BootImageMapping` class.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'BootImageMapping', ] import json from provisioningserver.import_images.helpers import ImageSpec from provisioningserver.utils import dict_depth def gen_image_spec_with_resource(os, data): """Generate image and resource for given operating system and data.""" for arch in data: for subarch in data[arch]: for release in data[arch][subarch]: for label in data[arch][subarch][release]: image = ImageSpec( os=os, arch=arch, subarch=subarch, release=release, label=label) resource = data[arch][subarch][release][label] yield image, resource class BootImageMapping: """Mapping of boot-image data. Maps `ImageSpec` tuples to metadata for Simplestreams products. This class is deliberately a bit more restrictive and less ad-hoc than a dict. It helps keep a clear view of the data structures in this module. """ def __init__(self): self.mapping = {} def items(self): """Iterate over `ImageSpec` keys, and their stored values.""" for image_spec, item in sorted(self.mapping.items()): yield image_spec, item def is_empty(self): """Is this mapping empty?""" return len(self.mapping) == 0 def setdefault(self, image_spec, item): """Set metadata for `image_spec` to item, if not already set.""" assert isinstance(image_spec, ImageSpec) self.mapping.setdefault(image_spec, item) def set(self, image_spec, item): """"Set metadata for `image_spec` to item, even if already set.""" assert isinstance(image_spec, ImageSpec) self.mapping[image_spec] = item def dump_json(self): """Produce JSON representing the mapped boot images. Tries to keep the output deterministic, so that identical data is likely to produce identical JSON. """ # The meta files represent the mapping as a nested hierarchy of dicts. # Keep that format. data = {} for image, resource in self.items(): os, arch, subarch, release, label = image data.setdefault(os, {}) data[os].setdefault(arch, {}) data[os][arch].setdefault(subarch, {}) data[os][arch][subarch].setdefault(release, {}) data[os][arch][subarch][release][label] = resource return json.dumps(data, sort_keys=True) @staticmethod def load_json(json_data): """Take a JSON representation and deserialize into an object. :param json_data: string produced by dump_json(), above. :return: A BootImageMapping If the json data is invalid, an empty BootImageMapping is returned. """ mapping = BootImageMapping() try: data = json.loads(json_data) except ValueError: return mapping depth = dict_depth(data) if depth == 5: # Support for older data. This has no operating system, then # it is ubuntu. for image, resource in gen_image_spec_with_resource( "ubuntu", data): mapping.setdefault(image, resource) elif depth == 6: for os in data: for image, resource in gen_image_spec_with_resource( os, data[os]): mapping.setdefault(image, resource) return mapping def get_image_arches(self): """Set of arches this BootImageMapping has an ImageSpec for.""" return {item[0].arch for item in self.items()} maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/boot_resources.py0000644000000000000000000003064613056115004025676 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'import_images', 'main', 'main_with_services', 'make_arg_parser', ] from argparse import ArgumentParser import errno import os from textwrap import dedent from provisioningserver.boot import BootMethodRegistry from provisioningserver.boot.tftppath import list_boot_images from provisioningserver.config import ( BootSources, ClusterConfiguration, ) from provisioningserver.import_images.cleanup import ( cleanup_snapshots_and_cache, ) from provisioningserver.import_images.download_descriptions import ( download_all_image_descriptions, ) from provisioningserver.import_images.download_resources import ( download_all_boot_resources, ) from provisioningserver.import_images.helpers import maaslog from provisioningserver.import_images.keyrings import write_all_keyrings from provisioningserver.import_images.product_mapping import map_products from provisioningserver.service_monitor import service_monitor from provisioningserver.utils.fs import ( atomic_symlink, atomic_write, read_text_file, tempdir, ) from provisioningserver.utils.shell import call_and_check from twisted.python.filepath import FilePath class NoConfigFile(Exception): """Raised when the config file for the script doesn't exist.""" def tgt_entry(osystem, arch, subarch, release, label, image): """Generate a tgt target for commissioning arch/subarch with a release. The tgt target used to commission an arch/subarch machine with a specific Ubuntu release is named: ephemeral-osystem-arch-subarch-release-label. This function creates a target description in the format used by tgt-admin. It uses osystem, arch, subarch, release and label to generate the target name, and image as the path to the image file which should be shared. The tgt target is marked as read-only, and has the 'allow-in-use' option enabled because this script actively uses hardlinks to do image management: root images in different folders may point to the same inode, and tgt doesn't allow the same inode to back different tgt targets (even read-only ones, which looks like a bug) without this option enabled. :param osystem: Operating System name we generate the tgt target for :param arch: Architecture name we generate the tgt target for :param subarch: Subarchitecture name we generate the tgt target for :param release: Ubuntu release we generate the tgt target for :param label: The image's label :param image: Path to the image which should be shared via tgt/iscsi :return: Tgt entry which can be written to a tgt-admin configuration file """ prefix = 'iqn.2004-05.com.ubuntu:maas' target_name = 'ephemeral-%s-%s-%s-%s-%s' % ( osystem, arch, subarch, release, label ) entry = dedent("""\ <target {prefix}:{target_name}> readonly 1 allow-in-use yes backing-store "{image}" driver iscsi </target> """).format(prefix=prefix, target_name=target_name, image=image) return entry def install_boot_loaders(destination, arches): """Install all the required files from each bootloader method. :param destination: Directory where the loaders should be stored. :param arches: Arches we want to install boot loaders for. """ for _, boot_method in BootMethodRegistry: if arches.intersection(boot_method.bootloader_arches) != set(): boot_method.install_bootloader(destination) def make_arg_parser(doc): """Create an `argparse.ArgumentParser` for this script.""" parser = ArgumentParser(description=doc) parser.add_argument( '--sources-file', action="store", required=True, help=( "Path to YAML file defining import sources.
" "See this script's man page for a description of " "that YAML file's format." ) ) return parser def compose_targets_conf(snapshot_path): """Produce the contents of a snapshot's tgt conf file. :param snapshot_path: Filesystem path to a snapshot of current upstream boot resources. :return: Contents for a `targets.conf` file. :rtype: bytes """ # Use a set to make sure we don't register duplicate entries in tgt. entries = set() for item in list_boot_images(snapshot_path): osystem = item['osystem'] arch = item['architecture'] subarch = item['subarchitecture'] release = item['release'] label = item['label'] entries.add((osystem, arch, subarch, release, label)) tgt_entries = [] for osystem, arch, subarch, release, label in sorted(entries): root_image = os.path.join( snapshot_path, osystem, arch, subarch, release, label, 'root-image') if os.path.isfile(root_image): entry = tgt_entry( osystem, arch, subarch, release, label, root_image) tgt_entries.append(entry) text = ''.join(tgt_entries) return text.encode('utf-8') def meta_contains(storage, content): """Does the `maas.meta` file match `content`? If the file's contents match the latest data, there is no need to update. The file's timestamp is also updated to now to reflect the last time that this import was run. """ current_meta = os.path.join(storage, 'current', 'maas.meta') exists = os.path.isfile(current_meta) if exists: # Touch file to the current timestamp so that the last time this # import ran can be determined. os.utime(current_meta, None) return exists and content == read_text_file(current_meta) def update_current_symlink(storage, latest_snapshot): """Symlink `latest_snapshot` as the "current" snapshot.""" atomic_symlink(latest_snapshot, os.path.join(storage, 'current')) def write_snapshot_metadata(snapshot, meta_file_content): """Write "maas.meta" file.""" meta_file = os.path.join(snapshot, 'maas.meta') atomic_write(meta_file_content, meta_file, mode=0644) def write_targets_conf(snapshot): """Write "maas.tgt" file.""" targets_conf = os.path.join(snapshot, 'maas.tgt') targets_conf_content = compose_targets_conf(snapshot) atomic_write(targets_conf_content, targets_conf, mode=0644) def update_targets_conf(snapshot): """Runs tgt-admin to update the new targets from "maas.tgt".""" # Ensure that tgt is running before tgt-admin is used. service_monitor.ensure_service("tgt") # Update the tgt config. targets_conf = os.path.join(snapshot, 'maas.tgt') call_and_check([ 'sudo', '/usr/sbin/tgt-admin', '--conf', targets_conf, '--update', 'ALL', ]) def read_sources(sources_yaml): """Read boot resources config file. :param sources_yaml: Path to a YAML file containing a list of boot resource definitions. :return: A dict representing the boot-resources configuration. :raise NoConfigFile: If the configuration file was not present. """ # The config file is required. We do not fall back to defaults if it's # not there. try: return BootSources.load(filename=sources_yaml) except IOError as ex: if ex.errno == errno.ENOENT: # No config file. We have helpful error output for this. raise NoConfigFile(ex) else: # Unexpected error. raise def parse_sources(sources_yaml): """Given a YAML `config` string, return a `BootSources` for it.""" from StringIO import StringIO return BootSources.parse(StringIO(sources_yaml)) def import_images(sources): """Import images. Callable from the command line. :param config: An iterable of dicts representing the sources from which boot images will be downloaded. 
""" maaslog.info("Started importing boot images.") if len(sources) == 0: maaslog.warn("Can't import: region did not provide a source.") return with tempdir('keyrings') as keyrings_path: # We download the keyrings now because we need them for both # download_all_image_descriptions() and # download_all_boot_resources() later. sources = write_all_keyrings(keyrings_path, sources) image_descriptions = download_all_image_descriptions(sources) if image_descriptions.is_empty(): maaslog.warn( "Finished importing boot images, the region does not have " "any boot images available.") return with ClusterConfiguration.open() as config: storage = FilePath(config.tftp_root).parent().path meta_file_content = image_descriptions.dump_json() if meta_contains(storage, meta_file_content): maaslog.info( "Finished importing boot images, the region does not " "have any new images.") return product_mapping = map_products(image_descriptions) snapshot_path = download_all_boot_resources( sources, storage, product_mapping) maaslog.info("Writing boot image metadata and iSCSI targets.") write_snapshot_metadata(snapshot_path, meta_file_content) write_targets_conf(snapshot_path) maaslog.info("Installing boot images snapshot %s" % snapshot_path) install_boot_loaders(snapshot_path, image_descriptions.get_image_arches()) # If we got here, all went well. This is now truly the "current" snapshot. update_current_symlink(storage, snapshot_path) maaslog.info("Updating boot image iSCSI targets.") update_targets_conf(snapshot_path) # Now cleanup the old snapshots and cache. maaslog.info('Cleaning up old snapshots and cache.') cleanup_snapshots_and_cache(storage) # Import is now finished. maaslog.info("Finished importing boot images.") def main(args): """Entry point for the command-line import script. :param args: Command-line arguments as parsed by the `ArgumentParser` returned by `make_arg_parser`. :raise NoConfigFile: If a config file is specified but doesn't exist. """ sources = read_sources(args.sources_file) import_images(sources=sources) def main_with_services(args): """The *real* entry point for the command-line import script. This sets up the necessary RPC services before calling `main`, then clears up behind itself. :param args: Command-line arguments as parsed by the `ArgumentParser` returned by `make_arg_parser`. :raise NoConfigFile: If a config file is specified but doesn't exist. 
""" from sys import stderr import traceback from provisioningserver import services from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.clusterservice import ClusterClientService from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.utils.twisted import retries, pause from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread @inlineCallbacks def start_services(): rpc_service = ClusterClientService(reactor) rpc_service.setName("rpc") rpc_service.setServiceParent(services) yield services.startService() for elapsed, remaining, wait in retries(15, 1, reactor): try: yield getRegionClient() except NoConnectionsAvailable: yield pause(wait, reactor) else: break else: print("Can't connect to the region.", file=stderr) raise SystemExit(1) @inlineCallbacks def stop_services(): yield services.stopService() exit_codes = {0} @inlineCallbacks def run_main(): try: yield start_services() try: yield deferToThread(main, args) finally: yield stop_services() except SystemExit as se: exit_codes.add(se.code) except: exit_codes.add(2) print("Failed to import boot resources", file=stderr) traceback.print_exc() finally: reactor.callLater(0, reactor.stop) reactor.callWhenRunning(run_main) reactor.run() exit_code = max(exit_codes) raise SystemExit(exit_code) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/cleanup.py0000644000000000000000000000346113056115004024263 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Clean up old snapshot directories.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'cleanup_snapshots_and_cache', ] import os import shutil def list_old_snapshots(storage): """List of snapshot directories that are no longer in use.""" current_dir = os.path.join(storage, 'current') current_snapshot = os.path.basename(os.readlink(current_dir)) return [ os.path.join(storage, directory) for directory in os.listdir(storage) if directory.startswith('snapshot-') and directory != current_snapshot ] def cleanup_snapshots(storage): """Remove old snapshot directories.""" old_snapshots = list_old_snapshots(storage) for snapshot in old_snapshots: shutil.rmtree(snapshot) def list_unused_cache_files(storage): """List of cache files that are no longer being referenced by snapshots.""" cache_dir = os.path.join(storage, 'cache') cache_files = [ os.path.join(cache_dir, filename) for filename in os.listdir(cache_dir) if os.path.isfile(os.path.join(cache_dir, filename)) ] return [ cache_file for cache_file in cache_files if os.stat(cache_file).st_nlink == 1 ] def cleanup_cache(storage): """Remove files that are no longer being referenced by snapshots.""" cache_files = list_unused_cache_files(storage) for cache_file in cache_files: os.remove(cache_file) def cleanup_snapshots_and_cache(storage): """Remove old snapshot directories and old cache files.""" cleanup_snapshots(storage) cleanup_cache(storage) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/download_descriptions.py0000644000000000000000000002022313056115004027224 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Download boot resource descriptions from Simplestreams repo. 
This module is responsible only for syncing the repo's metadata, not the boot resources themselves. The two are handled in separate Simplestreams synchronisation stages. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'download_all_image_descriptions', ] from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.helpers import ( get_os_from_product, get_signing_policy, ImageSpec, maaslog, ) from simplestreams.mirrors import ( BasicMirrorWriter, UrlMirrorReader, ) from simplestreams.util import ( path_from_mirror_url, products_exdata, ) def clean_up_repo_item(item): """Return a subset of dict `item` for storing in a boot images dict.""" keys_to_keep = [ 'content_id', 'product_name', 'version_name', 'path', 'subarches'] compact_item = {key: item[key] for key in keys_to_keep} return compact_item class RepoDumper(BasicMirrorWriter): """Gather metadata about boot images available in a Simplestreams repo. Used inside `download_image_descriptions`. Stores basic metadata about each image it finds upstream in a given `BootImageMapping`. Each stored item is a dict containing the basic metadata for retrieving a boot image. Simplestreams' `BasicMirrorWriter` in itself is stateless. It relies on a subclass (such as this one) to store data. :ivar boot_images_dict: A `BootImageMapping`. Image metadata will be stored here as it is discovered. Simplestreams does not interact with this variable. """ def __init__(self, boot_images_dict): super(RepoDumper, self).__init__(config={ # Only download the latest version. Without this all versions # will be read, causing mismatches in versions. 'max_items': 1, }) self.boot_images_dict = boot_images_dict def load_products(self, path=None, content_id=None): """Overridable from `BasicMirrorWriter`.""" # It looks as if this method only makes sense for MirrorReaders, not # for MirrorWriters. The default MirrorWriter implementation just # raises NotImplementedError. Stop it from doing that. return def insert_item(self, data, src, target, pedigree, contentsource): """Overridable from `BasicMirrorWriter`.""" item = products_exdata(src, pedigree) os = get_os_from_product(item) arch, subarches = item['arch'], item['subarches'] release = item['release'] label = item['label'] base_image = ImageSpec(os, arch, None, release, label) compact_item = clean_up_repo_item(item) for subarch in subarches.split(','): self.boot_images_dict.setdefault( base_image._replace(subarch=subarch), compact_item) # HWE resources need to map to a specific resource, and not just to # any of the supported subarchitectures for that resource. subarch = item['subarch'] self.boot_images_dict.set( base_image._replace(subarch=subarch), compact_item) # HWE resources with 'generic' should map to the HWE that ships with # that release. hwe_arch = 'hwe-%s' % release[0] if subarch == hwe_arch and 'generic' in subarches: self.boot_images_dict.set( base_image._replace(subarch='generic'), compact_item) def sync(self, reader, path): try: super(RepoDumper, self).sync(reader, path) except IOError: maaslog.warning( "I/O error while syncing boot images. If this problem " "persists, verify network connectivity and disk usage.") def value_passes_filter_list(filter_list, property_value): """Does the given property of a boot image pass the given filter list?
The value passes if either it matches one of the entries in the list of filter values, or one of the filter values is an asterisk (`*`). """ return '*' in filter_list or property_value in filter_list def value_passes_filter(filter_value, property_value): """Does the given property of a boot image pass the given filter? The value passes the filter if either the filter value is an asterisk (`*`) or the value is equal to the filter value. """ return filter_value in ('*', property_value) def image_passes_filter(filters, os, arch, subarch, release, label): """Filter a boot image against configured import filters. :param filters: A list of dicts describing the filters, as in `boot_merge`. If the list is empty, or `None`, any image matches. Any entry in a filter may be a string containing just an asterisk (`*`) to denote that the entry will match any value. :param os: The given boot image's operating system. :param arch: The given boot image's architecture. :param subarch: The given boot image's subarchitecture. :param release: The given boot image's OS release. :param label: The given boot image's label. :return: Whether the image matches any of the dicts in `filters`. """ if filters is None or len(filters) == 0: return True for filter_dict in filters: item_matches = ( value_passes_filter(filter_dict['os'], os) and value_passes_filter(filter_dict['release'], release) and value_passes_filter_list(filter_dict['arches'], arch) and value_passes_filter_list(filter_dict['subarches'], subarch) and value_passes_filter_list(filter_dict['labels'], label) ) if item_matches: return True return False def boot_merge(destination, additions, filters=None): """Complement one `BootImageMapping` with entries from another. This adds entries from `additions` (that match `filters`, if given) to `destination`, but only for those image specs for which `destination` does not have entries yet. :param destination: `BootImageMapping` to be updated. It will be extended in-place. :param additions: A second `BootImageMapping`, which will be used as a source of additional entries. :param filters: List of dicts, each of which contains 'os', arch', 'subarch', 'release', and 'label' keys. If given, entries are only considered for copying from `additions` to `destination` if they match at least one of the filters. Entries in the filter may be the string `*` (or for entries that are lists, may contain the string `*`) to make them match any value. """ for image, resource in additions.items(): os, arch, subarch, release, label = image if image_passes_filter( filters, os, arch, subarch, release, label): # Do not override an existing entry with the same # os/arch/subarch/release/label: the first entry found takes # precedence. destination.setdefault(image, resource) def download_image_descriptions(path, keyring=None): """Download image metadata from upstream Simplestreams repo. :param path: The path to a Simplestreams repo. :param keyring: Optional keyring for verifying the repo's signatures. :return: A `BootImageMapping` describing available boot resources. 
""" mirror, rpath = path_from_mirror_url(path, None) policy = get_signing_policy(rpath, keyring) reader = UrlMirrorReader(mirror, policy=policy) boot_images_dict = BootImageMapping() dumper = RepoDumper(boot_images_dict) dumper.sync(reader, rpath) return boot_images_dict def download_all_image_descriptions(sources): """Download image metadata for all sources in `config`.""" boot = BootImageMapping() for source in sources: repo_boot = download_image_descriptions( source['url'], keyring=source.get('keyring', None)) boot_merge(boot, repo_boot, source['selections']) return boot maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/download_resources.py0000644000000000000000000002747013056115004026543 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Simplestreams code to download boot resources.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'download_all_boot_resources', ] from datetime import datetime from gzip import GzipFile import os.path from provisioningserver.import_images.helpers import ( get_os_from_product, get_signing_policy, maaslog, ) from provisioningserver.utils import in_develop_mode from provisioningserver.utils.shell import call_and_check from simplestreams.contentsource import FdContentSource from simplestreams.mirrors import ( BasicMirrorWriter, UrlMirrorReader, ) from simplestreams.objectstores import FileStore from simplestreams.util import ( item_checksums, path_from_mirror_url, products_exdata, ) from twisted.python import log DEFAULT_KEYRING_PATH = "/usr/share/keyrings" def insert_file(store, name, tag, checksums, size, content_source): """Insert a file into `store`. :param store: A simplestreams `ObjectStore`. :param name: Logical name of the file being inserted. Only needs to be unique within the scope of this boot image. :param tag: UUID, or "tag," for the file. It will be inserted into `store` under this name, not its logical name. :param checksums: A Simplestreams checksums dict, mapping hash algorihm names (such as `sha256`) to the file's respective checksums as computed by those hash algorithms. :param size: Optional size for the file, so Simplestreams knows what size to expect. :param content_source: A Simplestreams `ContentSource` for reading the file. :return: A list of inserted files (actually, only the one file in this case) described as tuples of (path, logical name). The path lies in the directory managed by `store` and has a filename based on `tag`, not logical name. """ maaslog.debug("Inserting file %s (tag=%s, size=%s).", name, tag, size) store.insert(tag, content_source, checksums, mutable=False, size=size) # XXX jtv 2014-04-24 bug=1313580: Isn't _fullpath meant to be private? return [(store._fullpath(tag), name)] def call_uec2roottar(root_image_path, root_tgz_path): """Invoke `uec2roottar` with the given arguments. Here only so tests can stub it out. :param root_image_path: Input file. :param root_tgz_path: Output file. """ if in_develop_mode(): # In debug mode this is skipped as it requires the uec2roottar # script to have sudo abilities. The root-tgz is created as an # empty file so the correct links can be made. 
log.msg( "Conversion of root-image to root-tgz is skipped in DEVELOP mode.") open(root_tgz_path, "wb").close() else: call_and_check([ 'sudo', '/usr/bin/uec2roottar', '--user=maas', root_image_path, root_tgz_path, ]) def insert_root_image(store, tag, checksums, size, content_source): """Insert a root image into `store`. This may involve converting a UEC boot image into a root tarball. :param store: A simplestreams `ObjectStore`. :param tag: UUID, or "tag," for the root image file. The root image and root tarball will both be stored in the cache directory under names derived from this tag. :param checksums: A Simplestreams checksums dict, mapping hash algorithm names (such as `sha256`) to the file's respective checksums as computed by those hash algorithms. :param size: Optional size for the file, so Simplestreams knows what size to expect. :param content_source: A Simplestreams `ContentSource` for reading the file. :return: A list of inserted files (root image and root tarball) described as tuples of (path, logical name). The path lies in the directory managed by `store` and has a filename based on `tag`, not logical name. """ maaslog.debug("Inserting root image (tag=%s, size=%s).", tag, size) root_image_tag = 'root-image-%s' % tag # XXX jtv 2014-04-24 bug=1313580: Isn't _fullpath meant to be private? root_image_path = store._fullpath(root_image_tag) root_tgz_tag = 'root-tgz-%s' % tag root_tgz_path = store._fullpath(root_tgz_tag) if not os.path.isfile(root_image_path): maaslog.debug("New root image: %s.", root_image_path) store.insert(tag, content_source, checksums, mutable=False, size=size) uncompressed = FdContentSource(GzipFile(store._fullpath(tag))) store.insert(root_image_tag, uncompressed, mutable=False) store.remove(tag) if not os.path.isfile(root_tgz_path): maaslog.debug("Converting root tarball: %s.", root_tgz_path) call_uec2roottar(root_image_path, root_tgz_path) return [(root_image_path, 'root-image'), (root_tgz_path, 'root-tgz')] def link_resources(snapshot_path, links, osystem, arch, release, label, subarches): """Hardlink entries in the snapshot directory to resources in the cache. This creates file entries in the snapshot directory for boot resources that are part of a single boot image. :param snapshot_path: Snapshot directory. :param links: A list of links that should be created to files stored in the cache. Each link is described as a tuple of (path, logical name). The path points to a file in the cache directory. The logical name will be the link's filename, without the path. :param osystem: Operating system which this boot image supports. :param arch: Architecture which this boot image supports. :param release: OS release of which this boot image is a part. :param label: OS release label of which this boot image is a part, e.g. `release` or `rc`. :param subarches: A list of sub-architectures which this boot image supports. For example, a kernel for one Ubuntu release for a given architecture and subarchitecture `generic` will typically also support the `hwe-*` subarchitectures that denote hardware-enablement kernels for older Ubuntu releases. """ for subarch in subarches: directory = os.path.join( snapshot_path, osystem, arch, subarch, release, label) if not os.path.exists(directory): os.makedirs(directory) for cached_file, logical_name in links: link_path = os.path.join(directory, logical_name) if os.path.isfile(link_path): os.remove(link_path) os.link(cached_file, link_path) class RepoWriter(BasicMirrorWriter): """Download boot resources from an upstream Simplestreams repo.
:ivar root_path: Snapshot directory. :ivar store: A simplestreams `ObjectStore` where downloaded resources should be stored. :ivar product_mapping: A `ProductMapping` describing the desired boot resources. """ def __init__(self, root_path, store, product_mapping): self.root_path = root_path self.store = store self.product_mapping = product_mapping super(RepoWriter, self).__init__(config={ # Only download the latest version. Without this all versions # will be downloaded from simplestreams. 'max_items': 1, }) def load_products(self, path=None, content_id=None): """Overridable from `BasicMirrorWriter`.""" # It looks as if this method only makes sense for MirrorReaders, not # for MirrorWriters. The default MirrorWriter implementation just # raises NotImplementedError. Stop it from doing that. return def filter_version(self, data, src, target, pedigree): """Overridable from `BasicMirrorWriter`.""" return self.product_mapping.contains(products_exdata(src, pedigree)) def insert_item(self, data, src, target, pedigree, contentsource): """Overridable from `BasicMirrorWriter`.""" item = products_exdata(src, pedigree) checksums = item_checksums(data) tag = checksums['sha256'] size = data['size'] ftype = item['ftype'] if ftype == 'root-image.gz': links = insert_root_image( self.store, tag, checksums, size, contentsource) else: links = insert_file( self.store, ftype, tag, checksums, size, contentsource) os = get_os_from_product(item) subarches = self.product_mapping.get(item) link_resources( snapshot_path=self.root_path, links=links, osystem=os, arch=item['arch'], release=item['release'], label=item['label'], subarches=subarches) def download_boot_resources(path, store, snapshot_path, product_mapping, keyring_file=None): """Download boot resources for one simplestreams source. :param path: The Simplestreams URL for this source. :param store: A simplestreams `ObjectStore` where downloaded resources should be stored. :param snapshot_path: Filesystem path to a snapshot of current upstream boot resources. :param product_mapping: A `ProductMapping` describing the resources to be downloaded. :param keyring_file: Optional path to a keyring file for verifying signatures. """ writer = RepoWriter(snapshot_path, store, product_mapping) (mirror, rpath) = path_from_mirror_url(path, None) policy = get_signing_policy(rpath, keyring_file) reader = UrlMirrorReader(mirror, policy=policy) writer.sync(reader, rpath) def compose_snapshot_path(storage_path): """Put together a path for a new snapshot. A snapshot is a directory in `storage_path` containing boot resources. The snapshot's name contains the date in a sortable format. :param storage_path: Root storage directory, usually `/var/lib/maas/boot-resources`. :return: Path to the snapshot directory. """ now = datetime.utcnow() snapshot_name = 'snapshot-%s' % now.strftime('%Y%m%d-%H%M%S') return os.path.join(storage_path, snapshot_name) def download_all_boot_resources( sources, storage_path, product_mapping, store=None): """Download the actual boot resources. Local copies of boot resources are downloaded into a "cache" directory. This is a raw, flat store of resources, with UUID-based filenames called "tags." In addition, the downloads are hardlinked into a "snapshot directory." This directory, named after the date and time that the snapshot was initiated, reflects the currently available boot resources in a proper directory hierarchy with subdirectories for architectures, releases, and so on.
    :param sources: List of dicts describing the Simplestreams sources from
        which we should download.
    :param storage_path: Root storage directory, usually
        `/var/lib/maas/boot-resources`.
    :param product_mapping: A `ProductMapping` describing the resources to
        be downloaded.
    :param store: A `FileStore` instance. Used only for testing.
    :return: Path to the snapshot directory.
    """
    storage_path = os.path.abspath(storage_path)
    snapshot_path = compose_snapshot_path(storage_path)
    # Use a FileStore as our ObjectStore implementation. It will write to
    # the cache directory.
    if store is None:
        cache_path = os.path.join(storage_path, 'cache')
        store = FileStore(cache_path)
    # XXX jtv 2014-04-11: FileStore now also takes an argument called
    # complete_callback, which can be used for progress reporting.

    for source in sources:
        download_boot_resources(
            source['url'], store, snapshot_path, product_mapping,
            keyring_file=source.get('keyring'))

    return snapshot_path
maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/helpers.py0000644000000000000000000000443313056115004024276 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Miscellaneous small definitions in support of boot-resource import."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    'get_os_from_product',
    'get_signing_policy',
    'ImageSpec',
    'maaslog',
    ]

from collections import namedtuple
import functools

from provisioningserver.logger import get_maas_logger
from simplestreams.util import policy_read_signed

# A tuple of the items that together select a boot image.
ImageSpec = namedtuple(b'ImageSpec', [
    'os',
    'arch',
    'subarch',
    'release',
    'label',
    ])


def get_signing_policy(path, keyring=None):
    """Return Simplestreams signing policy for the given path.

    :param path: Path to the Simplestreams index file.
    :param keyring: Optional keyring file for verifying signatures.
    :return: A "signing policy" callable. It accepts a file's content,
        path, and optional keyring as arguments, and if the signature
        verifies correctly, returns the content. The keyring defaults to
        the one you pass.
    """
    if path.endswith('.json'):
        # The configuration deliberately selected an un-signed index. A
        # signed index would have a suffix of '.sjson'. Use a policy that
        # doesn't check anything.
        policy = lambda content, path, keyring: content
    else:
        # Otherwise: use default Simplestreams policy for verifying
        # signatures.
        policy = policy_read_signed
        if keyring is not None:
            # Pass keyring to the policy, to use if the caller inside
            # Simplestreams does not provide one.
            policy = functools.partial(policy, keyring=keyring)
    return policy


def get_os_from_product(item):
    """Returns the operating system that the product is referring to.

    Originally products did not contain the os field. This handles that
    missing field, by returning "ubuntu" as the operating system. Before
    the os field was added to the product mapping, only Ubuntu was
    supported.
    """
    try:
        return item['os']
    except KeyError:
        return "ubuntu"


maaslog = get_maas_logger("import-images")
maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/keyrings.py0000644000000000000000000000444513056115004024472 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Keyring management functions for the import boot images job and script.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'write_all_keyrings', ] import os from urlparse import urlsplit from provisioningserver.import_images.helpers import maaslog def write_keyring(keyring_path, keyring_data): """Write a keyring blob to a file. :param path: The path to the keyring file. :param keyring_data: The data to write to the keyring_file, as a base64-encoded string. """ maaslog.debug("Writing keyring %s to disk.", keyring_path) with open(keyring_path, 'wb') as keyring_file: keyring_file.write(keyring_data) def calculate_keyring_name(source_url): """Return a name for a keyring based on a URL.""" split_url = urlsplit(source_url) cleaned_path = split_url.path.strip('/').replace('/', '-') keyring_name = "%s-%s.gpg" % (split_url.netloc, cleaned_path) return keyring_name def write_all_keyrings(directory, sources): """For a given set of `sources`, write the keyrings to disk. :param directory: A directory where the key files should be written. Use a dedicated temporary directory for this, and clean it up when done. :param sources: An iterable of the sources whose keyrings need to be written. :return: The sources iterable, with each source whose keyring has been written now having a "keyring" value set, pointing to the file on disk. """ for source in sources: source_url = source.get('url') keyring_file = source.get('keyring') keyring_data = source.get('keyring_data') if keyring_file is not None and keyring_data is not None: maaslog.warning( "Both a keyring file and keyring data were specified; " "ignoring the keyring file.") if keyring_data is not None: keyring_file = os.path.join( directory, calculate_keyring_name(source_url)) write_keyring(keyring_file, keyring_data) source['keyring'] = keyring_file return sources maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/product_mapping.py0000644000000000000000000000560113056115004026025 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """The `ProductMapping` class.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'map_products', ] class ProductMapping: """Mapping of product data. Maps a combination of boot resource metadata (`content_id`, `product_name`, `version_name`) to a list of subarchitectures supported by that boot resource. """ def __init__(self): self.mapping = {} @staticmethod def make_key(resource): """Extract a key tuple from `resource`. The key is used for indexing `mapping`. :param resource: A dict describing a boot resource. It must contain the keys `content_id`, `product_name`, and `version_name`. :return: A tuple of the resource's content ID, product name, and version name. """ return ( resource['content_id'], resource['product_name'], resource['version_name'], ) def add(self, resource, subarch): """Add `subarch` to the list of subarches supported by a boot resource. The `resource` is a dict as returned by `products_exdata`. The method will use the values identified by keys `content_id`, `product_name`, and `version_name`. 
""" key = self.make_key(resource) self.mapping.setdefault(key, []) self.mapping[key].append(subarch) def contains(self, resource): """Does the dict contain a mapping for the given resource?""" return self.make_key(resource) in self.mapping def get(self, resource): """Return the mapped subarchitectures for `resource`.""" return self.mapping[self.make_key(resource)] def map_products(image_descriptions): """Determine the subarches supported by each boot resource. Many subarches may be deployed by a single boot resource. We note only subarchitectures here and ignore architectures because the metadata format tightly couples a boot resource to its architecture. We can figure out for which architecture we need to use a specific boot resource by looking at its description in the metadata. We can't do the same with subarch, because we may want to use a boot resource only for a specific subset of subarches. This function returns the relationship between boot resources and subarchitectures as a `ProductMapping`. :param image_descriptions: A `BootImageMapping` containing the images' metadata. :return: A `ProductMapping` mapping products to subarchitectures. """ mapping = ProductMapping() for image, boot_resource in image_descriptions.items(): mapping.add(boot_resource, image.subarch) return mapping maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/testing/0000755000000000000000000000000013056115004023733 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/0000755000000000000000000000000013056115004023420 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/uec2roottar.py0000644000000000000000000001436413056115004025111 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Code for the `uec2roottar` script.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'main', 'make_argparser', ] import argparse from contextlib import contextmanager from glob import glob import os.path from subprocess import ( check_call, check_output, ) from provisioningserver.logger import get_maas_logger from provisioningserver.utils.env import environment_variables from provisioningserver.utils.fs import tempdir maaslog = get_maas_logger("uec2roottar") def make_argparser(description): """Create an `ArgumentParser` for this script.""" parser = argparse.ArgumentParser(description=description) parser.add_argument( 'image', metavar='IMAGE-FILE', help="Input file: UEC root image.") parser.add_argument( 'output', metavar='TARBALL', help="Output file: root tarball.") parser.add_argument( '--user', '-u', help="Set output file ownership to USER.") return parser def is_filesystem_file(path): """Does the file at `path` look like a filesystem-in-a-file?""" # Identify filesystems using the "file" utility. We'll be parsing the # output, so suppress any translation. with environment_variables({'LANG': 'C'}): output = check_output(['file', path]) return b"filesystem data" in output class ImageFileError(Exception): """Problem with the given image file.""" def extract_image_from_tarball(tarball, working_dir): """Extract image file from `tarball` into `working_dir`, return its path. This may extract multiple files into `working_dir`; it looks for files with names like `*.img`. The function only succeeds, however, if there is exactly one of those, in the tarball's root directory. 
""" glob_pattern = '*.img' maaslog.debug( "Extracting %s from %s into %s.", glob_pattern, tarball, working_dir) check_call([ 'tar', '-C', working_dir, '--wildcards', glob_pattern, '-Sxvzf', tarball, ]) # Look for .img files. Sort just so that if there is more than one image # file, we'll produce a consistent error message. candidates = sorted(glob(os.path.join(working_dir, glob_pattern))) if len(candidates) == 0: raise ImageFileError( "Tarball %s does not contain any %s." % (tarball, glob_pattern)) if len(candidates) > 1: raise ImageFileError( "Tarball %s contains multiple image files: %s." % (tarball, ', '.join(candidates))) [image] = candidates return image def get_image_file(path, temp_dir): """Return image file at, or contained in tarball at, `path`. :param path: Path to the image file. Must point to either a file containing a filesystem, or a tarball containing one, of the same base name. :param temp_dir: A temporary working directory. If the image needs to be extracted from a tarball, the tarball will be extracted here. """ if is_filesystem_file(path): # Easy. This is the actual image file. return path elif path.endswith('.tar.gz'): # Tarball. Extract image file. return extract_image_from_tarball(path, temp_dir) else: raise ImageFileError( "Expected '%s' to be either a filesystem file, or " "a gzipped tarball containing one." % path) def unmount(mountpoint): """Unmount filesystem at given mount point. If this fails, it logs the error as well as raising it. This means that error code paths can suppress the exception without depriving the user of the information. """ try: check_call(['umount', mountpoint]) except BaseException as e: maaslog.error("Could not unmount %s: %s", mountpoint, e) raise @contextmanager def loop_mount(image, mountpoint): """Context manager: temporarily loop-mount `image` at `mountpoint`.""" check_call(['mount', '-o', 'ro', image, mountpoint]) try: yield except: try: unmount(mountpoint) except Exception: # This is probably a secondary error resulting from the original # problem. Stick with the original exception. pass raise else: # Unmount after successful run. If this fails, let the exception # propagate. unmount(mountpoint) def tar_supports_xattr_opts(): """Returns True if the system's tar supports the 'xattrs' options.""" out = check_output(['tar', '--help']) return b"xattr" in out def extract_image(image, output): """Loop-mount `image`, and tar its contents into `output`.""" xattr_opts = [] if tar_supports_xattr_opts(): # Only add the xattrs options if tar supports it. # For insance tar on 12.04 does *not* support xattrs. xattr_opts = ['--xattrs', '--xattrs-include=*'] with tempdir() as mountpoint: cmd = ['tar'] + xattr_opts + [ # Work from mountpoint as the current directory. '-C', mountpoint, # Options: # -c: Create tarfile. # -p: Preserve permissions. # -S: Handle sparse files efficiently (images have those). # -z: Compress using gzip. # -f: Work on given tar file. '-cpSzf', output, '--numeric-owner', # Tar up the "current directory": the mountpoint. 
'.', ] with loop_mount(image, mountpoint): check_call(cmd) def set_ownership(path, user=None): """Set file ownership to `user` if specified.""" if user is not None: maaslog.debug("Setting file owner to %s.", user) check_call(['/bin/chown', user, path]) def main(args): """Do the work: loop-mount image, write contents to output file.""" output = args.output maaslog.debug("Converting %s to %s.", args.image, output) with tempdir() as working_dir: image = get_image_file(args.image, working_dir) extract_image(image, output) set_ownership(output, args.user) maaslog.debug("Finished. Wrote to %s.", output) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/testing/__init__.py0000644000000000000000000000000013056115004026032 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/testing/factory.py0000644000000000000000000000737113056115004025764 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Factory helpers for the `import_images` package.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'make_boot_resource', 'make_image_spec', 'make_maas_meta', 'make_maas_meta_without_os', 'set_resource', ] from textwrap import dedent from maastesting.factory import factory from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.helpers import ImageSpec def make_maas_meta(): """Return fake maas.meta data.""" return dedent("""\ {"ubuntu": {"amd64": {"generic": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/raring/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-r", "subarches": "generic,hwe-p,hwe-q,hwe-r", "version_name": "20140410"}}, "trusty": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "trusty/amd64/20140416.1/root-image.gz", "product_name": "com.ubuntu.maas:v2:boot:14.04:amd64:hwe-t", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s,hwe-t", "version_name": "20140416.1"}}}, "hwe-s": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/saucy/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-s", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s", "version_name": "20140410"}}}}}}""") # NOQA def make_maas_meta_without_os(): """Return fake maas.meta data, without the os field.""" return dedent("""\ {"amd64": {"generic": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/raring/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-r", "subarches": "generic,hwe-p,hwe-q,hwe-r", "version_name": "20140410"}}, "trusty": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "trusty/amd64/20140416.1/root-image.gz", "product_name": "com.ubuntu.maas:v2:boot:14.04:amd64:hwe-t", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s,hwe-t", "version_name": "20140416.1"}}}, "hwe-s": {"precise": {"release": {"content_id": "com.ubuntu.maas:v2:download", "path": "precise/amd64/20140410/saucy/generic/boot-kernel", "product_name": "com.ubuntu.maas:v2:boot:12.04:amd64:hwe-s", "subarches": "generic,hwe-p,hwe-q,hwe-r,hwe-s", "version_name": "20140410"}}}}}""") # NOQA def make_boot_resource(): """Create a fake resource dict.""" return { 'content_id': factory.make_name('content_id'), 
'product_name': factory.make_name('product_name'), 'version_name': factory.make_name('version_name'), } def make_image_spec(os=None, arch=None, subarch=None, release=None, label=None): """Return an `ImageSpec` with random values.""" if os is None: os = factory.make_name('os') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if release is None: release = factory.make_name('release') if label is None: label = factory.make_name('label') return ImageSpec(os, arch, subarch, release, label) def set_resource(boot_dict=None, image_spec=None, resource=None): """Add boot resource to a `BootImageMapping`, creating it if necessary.""" if boot_dict is None: boot_dict = BootImageMapping() if image_spec is None: image_spec = make_image_spec() if resource is None: resource = factory.make_name('boot-resource') boot_dict.mapping[image_spec] = resource return boot_dict maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/__init__.py0000644000000000000000000000000013056115004025517 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_boot_image_mapping.py0000644000000000000000000001300613056115004030651 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `BootImageMapping` and its module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import json from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.testing.factory import ( make_image_spec, make_maas_meta, make_maas_meta_without_os, set_resource, ) class TestBootImageMapping(MAASTestCase): """Tests for `BootImageMapping`.""" def test_initially_empty(self): self.assertItemsEqual([], BootImageMapping().items()) def test_items_returns_items(self): image = make_image_spec() resource = factory.make_name('resource') image_dict = set_resource(image_spec=image, resource=resource) self.assertItemsEqual([(image, resource)], image_dict.items()) def test_is_empty_returns_True_if_empty(self): self.assertTrue(BootImageMapping().is_empty()) def test_is_empty_returns_False_if_not_empty(self): mapping = BootImageMapping() mapping.setdefault(make_image_spec(), factory.make_name('resource')) self.assertFalse(mapping.is_empty()) def test_setdefault_sets_unset_item(self): image_dict = BootImageMapping() image = make_image_spec() resource = factory.make_name('resource') image_dict.setdefault(image, resource) self.assertItemsEqual([(image, resource)], image_dict.items()) def test_setdefault_leaves_set_item_unchanged(self): image = make_image_spec() old_resource = factory.make_name('resource') image_dict = set_resource(image_spec=image, resource=old_resource) image_dict.setdefault(image, factory.make_name('newresource')) self.assertItemsEqual([(image, old_resource)], image_dict.items()) def test_set_overwrites_item(self): image_dict = BootImageMapping() image = make_image_spec() resource = factory.make_name('resource') image_dict.setdefault(image, factory.make_name('resource')) image_dict.set(image, resource) self.assertItemsEqual([(image, resource)], image_dict.items()) def test_dump_json_is_consistent(self): image = make_image_spec() resource = factory.make_name('resource') image_dict_1 = 
set_resource(image_spec=image, resource=resource) image_dict_2 = set_resource(image_spec=image, resource=resource) self.assertEqual(image_dict_1.dump_json(), image_dict_2.dump_json()) def test_dump_json_represents_empty_dict_as_empty_object(self): self.assertEqual('{}', BootImageMapping().dump_json()) def test_dump_json_represents_entry(self): image = make_image_spec() resource = factory.make_name('resource') image_dict = set_resource(image_spec=image, resource=resource) self.assertEqual( { image.os: { image.arch: { image.subarch: { image.release: {image.label: resource}, }, }, }, }, json.loads(image_dict.dump_json())) def test_dump_json_combines_similar_entries(self): image = make_image_spec() other_release = factory.make_name('other-release') resource1 = factory.make_name('resource') resource2 = factory.make_name('other-resource') image_dict = BootImageMapping() set_resource(image_dict, image, resource1) set_resource( image_dict, image._replace(release=other_release), resource2) self.assertEqual( { image.os: { image.arch: { image.subarch: { image.release: {image.label: resource1}, other_release: {image.label: resource2}, }, }, }, }, json.loads(image_dict.dump_json())) def test_load_json_result_matches_dump_of_own_data(self): # Loading the test data and dumping it again should result in # identical test data. test_meta_file_content = make_maas_meta() mapping = BootImageMapping.load_json(test_meta_file_content) dumped = mapping.dump_json() self.assertEqual(test_meta_file_content, dumped) def test_load_json_result_of_old_data_uses_ubuntu_as_os(self): test_meta_file_content = make_maas_meta_without_os() mapping = BootImageMapping.load_json(test_meta_file_content) os = {image.os for image, _ in mapping.items()}.pop() self.assertEqual('ubuntu', os) def test_load_json_returns_empty_mapping_for_invalid_json(self): bad_json = "" mapping = BootImageMapping.load_json(bad_json) self.assertEqual({}, mapping.mapping) def test_get_image_arches_gets_arches_from_imagespecs(self): expected_arches = set() mapping = None for _ in range(0, 3): image_spec = make_image_spec() resource = factory.make_name('resource') expected_arches.add(image_spec.arch) mapping = set_resource(mapping, image_spec, resource) self.assertEqual(expected_arches, mapping.get_image_arches()) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_boot_resources.py0000644000000000000000000005607313056115004030101 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the boot_resources module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import ( datetime, timedelta, ) import errno import hashlib import json import os from random import randint from subprocess import ( PIPE, Popen, ) from maastesting.factory import factory from maastesting.matchers import ( MockAnyCall, MockCalledOnceWith, MockCalledWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from maastesting.utils import age_file import mock from mock import call from provisioningserver.boot import BootMethodRegistry from provisioningserver.config import ( BootSources, ClusterConfiguration, ) from provisioningserver.import_images import boot_resources from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.testing.factory import make_image_spec from provisioningserver.testing.config import ( BootSourcesFixture, ClusterConfigurationFixture, ) from provisioningserver.utils.fs import write_text_file from testtools.content import Content from testtools.content_type import UTF8_TEXT from testtools.matchers import ( DirExists, FileExists, ) import yaml class TestTgtEntry(MAASTestCase): """Tests for `tgt_entry`.""" def test_generates_one_target(self): spec = make_image_spec() osystem = factory.make_name('osystem') image = self.make_file() entry = boot_resources.tgt_entry( osystem, spec.arch, spec.subarch, spec.release, spec.label, image) # The entry looks a bit like XML, but isn't well-formed. So don't try # to parse it as such! self.assertIn('')) def test_produces_suitable_output_for_tgt_admin(self): spec = make_image_spec() image = self.make_file() osystem = factory.make_name('osystem') entry = boot_resources.tgt_entry( osystem, spec.arch, spec.subarch, spec.release, spec.label, image) config = self.make_file(contents=entry) # Pretend to be root, but without requiring the actual privileges and # without prompting for a password. In that state, run tgt-admin. # It has to think it's root, even for a "pretend" run. # Make it read the config we just produced, and pretend to update its # iSCSI targets based on what it finds in the config. # # The only real test is that this succeed. 
cmd = Popen( [ 'fakeroot', 'tgt-admin', '--conf', config, '--pretend', '--update', 'ALL', ], stdout=PIPE, stderr=PIPE) stdout, stderr = cmd.communicate() self.addDetail('tgt-stderr', Content(UTF8_TEXT, lambda: [stderr])) self.addDetail('tgt-stdout', Content(UTF8_TEXT, lambda: [stdout])) self.assertEqual(0, cmd.returncode) class TestUpdateCurrentSymlink(MAASTestCase): def make_test_dirs(self): storage_dir = self.make_dir() target_dir = os.path.join(storage_dir, factory.make_name("target")) return storage_dir, target_dir def assertLinkIsUpdated(self, storage_dir, target_dir): boot_resources.update_current_symlink(storage_dir, target_dir) link_path = os.path.join(storage_dir, "current") self.assertEqual(target_dir, os.readlink(link_path)) def test_creates_current_symlink(self): storage_dir, target_dir = self.make_test_dirs() self.assertLinkIsUpdated(storage_dir, target_dir) def test_creates_current_symlink_when_link_exists(self): storage_dir = self.make_dir() self.assertLinkIsUpdated(storage_dir, "target01") self.assertLinkIsUpdated(storage_dir, "target02") def test_creates_current_symlink_when_temp_link_exists(self): symlink_real = os.symlink symlink = self.patch(os, "symlink") def os_symlink(src, dst): if symlink.call_count in (1, 2): raise OSError(errno.EEXIST, dst) else: return symlink_real(src, dst) # The first two times that os.symlink() is called, it will raise # OSError with EEXIST; update_current_symlink() handles this and # tries to create a new symlink with a different suffix. symlink.side_effect = os_symlink # Make the choice of provisional symlink less random, so that we can # match against what's happening. from provisioningserver.utils import fs randint = self.patch_autospec(fs, "randint") randint.side_effect = lambda a, b: randint.call_count storage_dir, target_dir = self.make_test_dirs() self.assertLinkIsUpdated(storage_dir, target_dir) self.assertThat(symlink, MockCallsMatch( call(target_dir, os.path.join(storage_dir, ".temp.000001")), call(target_dir, os.path.join(storage_dir, ".temp.000002")), call(target_dir, os.path.join(storage_dir, ".temp.000003")), )) def test_fails_when_creating_temp_link_exists_a_lot(self): symlink = self.patch(os, "symlink") symlink.side_effect = OSError(errno.EEXIST, "sorry buddy") storage_dir, target_dir = self.make_test_dirs() # If os.symlink() returns EEXIST more than 100 times, it gives up. error = self.assertRaises( OSError, boot_resources.update_current_symlink, storage_dir, target_dir) self.assertIs(error, symlink.side_effect) self.assertEqual(100, symlink.call_count) def test_fails_when_creating_temp_link_fails(self): symlink = self.patch(os, "symlink") symlink.side_effect = OSError(errno.EPERM, "just no") storage_dir, target_dir = self.make_test_dirs() # Errors from os.symlink() other than EEXIST are re-raised. error = self.assertRaises( OSError, boot_resources.update_current_symlink, storage_dir, target_dir) self.assertIs(error, symlink.side_effect) def test_cleans_up_when_renaming_fails(self): symlink = self.patch(os, "rename") symlink.side_effect = OSError(errno.EPERM, "just no") storage_dir, target_dir = self.make_test_dirs() error = self.assertRaises( OSError, boot_resources.update_current_symlink, storage_dir, target_dir) self.assertIs(error, symlink.side_effect) # No intermediate files are left behind. 
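        # (That is: the provisional ".temp.NNNNNN" symlink created as a
        # stepping stone has been removed, and no "current" link was
        # created, so the directory listing below must be empty.)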
self.assertEqual([], os.listdir(storage_dir)) def checksum_sha256(data): """Return the SHA256 checksum for `data`, as a hex string.""" assert isinstance(data, bytes) summer = hashlib.sha256() summer.update(data) return summer.hexdigest() class TestMain(MAASTestCase): def setUp(self): super(TestMain, self).setUp() self.useFixture(ClusterConfigurationFixture()) self.storage = self.make_dir() current_dir = os.path.join(self.storage, 'current') + os.sep os.makedirs(current_dir) with ClusterConfiguration.open_for_update() as config: config.tftp_root = current_dir os.rmdir(current_dir) # Forcing arch to amd64 causes pxelinux.0 to be installed, giving more # test coverage. self.image = make_image_spec(arch='amd64') self.os, self.arch, self.subarch, \ self.release, self.label = self.image self.repo = self.make_simplestreams_repo(self.image) def patch_maaslog(self): """Suppress log output from the import code.""" self.patch(boot_resources, 'maaslog') def make_args(self, sources="", **kwargs): """Fake an `argumentparser` parse result.""" args = mock.Mock() # Set sources explicitly, otherwise boot_resources.main() gets # confused. args.sources = sources for key, value in kwargs.items(): setattr(args, key, value) return args def make_simplestreams_index(self, index_dir, stream, product): """Write a fake simplestreams index file. Return its path.""" index_file = os.path.join(index_dir, 'index.json') index = { 'format': 'index:1.0', 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', 'index': { stream: { 'datatype': 'image-ids', 'path': 'streams/v1/%s.json' % stream, 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', 'format': 'products:1.0', 'products': [product], }, }, } write_text_file(index_file, json.dumps(index)) return index_file def make_download_file(self, repo, image_spec, version, filename='boot-kernel'): """Fake a downloadable file in `repo`. Return the new file's POSIX path, and its contents. """ path = [ image_spec.release, image_spec.arch, version, image_spec.release, image_spec.subarch, filename, ] native_path = os.path.join(repo, *path) os.makedirs(os.path.dirname(native_path)) contents = ("Contents: %s" % filename).encode('utf-8') write_text_file(native_path, contents) # Return POSIX path for inclusion in Simplestreams data, not # system-native path for filesystem access. return '/'.join(path), contents def make_simplestreams_product_index(self, index_dir, stream, product, image_spec, os_release, download_file, contents, version): """Write a fake Simplestreams product index file. The image is written into the directory that holds the indexes. It contains one downloadable file, as specified by the arguments. 
""" index = { 'format': 'products:1.0', 'data-type': 'image-ids', 'updated': 'Tue, 25 Mar 2014 16:19:49 +0000', 'content_id': stream, 'products': { product: { 'versions': { version: { 'items': { 'boot-kernel': { 'ftype': 'boot-kernel', '_fake': 'fake-data: %s' % download_file, 'version': os_release, 'release': image_spec.release, 'path': download_file, 'sha256': checksum_sha256(contents), 'arch': image_spec.arch, 'subarches': image_spec.subarch, 'size': len(contents), }, }, }, }, 'subarch': image_spec.subarch, 'krel': image_spec.release, 'label': image_spec.label, 'kflavor': image_spec.subarch, 'version': os_release, 'subarches': [image_spec.subarch], 'release': image_spec.release, 'arch': image_spec.arch, 'os': image_spec.os, }, }, } write_text_file( os.path.join(index_dir, '%s.json' % stream), json.dumps(index)) def make_simplestreams_repo(self, image_spec): """Fake a local simplestreams repository containing the given image. This creates a temporary directory that looks like a realistic Simplestreams repository, containing one downloadable file for the given `image_spec`. """ os_release = '%d.%.2s' % ( randint(1, 99), ('04' if randint(0, 1) == 0 else '10'), ) repo = self.make_dir() index_dir = os.path.join(repo, 'streams', 'v1') os.makedirs(index_dir) stream = 'com.ubuntu.maas:daily:v2:download' product = 'com.ubuntu.maas:boot:%s:%s:%s' % ( os_release, image_spec.arch, image_spec.subarch, ) version = '20140317' download_file, sha = self.make_download_file(repo, image_spec, version) self.make_simplestreams_product_index( index_dir, stream, product, image_spec, os_release, download_file, sha, version) index = self.make_simplestreams_index(index_dir, stream, product) return index def make_working_args(self): """Create a set of working arguments for the script.""" # Prepare a fake repository and sources. sources = [ { 'url': self.repo, 'selections': [ { 'os': self.os, 'release': self.release, 'arches': [self.arch], 'subarches': [self.subarch], 'labels': [self.label], }, ], }, ] sources_file = self.make_file( 'sources.yaml', contents=yaml.safe_dump(sources)) return self.make_args(sources_file=sources_file) def test_successful_run(self): """Integration-test a successful run of the importer. This runs as much realistic code as it can, exercising most of the integration points for a real import. """ # Patch out things that we don't want running during the test. Patch # at a low level, so that we exercise all the function calls that a # unit test might not put to the test. self.patch_maaslog() self.patch(boot_resources, 'call_and_check') self.patch(boot_resources, "service_monitor") # We'll go through installation of a PXE boot loader here, but skip # all other boot loader types. Testing them all is a job for proper # unit tests. for method_name, boot_method in BootMethodRegistry: if method_name != 'pxe': self.patch(boot_method, 'install_bootloader') args = self.make_working_args() osystem = self.os arch = self.arch subarch = self.subarch release = self.release label = self.label # Run the import code. boot_resources.main(args) # Verify the reuslts. 
self.assertThat(os.path.join(self.storage, 'cache'), DirExists()) current = os.path.join(self.storage, 'current') self.assertTrue(os.path.islink(current)) self.assertThat(current, DirExists()) self.assertThat(os.path.join(current, 'pxelinux.0'), FileExists()) self.assertThat(os.path.join(current, 'maas.meta'), FileExists()) self.assertThat(os.path.join(current, 'maas.tgt'), FileExists()) self.assertThat( os.path.join( current, osystem, arch, subarch, self.release, self.label), DirExists()) # Verify the contents of the "meta" file. with open(os.path.join(current, 'maas.meta'), 'rb') as meta_file: meta_data = json.load(meta_file) self.assertEqual([osystem], meta_data.keys()) self.assertEqual([arch], meta_data[osystem].keys()) self.assertEqual([subarch], meta_data[osystem][arch].keys()) self.assertEqual([release], meta_data[osystem][arch][subarch].keys()) self.assertEqual( [label], meta_data[osystem][arch][subarch][release].keys()) self.assertItemsEqual( [ 'content_id', 'path', 'product_name', 'version_name', 'subarches', ], meta_data[osystem][arch][subarch][release][label].keys()) def test_warns_if_no_sources_selected(self): self.patch_maaslog() sources_fixture = self.useFixture(BootSourcesFixture([])) args = self.make_args(sources_file=sources_fixture.filename) boot_resources.main(args) self.assertThat( boot_resources.maaslog.warn, MockAnyCall("Can't import: region did not provide a source.")) def test_warns_if_no_boot_resources_found(self): # The import code used to crash when no resources were found in the # Simplestreams repositories (bug 1305758). This could happen easily # with mistakes in the sources. Now, you just get a logged warning. sources_fixture = self.useFixture(BootSourcesFixture( [ { 'url': self.make_dir(), 'keyring': factory.make_name('keyring'), 'selections': [{'release': factory.make_name('release')}], }, ])) self.patch(boot_resources, 'download_all_image_descriptions') boot_resources.download_all_image_descriptions.return_value = ( BootImageMapping()) self.patch_maaslog() self.patch(boot_resources, 'RepoWriter') args = self.make_args(sources_file=sources_fixture.filename) boot_resources.main(args) self.assertThat( boot_resources.maaslog.warn, MockAnyCall( "Finished importing boot images, the region does not have " "any boot images available.")) def test_raises_ioerror_when_no_sources_file_found(self): self.patch_maaslog() no_sources = os.path.join( self.make_dir(), '%s.yaml' % factory.make_name('no-sources')) self.assertRaises( boot_resources.NoConfigFile, boot_resources.main, self.make_args(sources_file=no_sources)) def test_raises_non_ENOENT_IOErrors(self): # main() will raise a NoConfigFile error when it encounters an # ENOENT IOError, but will otherwise just re-raise the original # IOError. mock_load = self.patch(BootSources, 'load') other_error = IOError(randint(errno.ENOENT + 1, 1000)) mock_load.side_effect = other_error self.patch_maaslog() raised_error = self.assertRaises( IOError, boot_resources.main, self.make_args()) self.assertEqual(other_error, raised_error) def test_raises_error_when_no_sources_passed(self): # main() raises an error when neither a sources file nor a sources # listing is specified. 
self.patch_maaslog() self.assertRaises( boot_resources.NoConfigFile, boot_resources.main, self.make_args(sources="", sources_file="")) def test_update_targets_conf_ensures_tgt_service(self): mock_ensure_service = self.patch( boot_resources.service_monitor, "ensure_service") self.patch(boot_resources, "call_and_check") boot_resources.update_targets_conf(factory.make_name("snapshot")) self.assertThat(mock_ensure_service, MockCalledOnceWith("tgt")) class TestMetaContains(MAASTestCase): """Tests for the `meta_contains` function.""" def make_meta_file(self, content=None): if content is None: content = factory.make_string() storage = self.make_dir() current = os.path.join(storage, 'current') os.mkdir(current) return storage, factory.make_file(current, 'maas.meta', content) def test_matching_content_is_compared_True(self): content = factory.make_string() storage, meta_file = self.make_meta_file(content) self.assertTrue(boot_resources.meta_contains(storage, content)) def test_mismatching_content_is_compared_False(self): content = factory.make_string() storage, meta_file = self.make_meta_file() self.assertFalse(boot_resources.meta_contains(storage, content)) def test_meta_contains_updates_file_timestamp(self): content = factory.make_string() storage, meta_file = self.make_meta_file(content) # Change the file's timestamp to a week ago. one_week_ago = timedelta(weeks=1).total_seconds() age_file(meta_file, one_week_ago) boot_resources.meta_contains(storage, content) # Check the timestamp was updated. expected_date = datetime.now() actual_date = datetime.fromtimestamp(int(os.path.getmtime(meta_file))) self.assertEqual(expected_date.day, actual_date.day) class TestParseSources(MAASTestCase): """Tests for the `parse_sources` function.""" def test_parses_sources(self): self.patch(boot_resources, 'maaslog') sources = [ { 'keyring': factory.make_name("keyring"), 'keyring_data': '', 'url': factory.make_name("something"), 'selections': [ { 'os': factory.make_name("os"), 'release': factory.make_name("release"), 'arches': [factory.make_name("arch")], 'subarches': [factory.make_name("subarch")], 'labels': [factory.make_name("label")], }, ], }, ] parsed_sources = boot_resources.parse_sources(yaml.safe_dump(sources)) self.assertEqual(sources, parsed_sources) class TestImportImages(MAASTestCase): """Tests for the `import_images`() function.""" def test_writes_source_keyrings(self): # Stop import_images() from actually doing anything. 
        self.patch(boot_resources, 'maaslog')
        self.patch(boot_resources, 'call_and_check')
        self.patch(boot_resources, 'download_all_boot_resources')
        self.patch(boot_resources, 'download_all_image_descriptions')
        self.patch(boot_resources, 'install_boot_loaders')
        self.patch(boot_resources, 'update_current_symlink')
        self.patch(boot_resources, 'write_snapshot_metadata')
        self.patch(boot_resources, 'write_targets_conf')
        self.patch(boot_resources, 'update_targets_conf')
        fake_write_all_keyrings = self.patch(
            boot_resources, 'write_all_keyrings')
        sources = [
            {
                'keyring_data': self.getUniqueString(),
                'url': factory.make_name("something"),
                'selections': [
                    {
                        'os': factory.make_name("os"),
                        'release': factory.make_name("release"),
                        'arches': [factory.make_name("arch")],
                        'subarches': [factory.make_name("subarch")],
                        'labels': [factory.make_name("label")],
                    },
                ],
            },
        ]
        boot_resources.import_images(sources)
        self.assertThat(
            fake_write_all_keyrings,
            MockCalledWith(mock.ANY, sources))
maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_cleanup.py0000644000000000000000000000740413056115004026465 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for the `cleanup` module."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

import os
from random import randint

from maastesting.factory import factory
from maastesting.matchers import MockCalledOnceWith
from maastesting.testcase import MAASTestCase
from provisioningserver.import_images import cleanup


class TestCleanup(MAASTestCase):

    def make_snapshot_dir(self, storage):
        name = factory.make_name('snapshot')
        path = os.path.join(storage, name)
        os.mkdir(path)
        return path

    def make_cache_file(self, storage, link_count=0):
        cache_dir = os.path.join(storage, 'cache')
        if not os.path.exists(cache_dir):
            os.mkdir(cache_dir)
        cache_file = factory.make_name('cache')
        cache_path = os.path.join(cache_dir, cache_file)
        open(cache_path, 'wb').close()
        link_dir = os.path.join(storage, 'links')
        if not os.path.exists(link_dir):
            os.mkdir(link_dir)
        for i in range(link_count):
            link_path = os.path.join(link_dir, '%s-%d' % (cache_file, i))
            os.link(cache_path, link_path)
        return cache_path

    def test_list_old_snapshots_returns_all_but_current_directory(self):
        storage = self.make_dir()
        snapshots = [self.make_snapshot_dir(storage) for _ in range(3)]
        current_snapshot = self.make_snapshot_dir(storage)
        os.symlink(
            current_snapshot, os.path.join(storage, 'current'))
        self.assertItemsEqual(snapshots, cleanup.list_old_snapshots(storage))

    def test_cleanup_snapshots_removes_all_old_snapshots(self):
        storage = self.make_dir()
        snapshots = [self.make_snapshot_dir(storage) for _ in range(3)]
        current_snapshot = self.make_snapshot_dir(storage)
        os.symlink(
            current_snapshot, os.path.join(storage, 'current'))
        cleanup.cleanup_snapshots(storage)
        remaining_snapshots = [
            snapshot for snapshot in snapshots
            if os.path.exists(snapshot)
            ]
        self.assertEqual([], remaining_snapshots)

    def test_list_unused_cache_files_returns_all_files_nlink_equal_one(self):
        storage = self.make_dir()
        cache_nlink_1 = [self.make_cache_file(storage) for _ in range(3)]
        for _ in range(3):
            self.make_cache_file(storage, link_count=randint(1, 3))
        self.assertItemsEqual(
            cache_nlink_1, cleanup.list_unused_cache_files(storage))

    def test_cleanup_cache_removes_all_files_nlink_equal_one(self):
        storage = self.make_dir()
        for _ in range(3):
            self.make_cache_file(storage)
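        # (The files above have a link count of 1: they exist only in the
        # cache. The ones below gain extra hardlinks, so cleanup_cache
        # must leave them alone.)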
cache_nlink_greater_than_1 = [ self.make_cache_file(storage, link_count=randint(1, 3)) for _ in range(3) ] cleanup.cleanup_cache(storage) cache_dir = os.path.join(storage, 'cache') remaining_cache = [ os.path.join(cache_dir, filename) for filename in os.listdir(cache_dir) if os.path.isfile(os.path.join(cache_dir, filename)) ] self.assertItemsEqual( cache_nlink_greater_than_1, remaining_cache) def test_cleanup_snapshots_and_cache_calls(self): storage = self.make_dir() mock_snapshots = self.patch_autospec(cleanup, 'cleanup_snapshots') mock_cache = self.patch_autospec(cleanup, 'cleanup_cache') cleanup.cleanup_snapshots_and_cache(storage) self.assertThat(mock_snapshots, MockCalledOnceWith(storage)) self.assertThat(mock_cache, MockCalledOnceWith(storage)) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_download_descriptions.py0000644000000000000000000003300013056115004031422 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the `download_descriptions` module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import logging from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.import_images import download_descriptions from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.download_descriptions import ( clean_up_repo_item, RepoDumper, ) from provisioningserver.import_images.testing.factory import ( make_image_spec, set_resource, ) class TestValuePassesFilterList(MAASTestCase): """Tests for `value_passes_filter_list`.""" def test_nothing_passes_empty_list(self): self.assertFalse( download_descriptions.value_passes_filter_list( [], factory.make_name('value'))) def test_unmatched_value_does_not_pass(self): self.assertFalse( download_descriptions.value_passes_filter_list( [factory.make_name('filter')], factory.make_name('value'))) def test_matched_value_passes(self): value = factory.make_name('value') self.assertTrue( download_descriptions.value_passes_filter_list([value], value)) def test_value_passes_if_matched_anywhere_in_filter(self): value = factory.make_name('value') self.assertTrue( download_descriptions.value_passes_filter_list( [ factory.make_name('filter'), value, factory.make_name('filter'), ], value)) def test_any_value_passes_asterisk(self): self.assertTrue( download_descriptions.value_passes_filter_list( ['*'], factory.make_name('value'))) class TestValuePassesFilter(MAASTestCase): """Tests for `value_passes_filter`.""" def test_unmatched_value_does_not_pass(self): self.assertFalse( download_descriptions.value_passes_filter( factory.make_name('filter'), factory.make_name('value'))) def test_matching_value_passes(self): value = factory.make_name('value') self.assertTrue( download_descriptions.value_passes_filter(value, value)) def test_any_value_matches_asterisk(self): self.assertTrue( download_descriptions.value_passes_filter( '*', factory.make_name('value'))) class TestImagePassesFilter(MAASTestCase): """Tests for `image_passes_filter`.""" def make_filter_from_image(self, image_spec=None): """Create a filter dict that matches the given `ImageSpec`. If `image_spec` is not given, creates a random value. 
""" if image_spec is None: image_spec = make_image_spec() return { 'os': image_spec.os, 'arches': [image_spec.arch], 'subarches': [image_spec.subarch], 'release': image_spec.release, 'labels': [image_spec.label], } def test_any_image_passes_none_filter(self): os, arch, subarch, release, label = make_image_spec() self.assertTrue( download_descriptions.image_passes_filter( None, os, arch, subarch, release, label)) def test_any_image_passes_empty_filter(self): os, arch, subarch, release, label = make_image_spec() self.assertTrue( download_descriptions.image_passes_filter( [], os, arch, subarch, release, label)) def test_image_passes_matching_filter(self): image = make_image_spec() self.assertTrue( download_descriptions.image_passes_filter( [self.make_filter_from_image(image)], image.os, image.arch, image.subarch, image.release, image.label)) def test_image_does_not_pass_nonmatching_filter(self): image = make_image_spec() self.assertFalse( download_descriptions.image_passes_filter( [self.make_filter_from_image()], image.os, image.arch, image.subarch, image.release, image.label)) def test_image_passes_if_one_filter_matches(self): image = make_image_spec() self.assertTrue( download_descriptions.image_passes_filter( [ self.make_filter_from_image(), self.make_filter_from_image(image), self.make_filter_from_image(), ], image.os, image.arch, image.subarch, image.release, image.label)) def test_filter_checks_release(self): image = make_image_spec() self.assertFalse( download_descriptions.image_passes_filter( [ self.make_filter_from_image(image._replace( release=factory.make_name('other-release'))) ], image.os, image.arch, image.subarch, image.release, image.label)) def test_filter_checks_arches(self): image = make_image_spec() self.assertFalse( download_descriptions.image_passes_filter( [ self.make_filter_from_image(image._replace( arch=factory.make_name('other-arch'))) ], image.os, image.arch, image.subarch, image.release, image.label)) def test_filter_checks_subarches(self): image = make_image_spec() self.assertFalse( download_descriptions.image_passes_filter( [ self.make_filter_from_image(image._replace( subarch=factory.make_name('other-subarch'))) ], image.os, image.arch, image.subarch, image.release, image.label)) def test_filter_checks_labels(self): image = make_image_spec() self.assertFalse( download_descriptions.image_passes_filter( [ self.make_filter_from_image(image._replace( label=factory.make_name('other-label'))) ], image.os, image.arch, image.subarch, image.release, image.label)) class TestBootMerge(MAASTestCase): """Tests for `boot_merge`.""" def test_integrates(self): # End-to-end scenario for boot_merge: start with an empty boot # resources dict, and receive one resource from Simplestreams. total_resources = BootImageMapping() resources_from_repo = set_resource() download_descriptions.boot_merge(total_resources, resources_from_repo) # Since we started with an empty dict, the result contains the same # item that we got from Simplestreams, and nothing else. 
self.assertEqual(resources_from_repo.mapping, total_resources.mapping) def test_obeys_filters(self): filters = [ { 'os': factory.make_name('os'), 'arches': [factory.make_name('other-arch')], 'subarches': [factory.make_name('other-subarch')], 'release': factory.make_name('other-release'), 'label': [factory.make_name('other-label')], }, ] total_resources = BootImageMapping() resources_from_repo = set_resource() download_descriptions.boot_merge( total_resources, resources_from_repo, filters=filters) self.assertEqual({}, total_resources.mapping) def test_does_not_overwrite_existing_entry(self): image = make_image_spec() total_resources = set_resource( resource="Original resource", image_spec=image) original_resources = total_resources.mapping.copy() resources_from_repo = set_resource( resource="New resource", image_spec=image) download_descriptions.boot_merge(total_resources, resources_from_repo) self.assertEqual(original_resources, total_resources.mapping) class TestRepoDumper(MAASTestCase): """Tests for `RepoDumper`.""" def make_item(self, os=None, release=None, arch=None, subarch=None, subarches=None, label=None): if os is None: os = factory.make_name('os') if release is None: release = factory.make_name('release') if arch is None: arch = factory.make_name('arch') if subarch is None: subarch = factory.make_name('subarch') if subarches is None: subarches = [factory.make_name('subarch') for _ in range(3)] if subarch not in subarches: subarches.append(subarch) if label is None: label = factory.make_name('label') item = { 'content_id': factory.make_name('content_id'), 'product_name': factory.make_name('product_name'), 'version_name': factory.make_name('version_name'), 'path': factory.make_name('path'), 'os': os, 'release': release, 'arch': arch, 'subarch': subarch, 'subarches': ','.join(subarches), 'label': label, } return item, clean_up_repo_item(item) def test_insert_item_adds_item_per_subarch(self): boot_images_dict = BootImageMapping() dumper = RepoDumper(boot_images_dict) subarches = [factory.make_name('subarch') for _ in range(3)] item, _ = self.make_item( subarch=subarches.pop(), subarches=subarches) self.patch( download_descriptions, 'products_exdata').return_value = item dumper.insert_item( sentinel.data, sentinel.src, sentinel.target, sentinel.pedigree, sentinel.contentsource) image_specs = [ make_image_spec( os=item['os'], release=item['release'], arch=item['arch'], subarch=subarch, label=item['label']) for subarch in subarches ] self.assertItemsEqual(image_specs, boot_images_dict.mapping.keys()) def test_insert_item_sets_compat_item_specific_to_subarch(self): boot_images_dict = BootImageMapping() dumper = RepoDumper(boot_images_dict) subarches = [factory.make_name('subarch') for _ in range(5)] compat_subarch = subarches.pop() item, _ = self.make_item(subarch=subarches.pop(), subarches=subarches) second_item, compat_item = self.make_item( os=item['os'], release=item['release'], arch=item['arch'], subarch=compat_subarch, subarches=[compat_subarch], label=item['label']) self.patch( download_descriptions, 'products_exdata').side_effect = [item, second_item] for _ in range(2): dumper.insert_item( sentinel.data, sentinel.src, sentinel.target, sentinel.pedigree, sentinel.contentsource) image_spec = make_image_spec( os=item['os'], release=item['release'], arch=item['arch'], subarch=compat_subarch, label=item['label']) self.assertEqual(compat_item, boot_images_dict.mapping[image_spec]) def test_insert_item_sets_generic_to_release_item_for_hwe(self): boot_images_dict = BootImageMapping() 
dumper = RepoDumper(boot_images_dict) os = 'ubuntu' release = 'precise' arch = 'amd64' label = 'release' hwep_subarch = 'hwe-p' hwep_subarches = ['generic', 'hwe-p'] hwes_subarch = 'hwe-s' hwes_subarches = ['generic', 'hwe-p', 'hwe-s'] hwep_item, compat_item = self.make_item( os=os, release=release, arch=arch, subarch=hwep_subarch, subarches=hwep_subarches, label=label) hwes_item, _ = self.make_item( os=os, release=release, arch=arch, subarch=hwes_subarch, subarches=hwes_subarches, label=label) self.patch( download_descriptions, 'products_exdata').side_effect = [hwep_item, hwes_item] for _ in range(2): dumper.insert_item( sentinel.data, sentinel.src, sentinel.target, sentinel.pedigree, sentinel.contentsource) image_spec = make_image_spec( os=os, release=release, arch=arch, subarch='generic', label=label) self.assertEqual(compat_item, boot_images_dict.mapping[image_spec]) def test_sync_does_not_propagate_ioerror(self): mock_sync = self.patch(download_descriptions.BasicMirrorWriter, "sync") mock_sync.side_effect = IOError() boot_images_dict = BootImageMapping() dumper = RepoDumper(boot_images_dict) with FakeLogger("maas.import-images", level=logging.INFO) as maaslog: # What we're testing here is that sync() doesn't raise IOError... dumper.sync(sentinel.reader, sentinel.path) # ... but we'll validate that we properly called the [mock] # superclass method, and logged something, as well. self.assertThat( mock_sync, MockCalledOnceWith(sentinel.reader, sentinel.path)) self.assertDocTestMatches( "...error...syncing boot images...", maaslog.output) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_download_resources.py0000644000000000000000000000675213056115004030744 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for `provisioningserver.import_images.download_resources`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import datetime import os from maastesting.matchers import MockCalledWith from maastesting.testcase import MAASTestCase import mock from provisioningserver.import_images import download_resources from provisioningserver.import_images.product_mapping import ProductMapping from simplestreams.objectstores import FileStore class MockDateTime(mock.MagicMock): """A class for faking datetimes.""" _utcnow = datetime.utcnow() @classmethod def utcnow(cls): return cls._utcnow class TestDownloadAllBootResources(MAASTestCase): """Tests for `download_all_boot_resources`().""" def test_returns_snapshot_path(self): self.patch(download_resources, 'datetime', MockDateTime) storage_path = self.make_dir() expected_path = os.path.join( storage_path, 'snapshot-%s' % MockDateTime._utcnow.strftime('%Y%m%d-%H%M%S')) self.assertEqual( expected_path, download_resources.download_all_boot_resources( sources=[], storage_path=storage_path, product_mapping=None)) def test_calls_download_boot_resources(self): self.patch(download_resources, 'datetime', MockDateTime) storage_path = self.make_dir() snapshot_path = download_resources.compose_snapshot_path( storage_path) cache_path = os.path.join(storage_path, 'cache') file_store = FileStore(cache_path) source = { 'url': 'http://example.com', 'keyring': self.make_file("keyring"), } product_mapping = ProductMapping() fake = self.patch(download_resources, 'download_boot_resources') download_resources.download_all_boot_resources( sources=[source], storage_path=storage_path, product_mapping=product_mapping, store=file_store) self.assertThat( fake, MockCalledWith( source['url'], file_store, snapshot_path, product_mapping, keyring_file=source['keyring'])) class TestDownloadBootResources(MAASTestCase): """Tests for `download_boot_resources()`.""" def test_syncs_repo(self): fake_sync = self.patch(download_resources.RepoWriter, 'sync') storage_path = self.make_dir() snapshot_path = self.make_dir() cache_path = os.path.join(storage_path, 'cache') file_store = FileStore(cache_path) source_url = "http://maas.ubuntu.com/images/ephemeral-v2/releases/" download_resources.download_boot_resources( source_url, file_store, snapshot_path, None, None) self.assertEqual(1, len(fake_sync.mock_calls)) class TestComposeSnapshotPath(MAASTestCase): """Tests for `compose_snapshot_path`().""" def test_returns_path_under_storage_path(self): self.patch(download_resources, 'datetime', MockDateTime) storage_path = self.make_dir() expected_path = os.path.join( storage_path, 'snapshot-%s' % MockDateTime._utcnow.strftime('%Y%m%d-%H%M%S')) self.assertEqual( expected_path, download_resources.compose_snapshot_path(storage_path)) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_helpers.py0000644000000000000000000000462513056115004026502 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the `helpers` module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase import mock from provisioningserver.import_images import helpers from simplestreams.util import SignatureMissingException class TestGetSigningPolicy(MAASTestCase): """Tests for `get_signing_policy`.""" def test_picks_nonchecking_policy_for_json_index(self): path = 'streams/v1/index.json' policy = helpers.get_signing_policy(path) content = factory.make_string() self.assertEqual( content, policy(content, path, factory.make_name('keyring'))) def test_picks_checking_policy_for_sjson_index(self): path = 'streams/v1/index.sjson' content = factory.make_string() policy = helpers.get_signing_policy(path) self.assertRaises( SignatureMissingException, policy, content, path, factory.make_name('keyring')) def test_picks_checking_policy_for_json_gpg_index(self): path = 'streams/v1/index.json.gpg' content = factory.make_string() policy = helpers.get_signing_policy(path) self.assertRaises( SignatureMissingException, policy, content, path, factory.make_name('keyring')) def test_injects_default_keyring_if_passed(self): path = 'streams/v1/index.json.gpg' content = factory.make_string() keyring = factory.make_name('keyring') self.patch(helpers, 'policy_read_signed') policy = helpers.get_signing_policy(path, keyring) policy(content, path) self.assertThat( helpers.policy_read_signed, MockCalledOnceWith(mock.ANY, mock.ANY, keyring=keyring)) class TestGetOSFromProduct(MAASTestCase): """Tests for `get_os_from_product`.""" def test_returns_os_from_product(self): os = factory.make_name('os') product = {'os': os} self.assertEqual(os, helpers.get_os_from_product(product)) def test_returns_ubuntu_if_missing(self): self.assertEqual('ubuntu', helpers.get_os_from_product({})) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_keyrings.py0000644000000000000000000000743313056115004026673 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the import_images keyring management functions.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from maastesting.factory import factory from maastesting.matchers import ( MockCalledWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase import mock from provisioningserver.import_images import keyrings from testtools.matchers import FileContains class TestWriteKeyring(MAASTestCase): """Tests for `write_keyring().`""" def test_writes_keyring_to_file(self): keyring_data = b"A keyring! My kingdom for a keyring!" 
keyring_path = os.path.join(self.make_dir(), "a-keyring-file") keyrings.write_keyring(keyring_path, keyring_data) self.assertTrue(os.path.exists(keyring_path)) self.assertThat(keyring_path, FileContains(keyring_data)) class TestCalculateKeyringName(MAASTestCase): """Tests for `calculate_keyring_name()`.""" def test_creates_name_from_url(self): parts = [self.getUniqueString() for _ in range(1, 5)] source_url = "http://example.com/%s/" % "/".join(parts) expected_keyring_name = "example.com-%s.gpg" % "-".join(parts) self.assertEqual( expected_keyring_name, keyrings.calculate_keyring_name(source_url)) class TestWriteAllKeyrings(MAASTestCase): """Tests for the `write_all_keyrings()` function.""" def test_writes_keyring_data(self): fake_write_keyring = self.patch(keyrings, 'write_keyring') sources = [{ 'url': "http://%s" % self.getUniqueString(), 'keyring_data': factory.make_bytes(), } for _ in range(5)] keyring_path = self.make_dir() keyrings.write_all_keyrings(keyring_path, sources) expected_calls = ( mock.call( os.path.join( keyring_path, keyrings.calculate_keyring_name(source['url'])), source['keyring_data']) for source in sources) self.assertThat(fake_write_keyring, MockCallsMatch(*expected_calls)) def test_returns_sources(self): self.patch(keyrings, 'write_keyring') sources = [{ 'url': "http://%s" % self.getUniqueString(), 'keyring_data': factory.make_bytes(), } for _ in range(5)] keyring_path = self.make_dir() expected_values = [ os.path.join( keyring_path, keyrings.calculate_keyring_name(source['url'])) for source in sources] returned_sources = keyrings.write_all_keyrings(keyring_path, sources) actual_values = [ source.get('keyring') for source in returned_sources] self.assertEqual(expected_values, actual_values) def test_ignores_existing_keyrings(self): self.patch(keyrings, 'write_keyring') fake_maaslog = self.patch(keyrings, 'maaslog') source = { 'url': self.getUniqueString(), 'keyring': self.getUniqueString(), 'keyring_data': self.getUniqueString(), } keyring_path = self.make_dir() [returned_source] = keyrings.write_all_keyrings(keyring_path, [source]) expected_keyring = os.path.join( keyring_path, keyrings.calculate_keyring_name(source['url'])) self.assertEqual(expected_keyring, returned_source.get('keyring')) self.assertThat( fake_maaslog.warning, MockCalledWith( "Both a keyring file and keyring data were specified; " "ignoring the keyring file.")) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_product_mapping.py0000644000000000000000000001426213056115004030231 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
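# The tests below specify `ProductMapping` completely: resources are keyed
# on (content_id, product_name, version_name) and map to a list of
# subarches. This compact sketch mirrors that contract for reference; it is
# not the shipped class.
class ProductMappingSketch:

    def __init__(self):
        self.mapping = {}

    @staticmethod
    def make_key(resource):
        # A KeyError propagates if an identifying item is missing.
        return (
            resource['content_id'],
            resource['product_name'],
            resource['version_name'],
            )

    def add(self, resource, subarch):
        self.mapping.setdefault(self.make_key(resource), []).append(subarch)

    def contains(self, resource):
        return self.make_key(resource) in self.mapping

    def get(self, resource):
        return self.mapping[self.make_key(resource)]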
"""Tests for the `ProductMapping` class.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.import_images.boot_image_mapping import ( BootImageMapping, ) from provisioningserver.import_images.product_mapping import ( map_products, ProductMapping, ) from provisioningserver.import_images.testing.factory import ( make_boot_resource, make_image_spec, set_resource, ) class TestProductMapping(MAASTestCase): """Tests for `ProductMapping`.""" def test_initially_empty(self): self.assertEqual({}, ProductMapping().mapping) def test_make_key_extracts_identifying_items(self): resource = make_boot_resource() content_id = resource['content_id'] product_name = resource['product_name'] version_name = resource['version_name'] self.assertEqual( (content_id, product_name, version_name), ProductMapping.make_key(resource)) def test_make_key_ignores_other_items(self): resource = make_boot_resource() resource['other_item'] = factory.make_name('other') self.assertEqual( ( resource['content_id'], resource['product_name'], resource['version_name'], ), ProductMapping.make_key(resource)) def test_make_key_fails_if_key_missing(self): resource = make_boot_resource() del resource['version_name'] self.assertRaises( KeyError, ProductMapping.make_key, resource) def test_add_creates_subarches_list_if_needed(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource, subarch) self.assertEqual( {product_dict.make_key(resource): [subarch]}, product_dict.mapping) def test_add_appends_to_existing_list(self): product_dict = ProductMapping() resource = make_boot_resource() subarches = [factory.make_name('subarch') for _ in range(2)] for subarch in subarches: product_dict.add(resource, subarch) self.assertEqual( {product_dict.make_key(resource): subarches}, product_dict.mapping) def test_contains_returns_true_for_stored_item(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource, subarch) self.assertTrue(product_dict.contains(resource)) def test_contains_returns_false_for_unstored_item(self): self.assertFalse( ProductMapping().contains(make_boot_resource())) def test_contains_ignores_similar_items(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource.copy(), subarch) resource['product_name'] = factory.make_name('other') self.assertFalse(product_dict.contains(resource)) def test_contains_ignores_extraneous_keys(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource.copy(), subarch) resource['other_item'] = factory.make_name('other') self.assertTrue(product_dict.contains(resource)) def test_get_returns_stored_item(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource, subarch) self.assertEqual([subarch], product_dict.get(resource)) def test_get_fails_for_unstored_item(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource.copy(), subarch) resource['content_id'] = factory.make_name('other') self.assertRaises(KeyError, product_dict.get, resource) def 
test_get_ignores_extraneous_keys(self): product_dict = ProductMapping() resource = make_boot_resource() subarch = factory.make_name('subarch') product_dict.add(resource, subarch) resource['other_item'] = factory.make_name('other') self.assertEqual([subarch], product_dict.get(resource)) class TestMapProducts(MAASTestCase): """Tests for `map_products`.""" def test_maps_empty_dict_to_empty_dict(self): empty_boot_image_dict = BootImageMapping() self.assertEqual({}, map_products(empty_boot_image_dict).mapping) def test_maps_boot_resource_by_content_id_product_name_and_version(self): image = make_image_spec() resource = make_boot_resource() boot_dict = set_resource(resource=resource.copy(), image_spec=image) self.assertEqual( { ( resource['content_id'], resource['product_name'], resource['version_name'], ): [image.subarch], }, map_products(boot_dict).mapping) def test_concatenates_similar_resources(self): image1 = make_image_spec() image2 = make_image_spec() resource = make_boot_resource() boot_dict = BootImageMapping() # Create two images in boot_dict, both containing the same resource. for image in [image1, image2]: set_resource( boot_dict=boot_dict, resource=resource.copy(), image_spec=image) products_mapping = map_products(boot_dict) key = ( resource['content_id'], resource['product_name'], resource['version_name'], ) self.assertEqual([key], products_mapping.mapping.keys()) self.assertItemsEqual( [image1.subarch, image2.subarch], products_mapping.get(resource)) maas-1.9.5+bzr4599.orig/src/provisioningserver/import_images/tests/test_uec2roottar.py0000644000000000000000000003303213056115004027303 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
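# A sketch of `is_filesystem_file` as the tests below describe it: run
# file(1) with LANG=C in the environment and look for "filesystem data" in
# its output. The exact file(1) arguments are an assumption here; only the
# LANG setting and the output check are pinned down by the tests.
import os
from subprocess import check_output


def is_filesystem_file_sketch(path):
    old_lang = os.environ.get('LANG')
    os.environ['LANG'] = 'C'  # The tests assert LANG is 'C' during the call.
    try:
        output = check_output(['file', path])
    finally:
        if old_lang is None:
            del os.environ['LANG']
        else:
            os.environ['LANG'] = old_lang
    return b'filesystem data' in output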
"""Tests for the `uec2roottar` script and its supporting module..""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os import os.path from subprocess import CalledProcessError from maastesting.factory import factory from maastesting.matchers import ( MockAnyCall, MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase import mock from provisioningserver.import_images import uec2roottar from testtools.matchers import HasLength from testtools.testcase import ExpectedException def make_image_name(suffix='.img'): """Create an image file name (but not the actual file).""" return factory.make_name('root') + suffix def make_image(testcase, contents=None, suffix='.img'): """Create an image file.""" name = make_image_name(suffix) return testcase.make_file(name=name, contents=contents) def make_tarball_name(prefix='tarball'): """Create an arbitrary name for a tarball.""" return factory.make_name(prefix) + '.tar.gz' def make_roottar_location(testcase): """Create a name for an output root tarball, in an empty directory.""" name = make_tarball_name('root') return os.path.join(testcase.make_dir(), name) def patch_is_filesystem_file(testcase, answer): """Patch `is_filesystem_file` to return the given answer.""" testcase.patch(uec2roottar, 'is_filesystem_file').return_value = answer class TestMakeArgParser(MAASTestCase): """Tests for `make_argparser`.""" def test__defines_expected_options(self): image = make_image(self) output = make_roottar_location(self) user = factory.make_name('user') parser = uec2roottar.make_argparser(factory.make_string()) args = parser.parse_args([image, output, '--user', user]) self.assertEqual( ( image, output, user, ), ( args.image, args.output, args.user, )) def test__user_defaults_to_None(self): parser = uec2roottar.make_argparser(factory.make_string()) args = parser.parse_args( [make_image(self), make_roottar_location(self)]) self.assertIsNone(args.user) class TestIsFilesystemFile(MAASTestCase): """Tests for `is_filesystem_file`.""" def test__returns_True_if_file_looks_like_filesystem(self): image = make_image(self, suffix='.img') self.patch(uec2roottar, 'check_output').return_value = ( ("%s: filesystem data" % image).encode('utf-8')) self.assertTrue(uec2roottar.is_filesystem_file(image)) def test__returns_False_for_tarball(self): image = make_image(self, suffix='.tar.gz') self.patch(uec2roottar, 'check_output').return_value = ( ("%s: gzip compressed data, was ..." % image).encode('utf-8')) self.assertFalse(uec2roottar.is_filesystem_file(image)) def test__calls_file_with_C_language_setting(self): env_during_invocation = {} def fake_check_output(*args, **kwargs): env_during_invocation.update(os.environ) return b'' self.patch(uec2roottar, 'check_output', fake_check_output) uec2roottar.is_filesystem_file(make_image(self)) self.assertEqual('C', env_during_invocation.get('LANG')) class TestExtractImageFromTarball(MAASTestCase): """Tests for `extract_image_from_tarball`.""" def test__extracts_image(self): tarball = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'check_output') # Cheat: patch away extraction of the tarball, but pass a temporary # directory with an image already in it. The function will think it # just extracted the image from the tarball. 
image = make_image(self) working_dir = os.path.dirname(image) result = uec2roottar.extract_image_from_tarball(tarball, working_dir) self.assertThat( uec2roottar.check_call, MockCalledOnceWith([ 'tar', '-C', working_dir, '--wildcards', '*.img', '-Sxvzf', tarball, ])) self.assertEqual(image, result) def test__ignores_other_files(self): tarball = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'check_output') # Make the function think that it found two files in the tarball: an # image and some other file. image = make_image(self) working_dir = os.path.dirname(image) # This other file doesn't upset things, because it doesn't look like # an image file. factory.make_file(working_dir) self.assertEqual( image, uec2roottar.extract_image_from_tarball(tarball, working_dir)) def test__fails_if_no_image_found(self): tarball = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'check_output') empty_dir = self.make_dir() error = self.assertRaises( uec2roottar.ImageFileError, uec2roottar.extract_image_from_tarball, tarball, empty_dir) self.assertEqual( "Tarball %s does not contain any *.img." % tarball, unicode(error)) def test__fails_if_multiple_images_found(self): tarball = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'check_output') working_dir = self.make_dir() files = sorted( factory.make_file(working_dir, name=make_image_name()) for _ in range(2)) error = self.assertRaises( uec2roottar.ImageFileError, uec2roottar.extract_image_from_tarball, tarball, working_dir) self.assertEqual( "Tarball %s contains multiple image files: %s." % (tarball, ', '.join(files)), unicode(error)) class TestGetImageFile(MAASTestCase): """Tests for `get_image_file`.""" def test__returns_actual_image_file_unchanged(self): patch_is_filesystem_file(self, True) image = make_image(self) self.assertEqual( image, uec2roottar.get_image_file(image, factory.make_name('dir'))) def test__extracts_tarball_into_temp_dir(self): patch_is_filesystem_file(self, False) tarball = make_tarball_name() temp_dir = self.make_dir() image = make_image_name() patch = self.patch(uec2roottar, 'extract_image_from_tarball') patch.return_value = image result = uec2roottar.get_image_file(tarball, temp_dir) self.assertEqual(image, result) self.assertThat(patch, MockCalledOnceWith(tarball, temp_dir)) def test__rejects_other_files(self): patch_is_filesystem_file(self, False) filename = factory.make_name('weird-file') error = self.assertRaises( uec2roottar.ImageFileError, uec2roottar.get_image_file, filename, factory.make_name('dir')) self.assertEqual( "Expected '%s' to be either a filesystem file, or a " "gzipped tarball containing one." 
% filename, unicode(error)) class TestUnmount(MAASTestCase): """Tests for `unmount`.""" def test__calls_umount(self): self.patch(uec2roottar, 'check_call') mountpoint = factory.make_name('mount') uec2roottar.unmount(mountpoint) self.assertThat( uec2roottar.check_call, MockCalledOnceWith(['umount', mountpoint])) def test__propagates_failure(self): failure = CalledProcessError(9, factory.make_name('delibfail')) self.patch(uec2roottar, 'check_call').side_effect = failure self.patch(uec2roottar, 'maaslog') mountpoint = factory.make_name('mount') self.assertRaises(CalledProcessError, uec2roottar.unmount, mountpoint) self.assertThat( uec2roottar.maaslog.error, MockCalledOnceWith( "Could not unmount %s: %s", mountpoint, failure)) class TestLoopMount(MAASTestCase): """Tests for `loop_mount`.""" def test__mounts_and_unmounts_image(self): image = make_image_name() self.patch(uec2roottar, 'check_call') mountpoint = factory.make_name('mount') calls_before = len(uec2roottar.check_call.mock_calls) with uec2roottar.loop_mount(image, mountpoint): calls_during = len(uec2roottar.check_call.mock_calls) calls_after = len(uec2roottar.check_call.mock_calls) self.assertEqual( (0, 1, 2), (calls_before, calls_during, calls_after)) self.assertThat( uec2roottar.check_call, MockAnyCall(['mount', '-o', 'ro', image, mountpoint])) self.assertThat( uec2roottar.check_call, MockAnyCall(['umount', mountpoint])) def test__cleans_up_after_failure(self): class DeliberateException(Exception): pass self.patch(uec2roottar, 'check_call') image = make_image_name() mountpoint = factory.make_name('mount') with ExpectedException(DeliberateException): with uec2roottar.loop_mount(image, mountpoint): raise DeliberateException() self.assertThat( uec2roottar.check_call, MockAnyCall(['umount', mountpoint])) class TestTarSupportsXattrOpts(MAASTestCase): """Tests for `tar_supports_xattr_opts`.""" def test__returns_True_if_help_contains_ref_to_xattr(self): mock_check_call = self.patch(uec2roottar, 'check_output') mock_check_call.return_value = 'xattr' self.assertTrue(uec2roottar.tar_supports_xattr_opts()) self.assertThat(mock_check_call, MockCalledOnceWith(['tar', '--help'])) def test__returns_False_if_help_doesnt_contain_ref_to_xattr(self): mock_check_call = self.patch(uec2roottar, 'check_output') mock_check_call.return_value = 'nothing' self.assertFalse(uec2roottar.tar_supports_xattr_opts()) self.assertThat(mock_check_call, MockCalledOnceWith(['tar', '--help'])) class TestExtractImage(MAASTestCase): """Tests for `extract_image`.""" def extract_command_line(self, call): """Extract the command line from a `mock.call` for `check_call`.""" _, args, _ = call [command] = args return command def test__extracts_image_if_tar_doesnt_support_xattr(self): image = make_image_name() output = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'tar_supports_xattr_opts').return_value = False uec2roottar.extract_image(image, output) self.assertThat(uec2roottar.check_call.mock_calls, HasLength(3)) [mount_call, tar_call, umount_call] = uec2roottar.check_call.mock_calls self.assertEqual('mount', self.extract_command_line(mount_call)[0]) tar_command = self.extract_command_line(tar_call) self.assertEqual(['tar', '-C'], tar_command[:2]) self.assertEqual('umount', self.extract_command_line(umount_call)[0]) def test__extracts_image_if_tar_supports_xattr(self): image = make_image_name() output = make_tarball_name() self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'tar_supports_xattr_opts').return_value = True 
uec2roottar.extract_image(image, output) self.assertThat(uec2roottar.check_call.mock_calls, HasLength(3)) [mount_call, tar_call, umount_call] = uec2roottar.check_call.mock_calls self.assertEqual('mount', self.extract_command_line(mount_call)[0]) tar_command = self.extract_command_line(tar_call) self.assertEqual( ['tar', '--xattrs', '--xattrs-include=*', '-C'], tar_command[:4]) self.assertEqual('umount', self.extract_command_line(umount_call)[0]) class TestSetOwnership(MAASTestCase): """Tests for `set_ownership`.""" def test__does_nothing_if_no_user_specified(self): self.patch(uec2roottar, 'check_call') uec2roottar.set_ownership(make_tarball_name(), user=None) self.assertThat(uec2roottar.check_call, MockNotCalled()) def test__calls_chown_if_user_specified(self): self.patch(uec2roottar, 'check_call') user = factory.make_name('user') tarball = make_tarball_name() uec2roottar.set_ownership(tarball, user=user) self.assertThat( uec2roottar.check_call, MockCalledOnceWith(['/bin/chown', user, tarball])) class TestUEC2RootTar(MAASTestCase): """Integration tests for `uec2roottar`.""" def make_args(self, **kwargs): """Fake an `argparser` arguments object.""" args = mock.Mock() for key, value in kwargs.items(): setattr(args, key, value) return args def test__integrates(self): image_name = factory.make_name('root-image') + '.img' image = self.make_file(name=image_name) output_name = factory.make_name('root-tar') + '.tar.gz' output = os.path.join(self.make_dir(), output_name) args = self.make_args(image=image, output=output) self.patch(uec2roottar, 'check_call') self.patch(uec2roottar, 'check_output') patch_is_filesystem_file(self, True) uec2roottar.main(args) self.assertThat( uec2roottar.is_filesystem_file, MockCalledOnceWith(image)) maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/__init__.py0000644000000000000000000000315213056115004023010 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """__init__ for the provisioningserver.logger package.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "basicConfig", "DEFAULT_LOG_FORMAT", "DEFAULT_LOG_FORMAT_DATE", "DEFAULT_LOG_LEVEL", "get_maas_logger" ] import logging import sys from provisioningserver.logger.log import get_maas_logger from twisted.python import log # This format roughly matches Twisted's default, so that combined Twisted and # Django logs are consistent with one another. DEFAULT_LOG_FORMAT = "%(asctime)s [%(name)s] %(levelname)s: %(message)s" DEFAULT_LOG_FORMAT_DATE = "%Y-%m-%d %H:%M:%S" DEFAULT_LOG_LEVEL = logging.INFO def basicConfig(): """Configure basic logging for both Twisted and Python. This is useful during start-up, to get something going. Note that nothing is done to address time-zones. Both Twisted and Python's ``logging`` use local-time by default. """ # Globally override Twisted's log date format. It's tricky to get to the # FileLogObserver that twistd installs so that we can modify its config # alone, but we actually do want to make a global change anyway. log.FileLogObserver.timeFormat = DEFAULT_LOG_FORMAT_DATE # Get basic Python logging working with options consistent with Twisted. 
logging.basicConfig( stream=sys.stdout, level=DEFAULT_LOG_LEVEL, format=DEFAULT_LOG_FORMAT, datefmt=DEFAULT_LOG_FORMAT_DATE) maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/log.py0000644000000000000000000000441113056115004022031 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Logging for MAAS, redirects to syslog.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "get_maas_logger", ] import logging from logging.handlers import SysLogHandler class MAASLogger(logging.getLoggerClass()): """A Logger class that doesn't allow you to call exception().""" def exception(self, *args, **kwargs): raise NotImplementedError( "Don't log exceptions to maaslog; use the default " "Django logger instead") def get_maas_logger(syslog_tag=None): """Return a MAAS logger that will log to syslog. :param syslog_tag: A string that will be used to prefix the message in syslog. Will be appended to "maas" in the form "maas.<syslog_tag>". If None, the syslog tag will simply be "maas". syslog_tag is also used to name the logger with the Python logging module; loggers will be named "maas.<syslog_tag>" unless syslog_tag is None. """ if syslog_tag is None: logger_name = "maas" else: logger_name = "maas.%s" % syslog_tag maaslog = logging.getLogger(logger_name) # This line is pure filth, but it allows us to return MAASLoggers # for any logger constructed by this function, whilst leaving all # other loggers to be the domain of the logging package. maaslog.__class__ = MAASLogger return maaslog def configure_root_logger(): # Configure the "root" handler. This is the only place where we need to # add the syslog handler and configure levels and formatting; sub-handlers # propagate up to this handler. root = get_maas_logger() if len(root.handlers) == 0: # It has not yet been configured. handler = SysLogHandler( "/dev/log", facility=SysLogHandler.LOG_DAEMON) handler.setFormatter(logging.Formatter( "%(name)s: [%(levelname)s] %(message)s")) root.addHandler(handler) root.setLevel(logging.INFO) # Don't propagate logs up to the root logger. root.propagate = 0 return root configure_root_logger() maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/tests/0000755000000000000000000000000013056115004022040 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/tests/__init__.py0000644000000000000000000000000013056115004024137 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/logger/tests/test_logger.py0000644000000000000000000001053713056115004024736 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
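# Typical use of `get_maas_logger` from log.py above: named loggers hang off
# the "maas" logger, which owns the single SysLogHandler, so a record logged
# on "maas.power" propagates up and reaches syslog exactly once. The tag and
# message here are illustrative.
from provisioningserver.logger.log import get_maas_logger

maaslog = get_maas_logger("power")  # The logger is named "maas.power".
maaslog.info("Hello from %s.", "maas.power")
# maaslog.exception(...) would raise NotImplementedError by design; use the
# default Django logger for tracebacks instead.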
"""Tests for log.py""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import imap import logging import logging.handlers from maastesting.factory import factory from provisioningserver.logger import log from provisioningserver.logger.log import ( get_maas_logger, MAASLogger, ) from provisioningserver.testing.testcase import PservTestCase from testtools.matchers import ( HasLength, IsInstance, ) class TestGetMAASLogger(PservTestCase): def test_root_logger_logs_to_syslog(self): root_logger = get_maas_logger() self.assertThat(root_logger.handlers, HasLength(1)) [handler] = root_logger.handlers self.assertThat(handler, IsInstance(logging.handlers.SysLogHandler)) def test_root_logger_defaults_to_info(self): root_logger = get_maas_logger() self.assertEqual(logging.INFO, root_logger.level) def test_does_not_log_twice(self): maas_logger = get_maas_logger() maas_foo_logger = get_maas_logger("foo") all_handlers = [] # In previous versions of get_maas_logger(), the all_handlers list # would end up containing two handlers, because a new SysLogHandler # was added to each logger. This means that logging to the "maas.foo" # logger would emit a message to syslog via its handler, then the log # record would be propagated up to the "maas" logger (which we're # calling the root logger in this context) where its handler would # then emit another message to syslog. all_handlers.extend(maas_logger.handlers) all_handlers.extend(maas_foo_logger.handlers) self.expectThat(all_handlers, HasLength(1)) # Intercept calls to `emit` on each handler above. log_records = [] for handler in all_handlers: self.patch(handler, "emit", log_records.append) maas_foo_logger.info("A message from the Mekon") self.assertThat(log_records, HasLength(1)) def test_sets_custom_formatting(self): logger = get_maas_logger("foo.bar") [handler] = get_maas_logger().handlers log_records = [] self.patch(handler, "emit", log_records.append) robot_name = factory.make_name("Robot") logger.info("Hello there %s!", robot_name) self.assertEqual( "maas.foo.bar: [INFO] Hello there %s!" 
% robot_name, "\n---\n".join(imap(handler.format, log_records))) def test_sets_logger_name(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() maaslog = get_maas_logger(name) self.assertEqual("maas.%s" % name, maaslog.name) def test_returns_same_logger_if_called_twice(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() maaslog = get_maas_logger(name) maaslog_2 = get_maas_logger(name) self.assertIs(maaslog, maaslog_2) def test_exception_calls_disallowed(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() maaslog = get_maas_logger(name) self.assertRaises( NotImplementedError, maaslog.exception, factory.make_string()) def test_returns_MAASLogger_instances(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() maaslog = get_maas_logger(name) self.assertIsInstance(maaslog, MAASLogger) def test_doesnt_affect_general_logger_class(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() get_maas_logger(name) self.assertIsNot( MAASLogger, logging.getLoggerClass()) def test_general_logger_class_accepts_exceptions(self): self.patch(log, 'SysLogHandler') self.patch(logging, 'Formatter') name = factory.make_string() get_maas_logger(name) other_logger = logging.getLogger() self.assertIsNone(other_logger.exception(factory.make_string())) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/__init__.py0000644000000000000000000000325713056115004022673 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Power control.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "is_driver_available", "power_action_registry", "power_state_update", "QUERY_POWER_TYPES", ] from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.region import UpdateNodePowerState from provisioningserver.utils.twisted import asynchronous # List of power_types that support querying the power state. # change_power_state() will only retry changing the power # state for these power types. # This is meant to be temporary until all the power types support # querying the power state of a node. QUERY_POWER_TYPES = [ 'amt', 'hmc', 'ipmi', 'moonshot', 'mscm', 'msftocs', 'sm15k', 'ucsm', 'virsh', 'vmware', ] # We could use a Registry here, but it seems kind of like overkill. power_action_registry = {} def is_driver_available(power_type): """Is there a Python-based driver available for the given power type?""" from provisioningserver.drivers import power # Circular import. return power.PowerDriverRegistry.get_item(power_type) is not None @asynchronous def power_state_update(system_id, state): """Report to the region about a node's power state. :param system_id: The system ID for the node. :param state: Typically "on", "off", or "error". """ client = getRegionClient() return client( UpdateNodePowerState, system_id=system_id, power_state=state) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/change.py0000644000000000000000000002654613056115004022367 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
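# Sketched usage of this module's entry point, `maybe_change_power_state`
# (defined below). Callers pass the node's identity, its power type and a
# driver context, and get a Deferred back; a second call for the same
# system_id while a conflicting change is in flight raises
# PowerActionAlreadyInProgress. The context keys shown are illustrative IPMI
# parameters taken from the power schema.
from provisioningserver.power.change import maybe_change_power_state

d = maybe_change_power_state(
    system_id='node-1234', hostname='example-node', power_type='ipmi',
    power_change='on', context={
        'power_address': '10.0.0.2',
        'power_user': 'maas',
        'power_pass': 'secret',
        })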
"""RPC helpers relating to power control.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "maybe_change_power_state", ] from datetime import timedelta from provisioningserver import power from provisioningserver.drivers.power import ( DEFAULT_WAITING_POLICY, get_error_message, power_drivers_by_name, PowerDriverRegistry, ) from provisioningserver.events import ( EVENT_TYPES, send_event_node, ) from provisioningserver.logger.log import get_maas_logger from provisioningserver.power import ( poweraction, query, ) from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import PowerActionAlreadyInProgress from provisioningserver.rpc.region import MarkNodeFailed from provisioningserver.utils.twisted import ( asynchronous, callOut, deferred, deferWithTimeout, pause, synchronous, ) from twisted.internet import reactor from twisted.internet.defer import ( CancelledError, inlineCallbacks, ) from twisted.internet.task import deferLater from twisted.internet.threads import deferToThread from twisted.python import log maaslog = get_maas_logger("power") # Timeout for change_power_state(). We set it to 5 minutes by default, # but it would be lovely if this was configurable. This is only a backstop # meant to cope with broken BMCs. CHANGE_POWER_STATE_TIMEOUT = timedelta(minutes=5).total_seconds() @asynchronous(timeout=15) @inlineCallbacks def power_change_failure(system_id, hostname, power_change, message): """Report a node that for which power control has failed.""" assert power_change in ['on', 'off'], ( "Unknown power change: %s" % power_change) maaslog.error( "Error changing power state (%s) of node: %s (%s)", power_change, hostname, system_id) client = getRegionClient() yield client( MarkNodeFailed, system_id=system_id, error_description=message, ) if power_change == 'on': event_type = EVENT_TYPES.NODE_POWER_ON_FAILED elif power_change == 'off': event_type = EVENT_TYPES.NODE_POWER_OFF_FAILED yield send_event_node(event_type, system_id, hostname, message) @synchronous def perform_power_change( system_id, hostname, power_type, power_change, context): """Issue the given `power_change` command. On failure the node will be marked as broken and the error will be re-raised to the caller. :deprecated: This relates to template-based power control. """ action = poweraction.PowerAction(power_type) try: return action.execute(power_change=power_change, **context) except poweraction.PowerActionFail as error: message = "Node could not be powered %s: %s" % (power_change, error) power_change_failure(system_id, hostname, power_change, message) raise @asynchronous def perform_power_driver_change( system_id, hostname, power_type, power_change, context): """Execute power driver `power_change` method. On failure the node will be marked as broken and the error will be re-raised to the caller. """ power_driver = PowerDriverRegistry.get_item(power_type) if power_change == 'on': d = power_driver.on(system_id, context) elif power_change == 'off': d = power_driver.off(system_id, context) def power_change_failed(failure): message = "Node could not be powered %s: %s" % ( power_change, get_error_message(failure.value)) df = power_change_failure(system_id, hostname, power_change, message) df.addCallback(lambda _: failure) # Propagate the original error. 
return df return d.addErrback(power_change_failed) @asynchronous @inlineCallbacks def power_change_success(system_id, hostname, power_change): """Report about a successful node power state change. This updates the region's record of the node's power state, logs to the MAAS log, and appends to the node's event log. :param system_id: The system ID for the node. :param hostname: The node's hostname, used in messages. :param power_change: "on" or "off". """ assert power_change in ['on', 'off'], ( "Unknown power change: %s" % power_change) yield power.power_state_update(system_id, power_change) maaslog.info( "Changed power state (%s) of node: %s (%s)", power_change, hostname, system_id) # Emit success event. if power_change == 'on': event_type = EVENT_TYPES.NODE_POWERED_ON elif power_change == 'off': event_type = EVENT_TYPES.NODE_POWERED_OFF yield send_event_node(event_type, system_id, hostname) @asynchronous @inlineCallbacks def power_change_starting(system_id, hostname, power_change): """Report about a node power state change starting. This logs to the MAAS log, and appends to the node's event log. :param system_id: The system ID for the node. :param hostname: The node's hostname, used in messages. :param power_change: "on" or "off". """ assert power_change in ['on', 'off'], ( "Unknown power change: %s" % power_change) maaslog.info( "Changing power state (%s) of node: %s (%s)", power_change, hostname, system_id) # Emit starting event. if power_change == 'on': event_type = EVENT_TYPES.NODE_POWER_ON_STARTING elif power_change == 'off': event_type = EVENT_TYPES.NODE_POWER_OFF_STARTING yield send_event_node(event_type, system_id, hostname) @asynchronous @deferred # Always return a Deferred. def maybe_change_power_state( system_id, hostname, power_type, power_change, context, clock=reactor): """Attempt to change the power state of a node. If there is no power action already in progress, register this action and then pass change_power_state() to the reactor to call later and then return. This function exists to guarantee that PowerActionAlreadyInProgress errors will be raised promptly, before any work is done to power the node on. :raises: PowerActionAlreadyInProgress if there's already a power action in progress for this node. """ assert power_change in ('on', 'off'), ( "Unknown power change: %s" % power_change) power_driver = power_drivers_by_name.get(power_type) if power_driver is None: raise poweraction.PowerActionFail( "Unknown power_type '%s'" % power_type) missing_packages = power_driver.detect_missing_packages() if len(missing_packages): raise poweraction.PowerActionFail( "'%s' package(s) are not installed" % " ".join( missing_packages)) # There should be one and only one power change for each system ID. if system_id in power.power_action_registry: current_power_change, d = power.power_action_registry[system_id] else: current_power_change, d = None, None if current_power_change is None: # Arrange for the power change to happen later; do not make the caller # wait, because it might take a long time. We set a timeout so that if # the power action doesn't return in a timely fashion (or fails # silently or some such) it doesn't block other actions on the node. 
d = deferLater( clock, 0, deferWithTimeout, CHANGE_POWER_STATE_TIMEOUT, change_power_state, system_id, hostname, power_type, power_change, context, clock) power.power_action_registry[system_id] = power_change, d # Whether we succeed or fail, we need to remove the action from the # registry of actions, otherwise subsequent actions will fail. d.addBoth(callOut, power.power_action_registry.pop, system_id, None) # Log cancellations distinctly from other errors. def eb_cancelled(failure): failure.trap(CancelledError) log.msg( "%s: Power could not be turned %s; timed out." % (hostname, power_change)) return power_change_failure( system_id, hostname, power_change, "Timed out") d.addErrback(eb_cancelled) # Catch-all log. d.addErrback( log.err, "%s: Power could not be turned %s." % ( hostname, power_change)) elif current_power_change == power_change: # What we want is already happening; let it continue. pass else: # Right now we reject conflicting power changes. However, we have the # Deferred (in `d`) along which the current power change is occurring, # so the option to cancel is available if we want it. raise PowerActionAlreadyInProgress( "Unable to change power state to '%s' for node %s: another " "action is already in progress for that node." % (power_change, hostname)) @asynchronous @inlineCallbacks def change_power_state( system_id, hostname, power_type, power_change, context, clock=reactor): """Change the power state of a node. This monitors the result of the power change by querying the power state of the node, thus attempting to ensure that the requested change has taken place. Success is reported using `power_change_success`. Power-related failures are reported using `power_change_failure`. Other failures must be reported by the caller. """ yield power_change_starting(system_id, hostname, power_change) # Use increasing waiting times to work around race conditions # that could arise when power-cycling the node. for waiting_time in DEFAULT_WAITING_POLICY: if power.is_driver_available(power_type): # There's a Python-based driver for this power type. yield perform_power_driver_change( system_id, hostname, power_type, power_change, context) else: # This power type is still template-based. yield deferToThread( perform_power_change, system_id, hostname, power_type, power_change, context) # Return now if we can't query the power state. if power_type not in power.QUERY_POWER_TYPES: return # Wait to give the node some time to change its power state. yield pause(waiting_time, clock) # Check current power state. if power.is_driver_available(power_type): new_power_state = yield query.perform_power_driver_query( system_id, hostname, power_type, context) else: new_power_state = yield deferToThread( perform_power_change, system_id, hostname, power_type, 'query', context) if new_power_state == "unknown" or new_power_state == power_change: yield power_change_success(system_id, hostname, power_change) return # Retry logic is handled by the power driver. # Once all power types have had templates converted to power drivers # this method will need to be re-factored. if power.is_driver_available(power_type): return # Failure: the power state of the node hasn't changed: mark it as # broken. message = "Timeout after %s tries" % len(DEFAULT_WAITING_POLICY) yield power_change_failure(system_id, hostname, power_change, message) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/poweraction.py0000644000000000000000000001123113056115004023455 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Actions for power-related operations. :deprecated: This relates to template-based power control. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "PowerAction", "PowerActionFail", "UnknownPowerType", ] import os import subprocess from provisioningserver.utils import ( escape_py_literal, locate_config, ShellTemplate, ) from provisioningserver.utils.network import find_ip_via_arp class UnknownPowerType(Exception): """Raised when trying to process an unknown power type.""" class PowerActionFail(Exception): """Raised when there's a problem executing a power script.""" @classmethod def from_action(cls, power_action, err): message = "%s failed" % power_action.power_type is_process_error = isinstance(err, subprocess.CalledProcessError) # If the failure is a CalledProcessError, be careful not to call # its __str__ as this will include the actual template text # (which is the 'command' that was run). if is_process_error: message += " with return code %s" % err.returncode if err.output: message += ":\n" + ( err.output.decode("utf-8", "replace").strip()) else: message += ":\n%s" % err return cls(message) class PowerAction: """Actions for power-related operations. :param power_type: A power-type name, e.g. `ipmi`. The class is intended to be used in two phases: 1. Instantiation, passing the power_type. 2. .execute(), passing any parameters required by the template. """ def __init__(self, power_type): self.path = os.path.join( self.get_template_basedir(), power_type + ".template") if not os.path.exists(self.path): raise UnknownPowerType(power_type) self.power_type = power_type def get_template_basedir(self): """Directory where power templates are stored.""" return locate_config('templates/power') def get_config_basedir(self): """Directory where power config is stored.""" # By default, power config lives in the same directory as power # templates. This makes it easy to customize them together. return locate_config('templates/power') def get_template(self): with open(self.path, "rb") as f: return ShellTemplate(f.read(), name=self.path) def update_context(self, context): """Add and manipulate `context` as necessary.""" context['config_dir'] = self.get_config_basedir() context['escape_py_literal'] = escape_py_literal if 'mac_address' in context: mac_address = context['mac_address'] ip_address = find_ip_via_arp(mac_address) context['ip_address'] = ip_address else: context.setdefault('ip_address', None) return context def render_template(self, template, context): try: return template.substitute(context) except NameError as error: raise PowerActionFail.from_action(self, error) def run_shell(self, commands): """Execute raw shell script (as rendered from a template). :param commands: String containing shell script. :return: Standard output and standard error returned by the execution of the shell script. :raises: :class:`PowerActionFail` """ # This might need retrying but it could be better to leave that # to the individual scripts. 
shell = ("/bin/sh",) process = subprocess.Popen( shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) output, _ = process.communicate(commands) if process.wait() == 0: return output.strip() else: raise PowerActionFail.from_action( self, subprocess.CalledProcessError( process.returncode, shell, output)) def execute(self, **context): """Execute the power template. :return: Standard output and standard error returned by the execution of the template. Any supplied parameters will be passed to the template as substitution values. """ template = self.get_template() context = self.update_context(context) rendered = self.render_template( template=template, context=context) return self.run_shell(rendered) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/query.py0000644000000000000000000002063713056115004022302 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC helpers relating to power control.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "get_power_state", "query_all_nodes", ] from functools import partial import sys from provisioningserver import power from provisioningserver.drivers.power import ( DEFAULT_WAITING_POLICY, power_drivers_by_name, PowerDriverRegistry, ) from provisioningserver.events import ( EVENT_TYPES, send_event_node, ) from provisioningserver.logger.log import get_maas_logger from provisioningserver.power import poweraction from provisioningserver.rpc.exceptions import NoSuchNode from provisioningserver.utils.twisted import ( asynchronous, pause, synchronous, ) from twisted.internet import reactor from twisted.internet.defer import ( DeferredList, DeferredSemaphore, inlineCallbacks, returnValue, succeed, ) from twisted.internet.threads import deferToThread from twisted.python import log maaslog = get_maas_logger("power") @synchronous def perform_power_query(system_id, hostname, power_type, context): """Query the node's power state. No exception handling is performed here. This allows `get_power_state` to perform multiple queries and only log the final error. :param power_type: This must refer to one of the template-based power drivers, and *not* to a Python-based one. :deprecated: This relates to template-based power control. """ action = poweraction.PowerAction(power_type) # `power_change` is a misnomer here. return action.execute(power_change='query', **context) @asynchronous def perform_power_driver_query(system_id, hostname, power_type, context): """Query the node's power state. No exception handling is performed here. This allows `get_power_state` to perform multiple queries and only log the final error. :param power_type: This must refer to one of the Python-based power drivers, and *not* to a template-based one. """ # Get power driver for given power type power_driver = PowerDriverRegistry[power_type] return power_driver.query(system_id, context) @asynchronous @inlineCallbacks def get_power_state(system_id, hostname, power_type, context, clock=reactor): """Return the power state of the given node. :return: The string "on" or "off". :raises PowerActionFail: When `power_type` is not queryable, or when there's a failure when querying the node's power state. """ if power_type not in power.QUERY_POWER_TYPES: # query_all_nodes() won't call this with an un-queryable power # type, however this is left here to prevent PEBKAC. 
raise poweraction.PowerActionFail( "Unknown power_type '%s'" % power_type) def check_power_state(state): if state not in ("on", "off", "unknown"): # This is considered an error. raise poweraction.PowerActionFail(state) # Capture errors as we go along. exc_info = None, None, None power_driver = power_drivers_by_name.get(power_type) if power_driver is None: raise poweraction.PowerActionFail( "Unknown power_type '%s'" % power_type) missing_packages = power_driver.detect_missing_packages() if len(missing_packages): raise poweraction.PowerActionFail( "'%s' package(s) are not installed" % ", ".join( missing_packages)) if power.is_driver_available(power_type): # New-style power drivers handle retries for themselves, so we only # ever call them once. try: power_state = yield perform_power_driver_query( system_id, hostname, power_type, context) check_power_state(power_state) except: # Hold the error; it will be reported later. exc_info = sys.exc_info() else: returnValue(power_state) else: # Old-style power drivers need to be retried. Use increasing waiting # times to work around race conditions that could arise when power # querying the node. for waiting_time in DEFAULT_WAITING_POLICY: # Perform power query. try: power_state = yield deferToThread( perform_power_query, system_id, hostname, power_type, context) check_power_state(power_state) except: # Hold the error; it may be reported later. exc_info = sys.exc_info() # Wait before trying again. yield pause(waiting_time, clock) else: returnValue(power_state) # Reaching here means that things have gone wrong. assert exc_info != (None, None, None) exc_type, exc_value, exc_trace = exc_info raise exc_type, exc_value, exc_trace @inlineCallbacks def power_query_success(system_id, hostname, state): """Report a node for which power querying has succeeded.""" yield power.power_state_update(system_id, state) @inlineCallbacks def power_query_failure(system_id, hostname, failure): """Report a node for which power querying has failed.""" message = "Power state could not be queried: %s" message %= failure.getErrorMessage() maaslog.error(message) yield power.power_state_update(system_id, 'error') yield send_event_node( EVENT_TYPES.NODE_POWER_QUERY_FAILED, system_id, hostname, message) @asynchronous def report_power_state(d, system_id, hostname): """Report the result of a power query. :param d: A `Deferred` that will fire with the node's updated power state, or an error condition. The callback/errback values are passed through unaltered. See `get_power_state` for details. 
""" def cb(state): d = power_query_success(system_id, hostname, state) d.addCallback(lambda _: state) return d def eb(failure): d = power_query_failure(system_id, hostname, failure) d.addCallback(lambda _: failure) return d return d.addCallbacks(cb, eb) def maaslog_report_success(node, power_state): """Log change in power state for node.""" if node['power_state'] != power_state: maaslog.info( "%s: Power state has changed from %s to %s.", node['hostname'], node['power_state'], power_state) return power_state def maaslog_report_failure(node, failure): """Log failure to query node.""" if failure.check(poweraction.PowerActionFail): maaslog.error( "%s: Could not query power state: %s.", node['hostname'], failure.getErrorMessage()) elif failure.check(NoSuchNode): maaslog.debug( "%s: Could not update power state: " "no such node.", node['hostname']) else: maaslog.error( "%s: Failed to refresh power state: %s", node['hostname'], failure.getErrorMessage()) # Also write out a full traceback to the server log. log.err(failure, "Failed to refresh power state.") def query_node(node, clock): """Calls `get_power_state` on the given node. Logs to maaslog as errors occur and power states change. """ if node['system_id'] in power.power_action_registry: maaslog.debug( "%s: Skipping query power status, " "power action already in progress.", node['hostname']) return succeed(None) else: d = get_power_state( node['system_id'], node['hostname'], node['power_type'], node['context'], clock=clock) d = report_power_state(d, node['system_id'], node['hostname']) d.addCallbacks( partial(maaslog_report_success, node), partial(maaslog_report_failure, node)) return d def query_all_nodes(nodes, max_concurrency=5, clock=reactor): """Queries the given nodes for their power state. Nodes' states are reported back to the region. :return: A deferred, which fires once all nodes have been queried, successfully or not. """ semaphore = DeferredSemaphore(tokens=max_concurrency) queries = ( semaphore.run(query_node, node, clock) for node in nodes if node['power_type'] in power.QUERY_POWER_TYPES) return DeferredList(queries, consumeErrors=True) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/schema.py0000644000000000000000000002452513056115004022375 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Define json schema for power parameters.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "JSON_POWER_TYPE_PARAMETERS", "JSON_POWER_TYPE_SCHEMA", "POWER_TYPE_PARAMETER_FIELD_SCHEMA", ] from jsonschema import validate # We specifically declare this here so that a node not knowing its own # power type won't fail to enlist. However, we don't want it in the list # of power types since setting a node's power type to "I don't know" # from another type doesn't make any sense. UNKNOWN_POWER_TYPE = '' class IPMI_DRIVER: DEFAULT = '' LAN = 'LAN' LAN_2_0 = 'LAN_2_0' IPMI_DRIVER_CHOICES = [ [IPMI_DRIVER.LAN, "LAN [IPMI 1.5]"], [IPMI_DRIVER.LAN_2_0, "LAN_2_0 [IPMI 2.0]"], ] # Represent the Django choices format as JSON; an array of 2-item # arrays. 
CHOICE_FIELD_SCHEMA = { 'type': 'array', 'items': { 'title': "Power type parameter field choice", 'type': 'array', 'minItems': 2, 'maxItems': 2, 'uniqueItems': True, 'items': { 'type': 'string', } }, } POWER_TYPE_PARAMETER_FIELD_SCHEMA = { 'title': "Power type parameter field", 'type': 'object', 'properties': { 'name': { 'type': 'string', }, 'field_type': { 'type': 'string', }, 'label': { 'type': 'string', }, 'required': { 'type': 'boolean', }, 'choices': CHOICE_FIELD_SCHEMA, 'default': { 'type': 'string', }, }, 'required': ['field_type', 'label', 'required'], } # A basic JSON schema for what power type parameters should look like. JSON_POWER_TYPE_SCHEMA = { 'title': "Power parameters set", 'type': 'array', 'items': { 'title': "Power type parameters", 'type': 'object', 'properties': { 'name': { 'type': 'string', }, 'description': { 'type': 'string', }, 'missing_packages': { 'type': 'array', 'items': { 'type': 'string', }, }, 'fields': { 'type': 'array', 'items': POWER_TYPE_PARAMETER_FIELD_SCHEMA, }, }, 'required': ['name', 'description', 'fields'], }, } # Power control choices for sm15k power type SM15K_POWER_CONTROL_CHOICES = [ ["ipmi", "IPMI"], ["restapi", "REST API v0.9"], ["restapi2", "REST API v2.0"], ] def make_json_field( name, label, field_type=None, choices=None, default=None, required=False): """Helper function for building a JSON power type parameters field. :param name: The name of the field. :type name: string :param label: The label to be presented to the user for this field. :type label: string :param field_type: The type of field to create. Can be one of (string, choice, mac_address, password). Defaults to string. :type field_type: string :param choices: The collection of choices to present to the user. Needs to be structured as a list of lists, otherwise make_json_field() will raise a ValidationError. :type choices: list :param default: The default value for the field. :type default: string :param required: Whether or not a value for the field is required. 
:type required: boolean """ if field_type not in ('string', 'mac_address', 'choice', 'password'): field_type = 'string' if choices is None: choices = [] validate(choices, CHOICE_FIELD_SCHEMA) if default is None: default = "" field = { 'name': name, 'label': label, 'required': required, 'field_type': field_type, 'choices': choices, 'default': default, } return field JSON_POWER_TYPE_PARAMETERS = [ { 'name': 'ether_wake', 'description': 'Wake-on-LAN', 'fields': [ make_json_field( 'mac_address', "MAC Address", field_type='mac_address'), ], }, { 'name': 'virsh', 'description': 'Virsh (virtual systems)', 'fields': [ make_json_field('power_address', "Power address"), make_json_field('power_id', "Power ID"), make_json_field( 'power_pass', "Power password (optional)", required=False, field_type='password'), ], }, { 'name': 'vmware', 'description': 'VMWare', 'fields': [ make_json_field( 'power_vm_name', "VM Name (if UUID unknown)", required=False), make_json_field( 'power_uuid', "VM UUID (if known)", required=False), make_json_field('power_address', "VMware hostname"), make_json_field('power_user', "VMware username"), make_json_field( 'power_pass', "VMware password", field_type='password'), make_json_field( 'power_port', "VMware API port (optional)", required=False), make_json_field( 'power_protocol', "VMware API protocol (optional)", required=False), ], }, { 'name': 'fence_cdu', 'description': 'Sentry Switch CDU', 'fields': [ make_json_field('power_address', "Power address"), make_json_field('power_id', "Power ID"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), ], }, { 'name': 'ipmi', 'description': 'IPMI', 'fields': [ make_json_field( 'power_driver', "Power driver", field_type='choice', choices=IPMI_DRIVER_CHOICES, default=IPMI_DRIVER.LAN_2_0), make_json_field('power_address', "IP address"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), make_json_field('mac_address', "Power MAC") ], }, { 'name': 'moonshot', 'description': 'HP Moonshot - iLO4 (IPMI)', 'fields': [ make_json_field('power_address', "Power address"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), make_json_field('power_hwaddress', "Power hardware address"), ], }, { 'name': 'sm15k', 'description': 'SeaMicro 15000', 'fields': [ make_json_field('system_id', "System ID"), make_json_field('power_address', "Power address"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), make_json_field( 'power_control', "Power control type", field_type='choice', choices=SM15K_POWER_CONTROL_CHOICES, default='ipmi'), ], }, { 'name': 'amt', 'description': 'Intel AMT', 'fields': [ make_json_field( 'mac_address', "MAC Address", field_type='mac_address'), make_json_field( 'power_pass', "Power password", field_type='password'), make_json_field('power_address', "Power address") ], }, { 'name': 'dli', 'description': 'Digital Loggers, Inc. 
PDU', 'fields': [ make_json_field('outlet_id', "Outlet ID"), make_json_field('power_address', "Power address"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), ], }, { 'name': 'ucsm', 'description': "Cisco UCS Manager", 'fields': [ make_json_field('uuid', "Server UUID"), make_json_field('power_address', "URL for XML API"), make_json_field('power_user', "API user"), make_json_field( 'power_pass', "API password", field_type='password'), ], }, { 'name': 'mscm', 'description': "HP Moonshot - iLO Chassis Manager", 'fields': [ make_json_field('power_address', "IP for MSCM CLI API"), make_json_field('power_user', "MSCM CLI API user"), make_json_field( 'power_pass', "MSCM CLI API password", field_type='password'), make_json_field( 'node_id', "Node ID - Must adhere to cXnY format " "(X=cartridge number, Y=node number)."), ], }, { 'name': 'msftocs', 'description': "Microsoft OCS - Chassis Manager", 'fields': [ make_json_field('power_address', "Power address"), make_json_field('power_port', "Power port"), make_json_field('power_user', "Power user"), make_json_field( 'power_pass', "Power password", field_type='password'), make_json_field('blade_id', "Blade ID (Typically 1-24)"), ], }, { 'name': 'apc', 'description': "American Power Conversion (APC) PDU", 'fields': [ make_json_field('power_address', "IP for APC PDU"), make_json_field( 'node_outlet', "APC PDU node outlet number (1-16)"), make_json_field( 'power_on_delay', "Power ON outlet delay (seconds)", default='5'), ], }, { 'name': 'hmc', 'description': "IBM Hardware Management Console (HMC)", 'fields': [ make_json_field('power_address', "IP for HMC"), make_json_field('power_user', "HMC username"), make_json_field( 'power_pass', "HMC password", field_type='password'), make_json_field( 'server_name', "HMC Managed System server name"), make_json_field( 'lpar', "HMC logical partition"), ], }, ] maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/0000755000000000000000000000000013056115004021715 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/__init__.py0000644000000000000000000000000013056115004024014 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/test_change.py0000644000000000000000000007654613056115004024575 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
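# --- Illustrative aside (not part of the original tree) --------------------
# The schema and helper definitions above can be exercised directly with the
# jsonschema library's validate() call (the same one make_json_field() uses
# internally). A minimal sketch; it assumes the definitions are importable
# from wherever JSON_POWER_TYPE_PARAMETERS lives in your branch:
#
#     from jsonschema import validate
#
#     # The built-in parameter definitions must satisfy the schema; a
#     # malformed entry would raise jsonschema.ValidationError here.
#     validate(JSON_POWER_TYPE_PARAMETERS, JSON_POWER_TYPE_SCHEMA)
#
#     # make_json_field() silently falls back to 'string' for unknown
#     # field types, so this produces a plain string field:
#     field = make_json_field(
#         'power_address', "Power address", field_type='bogus')
#     assert field['field_type'] == 'string'
#     validate(field, POWER_TYPE_PARAMETER_FIELD_SCHEMA)
# ----------------------------------------------------------------------------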
"""Tests for :py:module:`~provisioningserver.power.change`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import ( always_succeed_with, TwistedLoggerFixture, ) from mock import ( ANY, call, DEFAULT, Mock, sentinel, ) from provisioningserver import power from provisioningserver.drivers.power import ( DEFAULT_WAITING_POLICY, get_error_message as get_driver_error_message, power_drivers_by_name, PowerDriverRegistry, PowerError, ) from provisioningserver.events import EVENT_TYPES from provisioningserver.power import poweraction from provisioningserver.rpc import ( exceptions, region, ) from provisioningserver.rpc.testing import ( MockClusterToRegionRPCFixture, MockLiveClusterToRegionRPCFixture, ) from provisioningserver.testing.events import EventTypesAllRegistered from testtools import ExpectedException from testtools.deferredruntest import extract_result from testtools.matchers import ( Equals, IsInstance, ) from twisted.internet import reactor from twisted.internet.defer import ( Deferred, inlineCallbacks, returnValue, ) from twisted.internet.task import Clock def patch_PowerAction(test, return_value=DEFAULT, side_effect=None): """Patch the PowerAction object. Patch the PowerAction object so that PowerAction().execute is replaced by a Mock object created using the given `return_value` and `side_effect`. This can be used to simulate various successes or failures patterns while manipulating the power state of a node. Returns a tuple of mock objects: power.poweraction.PowerAction and power.poweraction.PowerAction().execute. 
""" power_action_obj = Mock() power_action_obj_execute = Mock( return_value=return_value, side_effect=side_effect) power_action_obj.execute = power_action_obj_execute power_action = test.patch(poweraction, 'PowerAction') power_action.return_value = power_action_obj return power_action, power_action_obj_execute def do_not_pause(test): test.patch_autospec(power.change, "pause", always_succeed_with(None)) test.patch_autospec(power.query, "pause", always_succeed_with(None)) class TestPowerHelpers(MAASTestCase): def setUp(self): super(TestPowerHelpers, self).setUp() self.useFixture(EventTypesAllRegistered()) def patch_rpc_methods(self): fixture = self.useFixture(MockClusterToRegionRPCFixture()) protocol, io = fixture.makeEventLoop( region.MarkNodeFailed, region.UpdateNodePowerState, region.SendEvent) return protocol, io def test_power_change_success_emits_event(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_change = 'on' protocol, io = self.patch_rpc_methods() d = power.change.power_change_success( system_id, hostname, power_change) io.flush() self.assertThat( protocol.UpdateNodePowerState, MockCalledOnceWith( ANY, system_id=system_id, power_state=power_change) ) self.assertThat( protocol.SendEvent, MockCalledOnceWith( ANY, type_name=EVENT_TYPES.NODE_POWERED_ON, system_id=system_id, description='') ) self.assertIsNone(extract_result(d)) def test_power_change_starting_emits_event(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_change = 'on' protocol, io = self.patch_rpc_methods() d = power.change.power_change_starting( system_id, hostname, power_change) io.flush() self.assertThat( protocol.SendEvent, MockCalledOnceWith( ANY, type_name=EVENT_TYPES.NODE_POWER_ON_STARTING, system_id=system_id, description='') ) self.assertIsNone(extract_result(d)) def test_power_change_failure_emits_event(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') message = factory.make_name('message') power_change = 'on' protocol, io = self.patch_rpc_methods() d = power.change.power_change_failure( system_id, hostname, power_change, message) io.flush() self.assertThat( protocol.SendEvent, MockCalledOnceWith( ANY, type_name=EVENT_TYPES.NODE_POWER_ON_FAILED, system_id=system_id, description=message) ) self.assertIsNone(extract_result(d)) class TestChangePowerState(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestChangePowerState, self).setUp() self.useFixture(EventTypesAllRegistered()) do_not_pause(self) @inlineCallbacks def patch_rpc_methods(self, return_value={}, side_effect=None): fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop( region.MarkNodeFailed, region.UpdateNodePowerState, region.SendEvent) protocol.MarkNodeFailed.return_value = return_value protocol.MarkNodeFailed.side_effect = side_effect self.addCleanup((yield connecting)) returnValue(protocol.MarkNodeFailed) def test_change_power_state_calls_power_change_starting_early_on(self): # The first, or one of the first, things that change_power_state() # does is write to the node event log via power_change_starting(). class ArbitraryException(Exception): """This allows us to return early from a function.""" # Raise this exception when power_change_starting() is called, to # return early from change_power_state(). This lets us avoid set-up # for parts of the function that we're presently not interested in. 
self.patch_autospec(power.change, "power_change_starting") power.change.power_change_starting.side_effect = ArbitraryException() d = power.change.change_power_state( sentinel.system_id, sentinel.hostname, sentinel.power_type, sentinel.power_change, sentinel.context) self.assertRaises(ArbitraryException, extract_result, d) self.assertThat( power.change.power_change_starting, MockCalledOnceWith( sentinel.system_id, sentinel.hostname, sentinel.power_change)) @inlineCallbacks def test_change_power_state_changes_power_state(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Patch the power action utility so that it says the node is # in the required power state. power_action, execute = patch_PowerAction( self, return_value=power_change) markNodeBroken = yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat( execute, MockCallsMatch( # One call to change the power state. call(power_change=power_change, **context), # One call to query the power state. call(power_change='query', **context), ), ) # The node hasn't been marked broken. self.assertThat(markNodeBroken, MockNotCalled()) @inlineCallbacks def test_change_power_state_doesnt_retry_for_certain_power_types(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') # Use a power type that is not among power.QUERY_POWER_TYPES. power_type = factory.make_name('power_type') power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d power_action, execute = patch_PowerAction( self, return_value=random.choice(['on', 'off'])) markNodeBroken = yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat( execute, MockCallsMatch( # Only one call to change the power state. call(power_change=power_change, **context), ), ) # The node hasn't been marked broken. self.assertThat(markNodeBroken, MockNotCalled()) @inlineCallbacks def test_change_power_state_retries_if_power_state_doesnt_change(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Simulate a failure to power up the node, then a success. power_action, execute = patch_PowerAction( self, side_effect=[None, 'off', None, 'on']) markNodeBroken = yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat( execute, MockCallsMatch( call(power_change=power_change, **context), call(power_change='query', **context), call(power_change=power_change, **context), call(power_change='query', **context), ) ) # The node hasn't been marked broken. 
self.assertThat(markNodeBroken, MockNotCalled()) @inlineCallbacks def test_change_power_state_doesnt_retry_if_query_returns_unknown(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Patch the power action utility so that it says the node is # in the required power state. power_action, execute = patch_PowerAction( self, return_value="unknown") markNodeBroken = yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat( execute, MockCallsMatch( # One call to change the power state. call(power_change=power_change, **context), # One call to query the power state. call(power_change='query', **context), ), ) # The node hasn't been marked broken. self.assertThat(markNodeBroken, MockNotCalled()) @inlineCallbacks def test_change_power_state_marks_the_node_broken_if_failure(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Simulate a persistent failure. power_action, execute = patch_PowerAction( self, return_value='off') markNodeBroken = yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) # The node has been marked broken. msg = "Timeout after %s tries" % len(DEFAULT_WAITING_POLICY) self.assertThat( markNodeBroken, MockCalledOnceWith( ANY, system_id=system_id, error_description=msg) ) @inlineCallbacks def test_change_power_state_marks_the_node_broken_if_exception(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Simulate an exception. exception_message = factory.make_name('exception') power_action, execute = patch_PowerAction( self, side_effect=poweraction.PowerActionFail(exception_message)) markNodeBroken = yield self.patch_rpc_methods() with ExpectedException(poweraction.PowerActionFail): yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) error_message = "Node could not be powered on: %s" % exception_message self.assertThat( markNodeBroken, MockCalledOnceWith( ANY, system_id=system_id, error_description=error_message)) @inlineCallbacks def test_change_power_state_pauses_inbetween_retries(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power.power_action_registry[system_id] = power_change, sentinel.d # Simulate two failures to power up the node, then a success. 
power_action, execute = patch_PowerAction( self, side_effect=[None, 'off', None, 'off', None, 'on']) # Patch calls to pause() to `execute` so that we record both in the # same place, and can thus see ordering. self.patch(power.change, 'pause', execute) self.patch(power.query, 'pause', execute) yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat(execute, MockCallsMatch( call(power_change=power_change, **context), call(1, reactor), # pause(1, reactor) call(power_change='query', **context), call(power_change=power_change, **context), call(2, reactor), # pause(2, reactor) call(power_change='query', **context), )) @inlineCallbacks def test___handles_power_driver_power_types(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = True perform_power_driver_change = self.patch_autospec( power.change, 'perform_power_driver_change') perform_power_driver_query = self.patch_autospec( power.query, 'perform_power_driver_query', Mock(return_value=power_change)) power_change_success = self.patch_autospec( power.change, 'power_change_success') yield self.patch_rpc_methods() yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.expectThat( perform_power_driver_change, MockCalledOnceWith( system_id, hostname, power_type, power_change, context)) self.expectThat( perform_power_driver_query, MockCalledOnceWith( system_id, hostname, power_type, context)) self.expectThat( power_change_success, MockCalledOnceWith( system_id, hostname, power_change)) @inlineCallbacks def test__calls_power_driver_on_for_power_driver(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = True get_item = self.patch(PowerDriverRegistry, 'get_item') get_item.return_value = Mock(return_value='on') perform_power_driver_query = self.patch( power.query, 'perform_power_driver_query', Mock(return_value=power_change)) self.patch(power.change, 'power_change_success') yield self.patch_rpc_methods() result = yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.expectThat(get_item, MockCalledOnceWith(power_type)) self.expectThat( perform_power_driver_query, MockCalledOnceWith( system_id, hostname, power_type, context)) self.expectThat( power.change.power_change_success, MockCalledOnceWith( system_id, hostname, power_change)) self.expectThat(result, Equals('on')) @inlineCallbacks def test__calls_power_driver_off_for_power_driver(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'off' context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = True get_item = self.patch(PowerDriverRegistry, 'get_item') get_item.return_value = Mock(return_value='off') perform_power_driver_query = self.patch( power.query, 'perform_power_driver_query', Mock(return_value=power_change))
self.patch(power.change, 'power_change_success') yield self.patch_rpc_methods() result = yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) self.expectThat(get_item, MockCalledOnceWith(power_type)) self.expectThat( perform_power_driver_query, MockCalledOnceWith( system_id, hostname, power_type, context)) self.expectThat( power.change.power_change_success, MockCalledOnceWith( system_id, hostname, power_change)) self.expectThat(result, Equals('off')) @inlineCallbacks def test__marks_the_node_broken_if_exception_for_power_driver(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = 'on' context = { factory.make_name('context-key'): factory.make_name('context-val'), 'system_id': system_id } self.patch(power, 'is_driver_available').return_value = True exception = PowerError(factory.make_string()) get_item = self.patch(PowerDriverRegistry, 'get_item') get_item.side_effect = exception self.patch(power.change, 'power_change_failure') markNodeBroken = yield self.patch_rpc_methods() with ExpectedException(PowerError): yield power.change.change_power_state( system_id, hostname, power_type, power_change, context) error_message = "Node could not be powered on: %s" % ( get_driver_error_message(exception)) self.expectThat( markNodeBroken, MockCalledOnceWith( ANY, system_id=system_id, error_description=error_message)) self.expectThat( power.change.power_change_failure, MockCalledOnceWith( system_id, hostname, power_change, error_message)) class TestMaybeChangePowerState(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestMaybeChangePowerState, self).setUp() self.patch(power, 'power_action_registry', {}) for power_driver in power_drivers_by_name.values(): self.patch( power_driver, "detect_missing_packages").return_value = [] self.useFixture(EventTypesAllRegistered()) do_not_pause(self) def patch_methods_using_rpc(self): self.patch_autospec(power.change, 'power_change_starting') power.change.power_change_starting.side_effect = ( always_succeed_with(None)) self.patch_autospec(power.change, 'change_power_state') power.change.change_power_state.side_effect = always_succeed_with(None) def test_always_returns_deferred(self): clock = Clock() power_type = random.choice(power.QUERY_POWER_TYPES) d = power.change.maybe_change_power_state( sentinel.system_id, sentinel.hostname, power_type, random.choice(("on", "off")), sentinel.context, clock=clock) self.assertThat(d, IsInstance(Deferred)) @inlineCallbacks def test_adds_action_to_registry(self): self.patch_methods_using_rpc() system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) self.assertEqual( {system_id: (power_change, ANY)}, power.power_action_registry) reactor.runUntilCurrent() # Run all delayed calls. 
self.assertEqual({}, power.power_action_registry) @inlineCallbacks def test_checks_missing_packages(self): self.patch_methods_using_rpc() system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } power_driver = power_drivers_by_name.get(power_type) detect_packages = self.patch_autospec( power_driver, "detect_missing_packages") detect_packages.return_value = [] yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) reactor.runUntilCurrent() # Run all delayed calls. self.assertThat(detect_packages, MockCalledOnceWith()) @inlineCallbacks def test_errors_when_missing_packages(self): self.patch_methods_using_rpc() system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } power_driver = power_drivers_by_name.get(power_type) detect_packages = self.patch_autospec( power_driver, "detect_missing_packages") detect_packages.return_value = ['gone'] with ExpectedException(poweraction.PowerActionFail): yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat(detect_packages, MockCalledOnceWith()) @inlineCallbacks def test_errors_when_change_conflicts_with_in_progress_change(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_changes = ['on', 'off'] random.shuffle(power_changes) current_power_change, power_change = power_changes context = { factory.make_name('context-key'): factory.make_name('context-val') } power.power_action_registry[system_id] = ( current_power_change, sentinel.d) with ExpectedException(exceptions.PowerActionAlreadyInProgress): yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) @inlineCallbacks def test_does_nothing_when_change_matches_in_progress_change(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) current_power_change = power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } power.power_action_registry[system_id] = ( current_power_change, sentinel.d) yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) self.assertThat(power.power_action_registry, Equals( {system_id: (power_change, sentinel.d)})) @inlineCallbacks def test_calls_change_power_state_later(self): self.patch_methods_using_rpc() system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) reactor.runUntilCurrent() # Run all delayed calls. 
self.assertThat( power.change.change_power_state, MockCalledOnceWith( system_id, hostname, power_type, power_change, context, power.change.reactor)) @inlineCallbacks def test_clears_lock_if_change_power_state_success(self): self.patch_methods_using_rpc() system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) reactor.runUntilCurrent() # Run all delayed calls. self.assertNotIn(system_id, power.power_action_registry) @inlineCallbacks def test_clears_lock_if_change_power_state_fails(self): class TestException(Exception): pass self.patch_autospec(power.change, 'power_change_starting') power.change.power_change_starting.side_effect = TestException('boom') system_id = factory.make_name('system_id') hostname = factory.make_hostname() power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = sentinel.context logger = self.useFixture(TwistedLoggerFixture()) yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) reactor.runUntilCurrent() # Run all delayed calls. self.assertNotIn(system_id, power.power_action_registry) self.assertDocTestMatches( """\ %s: Power could not be turned %s. Traceback (most recent call last): ... %s.TestException: boom """ % (hostname, power_change, __name__), logger.dump()) @inlineCallbacks def test_clears_lock_if_change_power_state_is_cancelled(self): # Patch in an unfired Deferred here. This will pause the call so that # we can grab the delayed call from the registry in time to cancel it. self.patch_autospec(power.change, 'change_power_state') power.change.change_power_state.return_value = Deferred() self.patch_autospec(power.change, 'power_change_failure') system_id = factory.make_name('system_id') hostname = factory.make_hostname() power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = sentinel.context logger = self.useFixture(TwistedLoggerFixture()) yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) # Get the Deferred from the registry and cancel it. _, d = power.power_action_registry[system_id] d.cancel() yield d self.assertNotIn(system_id, power.power_action_registry) self.assertDocTestMatches( """\ %s: Power could not be turned %s; timed out. """ % (hostname, power_change), logger.dump()) self.assertThat( power.change.power_change_failure, MockCalledOnceWith( system_id, hostname, power_change, "Timed out")) @inlineCallbacks def test__calls_change_power_state_with_timeout(self): self.patch_methods_using_rpc() defer_with_timeout = self.patch(power.change, 'deferWithTimeout') system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_change = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } yield power.change.maybe_change_power_state( system_id, hostname, power_type, power_change, context) reactor.runUntilCurrent() # Run all delayed calls. 
self.assertThat( defer_with_timeout, MockCalledOnceWith( power.change.CHANGE_POWER_STATE_TIMEOUT, power.change.change_power_state, system_id, hostname, power_type, power_change, context, power.change.reactor)) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/test_power.py0000644000000000000000000000320213056115004024457 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :py:module:`~provisioningserver.power`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import ANY from provisioningserver import power from provisioningserver.rpc import region from provisioningserver.rpc.testing import MockClusterToRegionRPCFixture from testtools.deferredruntest import extract_result from testtools.matchers import Equals class TestPowerHelpers(MAASTestCase): def patch_rpc_methods(self): fixture = self.useFixture(MockClusterToRegionRPCFixture()) protocol, io = fixture.makeEventLoop( region.MarkNodeFailed, region.UpdateNodePowerState, region.SendEvent) return protocol, io def test_power_state_update_calls_UpdateNodePowerState(self): system_id = factory.make_name('system_id') state = random.choice(['on', 'off']) protocol, io = self.patch_rpc_methods() d = power.power_state_update(system_id, state) # This blocks until the deferred is complete io.flush() self.expectThat(extract_result(d), Equals({})) self.assertThat( protocol.UpdateNodePowerState, MockCalledOnceWith( ANY, system_id=system_id, power_state=state) ) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/test_poweraction.py0000644000000000000000000002262713056115004025671 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.power`. 
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os import re from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import ( ANY, sentinel, ) import provisioningserver.power.poweraction from provisioningserver.power.poweraction import ( PowerAction, PowerActionFail, UnknownPowerType, ) from provisioningserver.utils import ( escape_py_literal, locate_config, ShellTemplate, ) from testtools.matchers import ( FileContains, MatchesException, Raises, ) class TestPowerAction(MAASTestCase): """Tests for PowerAction.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def configure_templates_dir(self, path): """Configure POWER_TEMPLATES_DIR to `path`.""" self.patch(PowerAction, 'get_template_basedir').return_value = path def test_init_raises_for_unknown_powertype(self): powertype = factory.make_name("powertype", sep='') self.assertRaises( UnknownPowerType, PowerAction, powertype) def test_init_stores_ether_wake_type(self): pa = PowerAction('ether_wake') self.assertEqual('ether_wake', pa.power_type) def test_init_stores_template_path(self): power_type = 'ether_wake' pa = PowerAction(power_type) path = os.path.join( pa.get_template_basedir(), power_type + ".template") self.assertEqual(path, pa.path) def test_template_basedir_defaults_to_config_dir(self): power_type = 'ether_wake' self.assertEqual( locate_config('templates/power'), PowerAction(power_type).get_template_basedir()) def test_template_basedir_prefers_configured_value(self): power_type = 'ether_wake' template_name = '%s.template' % power_type template = self.make_file(name=template_name) template_dir = os.path.dirname(template) self.configure_templates_dir(template_dir) self.assertEqual( template_dir, PowerAction('ether_wake').get_template_basedir()) def test_get_template_retrieves_template(self): pa = PowerAction('ether_wake') template = pa.get_template() self.assertIsInstance(template, ShellTemplate) self.assertThat(pa.path, FileContains(template.content)) def test_get_template_looks_for_template_in_template_basedir(self): contents = factory.make_string() power_type = 'ether_wake' template_name = '%s.template' % power_type template = self.make_file(name=template_name, contents=contents) self.configure_templates_dir(os.path.dirname(template)) self.assertEqual( contents, PowerAction(power_type).get_template().content) def test_render_template(self): # render_template() should take a template string and substitue # its variables. pa = PowerAction('ether_wake') template = ShellTemplate("template: {{mac}}") rendered = pa.render_template( template, pa.update_context({"mac": "mymac"})) self.assertEqual("template: mymac", rendered) def test_render_template_raises_PowerActionFail(self): # If not enough arguments are supplied to fill in template # variables then a PowerActionFail is raised. 
pa = PowerAction('ether_wake') template_name = factory.make_string() template = ShellTemplate("template: {{mac}}", name=template_name) self.assertThat( lambda: pa.render_template(template, pa.update_context({})), Raises(MatchesException( PowerActionFail, "ether_wake failed:\n" "name 'mac' is not defined at line \d+ column \d+ " "in file %s" % re.escape(template_name)))) def _create_template_file(self, template): """Create a temporary template file with the given contents.""" return self.make_file("testscript.sh", template) def run_action(self, path, **kwargs): pa = PowerAction('ether_wake') pa.path = path return pa.execute(**kwargs) def test_execute(self): # execute() should run the template through a shell. output_file = self.make_file( name='output', contents="(Output should go here)") template = "echo working {{mac}} > {{outfile}}" path = self._create_template_file(template) self.run_action(path, mac="test", outfile=output_file) self.assertThat(output_file, FileContains("working test\n")) def test_execute_return_execution_result(self): template = "echo ' test \n'" path = self._create_template_file(template) output = self.run_action(path) # run_action() returns the 'stripped' output. self.assertEqual('test', output) def test_execute_raises_PowerActionFail_for_failure(self): path = self._create_template_file("this_is_not_valid_shell") self.assertThat( lambda: self.run_action(path), Raises(MatchesException( PowerActionFail, "ether_wake failed with return code 127"))) def test_execute_raises_PowerActionFail_with_output(self): path = self._create_template_file("echo reason for failure; exit 1") self.assertThat( lambda: self.run_action(path), Raises( MatchesException(PowerActionFail, ".*:\nreason for failure"))) def test_wake_on_lan_cannot_shut_down_node(self): pa = PowerAction('ether_wake') self.assertRaises( PowerActionFail, pa.execute, power_change='off', mac=factory.make_mac_address()) def test_fence_cdu_checks_state(self): # We can't test the fence_cdu template in detail (and it may be # customized), but by making it use "echo" instead of a real # fence_cdu we can make it get a bogus answer from its status check. # The bogus answer is actually the rest of the fence_cdu command # line. It will complain about this and fail. 
action = PowerAction("fence_cdu") script = action.render_template( action.get_template(), action.update_context(dict( power_change='on', power_address='mysystem', power_id='system', power_user='me', power_pass='me', fence_cdu='echo')), ) output = action.run_shell(script) self.assertIn("Got unknown power state from fence_cdu", output) def configure_power_config_dir(self, path): """Configure POWER_CONFIG_DIR to `path`.""" self.patch(PowerAction, 'get_config_basedir').return_value = path def test_config_basedir_defaults_to_local_dir(self): power_type = 'ether_wake' self.assertEqual( locate_config('templates/power'), PowerAction(power_type).get_config_basedir()) class TestTemplateContext(MAASTestCase): def make_stubbed_power_action(self): power_action = PowerAction("amt") render_template = self.patch(power_action, "render_template") render_template.return_value = "echo done" return power_action def test_basic_context(self): power_action = self.make_stubbed_power_action() result = power_action.execute() self.assertEqual("done", result) self.assertThat( power_action.render_template, MockCalledOnceWith( template=ANY, context=dict( config_dir=locate_config("templates/power"), escape_py_literal=escape_py_literal, ip_address=None, ), )) def test_ip_address_is_unmolested_if_set(self): power_action = self.make_stubbed_power_action() ip_address = factory.make_ipv6_address() result = power_action.execute(ip_address=ip_address) self.assertEqual("done", result) self.assertThat( power_action.render_template, MockCalledOnceWith( template=ANY, context=dict( config_dir=locate_config("templates/power"), escape_py_literal=escape_py_literal, ip_address=ip_address, ), )) def test_execute_looks_up_ip_address_from_mac_address(self): find_ip_via_arp = self.patch( provisioningserver.power.poweraction, "find_ip_via_arp") find_ip_via_arp.return_value = sentinel.ip_address_from_mac power_action = self.make_stubbed_power_action() mac_address = factory.make_mac_address() result = power_action.execute(mac_address=mac_address) self.assertEqual("done", result) self.assertThat( power_action.render_template, MockCalledOnceWith( template=ANY, context=dict( config_dir=locate_config("templates/power"), escape_py_literal=escape_py_literal, ip_address=sentinel.ip_address_from_mac, mac_address=mac_address, ), )) maas-1.9.5+bzr4599.orig/src/provisioningserver/power/tests/test_query.py0000644000000000000000000006076413056115004024510 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for :py:module:`~provisioningserver.power.query`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from itertools import ( imap, izip, ) import logging import random from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCalledWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import ( always_fail_with, always_succeed_with, TwistedLoggerFixture, ) from mock import ( ANY, call, DEFAULT, Mock, sentinel, ) from provisioningserver import power from provisioningserver.drivers.power import ( DEFAULT_WAITING_POLICY, power_drivers_by_name, PowerDriverRegistry, ) from provisioningserver.events import EVENT_TYPES from provisioningserver.power import poweraction from provisioningserver.rpc import ( exceptions, region, ) from provisioningserver.rpc.testing import MockClusterToRegionRPCFixture from provisioningserver.testing.events import EventTypesAllRegistered from testtools.deferredruntest import ( assert_fails_with, extract_result, ) from testtools.matchers import Not from twisted.internet import reactor from twisted.internet.defer import ( fail, inlineCallbacks, maybeDeferred, succeed, ) from twisted.internet.task import Clock from twisted.python.failure import Failure def patch_PowerAction(test, return_value=DEFAULT, side_effect=None): """Patch the PowerAction object. Patch the PowerAction object so that PowerAction().execute is replaced by a Mock object created using the given `return_value` and `side_effect`. This can be used to simulate various successes or failures patterns while manipulating the power state of a node. Returns a tuple of mock objects: power.poweraction.PowerAction and power.poweraction.PowerAction().execute. """ power_action_obj = Mock() power_action_obj_execute = Mock( return_value=return_value, side_effect=side_effect) power_action_obj.execute = power_action_obj_execute power_action = test.patch(poweraction, 'PowerAction') power_action.return_value = power_action_obj return power_action, power_action_obj_execute def do_not_pause(test): test.patch_autospec(power.change, "pause", always_succeed_with(None)) test.patch_autospec(power.query, "pause", always_succeed_with(None)) def suppress_reporting(test): # Skip telling the region; just pass-through the query result. report_power_state = test.patch(power.query, "report_power_state") report_power_state.side_effect = lambda d, system_id, hostname: d class TestPowerHelpers(MAASTestCase): def setUp(self): super(TestPowerHelpers, self).setUp() self.useFixture(EventTypesAllRegistered()) def patch_rpc_methods(self): fixture = self.useFixture(MockClusterToRegionRPCFixture()) protocol, io = fixture.makeEventLoop( region.MarkNodeFailed, region.UpdateNodePowerState, region.SendEvent) return protocol, io def test_power_query_failure_emits_event(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') message = factory.make_name('message') protocol, io = self.patch_rpc_methods() d = power.query.power_query_failure( system_id, hostname, Failure(Exception(message))) # This blocks until the deferred is complete. 
io.flush() self.assertIsNone(extract_result(d)) self.assertThat( protocol.SendEvent, MockCalledOnceWith( ANY, type_name=EVENT_TYPES.NODE_POWER_QUERY_FAILED, system_id=system_id, description=( "Power state could not be queried: " + message), )) class TestPowerQuery(MAASTestCase): def setUp(self): super(TestPowerQuery, self).setUp() self.useFixture(EventTypesAllRegistered()) self.patch(power.query, "deferToThread", maybeDeferred) for power_driver in power_drivers_by_name.values(): self.patch( power_driver, "detect_missing_packages").return_value = [] def patch_rpc_methods(self, return_value={}, side_effect=None): fixture = self.useFixture(MockClusterToRegionRPCFixture()) protocol, io = fixture.makeEventLoop( region.MarkNodeFailed, region.SendEvent, region.UpdateNodePowerState) protocol.MarkNodeFailed.return_value = return_value protocol.MarkNodeFailed.side_effect = side_effect return protocol.SendEvent, protocol.MarkNodeFailed, io def test_get_power_state_queries_node(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_state = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False power_driver = power_drivers_by_name.get(power_type) detect_packages = self.patch_autospec( power_driver, "detect_missing_packages") detect_packages.return_value = [] # Patch the power action utility so that it says the node is # in on/off power state. power_action, execute = patch_PowerAction( self, return_value=power_state) _, markNodeBroken, io = self.patch_rpc_methods() d = power.query.get_power_state( system_id, hostname, power_type, context) # This blocks until the deferred is complete. io.flush() self.assertEqual(power_state, extract_result(d)) self.assertThat(detect_packages, MockCalledOnceWith()) self.assertThat( execute, MockCallsMatch( # One call to change the power state. call(power_change='query', **context), ), ) def test_get_power_state_fails_for_missing_packages(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_state = random.choice(['on', 'off']) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False # Patch the power action utility so that it says the node is # in on/off power state. power_action, execute = patch_PowerAction( self, return_value=power_state) _, markNodeBroken, io = self.patch_rpc_methods() power_driver = power_drivers_by_name.get(power_type) detect_packages = self.patch_autospec( power_driver, "detect_missing_packages") detect_packages.return_value = ['gone'] d = power.query.get_power_state( system_id, hostname, power_type, context) self.assertThat(detect_packages, MockCalledOnceWith()) return assert_fails_with(d, poweraction.PowerActionFail) def test_get_power_state_returns_unknown_for_certain_power_types(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') # Use a power type that is not among power.QUERY_POWER_TYPES. 
power_type = factory.make_name('power_type') context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False _, _, io = self.patch_rpc_methods() d = power.query.get_power_state( system_id, hostname, power_type, context) return assert_fails_with(d, poweraction.PowerActionFail) def test_get_power_state_retries_if_power_query_fails(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) power_state = random.choice(['on', 'off']) err_msg = factory.make_name('error') context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False # Simulate a failure to power query the node, then success. power_action, execute = patch_PowerAction(self, side_effect=[ poweraction.PowerActionFail(err_msg), power_state]) sendEvent, markNodeBroken, io = self.patch_rpc_methods() do_not_pause(self) d = power.query.get_power_state( system_id, hostname, power_type, context) # This blocks until the deferred is complete. io.flush() self.assertEqual(power_state, extract_result(d)) self.assertThat( execute, MockCallsMatch( call(power_change='query', **context), call(power_change='query', **context), ) ) # The node hasn't been marked broken. self.assertThat(markNodeBroken, MockNotCalled()) def test_report_power_state_changes_power_state_if_failure(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') err_msg = factory.make_name('error') _, _, io = self.patch_rpc_methods() self.patch_autospec(power, 'power_state_update') # Simulate a failure when querying state. query = fail(poweraction.PowerActionFail(err_msg)) report = power.query.report_power_state(query, system_id, hostname) io.flush() error = self.assertRaises( poweraction.PowerActionFail, extract_result, report) self.assertEqual(err_msg, unicode(error)) self.assertThat( power.power_state_update, MockCalledOnceWith(system_id, 'error')) def test_report_power_state_changes_power_state_if_success(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_state = random.choice(['on', 'off']) _, _, io = self.patch_rpc_methods() self.patch_autospec(power, 'power_state_update') # Simulate a success when querying state. query = succeed(power_state) report = power.query.report_power_state(query, system_id, hostname) io.flush() self.assertEqual(power_state, extract_result(report)) self.assertThat( power.power_state_update, MockCalledOnceWith(system_id, power_state)) def test_report_power_state_changes_power_state_if_unknown(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_state = "unknown" _, _, io = self.patch_rpc_methods() self.patch_autospec(power, 'power_state_update') # Simulate a success when querying state. 
query = succeed(power_state) report = power.query.report_power_state(query, system_id, hostname) io.flush() self.assertEqual(power_state, extract_result(report)) self.assertThat( power.power_state_update, MockCalledOnceWith(system_id, power_state)) def test_get_power_state_pauses_inbetween_retries(self): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') power_type = random.choice(power.QUERY_POWER_TYPES) context = { factory.make_name('context-key'): factory.make_name('context-val') } self.patch(power, 'is_driver_available').return_value = False # Simulate two failures to power up the node, then a success. power_action, execute = patch_PowerAction(self, side_effect=[ poweraction.PowerActionFail, poweraction.PowerActionFail, 'off']) self.patch(power.query, "deferToThread", maybeDeferred) _, _, io = self.patch_rpc_methods() clock = Clock() calls_and_pause = [ ([ call(power_change='query', **context), ], 3), ([ call(power_change='query', **context), ], 5), ([ call(power_change='query', **context), ], 10), ] calls = [] d = power.query.get_power_state( system_id, hostname, power_type, context, clock=clock) for newcalls, waiting_time in calls_and_pause: calls.extend(newcalls) # This blocks until the deferred is complete. io.flush() self.assertThat(execute, MockCallsMatch(*calls)) clock.advance(waiting_time) self.assertEqual("off", extract_result(d)) class TestPowerQueryExceptions(MAASTestCase): scenarios = tuple( (power_type, { "power_type": power_type, "power_driver": power_drivers_by_name.get(power_type), "func": ( # Function to invoke driver. "perform_power_driver_query" if power_type in PowerDriverRegistry else "perform_power_query"), "waits": ( # Pauses between retries. [] if power_type in PowerDriverRegistry else DEFAULT_WAITING_POLICY), "calls": ( # No. of calls to the driver. 1 if power_type in PowerDriverRegistry else len(DEFAULT_WAITING_POLICY)), }) for power_type in power.QUERY_POWER_TYPES ) def test_report_power_state_reports_all_exceptions(self): logger_twisted = self.useFixture(TwistedLoggerFixture()) logger_maaslog = self.useFixture(FakeLogger("maas")) # Avoid threads here. self.patch(power.query, "deferToThread", maybeDeferred) exception_type = factory.make_exception_type() exception_message = factory.make_string() exception = exception_type(exception_message) # Pretend the query always fails with `exception`. query = self.patch_autospec(power.query, self.func) query.side_effect = always_fail_with(exception) # Intercept calls to power_state_update() and send_event_node(). power_state_update = self.patch_autospec(power, "power_state_update") power_state_update.return_value = succeed(None) send_event_node = self.patch_autospec(power.query, "send_event_node") send_event_node.return_value = succeed(None) self.patch( self.power_driver, "detect_missing_packages").return_value = [] system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') context = sentinel.context clock = Clock() d = power.query.get_power_state( system_id, hostname, self.power_type, context, clock) d = power.query.report_power_state( d, system_id, hostname) # Crank through some number of retries. for wait in self.waits: self.assertFalse(d.called) clock.advance(wait) self.assertTrue(d.called) # Finally the exception from the query is raised. self.assertRaises(exception_type, extract_result, d) # The broken power query function patched earlier was called the same # number of times as there are steps in the default waiting policy. 
expected_call = call(system_id, hostname, self.power_type, context) expected_calls = [expected_call] * self.calls self.assertThat(query, MockCallsMatch(*expected_calls)) expected_message = ( "Power state could not be queried: %s" % exception_message) # An attempt was made to report the failure to the region. self.assertThat( power_state_update, MockCalledOnceWith(system_id, 'error')) # An attempt was made to log a node event with details. self.assertThat( send_event_node, MockCalledOnceWith( EVENT_TYPES.NODE_POWER_QUERY_FAILED, system_id, hostname, expected_message)) # Nothing was logged to the Twisted log. self.assertEqual("", logger_twisted.output) # A brief message is written to maaslog. self.assertEqual(expected_message + "\n", logger_maaslog.output) class TestPowerQueryAsync(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestPowerQueryAsync, self).setUp() do_not_pause(self) def make_node(self, power_type=None): system_id = factory.make_name('system_id') hostname = factory.make_name('hostname') if power_type is None: power_type = random.choice(power.QUERY_POWER_TYPES) state = random.choice(['on', 'off', 'unknown', 'error']) context = { factory.make_name('context-key'): ( factory.make_name('context-val')) } return { 'context': context, 'hostname': hostname, 'power_state': state, 'power_type': power_type, 'system_id': system_id, } def make_nodes(self, count=3): nodes = [self.make_node() for _ in xrange(count)] # Sanity check that these nodes are something that can emerge # from a call to ListNodePowerParameters. region.ListNodePowerParameters.makeResponse({"nodes": nodes}, None) return nodes def pick_alternate_state(self, state): return random.choice([ value for value in ['on', 'off', 'unknown', 'error'] if value != state]) @inlineCallbacks def test_query_all_nodes_gets_and_reports_power_state(self): nodes = self.make_nodes() # Report back that all nodes' power states are as recorded. power_states = [node['power_state'] for node in nodes] queries = list(imap(succeed, power_states)) get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = queries report_power_state = self.patch(power.query, 'report_power_state') report_power_state.side_effect = lambda d, sid, hn: d yield power.query.query_all_nodes(nodes) self.assertThat(get_power_state, MockCallsMatch(*( call( node['system_id'], node['hostname'], node['power_type'], node['context'], clock=reactor) for node in nodes ))) self.assertThat(report_power_state, MockCallsMatch(*( call(query, node['system_id'], node['hostname']) for query, node in izip(queries, nodes) ))) @inlineCallbacks def test_query_all_nodes_logs_skip_if_node_in_action_registry(self): node = self.make_node() power.power_action_registry[node['system_id']] = sentinel.action with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: yield power.query.query_all_nodes([node]) self.assertDocTestMatches( "hostname-...: Skipping query power status, " "power action already in progress.", maaslog.output) @inlineCallbacks def test_query_all_nodes_skips_nodes_in_action_registry(self): nodes = self.make_nodes() # First node is in the registry. power.power_action_registry[nodes[0]['system_id']] = sentinel.action # Report back the power state of nodes not in the registry.
power_states = [node['power_state'] for node in nodes[1:]] get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = imap(succeed, power_states) suppress_reporting(self) yield power.query.query_all_nodes(nodes) self.assertThat(get_power_state, MockCallsMatch(*( call( node['system_id'], node['hostname'], node['power_type'], node['context'], clock=reactor) for node in nodes[1:] ))) self.assertThat( get_power_state, Not(MockCalledWith( nodes[0]['system_id'], nodes[0]['hostname'], nodes[0]['power_type'], nodes[0]['context'], clock=reactor))) @inlineCallbacks def test_query_all_nodes_only_queries_queryable_power_types(self): nodes = self.make_nodes() # nodes are all queryable, so add one that isn't: nodes.append(self.make_node(power_type='ether_wake')) # Report back that all nodes' power states are as recorded. power_states = [node['power_state'] for node in nodes] get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = imap(succeed, power_states) suppress_reporting(self) yield power.query.query_all_nodes(nodes) self.assertThat(get_power_state, MockCallsMatch(*( call( node['system_id'], node['hostname'], node['power_type'], node['context'], clock=reactor) for node in nodes if node['power_type'] in power.QUERY_POWER_TYPES ))) @inlineCallbacks def test_query_all_nodes_swallows_PowerActionFail(self): node1, node2 = self.make_nodes(2) new_state_2 = self.pick_alternate_state(node2['power_state']) get_power_state = self.patch(power.query, 'get_power_state') error_msg = factory.make_name("error") get_power_state.side_effect = [ fail(poweraction.PowerActionFail(error_msg)), succeed(new_state_2), ] suppress_reporting(self) with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: yield power.query.query_all_nodes([node1, node2]) self.assertDocTestMatches( """\ hostname-...: Could not query power state: %s. hostname-...: Power state has changed from ... to ... """ % error_msg, maaslog.output) @inlineCallbacks def test_query_all_nodes_swallows_NoSuchNode(self): node1, node2 = self.make_nodes(2) new_state_2 = self.pick_alternate_state(node2['power_state']) get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = [ fail(exceptions.NoSuchNode()), succeed(new_state_2), ] suppress_reporting(self) with FakeLogger("maas.power", level=logging.DEBUG) as maaslog: yield power.query.query_all_nodes([node1, node2]) self.assertDocTestMatches( """\ hostname-...: Could not update power state: no such node. hostname-...: Power state has changed from ... to ... """, maaslog.output) @inlineCallbacks def test_query_all_nodes_swallows_Exception(self): node1, node2 = self.make_nodes(2) error_message = factory.make_name("error") error_type = factory.make_exception_type() new_state_2 = self.pick_alternate_state(node2['power_state']) get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = [ fail(error_type(error_message)), succeed(new_state_2), ] suppress_reporting(self) maaslog = FakeLogger("maas.power", level=logging.DEBUG) twistlog = TwistedLoggerFixture() with maaslog, twistlog: yield power.query.query_all_nodes([node1, node2]) self.assertDocTestMatches( """\ hostname-...: Failed to refresh power state: %s hostname-...: Power state has changed from ... to ... """ % error_message, maaslog.output) self.assertDocTestMatches( """\ Failed to refresh power state. 
Traceback (most recent call last): Failure: maastesting.factory.TestException#...: %s """ % error_message, twistlog.output) @inlineCallbacks def test_query_all_nodes_returns_deferredlist_of_number_of_nodes(self): node1, node2 = self.make_nodes(2) get_power_state = self.patch(power.query, 'get_power_state') get_power_state.side_effect = [ succeed(node1['power_state']), succeed(node2['power_state']), ] suppress_reporting(self) results = yield power.query.query_all_nodes([node1, node2]) self.assertEqual( [(True, node1['power_state']), (True, node2['power_state'])], results) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/__init__.py0000644000000000000000000000000013056115004024560 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/dhcp_probe_service.py0000644000000000000000000001230413056115004026660 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """ DHCP probing service.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DHCPProbeService", ] from datetime import timedelta import socket from provisioningserver.dhcp.detect import probe_interface from provisioningserver.logger.log import get_maas_logger from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import ( GetClusterInterfaces, ReportForeignDHCPServer, ) from provisioningserver.utils.twisted import ( pause, retries, ) from twisted.application.internet import TimerService from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.internet.threads import deferToThread from twisted.protocols.amp import UnhandledCommand maaslog = get_maas_logger("dhcp.probe") class DHCPProbeService(TimerService, object): """Service to probe for DHCP servers on this cluster's network. Built on top of Twisted's `TimerService`. :param reactor: An `IReactor` instance. :param cluster_uuid: This cluster's UUID. """ check_interval = timedelta(minutes=10).total_seconds() def __init__(self, client_service, reactor, cluster_uuid): # Call self.try_probe_dhcp() every self.check_interval. super(DHCPProbeService, self).__init__( self.check_interval, self.try_probe_dhcp) self.clock = reactor self.uuid = cluster_uuid self.client_service = client_service @inlineCallbacks def _get_cluster_interfaces(self, client): """Return the interfaces for this cluster.""" try: response = yield client( GetClusterInterfaces, cluster_uuid=self.uuid) except UnhandledCommand: # The region hasn't been upgraded to support this method # yet, so give up. Returning an empty dict means that this # run will end, since there are no interfaces to check. maaslog.error( "Unable to query region for interfaces: Region does not " "support the GetClusterInterfaces RPC method.") returnValue({}) else: returnValue(response['interfaces']) @inlineCallbacks def _inform_region_of_foreign_dhcp(self, client, name, foreign_dhcp_ip): """Tell the region that there's a rogue DHCP server. :param client: The RPC client to use. :param name: The name of the network interface where the rogue DHCP server was found. :param foreign_dhcp_ip: The IP address of the rogue server. """ try: yield client( ReportForeignDHCPServer, cluster_uuid=self.uuid, interface_name=name, foreign_dhcp_ip=foreign_dhcp_ip) except UnhandledCommand: # Not a lot we can do here... The region doesn't support # this method yet. 
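            # This catch-and-log fallback is the pattern these cluster
            # services use to stay compatible with an older region: try
            # the newest RPC command and degrade gracefully when the
            # region answers UnhandledCommand. A minimal sketch of the
            # pattern (ShinyNewCommand is a hypothetical command name):
            #
            #     try:
            #         yield client(ShinyNewCommand, uuid=self.uuid)
            #     except UnhandledCommand:
            #         maaslog.error(
            #             "Region does not support ShinyNewCommand.")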
maaslog.error( "Unable to inform region of rogue DHCP server: the region " "does not yet support the ReportForeignDHCPServer RPC " "method.") @inlineCallbacks def probe_dhcp(self): """Find all the interfaces on this cluster and probe for DHCP servers. """ client = None for elapsed, remaining, wait in retries(15, 5, self.clock): try: client = self.client_service.getClient() break except NoConnectionsAvailable: yield pause(wait, self.clock) else: maaslog.error( "Can't initiate DHCP probe, no RPC connection to region.") return cluster_interfaces = yield self._get_cluster_interfaces(client) # Iterate over interfaces and probe each one. for interface in cluster_interfaces: try: servers = yield deferToThread( probe_interface, interface['interface'], interface['ip']) except socket.error: maaslog.error( "Failed to probe sockets; did you configure authbind as " "per HACKING.txt?") break else: if len(servers) > 0: # Only send one, if it gets cleared out then the # next detection pass will send a different one, if it # still exists. yield self._inform_region_of_foreign_dhcp( client, interface['name'], servers.pop()) else: yield self._inform_region_of_foreign_dhcp( client, interface['name'], None) @inlineCallbacks def try_probe_dhcp(self): maaslog.debug("Running periodic DHCP probe.") try: yield self.probe_dhcp() except Exception as error: maaslog.error( "Unable to probe for rogue DHCP servers: %s", unicode(error)) else: maaslog.debug("Finished periodic DHCP probe.") maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/image.py0000644000000000000000000000221113056115004024111 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Twisted Application Plugin for the MAAS Boot Image server""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "BootImageEndpointService", ] from twisted.application.internet import StreamServerEndpointService from twisted.web.resource import Resource from twisted.web.server import Site from twisted.web.static import File class BootImageEndpointService(StreamServerEndpointService): """Service for serving images to the TFTP server via HTTP :ivar site: The twisted site resource """ def __init__(self, resource_root, endpoint): """ :param resource_root: The root directory for the Image server. :param endpoint: The endpoint on which the server should listen. """ resource = Resource() resource.putChild('images', File(resource_root)) self.site = Site(resource) super(BootImageEndpointService, self).__init__(endpoint, self.site) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/image_download_service.py0000644000000000000000000001112713056115004027526 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
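# A note on BootImageEndpointService above: as a
# StreamServerEndpointService it only needs to be handed a resource root
# and a listening endpoint. A minimal sketch of wiring it up (the port
# number and path are illustrative assumptions, not MAAS's configured
# values):
#
#     from twisted.internet import reactor
#     from twisted.internet.endpoints import TCP4ServerEndpoint
#
#     endpoint = TCP4ServerEndpoint(reactor, 5248)
#     service = BootImageEndpointService(
#         "/var/lib/maas/boot-resources/current", endpoint)
#     service.startService()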
"""Service to periodically refresh the boot images.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ImageDownloadService", ] from datetime import timedelta from provisioningserver.boot import tftppath from provisioningserver.logger import get_maas_logger from provisioningserver.rpc.boot_images import import_boot_images from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import ( GetBootSources, GetBootSourcesV2, GetProxies, ) from provisioningserver.utils.twisted import ( pause, retries, ) from twisted.application.internet import TimerService from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.python import log from twisted.spread.pb import NoSuchMethod maaslog = get_maas_logger("boot_image_download_service") class ImageDownloadService(TimerService, object): """Twisted service to periodically refresh ephemeral images.""" check_interval = timedelta(minutes=5).total_seconds() def __init__(self, client_service, cluster_uuid, tftp_root, reactor): """Twisted service to periodically refresh ephemeral images. :param client_service: A `ClusterClientService` instance. :param cluster_uuid: The UUID for this cluster, as a string. :param tftp_root: The path to the TFTP root directory. :param reactor: An `IReactor` instance. """ super(ImageDownloadService, self).__init__( self.check_interval, self.try_download) self.client_service = client_service self.uuid = cluster_uuid self.tftp_root = tftp_root self.clock = reactor def try_download(self): """Wrap download attempts in something that catches Failures. Log the full error to the Twisted log, and a concise error to the maas log. """ def download_failure(failure): log.err(failure, "Downloading images failed.") maaslog.error( "Failed to download images: %s", failure.getErrorMessage()) return self.maybe_start_download().addErrback(download_failure) @inlineCallbacks def _get_boot_sources(self, client): """Gets the boot sources from the region.""" try: sources = yield client(GetBootSourcesV2, uuid=self.uuid) except NoSuchMethod: # Region has not been upgraded to support the new call, use the # old call. The old call did not provide the new os selection # parameter. Region does not support boot source selection by os, # so its set too allow all operating systems. sources = yield client(GetBootSources, uuid=self.uuid) for source in sources['sources']: for selection in source['selections']: selection['os'] = '*' returnValue(sources) @inlineCallbacks def _start_download(self): client = None # Retry a few times, since this service usually comes up before # the RPC service. for elapsed, remaining, wait in retries(15, 5, self.clock): try: client = self.client_service.getClient() break except NoConnectionsAvailable: yield pause(wait, self.clock) else: maaslog.error( "Can't initiate image download, no RPC connection to region.") return # Get sources from region sources = yield self._get_boot_sources(client) # Get http proxy from region proxies = yield client(GetProxies) def get_proxy_url(scheme): url = proxies.get(scheme) # url is a ParsedResult. return None if url is None else url.geturl() yield import_boot_images( sources.get("sources"), get_proxy_url("http"), get_proxy_url("https")) @inlineCallbacks def maybe_start_download(self): """Check the time the last image refresh happened and initiate a new one if older than 15 minutes. 
""" last_modified = tftppath.maas_meta_last_modified(self.tftp_root) if last_modified is None: yield self._start_download() else: age_in_seconds = self.clock.seconds() - last_modified if age_in_seconds >= timedelta(minutes=15).total_seconds(): yield self._start_download() maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/lease_upload_service.py0000644000000000000000000001014713056115004027213 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Twisted service that periodically uploads DHCP leases to the region.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "convert_leases_to_mappings", "convert_mappings_to_leases", "LeaseUploadService", ] from provisioningserver.dhcp.leases import ( check_lease_changes, record_lease_state, ) from provisioningserver.logger import get_maas_logger from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import UpdateLeases from provisioningserver.utils.twisted import ( pause, retries, ) from twisted.application.internet import TimerService from twisted.internet.defer import inlineCallbacks from twisted.internet.threads import deferToThread from twisted.python import log maaslog = get_maas_logger("lease_upload_service") def convert_mappings_to_leases(mappings): """Convert AMP mappings to record_lease_state() leases. Take mappings, as used by UpdateLeases, and turn into leases as used by record_lease_state(). """ return [ (mapping["ip"], mapping["mac"]) for mapping in mappings ] def convert_leases_to_mappings(leases): """Convert record_lease_state() leases into UpdateLeases mappings. Take the leases list, as returned by record_lease_state(), and turn it into a mappings list suitable for transportation in the UpdateLeases AMP command. """ return [ {"ip": ip, "mac": mac} for ip, mac in leases ] class LeaseUploadService(TimerService, object): """Twisted service to periodically upload DHCP leases to the region. :param client_service: A `ClusterClientService` instance for talking to the region controller. :param reactor: An `IReactor` instance. """ check_interval = 60 # In seconds. def __init__(self, client_service, reactor, cluster_uuid): # Call self.try_upload() every self.check_interval. super(LeaseUploadService, self).__init__( self.check_interval, self.try_upload) self.clock = reactor self.client_service = client_service self.uuid = cluster_uuid maaslog.info("LeaseUploadService starting.") def try_upload(self): """Wrap upload attempts in something that catches Failures. Log the full error to the Twisted log, and a concise error to the maas log. """ def upload_failure(failure): log.err(failure) maaslog.error( "Failed to upload leases: %s", failure.getErrorMessage()) return self._get_client_and_start_upload().addErrback(upload_failure) @inlineCallbacks def _get_client_and_start_upload(self): # Retry a few times, since this service usually comes up before # the RPC service. 
for elapsed, remaining, wait in retries(15, 5, self.clock): try: client = self.client_service.getClient() break except NoConnectionsAvailable: yield pause(wait, clock=self.clock) else: maaslog.error( "Failed to connect to region controller, cannot upload leases") return yield self._start_upload(client) @inlineCallbacks def _start_upload(self, client): maaslog.debug("Scanning DHCP leases...") updated_lease_info = yield deferToThread(check_lease_changes) if updated_lease_info is None: maaslog.debug("No leases changed since last scan") else: timestamp, leases = updated_lease_info record_lease_state(timestamp, leases) mappings = convert_leases_to_mappings(leases) maaslog.info( "Uploading %d DHCP leases to region controller.", len(mappings)) yield client( UpdateLeases, uuid=self.uuid, mappings=mappings) maaslog.debug("Lease upload complete.") maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/node_power_monitor_service.py0000644000000000000000000000574713056115004030500 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Service to periodically query the power state on this cluster's nodes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "NodePowerMonitorService" ] from datetime import timedelta from provisioningserver.logger.log import get_maas_logger from provisioningserver.power.query import query_all_nodes from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import ( NoConnectionsAvailable, NoSuchCluster, ) from provisioningserver.rpc.region import ListNodePowerParameters from twisted.application.internet import TimerService from twisted.internet.defer import inlineCallbacks from twisted.python import log maaslog = get_maas_logger("power_monitor_service") class NodePowerMonitorService(TimerService, object): """Service to monitor the power status of all nodes in this cluster.""" check_interval = timedelta(seconds=15).total_seconds() max_nodes_at_once = 5 def __init__(self, cluster_uuid, clock=None): # Call self.query_nodes() every self.check_interval. super(NodePowerMonitorService, self).__init__( self.check_interval, self.try_query_nodes, cluster_uuid) self.clock = clock def try_query_nodes(self, uuid): """Attempt to query nodes' power states. Log errors on failure, but do not propagate them up; that will stop the timed loop from running. """ try: client = getRegionClient() except NoConnectionsAvailable: maaslog.debug( "Cannot monitor nodes' power status; " "region not available.") else: d = self.query_nodes(client, uuid) d.addErrback(self.query_nodes_failed, uuid) return d @inlineCallbacks def query_nodes(self, client, uuid): # Get the nodes' power parameters from the region. Keep getting more # power parameters until the region returns an empty list. while True: response = yield client(ListNodePowerParameters, uuid=uuid) power_parameters = response['nodes'] if len(power_parameters) > 0: yield query_all_nodes( power_parameters, max_concurrency=self.max_nodes_at_once, clock=self.clock) else: break def query_nodes_failed(self, failure, uuid): if failure.check(NoSuchCluster): maaslog.error("Cluster %s is not recognised.", uuid) else: # Log the error in full to the Twisted log. log.err(failure, "Querying node power states.") # Log something concise to the MAAS log. 
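            # (The division of labour mirrors try_download and
            # try_upload in the sibling services: the Twisted log gets
            # the full failure with traceback, while maaslog gets one
            # terse line an operator can act on.)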
            maaslog.error(
                "Failed to query nodes' power status: %s",
                failure.getErrorMessage())
maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/service_monitor_service.py0000644000000000000000000000311613056115004027763 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Service to periodically check that all the other services that MAAS
depends on stay running."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "ServiceMonitorService"
    ]

from datetime import timedelta

from provisioningserver.config import is_dev_environment
from provisioningserver.service_monitor import service_monitor
from twisted.application.internet import TimerService
from twisted.internet.threads import deferToThread
from twisted.python import log


class ServiceMonitorService(TimerService, object):
    """Service to monitor external services that the cluster requires."""

    check_interval = timedelta(minutes=2).total_seconds()

    def __init__(self, clock=None):
        # Call self.monitor_services() every self.check_interval.
        super(ServiceMonitorService, self).__init__(
            self.check_interval, self.monitor_services)
        self.clock = clock

    def monitor_services(self):
        """Monitors all of the external services and makes sure they
        stay running.
        """
        if is_dev_environment():
            log.msg(
                "Skipping check of services; they're not running under "
                "the supervision of Upstart or systemd.")
        else:
            d = deferToThread(service_monitor.ensure_all_services)
            d.addErrback(log.err, "Failed to monitor services.")
            return d
maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/0000755000000000000000000000000013056115004023623 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tftp.py0000644000000000000000000003201313056115004024007 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
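# Nearly every service in this package follows the same recipe: subclass
# TimerService, pass (interval, callable) to its __init__, and keep a
# self.clock that tests can replace with twisted.internet.task.Clock. A
# minimal sketch of the recipe (names are hypothetical):
#
#     from twisted.application.internet import TimerService
#
#     class PeriodicService(TimerService, object):
#
#         check_interval = 30  # seconds
#
#         def __init__(self, reactor):
#             super(PeriodicService, self).__init__(
#                 self.check_interval, self.tick)
#             self.clock = reactor
#
#         def tick(self):
#             pass  # periodic work; may return a Deferred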
"""Twisted Application Plugin for the MAAS TFTP server.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "TFTPBackend", "TFTPService", ] from functools import partial import httplib import json from socket import ( AF_INET, AF_INET6, ) from urllib import urlencode from urlparse import ( parse_qsl, urlparse, ) from netaddr import IPAddress from provisioningserver.boot import ( BootMethodRegistry, get_remote_mac, ) from provisioningserver.drivers import ArchitectureRegistry from provisioningserver.events import ( EVENT_TYPES, send_event_node_mac_address, ) from provisioningserver.kernel_opts import KernelParameters from provisioningserver.utils import tftp from provisioningserver.utils.network import get_all_interface_addresses from provisioningserver.utils.twisted import ( deferred, PageFetcher, ) from tftp.backend import FilesystemSynchronousBackend from tftp.errors import ( BackendError, FileNotFound, ) from tftp.protocol import TFTP from twisted.application import internet from twisted.application.service import MultiService from twisted.internet import ( reactor, udp, ) from twisted.internet.abstract import isIPv6Address from twisted.internet.address import ( IPv4Address, IPv6Address, ) from twisted.internet.defer import ( inlineCallbacks, maybeDeferred, returnValue, ) from twisted.internet.task import deferLater from twisted.python import log import twisted.web.error def log_request(mac_address, file_name, clock=reactor): """Log a TFTP request. This will be logged at a later iteration of the `clock` so as to not delay the task currently in progress. """ d = deferLater( clock, 0, send_event_node_mac_address, event_type=EVENT_TYPES.NODE_TFTP_REQUEST, mac_address=mac_address, description=file_name) d.addErrback(log.err, "Logging TFTP request failed.") class TFTPBackend(FilesystemSynchronousBackend): """A partially dynamic read-only TFTP server. Static files such as kernels and initrds, as well as any non-MAAS files that the system may already be set up to serve, are served up normally. But PXE configurations are generated on the fly. When a PXE configuration file is requested, the server asynchronously requests the appropriate parameters from the API (at a configurable "generator URL") and generates a config file based on those. The regular expressions `re_config_file` and `re_mac_address` specify which files the server generates on the fly. Any other requests are passed on to the filesystem. Passing requests on to the API must be done very selectively, because failures cause the boot process to halt. This is why the expression for matching the MAC address is so narrowly defined: PXELINUX attempts to fetch files at many similar paths which must not be passed on. """ def __init__(self, base_path, generator_url, cluster_uuid): """ :param base_path: The root directory for this TFTP server. :param generator_url: The URL which can be queried for the PXE config. See `get_generator_url` for the types of queries it is expected to accept. :param cluster_uuid: The cluster's UUID, as a string. """ super(TFTPBackend, self).__init__( base_path, can_read=True, can_write=False) self.generator_url = urlparse(generator_url) self.cluster_uuid = cluster_uuid self.fetcher = PageFetcher(agent=self.__class__) self.get_page = self.fetcher.get def get_generator_url(self, params): """Calculate the URL, including query, from which we can fetch additional configuration parameters. 
        :param params: A dict, or iterable suitable for updating a dict,
            of additional query parameters.
        """
        query = {}
        # Merge parameters from the generator URL.
        query.update(parse_qsl(self.generator_url.query))
        # Merge parameters obtained from the request.
        query.update(params)
        # Merge updated query into the generator URL.
        url = self.generator_url._replace(query=urlencode(query))
        # TODO: do something more intelligent with unicode URLs here; see
        # apiclient.utils.ascii_url() for inspiration.
        return url.geturl().encode("ascii")

    @inlineCallbacks
    def get_boot_method(self, file_name):
        """Find the correct boot method."""
        for _, method in BootMethodRegistry:
            params = yield maybeDeferred(method.match_path, self, file_name)
            if params is not None:
                params["bios_boot_method"] = method.bios_boot_method
                returnValue((method, params))
        returnValue((None, None))

    @deferred
    def get_kernel_params(self, params):
        """Return kernel parameters obtained from the API.

        :param params: Parameters so far obtained, typically from the file
            path requested.
        :return: A `KernelParameters` instance.
        """
        url = self.get_generator_url(params)

        def reassemble(data):
            return KernelParameters(**data)

        d = self.get_page(url)
        d.addCallback(json.loads)
        d.addCallback(reassemble)
        return d

    @deferred
    def get_boot_method_reader(self, boot_method, params):
        """Return an `IReader` for a boot method.

        :param boot_method: Boot method that is generating the config.
        :param params: Parameters so far obtained, typically from the file
            path requested.
        """
        def generate(kernel_params):
            return boot_method.get_reader(
                self, kernel_params=kernel_params, **params)

        d = self.get_kernel_params(params)
        d.addCallback(generate)
        return d

    @staticmethod
    def get_page_errback(failure, file_name):
        failure.trap(twisted.web.error.Error)
        # This twisted.web.error.Error.status object ends up being a
        # string for some reason, but the constants we can compare against
        # (both in httplib and twisted.web.http) are ints.
        try:
            status_int = int(failure.value.status)
        except ValueError:
            # Assume that it's some other error and propagate it.
            return failure

        if status_int == httplib.NO_CONTENT:
            # Convert HTTP No Content to a TFTP file not found.
            raise FileNotFound(file_name)
        elif status_int == httplib.NOT_FOUND:
            # Convert HTTP Not Found to a TFTP file not found.
            raise FileNotFound(file_name)
        else:
            # Otherwise propagate the unknown error.
            return failure

    @deferred
    def handle_boot_method(self, file_name, result):
        boot_method, params = result
        if boot_method is None:
            return super(TFTPBackend, self).get_reader(file_name)

        # Map PXE namespace architecture names to MAAS's.
        arch = params.get("arch")
        if arch is not None:
            maasarch = ArchitectureRegistry.get_by_pxealias(arch)
            if maasarch is not None:
                params["arch"] = maasarch.name.split("/")[0]

        # Send the local and remote endpoint addresses.
        local_host, local_port = tftp.get_local_address()
        params["local"] = local_host
        remote_host, remote_port = tftp.get_remote_address()
        params["remote"] = remote_host
        params["cluster_uuid"] = self.cluster_uuid
        d = self.get_boot_method_reader(boot_method, params)
        return d

    @staticmethod
    def all_is_lost_errback(failure):
        if failure.check(BackendError):
            # This failure is something that the TFTP server knows how to
            # deal with, so pass it through.
            return failure
        else:
            # Something broke badly; record it.
            log.err(failure, "Starting TFTP back-end failed.")
            # Don't keep people waiting; tell them something broke right now.
            raise BackendError(failure.getErrorMessage())

    @deferred
    def get_reader(self, file_name):
        """See `IBackend.get_reader()`.
        If `file_name` matches a boot method then the response is obtained
        from that boot method. Otherwise the filesystem is used to service
        the response.
        """
        # It is possible for a client to request the file with '\' instead
        # of '/', for example 'bootx64.efi'. Convert all '\' to '/' to be
        # Unix compatible.
        file_name = file_name.replace('\\', '/')
        mac_address = get_remote_mac()
        if mac_address is not None:
            log_request(mac_address, file_name)
        d = self.get_boot_method(file_name)
        d.addCallback(partial(self.handle_boot_method, file_name))
        d.addErrback(self.get_page_errback, file_name)
        d.addErrback(self.all_is_lost_errback)
        return d


class Port(udp.Port):
    """A :py:class:`udp.Port` that groks IPv6."""

    # This must be set by call sites.
    addressFamily = None

    def getHost(self):
        """See :py:meth:`twisted.internet.udp.Port.getHost`."""
        host, port = self.socket.getsockname()[:2]
        addr_type = IPv6Address if isIPv6Address(host) else IPv4Address
        return addr_type('UDP', host, port)


class UDPServer(internet.UDPServer):
    """A :py:class:`~internet.UDPServer` that groks IPv6.

    This creates the port directly instead of using the reactor's
    ``listenUDP`` method so that we can do a switcharoo to our own
    IPv6-enabled port implementation.
    """

    def _getPort(self):
        """See :py:meth:`twisted.application.internet.UDPServer._getPort`."""
        return self._listenUDP(*self.args, **self.kwargs)

    def _listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """See :py:meth:`twisted.internet.reactor.listenUDP`."""
        p = Port(port, protocol, interface, maxPacketSize)
        p.addressFamily = AF_INET6 if isIPv6Address(interface) else AF_INET
        p.startListening()
        return p


class TFTPService(MultiService, object):
    """An umbrella service representing a set of running TFTP servers.

    Creates a UDP server individually for each discovered network
    interface, so that we can detect the interface via which we have
    received a datagram. It then periodically updates the running servers
    in case there's a change to the host machine's network configuration.

    :ivar backend: The :class:`TFTPBackend` being used to service TFTP
        requests.
    :ivar port: The port on which each server is started.
    :ivar refresher: A :class:`TimerService` that calls ``updateServers``
        periodically.
    """

    def __init__(self, resource_root, port, generator, uuid):
        """
        :param resource_root: The root directory for this TFTP server.
        :param port: The port on which each server should be started.
        :param generator: The URL to be queried for PXE configuration.
            This will normally point to the `pxeconfig` endpoint on the
            region-controller API.
        :param uuid: The cluster's UUID, as a string.
        """
        super(TFTPService, self).__init__()
        self.backend = TFTPBackend(resource_root, generator, uuid)
        self.port = port
        # Establish a periodic call to self.updateServers() every 45
        # seconds, so that this service eventually converges on truth.
        # TimerService ensures that a call is made to its target
        # function immediately as it's started, so there's no need to
        # call updateServers() from here.
        self.refresher = internet.TimerService(45, self.updateServers)
        self.refresher.setName("refresher")
        self.refresher.setServiceParent(self)

    def getServers(self):
        """Return a set of all configured servers.

        :rtype: :class:`set` of :class:`internet.UDPServer`
        """
        return {
            service for service in self
            if service is not self.refresher
        }

    def updateServers(self):
        """Run a server on every interface.

        For each configured network interface this will start a TFTP
        server.
        If called later it will bring up servers on newly configured
        interfaces and bring down servers on deconfigured interfaces.
        """
        addrs_established = set(service.name for service in self.getServers())
        addrs_desired = set(get_all_interface_addresses())

        for address in addrs_desired - addrs_established:
            if not IPAddress(address).is_link_local():
                tftp_service = UDPServer(
                    self.port, TFTP(self.backend), interface=address)
                tftp_service.setName(address)
                tftp_service.setServiceParent(self)

        for address in addrs_established - addrs_desired:
            tftp_service = self.getServiceNamed(address)
            tftp_service.disownServiceParent()
maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/__init__.py0000644000000000000000000000000013056115004025722 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_dhcp_probe_service.py0000644000000000000000000002064313056115004031066 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for the periodic DHCP prober."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from maastesting.factory import factory
from maastesting.matchers import (
    get_mock_calls,
    HasLength,
    MockCalledOnceWith,
    MockNotCalled,
    )
from maastesting.testcase import MAASTwistedRunTest
from mock import (
    Mock,
    sentinel,
    )
from provisioningserver.pserv_services import dhcp_probe_service
from provisioningserver.pserv_services.dhcp_probe_service import (
    DHCPProbeService,
    )
from provisioningserver.rpc import (
    getRegionClient,
    region,
    )
from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture
from provisioningserver.testing.testcase import PservTestCase
from twisted.internet import defer
from twisted.internet.task import Clock


class TestDHCPProbeService(PservTestCase):

    run_tests_with = MAASTwistedRunTest.make_factory(timeout=5)

    def setUp(self):
        super(TestDHCPProbeService, self).setUp()
        self.cluster_uuid = factory.make_UUID()

    def patch_rpc_methods(self):
        fixture = self.useFixture(MockLiveClusterToRegionRPCFixture())
        protocol, connecting = fixture.makeEventLoop(
            region.GetClusterInterfaces, region.ReportForeignDHCPServer)
        return protocol, connecting

    def make_cluster_interface_values(self, ip=None):
        """Return a dict describing a cluster interface."""
        if ip is None:
            ip = factory.make_ipv4_address()
        return {
            'name': factory.make_name('interface'),
            'interface': factory.make_name('eth'),
            'ip': ip,
        }

    def test_is_called_every_interval(self):
        clock = Clock()
        service = DHCPProbeService(
            sentinel.service, clock, self.cluster_uuid)

        # Avoid actually probing.
        probe_dhcp = self.patch(service, 'probe_dhcp')

        # Until the service has started, probe_dhcp() won't be called.
        self.assertThat(probe_dhcp, MockNotCalled())

        # The first call is issued at startup.
        service.startService()
        self.assertThat(probe_dhcp, MockCalledOnceWith())

        # Wind clock forward one second less than the desired interval.
        clock.advance(service.check_interval - 1)
        # No more periodic calls made.
        self.assertEqual(1, len(get_mock_calls(probe_dhcp)))

        # Wind clock forward one second, past the interval.
        clock.advance(1)

        # Now there were two calls.
self.assertThat(get_mock_calls(probe_dhcp), HasLength(2)) def test_probe_is_initiated_in_new_thread(self): clock = Clock() interface = self.make_cluster_interface_values() rpc_service = Mock() rpc_client = rpc_service.getClient.return_value rpc_client.side_effect = [ defer.succeed(dict(interfaces=[interface])), ] # We could patch out 'periodic_probe_task' instead here but this # is better because: # 1. The former requires spinning the reactor again before being # able to test the result. # 2. This way there's no thread to clean up after the test. deferToThread = self.patch(dhcp_probe_service, 'deferToThread') deferToThread.return_value = defer.succeed(None) service = DHCPProbeService( rpc_service, clock, self.cluster_uuid) service.startService() self.assertThat( deferToThread, MockCalledOnceWith( dhcp_probe_service.probe_interface, interface['interface'], interface['ip'])) @defer.inlineCallbacks def test_exits_gracefully_if_cant_get_interfaces(self): clock = Clock() maaslog = self.patch(dhcp_probe_service, 'maaslog') protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) del protocol._commandDispatch[ region.GetClusterInterfaces.commandName] rpc_service = Mock() rpc_service.getClient.return_value = getRegionClient() service = DHCPProbeService( rpc_service, clock, self.cluster_uuid) yield service.startService() yield service.stopService() self.assertThat( maaslog.error, MockCalledOnceWith( "Unable to query region for interfaces: Region does not " "support the GetClusterInterfaces RPC method.")) @defer.inlineCallbacks def test_exits_gracefully_if_cant_report_foreign_dhcp_server(self): clock = Clock() maaslog = self.patch(dhcp_probe_service, 'maaslog') deferToThread = self.patch( dhcp_probe_service, 'deferToThread') deferToThread.return_value = defer.succeed(['192.168.0.100']) protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) del protocol._commandDispatch[ region.ReportForeignDHCPServer.commandName] protocol.GetClusterInterfaces.return_value = { 'interfaces': [ self.make_cluster_interface_values(ip='192.168.0.1'), ], } rpc_service = Mock() rpc_service.getClient.return_value = getRegionClient() service = DHCPProbeService( rpc_service, clock, self.cluster_uuid) yield service.startService() yield service.stopService() self.assertThat( maaslog.error, MockCalledOnceWith( "Unable to inform region of rogue DHCP server: the region " "does not yet support the ReportForeignDHCPServer RPC " "method.")) def test_logs_errors(self): clock = Clock() maaslog = self.patch(dhcp_probe_service, 'maaslog') service = DHCPProbeService( sentinel.service, clock, self.cluster_uuid) error_message = factory.make_string() self.patch(service, 'probe_dhcp').side_effect = Exception( error_message) service.startService() self.assertThat( maaslog.error, MockCalledOnceWith( "Unable to probe for rogue DHCP servers: %s", error_message)) @defer.inlineCallbacks def test_reports_foreign_dhcp_servers_to_region(self): clock = Clock() protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) deferToThread = self.patch( dhcp_probe_service, 'deferToThread') foreign_dhcp_ip = factory.make_ipv4_address() deferToThread.return_value = defer.succeed( [foreign_dhcp_ip]) interface = self.make_cluster_interface_values() protocol.GetClusterInterfaces.return_value = { 'interfaces': [interface], } rpc_service = Mock() rpc_service.getClient.return_value = getRegionClient() service = DHCPProbeService( rpc_service, clock, self.cluster_uuid) yield 
service.startService() yield service.stopService() self.assertThat( protocol.ReportForeignDHCPServer, MockCalledOnceWith( protocol, cluster_uuid=self.cluster_uuid, interface_name=interface['name'], foreign_dhcp_ip=foreign_dhcp_ip)) @defer.inlineCallbacks def test_reports_lack_of_foreign_dhcp_servers_to_region(self): clock = Clock() protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) deferToThread = self.patch( dhcp_probe_service, 'deferToThread') deferToThread.return_value = defer.succeed([]) interface = self.make_cluster_interface_values() protocol.GetClusterInterfaces.return_value = { 'interfaces': [interface], } rpc_service = Mock() rpc_service.getClient.return_value = getRegionClient() service = DHCPProbeService( rpc_service, clock, self.cluster_uuid) yield service.startService() yield service.stopService() self.assertThat( protocol.ReportForeignDHCPServer, MockCalledOnceWith( protocol, cluster_uuid=self.cluster_uuid, interface_name=interface['name'], foreign_dhcp_ip=None)) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_image_download_service.py0000644000000000000000000002433013056115004031727 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for provisioningserver.pserv_services.image_download_service""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import timedelta from urlparse import urlparse from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( get_mock_calls, MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import MAASTwistedRunTest from maastesting.twisted import TwistedLoggerFixture from mock import ( call, Mock, sentinel, ) from provisioningserver.boot import tftppath from provisioningserver.pserv_services.image_download_service import ( ImageDownloadService, ) from provisioningserver.rpc import boot_images from provisioningserver.rpc.boot_images import _run_import from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import ( GetBootSources, GetBootSourcesV2, ) from provisioningserver.testing.testcase import PservTestCase from testtools.deferredruntest import extract_result from twisted.application.internet import TimerService from twisted.internet import defer from twisted.internet.task import Clock from twisted.spread.pb import NoSuchMethod class TestPeriodicImageDownloadService(PservTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_init(self): service = ImageDownloadService( sentinel.service, sentinel.uuid, sentinel.tftp_root, sentinel.clock) self.assertIsInstance(service, TimerService) self.assertIs(service.clock, sentinel.clock) self.assertIs(service.uuid, sentinel.uuid) self.assertIs(service.client_service, sentinel.service) self.assertIs(service.tftp_root, sentinel.tftp_root) def patch_download(self, service, return_value): patched = self.patch(service, '_start_download') patched.return_value = defer.succeed(return_value) return patched def test_is_called_every_interval(self): clock = Clock() service = ImageDownloadService( sentinel.service, sentinel.uuid, sentinel.tftp_root, clock) # Avoid actual downloads: self.patch_download(service, None) maas_meta_last_modified = self.patch( tftppath, 'maas_meta_last_modified') 
        maas_meta_last_modified.return_value = None
        service.startService()

        # The first call is issued at startup.
        self.assertEqual(1, len(get_mock_calls(maas_meta_last_modified)))

        # Wind clock forward one second less than the desired interval.
        clock.advance(service.check_interval - 1)

        # No more periodic calls made.
        self.assertEqual(1, len(get_mock_calls(maas_meta_last_modified)))

        # Wind clock forward one second, past the interval.
        clock.advance(1)

        # Now there were two calls.
        self.assertEqual(2, len(get_mock_calls(maas_meta_last_modified)))

        # Forward another interval, should be three calls.
        clock.advance(service.check_interval)
        self.assertEqual(3, len(get_mock_calls(maas_meta_last_modified)))

    def test_initiates_download_if_no_meta_file(self):
        clock = Clock()
        service = ImageDownloadService(
            sentinel.service, sentinel.uuid, sentinel.tftp_root, clock)
        _start_download = self.patch_download(service, None)
        self.patch(
            tftppath, 'maas_meta_last_modified').return_value = None
        service.startService()
        self.assertThat(_start_download, MockCalledOnceWith())

    def test_initiates_download_if_15_minutes_has_passed(self):
        clock = Clock()
        service = ImageDownloadService(
            sentinel.service, sentinel.uuid, sentinel.tftp_root, clock)
        _start_download = self.patch_download(service, None)
        fifteen_minutes_ago = (
            clock.seconds() - timedelta(minutes=15).total_seconds())
        self.patch(
            tftppath,
            'maas_meta_last_modified').return_value = fifteen_minutes_ago
        service.startService()
        self.assertThat(_start_download, MockCalledOnceWith())

    def test_no_download_if_15_minutes_has_not_passed(self):
        clock = Clock()
        service = ImageDownloadService(
            sentinel.service, sentinel.uuid, sentinel.tftp_root, clock)
        _start_download = self.patch_download(service, None)
        fifteen_minutes = timedelta(minutes=15).total_seconds()
        self.patch(
            tftppath,
            'maas_meta_last_modified').return_value = clock.seconds()
        clock.advance(fifteen_minutes - 1)
        service.startService()
        self.assertThat(_start_download, MockNotCalled())

    def test_download_is_initiated_in_new_thread(self):
        clock = Clock()
        maas_meta_last_modified = self.patch(
            tftppath, 'maas_meta_last_modified')
        fifteen_minutes = timedelta(minutes=15).total_seconds()
        maas_meta_last_modified.return_value = (
            clock.seconds() - fifteen_minutes)
        http_proxy = factory.make_simple_http_url()
        https_proxy = factory.make_simple_http_url()
        rpc_client = Mock()
        client_call = Mock()
        client_call.side_effect = [
            defer.succeed(dict(sources=sentinel.sources)),
            defer.succeed(dict(
                http=urlparse(http_proxy), https=urlparse(https_proxy))),
        ]
        rpc_client.getClient.return_value = client_call

        # We could patch out 'import_boot_images' instead here but I
        # don't do that for 2 reasons:
        # 1. It requires spinning the reactor again before being able to
        #    test the result.
        # 2. It means there's no thread to clean up after the test.
deferToThread = self.patch(boot_images, 'deferToThread') deferToThread.return_value = defer.succeed(None) service = ImageDownloadService( rpc_client, sentinel.uuid, sentinel.tftp_root, clock) service.startService() self.assertThat( deferToThread, MockCalledOnceWith( _run_import, sentinel.sources, http_proxy=http_proxy, https_proxy=https_proxy)) def test_no_download_if_no_rpc_connections(self): rpc_client = Mock() failure = NoConnectionsAvailable() rpc_client.getClient.side_effect = failure deferToThread = self.patch(boot_images, 'deferToThread') service = ImageDownloadService( rpc_client, sentinel.uuid, self.make_dir(), Clock()) service.startService() self.assertThat(deferToThread, MockNotCalled()) def test_logs_other_errors(self): service = ImageDownloadService( sentinel.rpc, sentinel.uuid, sentinel.tftp_root, Clock()) maybe_start_download = self.patch(service, "maybe_start_download") maybe_start_download.return_value = defer.fail( ZeroDivisionError("Such a shame I can't divide by zero")) with FakeLogger("maas") as maaslog, TwistedLoggerFixture() as logger: d = service.try_download() self.assertEqual(None, extract_result(d)) self.assertDocTestMatches( "Failed to download images: " "Such a shame I can't divide by zero", maaslog.output) self.assertDocTestMatches( """\ Downloading images failed. Traceback (most recent call last): Failure: exceptions.ZeroDivisionError: Such a shame ... """, logger.output) class TestGetBootSources(PservTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) @defer.inlineCallbacks def test__get_boot_sources_calls_get_boot_sources_v2_before_v1(self): clock = Clock() client_call = Mock() client_call.side_effect = [ defer.succeed(dict(sources=sentinel.sources)), ] service = ImageDownloadService( sentinel.rpc, sentinel.uuid, sentinel.tftp_root, clock) sources = yield service._get_boot_sources(client_call) self.assertEqual(sources.get('sources'), sentinel.sources) self.assertThat( client_call, MockCalledOnceWith(GetBootSourcesV2, uuid=sentinel.uuid)) @defer.inlineCallbacks def test__get_boot_sources_calls_get_boot_sources_v1_on_v2_missing(self): clock = Clock() client_call = Mock() client_call.side_effect = [ defer.fail(NoSuchMethod()), defer.succeed(dict(sources=[])), ] service = ImageDownloadService( sentinel.rpc, sentinel.uuid, sentinel.tftp_root, clock) yield service._get_boot_sources(client_call) self.assertThat( client_call, MockCallsMatch( call(GetBootSourcesV2, uuid=sentinel.uuid), call(GetBootSources, uuid=sentinel.uuid))) @defer.inlineCallbacks def test__get_boot_sources_v1_sets_os_to_wildcard(self): sources = [ { 'path': factory.make_url(), 'selections': [ { 'release': "trusty", 'arches': ["amd64"], 'subarches': ["generic"], 'labels': ["release"], }, { 'release': "precise", 'arches': ["amd64"], 'subarches': ["generic"], 'labels': ["release"], }, ], }, ] clock = Clock() client_call = Mock() client_call.side_effect = [ defer.fail(NoSuchMethod()), defer.succeed(dict(sources=sources)), ] service = ImageDownloadService( sentinel.rpc, sentinel.uuid, sentinel.tftp_root, clock) sources = yield service._get_boot_sources(client_call) os_selections = [ selection.get('os') for source in sources['sources'] for selection in source['selections'] ] self.assertEqual(['*', '*'], os_selections) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_lease_upload_service.py0000644000000000000000000001603313056115004031414 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. 
This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for src/provisioningserver/pserv_services/lease_upload_service.py""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import datetime from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, MockNotCalled, ) from maastesting.twisted import TwistedLoggerFixture from mock import ( ANY, call, Mock, sentinel, ) from provisioningserver import services from provisioningserver.dhcp.leases import check_lease_changes from provisioningserver.pserv_services import lease_upload_service from provisioningserver.pserv_services.lease_upload_service import ( convert_leases_to_mappings, convert_mappings_to_leases, LeaseUploadService, ) from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import UpdateLeases from provisioningserver.rpc.testing import MockClusterToRegionRPCFixture from provisioningserver.testing.testcase import PservTestCase from testtools.deferredruntest import extract_result from twisted.application.internet import TimerService from twisted.internet import defer from twisted.internet.task import Clock def make_random_lease(): ip = factory.make_ipv4_address() mac = factory.make_mac_address() return (ip, mac) def make_random_mapping(): ip = factory.make_ipv4_address() mac = factory.make_mac_address() mapping = {"ip": ip, "mac": mac} return mapping class TestHelperFunctions(PservTestCase): def test_convert_leases_to_mappings_maps_correctly(self): mappings = list() for _ in xrange(3): mappings.append(make_random_mapping()) # Convert to leases. leases = convert_mappings_to_leases(mappings) # Convert back and test against our original mappings. observed = convert_leases_to_mappings(leases) self.assertItemsEqual(mappings, observed) def test_convert_leases_to_mappings_converts_correctly(self): leases = list() for _ in xrange(3): leases.append(make_random_lease()) # Convert to mappings. mappings = convert_leases_to_mappings(leases) # Convert back and test against our original leases. observed = convert_mappings_to_leases(mappings) self.assertEqual(observed, leases) class TestPeriodicImageDownloadService(PservTestCase): def test_init(self): service = LeaseUploadService( sentinel.service, sentinel.clock, sentinel.uuid) self.assertIsInstance(service, TimerService) self.assertIs(service.clock, sentinel.clock) self.assertIs(service.uuid, sentinel.uuid) self.assertIs(service.client_service, sentinel.service) def patch_upload(self, service, return_value=None): patched = self.patch(service, '_get_client_and_start_upload') patched.return_value = defer.succeed(return_value) return patched def test_is_called_every_interval(self): clock = Clock() service = LeaseUploadService( sentinel.service, clock, sentinel.uuid) # Avoid actual uploads: start_upload = self.patch_upload(service) # There are no calls before the service is started. self.assertThat(start_upload, MockNotCalled()) service.startService() # The first call is issued at startup. self.assertThat(start_upload, MockCalledOnceWith()) # Wind clock forward one second less than the desired interval. clock.advance(service.check_interval - 1) # No more periodic calls made. self.assertThat(start_upload, MockCalledOnceWith()) # Wind clock forward one second, past the interval. clock.advance(1) # Now there were two calls. 
self.assertThat(start_upload, MockCallsMatch(call(), call())) # Forward another interval, should be three calls. clock.advance(service.check_interval) self.assertThat( start_upload, MockCallsMatch(call(), call(), call())) def test_no_upload_if_no_rpc_connections(self): rpc_client = Mock() rpc_client.getClient.side_effect = NoConnectionsAvailable() clock = Clock() service = LeaseUploadService( rpc_client, clock, sentinel.uuid) start_upload = self.patch(service, '_start_upload') service.startService() # Wind clock past all the retries. You can't do this in one big # lump, it seems. The test looks like it passes, but the # maybe_start_upload() method never returns properly. clock.pump((5, 5, 5)) self.assertThat(start_upload, MockNotCalled()) def test_upload_is_initiated(self): # We're pretending to be the reactor in this thread. To ensure correct # operation from things like the @asynchronous decorators we need to # register as the IO thread. self.register_as_io_thread() # Create a fixture for the region side of the RPC. rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) rpc_service = services.getServiceNamed('rpc') server, io = rpc_fixture.makeEventLoop(UpdateLeases) server.UpdateLeases.return_value = defer.succeed({}) # Create a mock response to "check_lease_changes()" fake_lease = [make_random_lease()] deferToThread = self.patch(lease_upload_service, 'deferToThread') deferToThread.return_value = defer.succeed( (datetime.now(), fake_lease),) mappings = convert_leases_to_mappings(fake_lease) # Start the service. uuid = factory.make_UUID() service = LeaseUploadService(rpc_service, Clock(), uuid) service.startService() # Gavin says that I need to pump my IO. I don't know what this # means but it sounds important! io.pump() # Ensure it called out to a new thread to get and parse the leases. self.assertThat(deferToThread, MockCalledOnceWith(check_lease_changes)) # Ensure it sent them to the region using RPC. self.assertThat( server.UpdateLeases, MockCalledOnceWith(ANY, uuid=uuid, mappings=mappings)) def test_logs_other_errors(self): service = LeaseUploadService( sentinel.rpc, Clock(), sentinel.uuid) _get_client_and_start_upload = self.patch_autospec( service, "_get_client_and_start_upload") _get_client_and_start_upload.return_value = defer.fail( ZeroDivisionError("Such a shame I can't divide by zero")) with FakeLogger("maas") as maaslog, TwistedLoggerFixture(): d = service.try_upload() self.assertEqual(None, extract_result(d)) self.assertDocTestMatches( "Failed to upload leases: " "Such a shame I can't divide by zero", maaslog.output) ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.pymaas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_node_power_monitor_service.0000644000000000000000000001173613056115004032323 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
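# The convert_* helpers exercised in the lease-upload tests above are
# plain shape transformations between RPC mappings and (ip, mac) tuples,
# e.g. (addresses illustrative; dict key order may differ):
#
#     >>> convert_leases_to_mappings([("10.0.0.2", "00:16:3e:aa:bb:cc")])
#     [{'ip': '10.0.0.2', 'mac': '00:16:3e:aa:bb:cc'}]
#     >>> convert_mappings_to_leases(
#     ...     [{'ip': '10.0.0.2', 'mac': '00:16:3e:aa:bb:cc'}])
#     [('10.0.0.2', '00:16:3e:aa:bb:cc')]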
"""Tests for :py:module:`~provisioningserver.pserv_services.node_power_monitor_service`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import TwistedLoggerFixture from mock import ( ANY, sentinel, ) from provisioningserver.pserv_services import ( node_power_monitor_service as npms, ) from provisioningserver.rpc import ( exceptions, getRegionClient, region, ) from provisioningserver.rpc.testing import MockClusterToRegionRPCFixture from testtools.deferredruntest import extract_result from testtools.matchers import MatchesStructure from twisted.internet.defer import ( fail, succeed, ) from twisted.internet.task import Clock class TestNodePowerMonitorService(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_init_sets_up_timer_correctly(self): cluster_uuid = factory.make_UUID() service = npms.NodePowerMonitorService(cluster_uuid) self.assertThat(service, MatchesStructure.byEquality( call=(service.try_query_nodes, (cluster_uuid,), {}), step=15, clock=None)) def make_monitor_service(self): cluster_uuid = factory.make_UUID() service = npms.NodePowerMonitorService(cluster_uuid, Clock()) return cluster_uuid, service def test_query_nodes_calls_the_region(self): cluster_uuid, service = self.make_monitor_service() rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) proto_region, io = rpc_fixture.makeEventLoop( region.ListNodePowerParameters) proto_region.ListNodePowerParameters.return_value = succeed( {"nodes": []}) d = service.query_nodes(getRegionClient(), cluster_uuid) io.flush() self.assertEqual(None, extract_result(d)) self.assertThat( proto_region.ListNodePowerParameters, MockCalledOnceWith(ANY, uuid=cluster_uuid)) def test_query_nodes_calls_query_all_nodes(self): cluster_uuid, service = self.make_monitor_service() service.max_nodes_at_once = sentinel.max_nodes_at_once example_power_parameters = { "system_id": factory.make_UUID(), "hostname": factory.make_hostname(), "power_state": factory.make_name("power_state"), "power_type": factory.make_name("power_type"), "context": {}, } rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) proto_region, io = rpc_fixture.makeEventLoop( region.ListNodePowerParameters) proto_region.ListNodePowerParameters.side_effect = [ succeed({"nodes": [example_power_parameters]}), succeed({"nodes": []}), ] query_all_nodes = self.patch(npms, "query_all_nodes") d = service.query_nodes(getRegionClient(), cluster_uuid) io.flush() self.assertEqual(None, extract_result(d)) self.assertThat( query_all_nodes, MockCalledOnceWith( [example_power_parameters], max_concurrency=sentinel.max_nodes_at_once, clock=service.clock)) def test_query_nodes_copes_with_NoSuchCluster(self): cluster_uuid, service = self.make_monitor_service() rpc_fixture = self.useFixture(MockClusterToRegionRPCFixture()) proto_region, io = rpc_fixture.makeEventLoop( region.ListNodePowerParameters) proto_region.ListNodePowerParameters.return_value = fail( exceptions.NoSuchCluster.from_uuid(cluster_uuid)) d = service.query_nodes(getRegionClient(), cluster_uuid) d.addErrback(service.query_nodes_failed, cluster_uuid) with FakeLogger("maas") as maaslog: io.flush() self.assertEqual(None, extract_result(d)) self.assertDocTestMatches( "Cluster ... 
is not recognised.", maaslog.output) def test_try_query_nodes_logs_other_errors(self): cluster_uuid, service = self.make_monitor_service() self.patch(npms, "getRegionClient").return_value = sentinel.client query_nodes = self.patch(service, "query_nodes") query_nodes.return_value = fail( ZeroDivisionError("Such a shame I can't divide by zero")) with FakeLogger("maas") as maaslog, TwistedLoggerFixture(): d = service.try_query_nodes(cluster_uuid) self.assertEqual(None, extract_result(d)) self.assertDocTestMatches( "Failed to query nodes' power status: " "Such a shame I can't divide by zero", maaslog.output) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_service_monitor_service.py0000644000000000000000000000451613056115004032171 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for :py:module:`~provisioningserver.pserv_services.service_monitor_service`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import TwistedLoggerFixture from provisioningserver.pserv_services import service_monitor_service as sms from provisioningserver.service_monitor import service_monitor from testtools.matchers import MatchesStructure from twisted.internet.task import Clock class TestServiceMonitorService(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_init_sets_up_timer_correctly(self): service = sms.ServiceMonitorService() self.assertThat(service, MatchesStructure.byEquality( call=(service.monitor_services, (), {}), step=(2 * 60), clock=None)) def make_monitor_service(self): service = sms.ServiceMonitorService(Clock()) return service def test_monitor_services_does_not_do_anything_in_dev_environment(self): # Belt-n-braces make sure we're in a development environment. self.assertTrue(sms.is_dev_environment()) service = self.make_monitor_service() mock_deferToThread = self.patch(sms, "deferToThread") with TwistedLoggerFixture() as logger: service.monitor_services() self.assertThat(mock_deferToThread, MockNotCalled()) self.assertDocTestMatches( "Skipping check of services; they're not running under the " "supervision of Upstart or systemd.", logger.output) def test_monitor_services_defers_ensure_all_services_to_thread(self): # Pretend we're in a production environment. self.patch(sms, "is_dev_environment").return_value = False service = self.make_monitor_service() mock_deferToThread = self.patch(sms, "deferToThread") service.monitor_services() self.assertThat( mock_deferToThread, MockCalledOnceWith(service_monitor.ensure_all_services)) maas-1.9.5+bzr4599.orig/src/provisioningserver/pserv_services/tests/test_tftp.py0000644000000000000000000005521513056115004026221 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the maastftp Twisted plugin.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from functools import partial import httplib import json import os import random import re from socket import ( AF_INET, AF_INET6, ) from urllib import urlencode from urlparse import ( parse_qsl, urlparse, ) from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from maastesting.twisted import TwistedLoggerFixture import mock from mock import ( sentinel, ANY, ) from netaddr import IPNetwork from netaddr.ip import ( IPV4_LINK_LOCAL, IPV6_LINK_LOCAL, ) from provisioningserver.boot import BytesReader from provisioningserver.boot.pxe import PXEBootMethod from provisioningserver.boot.tests.test_pxe import compose_config_path from provisioningserver.config import ClusterConfiguration from provisioningserver.events import EVENT_TYPES from provisioningserver.pserv_services import tftp as tftp_module from provisioningserver.pserv_services.tftp import ( log_request, Port, TFTPBackend, TFTPService, UDPServer, ) from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.tests.test_kernel_opts import make_kernel_parameters from testtools import ExpectedException from testtools.matchers import ( AfterPreprocessing, AllMatch, Equals, HasLength, Is, IsInstance, MatchesAll, MatchesStructure, ) from tftp.backend import IReader from tftp.errors import ( BackendError, FileNotFound, ) from tftp.protocol import TFTP from twisted.application import internet from twisted.application.service import MultiService from twisted.internet import reactor from twisted.internet.address import ( IPv4Address, IPv6Address, ) from twisted.internet.defer import ( inlineCallbacks, succeed, ) from twisted.internet.protocol import Protocol from twisted.internet.task import Clock from twisted.python import context import twisted.web.error from zope.interface.verify import verifyObject class TestBytesReader(MAASTestCase): """Tests for `provisioningserver.tftp.BytesReader`.""" def test_interfaces(self): reader = BytesReader(b"") self.addCleanup(reader.finish) verifyObject(IReader, reader) def test_read(self): data = factory.make_string(size=10).encode("ascii") reader = BytesReader(data) self.addCleanup(reader.finish) self.assertEqual(data[:7], reader.read(7)) self.assertEqual(data[7:], reader.read(7)) self.assertEqual(b"", reader.read(7)) def test_finish(self): reader = BytesReader(b"1234") reader.finish() self.assertRaises(ValueError, reader.read, 1) class TestTFTPBackend(MAASTestCase): """Tests for `provisioningserver.tftp.TFTPBackend`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestTFTPBackend, self).setUp() self.useFixture(ClusterConfigurationFixture()) from provisioningserver import boot self.patch(boot, "find_mac_via_arp") self.patch(tftp_module, 'log_request') def test_init(self): temp_dir = self.make_dir() generator_url = "http://%s.example.com/%s" % ( factory.make_name("domain"), factory.make_name("path")) backend = TFTPBackend(temp_dir, generator_url, sentinel.uuid) self.assertEqual((True, False), (backend.can_read, backend.can_write)) self.assertEqual(temp_dir, backend.base.path) self.assertEqual(generator_url, backend.generator_url.geturl()) self.assertIs(backend.cluster_uuid, sentinel.uuid) def test_get_generator_url(self): # get_generator_url() 
merges the parameters obtained from the request # file path (arch, subarch, name) into the configured generator URL. mac = factory.make_mac_address("-") dummy = factory.make_name("dummy").encode("ascii") backend_url = b"http://example.com/?" + urlencode({b"dummy": dummy}) backend = TFTPBackend(self.make_dir(), backend_url, sentinel.uuid) # params is an example of the parameters obtained from a request. params = {"mac": mac} generator_url = urlparse(backend.get_generator_url(params)) self.assertEqual("example.com", generator_url.hostname) query = parse_qsl(generator_url.query) query_expected = [ ("dummy", dummy), ("mac", mac), ] self.assertItemsEqual(query_expected, query) def get_reader(self, data): temp_file = self.make_file(name="example", contents=data) temp_dir = os.path.dirname(temp_file) backend = TFTPBackend( temp_dir, "http://nowhere.example.com/", sentinel.uuid) return backend.get_reader("example") @inlineCallbacks def test_get_reader_regular_file(self): # TFTPBackend.get_reader() returns a regular FilesystemReader for # paths not matching re_config_file. self.patch(tftp_module, 'get_remote_mac') data = factory.make_string().encode("ascii") reader = yield self.get_reader(data) self.addCleanup(reader.finish) self.assertEqual(len(data), reader.size) self.assertEqual(data, reader.read(len(data))) self.assertEqual(b"", reader.read(1)) @inlineCallbacks def test_get_reader_handles_backslashes_in_path(self): self.patch(tftp_module, 'get_remote_mac') data = factory.make_string().encode("ascii") temp_dir = self.make_dir() subdir = factory.make_name('subdir') filename = factory.make_name('file') os.mkdir(os.path.join(temp_dir, subdir)) factory.make_file(os.path.join(temp_dir, subdir), filename, data) path = '\\%s\\%s' % (subdir, filename) backend = TFTPBackend( temp_dir, "http://nowhere.example.com/", sentinel.uuid) reader = yield backend.get_reader(path) self.addCleanup(reader.finish) self.assertEqual(len(data), reader.size) self.assertEqual(data, reader.read(len(data))) self.assertEqual(b"", reader.read(1)) @inlineCallbacks def test_get_reader_logs_node_event_with_mac_address(self): mac_address = factory.make_mac_address() self.patch(tftp_module, 'get_remote_mac').return_value = mac_address data = factory.make_string().encode("ascii") reader = yield self.get_reader(data) self.addCleanup(reader.finish) self.assertThat( tftp_module.log_request, MockCalledOnceWith(mac_address, ANY)) @inlineCallbacks def test_get_reader_does_not_log_when_mac_cannot_be_found(self): self.patch(tftp_module, 'get_remote_mac').return_value = None data = factory.make_string().encode("ascii") reader = yield self.get_reader(data) self.addCleanup(reader.finish) self.assertThat( tftp_module.log_request, MockNotCalled()) @inlineCallbacks def test_get_reader_converts_404s_to_tftp_error(self): with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = factory.make_UUID() backend = TFTPBackend( self.make_dir(), "http://example.com/", sentinel.uuid) get_page = self.patch(backend, 'get_page') get_page.side_effect = twisted.web.error.Error(httplib.NOT_FOUND) with ExpectedException(FileNotFound): yield backend.get_reader('pxelinux.cfg/default') @inlineCallbacks def test_get_reader_converts_other_exceptions_to_tftp_error(self): with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = factory.make_UUID() exception_type = factory.make_exception_type() exception_message = factory.make_string() backend = TFTPBackend( self.make_dir(), "http://example.com/", sentinel.uuid) get_page = 
self.patch(backend, 'get_page') get_page.side_effect = exception_type(exception_message) with TwistedLoggerFixture() as logger: with ExpectedException(BackendError, re.escape(exception_message)): yield backend.get_reader('pxelinux.cfg/default') # The original exception is logged. self.assertDocTestMatches( """\ Starting TFTP back-end failed. Traceback (most recent call last): ... maastesting.factory.TestException#... """, logger.output) @inlineCallbacks def _test_get_render_file(self, local, remote): # For paths matching PXEBootMethod.match_path, TFTPBackend.get_reader() # returns a Deferred that will yield a BytesReader. cluster_uuid = factory.make_UUID() with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = cluster_uuid mac = factory.make_mac_address("-") config_path = compose_config_path(mac) backend = TFTPBackend( self.make_dir(), b"http://example.com/", cluster_uuid) # python-tx-tftp sets up call context so that backends can discover # more about the environment in which they're running. call_context = {"local": local, "remote": remote} @partial(self.patch, backend, "get_boot_method_reader") def get_boot_method_reader(boot_method, params): params_json = json.dumps(params) params_json_reader = BytesReader(params_json) return succeed(params_json_reader) reader = yield context.call( call_context, backend.get_reader, config_path) output = reader.read(10000) # The addresses provided by python-tx-tftp in the call context are # passed over the wire as address:port strings. expected_params = { "mac": mac, "local": call_context["local"][0], # address only. "remote": call_context["remote"][0], # address only. "cluster_uuid": cluster_uuid, "bios_boot_method": "pxe", } observed_params = json.loads(output) self.assertEqual(expected_params, observed_params) def test_get_render_file_with_ipv4_hosts(self): return self._test_get_render_file( local=( factory.make_ipv4_address(), factory.pick_port()), remote=( factory.make_ipv4_address(), factory.pick_port()), ) def test_get_render_file_with_ipv6_hosts(self): # Some versions of Twisted have the scope and flow info in the remote # address tuple. See https://twistedmatrix.com/trac/ticket/6826 (the # address is captured by tftp.protocol.TFTP.dataReceived). return self._test_get_render_file( local=( factory.make_ipv6_address(), factory.pick_port(), random.randint(1, 1000), random.randint(1, 1000)), remote=( factory.make_ipv6_address(), factory.pick_port(), random.randint(1, 1000), random.randint(1, 1000)), ) @inlineCallbacks def test_get_boot_method_reader_returns_rendered_params(self): # get_boot_method_reader() takes a dict() of parameters and returns an # `IReader` of a PXE configuration, rendered by # `PXEBootMethod.get_reader`. backend = TFTPBackend( self.make_dir(), b"http://example.com/", sentinel.uuid) # Fake configuration parameters, as discovered from the file path. fake_params = {"mac": factory.make_mac_address("-")} # Fake kernel configuration parameters, as returned from the API call. fake_kernel_params = make_kernel_parameters() # Stub get_page to return the fake API configuration parameters. fake_get_page_result = json.dumps(fake_kernel_params._asdict()) get_page_patch = self.patch(backend, "get_page") get_page_patch.return_value = succeed(fake_get_page_result) # Stub get_reader to return the render parameters. 
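        # The fixed byte string below stands in for a rendered PXE
        # config; the test then asserts that exactly these bytes come
        # back out of the returned reader.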
method = PXEBootMethod() fake_render_result = factory.make_name("render").encode("utf-8") render_patch = self.patch(method, "get_reader") render_patch.return_value = BytesReader(fake_render_result) # Get the rendered configuration, which will actually be a JSON dump # of the render-time parameters. reader = yield backend.get_boot_method_reader(method, fake_params) self.addCleanup(reader.finish) self.assertIsInstance(reader, BytesReader) output = reader.read(10000) # The kernel parameters were fetched using `backend.get_page`. self.assertThat(backend.get_page, MockCalledOnceWith(mock.ANY)) # The result has been rendered by `method.get_reader`. self.assertEqual(fake_render_result.encode("utf-8"), output) self.assertThat(method.get_reader, MockCalledOnceWith( backend, kernel_params=fake_kernel_params, **fake_params)) @inlineCallbacks def test_get_boot_method_render_substitutes_armhf_in_params(self): # get_config_reader() should substitute "arm" for "armhf" in the # arch field of the parameters (mapping from pxe to maas # namespace). cluster_uuid = factory.make_UUID() with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = cluster_uuid config_path = "pxelinux.cfg/default-arm" backend = TFTPBackend( self.make_dir(), b"http://example.com/", cluster_uuid) # python-tx-tftp sets up call context so that backends can discover # more about the environment in which they're running. call_context = { "local": ( factory.make_ipv4_address(), factory.pick_port()), "remote": ( factory.make_ipv4_address(), factory.pick_port()), } @partial(self.patch, backend, "get_boot_method_reader") def get_boot_method_reader(boot_method, params): params_json = json.dumps(params) params_json_reader = BytesReader(params_json) return succeed(params_json_reader) reader = yield context.call( call_context, backend.get_reader, config_path) output = reader.read(10000) observed_params = json.loads(output) self.assertEqual("armhf", observed_params["arch"]) class TestTFTPService(MAASTestCase): def test_tftp_service(self): # A TFTP service is configured and added to the top-level service. interfaces = [ factory.make_ipv4_address(), factory.make_ipv6_address(), ] self.patch( tftp_module, "get_all_interface_addresses", lambda: interfaces) example_root = self.make_dir() example_generator = "http://example.com/generator" example_port = factory.pick_port() tftp_service = TFTPService( resource_root=example_root, generator=example_generator, port=example_port, uuid=sentinel.uuid) tftp_service.updateServers() # The "tftp" service is a multi-service containing UDP servers for # each interface defined by get_all_interface_addresses(). self.assertIsInstance(tftp_service, MultiService) # There's also a TimerService that updates the servers every 45s. 
self.assertThat( tftp_service.refresher, MatchesStructure.byEquality( step=45, parent=tftp_service, name="refresher", call=(tftp_service.updateServers, (), {}), )) expected_backend = MatchesAll( IsInstance(TFTPBackend), AfterPreprocessing( lambda backend: backend.base.path, Equals(example_root)), AfterPreprocessing( lambda backend: backend.generator_url.geturl(), Equals(example_generator)), AfterPreprocessing( lambda backend: backend.cluster_uuid, Is(sentinel.uuid))) expected_protocol = MatchesAll( IsInstance(TFTP), AfterPreprocessing( lambda protocol: protocol.backend, expected_backend)) expected_server = MatchesAll( IsInstance(internet.UDPServer), AfterPreprocessing( lambda service: len(service.args), Equals(2)), AfterPreprocessing( lambda service: service.args[0], # port Equals(example_port)), AfterPreprocessing( lambda service: service.args[1], # protocol expected_protocol)) self.assertThat( tftp_service.getServers(), AllMatch(expected_server)) # Only the interface used for each service differs. self.assertItemsEqual( [svc.kwargs for svc in tftp_service.getServers()], [{"interface": interface} for interface in interfaces]) def test_tftp_service_rebinds_on_HUP(self): # Initial set of interfaces to bind to. interfaces = {"1.1.1.1", "2.2.2.2"} self.patch( tftp_module, "get_all_interface_addresses", lambda: interfaces) tftp_service = TFTPService( resource_root=self.make_dir(), generator="http://mighty/wind", port=factory.pick_port(), uuid=sentinel.uuid) tftp_service.updateServers() # The child services of tftp_services are named after the # interface they bind to. self.assertEqual(interfaces, { server.name for server in tftp_service.getServers() }) # Update the set of interfaces to bind to. interfaces.add("3.3.3.3") interfaces.remove("1.1.1.1") # Ask the TFTP service to update its set of servers. tftp_service.updateServers() # We're in the reactor thread but we want to move the reactor # forwards, hence we need to get all explicit about it. reactor.runUntilCurrent() # The interfaces now bound match the updated interfaces set. self.assertEqual(interfaces, { server.name for server in tftp_service.getServers() }) def test_tftp_service_does_not_bind_to_link_local_addresses(self): # Initial set of interfaces to bind to. ipv4_test_net_3 = IPNetwork("203.0.113.0/24") # RFC 5737 normal_addresses = { factory.pick_ip_in_network(ipv4_test_net_3), factory.make_ipv6_address(), } link_local_addresses = { factory.pick_ip_in_network(IPV4_LINK_LOCAL), factory.pick_ip_in_network(IPV6_LINK_LOCAL), } self.patch( tftp_module, "get_all_interface_addresses", lambda: normal_addresses | link_local_addresses) tftp_service = TFTPService( resource_root=self.make_dir(), generator="http://mighty/wind", port=factory.pick_port(), uuid=sentinel.uuid) tftp_service.updateServers() # Only the "normal" addresses have been used. 
self.assertEqual(normal_addresses, { server.name for server in tftp_service.getServers() }) class DummyProtocol(Protocol): def doStop(self): pass class TestPort(MAASTestCase): """Tests for :py:class:`Port`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_getHost_works_with_IPv4_address(self): port = Port(0, DummyProtocol(), "127.0.0.1") port.addressFamily = AF_INET port.startListening() self.addCleanup(port.stopListening) self.assertEqual( IPv4Address('UDP', '127.0.0.1', port._realPortNumber), port.getHost()) def test_getHost_works_with_IPv6_address(self): port = Port(0, DummyProtocol(), "::1") port.addressFamily = AF_INET6 port.startListening() self.addCleanup(port.stopListening) self.assertEqual( IPv6Address('UDP', '::1', port._realPortNumber), port.getHost()) class TestUDPServer(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test__getPort_calls__listenUDP_with_args_from_constructor(self): server = UDPServer(sentinel.foo, bar=sentinel.bar) _listenUDP = self.patch(server, "_listenUDP") _listenUDP.return_value = sentinel.port self.assertEqual(sentinel.port, server._getPort()) self.assertThat(_listenUDP, MockCalledOnceWith( sentinel.foo, bar=sentinel.bar)) def test__listenUDP_with_IPv4_address(self): server = UDPServer(0, DummyProtocol(), "127.0.0.1") port = server._getPort() self.addCleanup(port.stopListening) self.assertEqual(AF_INET, port.addressFamily) def test__listenUDP_with_IPv6_address(self): server = UDPServer(0, DummyProtocol(), "::1") port = server._getPort() self.addCleanup(port.stopListening) self.assertEqual(AF_INET6, port.addressFamily) class TestLogRequest(MAASTestCase): """Tests for `log_request`.""" def test__defers_log_call_later(self): clock = Clock() log_request(sentinel.macaddr, sentinel.filename, clock) self.expectThat(clock.calls, HasLength(1)) [call] = clock.calls self.expectThat(call.getTime(), Equals(0.0)) def test__sends_event_later(self): send_event = self.patch(tftp_module, "send_event_node_mac_address") clock = Clock() log_request(sentinel.macaddr, sentinel.filename, clock) self.assertThat(send_event, MockNotCalled()) clock.advance(0.0) self.assertThat(send_event, MockCalledOnceWith( mac_address=sentinel.macaddr, description=sentinel.filename, event_type=EVENT_TYPES.NODE_TFTP_REQUEST)) def test__logs_when_sending_event_errors(self): send_event = self.patch(tftp_module, "send_event_node_mac_address") send_event.side_effect = factory.make_exception() clock = Clock() log_request(sentinel.macaddr, sentinel.filename, clock) self.assertThat(send_event, MockNotCalled()) with TwistedLoggerFixture() as logger: clock.advance(0.0) self.assertDocTestMatches( """\ Logging TFTP request failed. Traceback (most recent call last): ... maastesting.factory.TestException#... """, logger.output) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/__init__.py0000644000000000000000000000203413056115004022313 0ustar 00000000000000# Copyright 2013-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Cluster Controller RPC.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "getRegionClient", ] import provisioningserver from provisioningserver.rpc import exceptions def getRegionClient(): """getRegionClient() Get a client with which to make RPCs to the region. :raises: :py:class:`~.exceptions.NoConnectionsAvailable` when there are no open connections to the region controller. 
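    A sketch of typical use (illustrative only; the command and
    ``cluster_uuid`` are assumed to be supplied by the caller)::

        client = getRegionClient()
        d = client(region.ListNodePowerParameters, uuid=cluster_uuid)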
""" # TODO: retry a couple of times before giving up if the service is # not running or if exceptions.NoConnectionsAvailable gets raised. try: rpc_service = provisioningserver.services.getServiceNamed('rpc') except KeyError: raise exceptions.NoConnectionsAvailable( "Cluster services are unavailable.") else: return rpc_service.getClient() maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/arguments.py0000644000000000000000000001047313056115004022567 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Additional AMP argument classes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "Bytes", "Choice", "StructureAsJSON", "ParsedURL", ] import collections import json import urlparse import zlib from apiclient.utils import ascii_url from twisted.protocols import amp class Bytes(amp.Argument): """Encode a structure on the wire as bytes. In truth, this does nothing more than assert that the inputs are always bytes. """ def toString(self, inObject): if not isinstance(inObject, bytes): raise TypeError("Not a byte string: %r" % (inObject,)) return inObject def fromString(self, inString): # inString is always a byte string, as defined by amp.Argument. return inString class Choice(amp.Argument): """Encode a choice to a predefined bytestring on the wire.""" def __init__(self, choices, optional=False): """Default constructor. :param choices: A :py:class:`~collections.Mapping` of possible choices. The keys can be any Python object suitable for use as a mapping key, but the values must be byte strings. On the wire the Python objects will be represented by those byte strings, and mapped back at the receiving end. """ super(Choice, self).__init__(optional=optional) if not isinstance(choices, collections.Mapping): raise TypeError("Not a mapping: %r" % (choices,)) not_byte_strings = sorted( value for value in choices.itervalues() if not isinstance(value, bytes)) if len(not_byte_strings) != 0: raise TypeError("Not byte strings: %s" % ", ".join( repr(value) for value in not_byte_strings)) self._encode = {name: value for name, value in choices.iteritems()} self._decode = {value: name for name, value in choices.iteritems()} def toString(self, inObject): return self._encode[inObject] def fromString(self, inString): return self._decode[inString] class ParsedURL(amp.Argument): """Encode a URL on the wire. The URL should be an instance of :py:class:`~urlparse.ParseResult` or :py:class:`~urlparse.SplitResult` for encoding. When decoding, :py:class:`~urlparse.ParseResult` is always returned. """ def toString(self, inObject): """Encode a URL-like object into an ASCII URL. :raise TypeError: If `inObject` is not a URL-like object (meaning it doesn't have a `geturl` method). """ try: geturl = inObject.geturl except AttributeError: raise TypeError("Not a URL-like object: %r" % (inObject,)) else: return ascii_url(geturl()) def fromString(self, inString): """Decode an ASCII URL into a URL-like object. :return: :py:class:`~urlparse.ParseResult` """ return urlparse.urlparse(inString) class StructureAsJSON(amp.Argument): """Encode a structure on the wire as JSON, compressed with zlib. The compressed size of the structure should not exceed :py:data:`~twisted.protocols.amp.MAX_VALUE_LENGTH`, or ``0xffff`` bytes. This is pretty hard to be sure of ahead of time, so only use this for small structures that won't go near the limit. 
""" def toString(self, inObject): return zlib.compress(json.dumps(inObject)) def fromString(self, inString): return json.loads(zlib.decompress(inString)) class CompressedAmpList(amp.AmpList): """An :py:class:`amp.AmpList` that's compressed on the wire. The serialised form is transparently compressed and decompressed with zlib. This can be useful when there's a lot of repetition in the list being transmitted. """ def toStringProto(self, inObject, proto): toStringProto = super(CompressedAmpList, self).toStringProto return zlib.compress(toStringProto(inObject, proto)) def fromStringProto(self, inString, proto): fromStringProto = super(CompressedAmpList, self).fromStringProto return fromStringProto(zlib.decompress(inString), proto) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/boot_images.py0000644000000000000000000001104713056115004023050 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC relating to boot images.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "import_boot_images", "list_boot_images", "is_import_boot_images_running", ] from urlparse import urlparse from provisioningserver import concurrency from provisioningserver.auth import get_maas_user_gpghome from provisioningserver.boot import tftppath from provisioningserver.config import ClusterConfiguration from provisioningserver.import_images import boot_resources from provisioningserver.utils.env import environment_variables from provisioningserver.utils.twisted import synchronous from twisted.internet.threads import deferToThread CACHED_BOOT_IMAGES = None def list_boot_images(): """List the boot images that exist on the cluster. This return value of this function is cached. This helps reduce the amount of IO, as this function is called often. To update the cache call `reload_boot_images`. """ global CACHED_BOOT_IMAGES if CACHED_BOOT_IMAGES is None: with ClusterConfiguration.open() as config: tftp_root = config.tftp_root CACHED_BOOT_IMAGES = tftppath.list_boot_images(tftp_root) return CACHED_BOOT_IMAGES def reload_boot_images(): """Update the cached boot images so `list_boot_images` returns the most up-to-date boot images list.""" global CACHED_BOOT_IMAGES with ClusterConfiguration.open() as config: tftp_root = config.tftp_root CACHED_BOOT_IMAGES = tftppath.list_boot_images(tftp_root) def get_hosts_from_sources(sources): """Return set of hosts that are contained in the given sources.""" hosts = set() for source in sources: url = urlparse(source['url']) if url.hostname is not None: hosts.add(url.hostname) return hosts def fix_sources_for_cluster(sources): """Return modified sources that use the URL to the region defined in the cluster configuration instead of the one the region suggested.""" sources = list(sources) with ClusterConfiguration.open() as config: maas_url = config.maas_url maas_url_parsed = urlparse(maas_url) maas_url_path = maas_url_parsed.path.lstrip('/').rstrip('/') for source in sources: url = urlparse(source['url']) source_path = url.path.lstrip('/') # Most likely they will both have 'MAAS/' at the start. We can't just # append because then the URL would be 'MAAS/MAAS/' which is incorrect. # If the initial part of the URL defined in the config matches the # beginning of what the region told the cluster to use then strip it # out and build the new URL. 
        if source_path.startswith(maas_url_path):
            source_path = source_path[len(maas_url_path):]
        url = maas_url.rstrip('/') + '/' + source_path.lstrip('/')
        source['url'] = url
    return sources


@synchronous
def _run_import(sources, http_proxy=None, https_proxy=None):
    """Run the import.

    This function is synchronous so it must be called with deferToThread.
    """
    # Fix the sources to download from the IP address defined in the cluster
    # configuration, instead of the URL that the region asked it to use.
    sources = fix_sources_for_cluster(sources)
    variables = {
        'GNUPGHOME': get_maas_user_gpghome(),
        }
    if http_proxy is not None:
        variables['http_proxy'] = http_proxy
    if https_proxy is not None:
        variables['https_proxy'] = https_proxy
    # Communication to the sources and loopback should not go through proxy.
    no_proxy_hosts = ["localhost", "127.0.0.1", "::1"]
    no_proxy_hosts += list(get_hosts_from_sources(sources))
    variables['no_proxy'] = ','.join(no_proxy_hosts)
    with environment_variables(variables):
        boot_resources.import_images(sources)

    # Update the boot images cache so `list_boot_images` returns the
    # correct information.
    reload_boot_images()


def import_boot_images(sources, http_proxy=None, https_proxy=None):
    """Imports the boot images from the given sources."""
    lock = concurrency.boot_images
    if not lock.locked:
        return lock.run(
            deferToThread, _run_import, sources,
            http_proxy=http_proxy, https_proxy=https_proxy)


def is_import_boot_images_running():
    """Return True if the import process is currently running."""
    return concurrency.boot_images.locked
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/cluster.py0000644000000000000000000003142013056115004022236 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""RPC declarations for clusters.

These are commands that a cluster controller ought to respond to.
"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "Authenticate",
    "ConfigureDHCPv4",
    "ConfigureDHCPv6",
    "CreateHostMaps",
    "DescribePowerTypes",
    "GetPreseedData",
    "Identify",
    "ListBootImages",
    "ListOperatingSystems",
    "ListSupportedArchitectures",
    "PowerOff",
    "PowerOn",
    "PowerQuery",
    "PowerDriverCheck",
    "ValidateLicenseKey",
    ]

from provisioningserver.power.poweraction import (
    PowerActionFail,
    UnknownPowerType,
    )
from provisioningserver.rpc import exceptions
from provisioningserver.rpc.arguments import (
    Bytes,
    CompressedAmpList,
    ParsedURL,
    StructureAsJSON,
    )
from provisioningserver.rpc.common import (
    Authenticate,
    Identify,
    )
from twisted.protocols import amp


class ListBootImages(amp.Command):
    """List the boot images available on this cluster controller.

    :since: 1.5
    """

    arguments = []
    response = [
        (b"images", amp.AmpList(
            [(b"osystem", amp.Unicode()),
             (b"architecture", amp.Unicode()),
             (b"subarchitecture", amp.Unicode()),
             (b"release", amp.Unicode()),
             (b"label", amp.Unicode()),
             (b"purpose", amp.Unicode()),
             (b"xinstall_type", amp.Unicode()),
             (b"xinstall_path", amp.Unicode())]))
    ]
    errors = []


class ListBootImagesV2(amp.Command):
    """List the boot images available on this cluster controller.

    This command compresses the images list to allow more images in the
    response and to remove the amp.TooLong error.
:since: 1.7.6 """ arguments = [] response = [ (b"images", CompressedAmpList( [(b"osystem", amp.Unicode()), (b"architecture", amp.Unicode()), (b"subarchitecture", amp.Unicode()), (b"release", amp.Unicode()), (b"label", amp.Unicode()), (b"purpose", amp.Unicode()), (b"xinstall_type", amp.Unicode()), (b"xinstall_path", amp.Unicode())])) ] errors = [] class DescribePowerTypes(amp.Command): """Get a JSON Schema describing this cluster's power types. :since: 1.5 """ arguments = [] response = [ (b"power_types", StructureAsJSON()), ] errors = [] class ListSupportedArchitectures(amp.Command): """Report the cluster's supported architectures. :since: 1.5 """ arguments = [] response = [ (b"architectures", amp.AmpList([ (b"name", amp.Unicode()), (b"description", amp.Unicode()), ])), ] errors = [] class ListOperatingSystems(amp.Command): """Report the cluster's supported operating systems. :since: 1.7 """ arguments = [] response = [ (b"osystems", amp.AmpList([ (b"name", amp.Unicode()), (b"title", amp.Unicode()), (b"releases", amp.AmpList([ (b"name", amp.Unicode()), (b"title", amp.Unicode()), (b"requires_license_key", amp.Boolean()), (b"can_commission", amp.Boolean()), ])), (b"default_release", amp.Unicode(optional=True)), (b"default_commissioning_release", amp.Unicode(optional=True)), ])), ] errors = [] class GetOSReleaseTitle(amp.Command): """Get the title for the operating systems release. :since: 1.7 """ arguments = [ (b"osystem", amp.Unicode()), (b"release", amp.Unicode()), ] response = [ (b"title", amp.Unicode()), ] errors = { exceptions.NoSuchOperatingSystem: ( b"NoSuchOperatingSystem"), } class ValidateLicenseKey(amp.Command): """Validate an OS license key. :since: 1.7 """ arguments = [ (b"osystem", amp.Unicode()), (b"release", amp.Unicode()), (b"key", amp.Unicode()), ] response = [ (b"is_valid", amp.Boolean()), ] errors = { exceptions.NoSuchOperatingSystem: ( b"NoSuchOperatingSystem"), } class PowerDriverCheck(amp.Command): """Check power driver on cluster for missing packages :since: 1.9 """ arguments = [ (b"power_type", amp.Unicode()), ] response = [ (b"missing_packages", amp.ListOf(amp.Unicode())), ] errors = { UnknownPowerType: ( b"UnknownPowerType"), NotImplementedError: ( b"NotImplementedError"), } class GetPreseedData(amp.Command): """Get OS-specific preseed data. :since: 1.7 """ arguments = [ (b"osystem", amp.Unicode()), (b"preseed_type", amp.Unicode()), (b"node_system_id", amp.Unicode()), (b"node_hostname", amp.Unicode()), (b"consumer_key", amp.Unicode()), (b"token_key", amp.Unicode()), (b"token_secret", amp.Unicode()), (b"metadata_url", ParsedURL()), ] response = [ (b"data", StructureAsJSON()), ] errors = { exceptions.NoSuchOperatingSystem: ( b"NoSuchOperatingSystem"), NotImplementedError: ( b"NotImplementedError"), } class _Power(amp.Command): """Base class for power control commands. :since: 1.7 """ arguments = [ (b"system_id", amp.Unicode()), (b"hostname", amp.Unicode()), (b"power_type", amp.Unicode()), # We can't define a tighter schema here because this is a highly # variable bag of arguments from a variety of sources. (b"context", StructureAsJSON()), ] response = [] errors = { UnknownPowerType: ( b"UnknownPowerType"), NotImplementedError: ( b"NotImplementedError"), PowerActionFail: ( b"PowerActionFail"), exceptions.PowerActionAlreadyInProgress: ( b"PowerActionAlreadyInProgress"), } class PowerOn(_Power): """Turn a node's power on. :since: 1.7 """ class PowerOff(_Power): """Turn a node's power off. :since: 1.7 """ class PowerQuery(_Power): """Query a node's power state. 
:since: 1.7 """ response = [ (b"state", amp.Unicode()), ] class _ConfigureDHCP(amp.Command): """Configure a DHCP server. :since: 1.7 """ arguments = [ (b"omapi_key", amp.Unicode()), (b"subnet_configs", amp.AmpList([ (b"subnet", amp.Unicode()), (b"subnet_mask", amp.Unicode()), (b"subnet_cidr", amp.Unicode()), (b"broadcast_ip", amp.Unicode()), (b"interface", amp.Unicode()), (b"router_ip", amp.Unicode()), (b"dns_servers", amp.Unicode()), (b"ntp_server", amp.Unicode()), (b"domain_name", amp.Unicode()), (b"ip_range_low", amp.Unicode()), (b"ip_range_high", amp.Unicode()), ])), ] response = [] errors = {exceptions.CannotConfigureDHCP: b"CannotConfigureDHCP"} class ConfigureDHCPv4(_ConfigureDHCP): """Configure the DHCPv4 server. :since: 1.7 """ class ConfigureDHCPv6(_ConfigureDHCP): """Configure the DHCPv6 server. :since: 1.7 """ class CreateHostMaps(amp.Command): """Create host maps in the DHCP server's configuration. :since: 1.7 """ arguments = [ (b"mappings", amp.AmpList([ (b"ip_address", amp.Unicode()), (b"mac_address", amp.Unicode()), ])), (b"shared_key", amp.Unicode()), ] response = [] errors = { exceptions.CannotCreateHostMap: ( b"CannotCreateHostMap"), } class RemoveHostMaps(amp.Command): """Remove host maps from the DHCP server's configuration. :since: 1.7 """ arguments = [ (b"ip_addresses", amp.ListOf(amp.Unicode())), (b"shared_key", amp.Unicode()), ] response = [] errors = { exceptions.CannotRemoveHostMap: ( b"CannotRemoveHostMap"), } class ImportBootImages(amp.Command): """Import boot images and report the final boot images that exist on the cluster. :since: 1.7 """ arguments = [ (b"sources", amp.AmpList( [(b"url", amp.Unicode()), (b"keyring_data", Bytes()), (b"selections", amp.AmpList( [(b"os", amp.Unicode()), (b"release", amp.Unicode()), (b"arches", amp.ListOf(amp.Unicode())), (b"subarches", amp.ListOf(amp.Unicode())), (b"labels", amp.ListOf(amp.Unicode()))]))])), (b"http_proxy", ParsedURL(optional=True)), (b"https_proxy", ParsedURL(optional=True)), ] response = [] errors = [] class StartMonitors(amp.Command): """Starts monitors(s) on the cluster. :since: 1.7 """ arguments = [ (b"monitors", amp.AmpList( [(b"deadline", amp.DateTime()), (b"context", StructureAsJSON()), (b"id", amp.Unicode()), ])) ] response = [] errors = [] class CancelMonitor(amp.Command): """Cancels an existing monitor on the cluster. :since: 1.7 """ arguments = [ (b"id", amp.Unicode()), ] response = [] error = [] class EvaluateTag(amp.Command): """Evaluate a tag against all of the cluster's nodes. :since: 1.7 """ arguments = [ (b"tag_name", amp.Unicode()), (b"tag_definition", amp.Unicode()), (b"tag_nsmap", amp.AmpList([ (b"prefix", amp.Unicode()), (b"uri", amp.Unicode()), ])), # A 3-part credential string for the web API. (b"credentials", amp.Unicode()), ] response = [] errors = [] class AddVirsh(amp.Command): """Probe for and enlist virsh VMs attached to the cluster. :since: 1.7 """ arguments = [ (b"user", amp.Unicode()), (b"poweraddr", amp.Unicode()), (b"password", amp.Unicode(optional=True)), (b"prefix_filter", amp.Unicode(optional=True)), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = [] class AddSeaMicro15k(amp.Command): """Probe for and enlist seamicro15k machines attached to the cluster. 
:since: 1.7 """ arguments = [ (b"user", amp.Unicode()), (b"mac", amp.Unicode()), (b"username", amp.Unicode()), (b"password", amp.Unicode()), (b"power_control", amp.Unicode(optional=True)), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = { exceptions.NoIPFoundForMACAddress: b"NoIPFoundForMACAddress", } class AddVMware(amp.Command): """Probe for and enlist VMware virtual machines. :since: 1.8 """ arguments = [ (b"user", amp.Unicode()), (b"host", amp.Unicode()), (b"username", amp.Unicode()), (b"password", amp.Unicode()), (b"port", amp.Integer(optional=True)), (b"protocol", amp.Unicode(optional=True)), (b"prefix_filter", amp.Unicode(optional=True)), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = {} class EnlistNodesFromMSCM(amp.Command): """Probe for and enlist mscm machines attached to the cluster. :since: 1.7 """ arguments = [ (b"user", amp.Unicode()), (b"host", amp.Unicode()), (b"username", amp.Unicode()), (b"password", amp.Unicode()), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = {} class EnlistNodesFromUCSM(amp.Command): """Probe for and enlist ucsm machines attached to the cluster. :since: 1.7 """ arguments = [ (b"user", amp.Unicode()), (b"url", amp.Unicode()), (b"username", amp.Unicode()), (b"password", amp.Unicode()), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = {} class EnlistNodesFromMicrosoftOCS(amp.Command): """Probe for and enlist msftocs machines attached to the cluster. :since: 1.8 """ arguments = [ (b"user", amp.Unicode()), (b"ip", amp.Unicode()), (b"port", amp.Unicode()), (b"username", amp.Unicode()), (b"password", amp.Unicode()), (b"accept_all", amp.Boolean(optional=True)), ] response = [] errors = {} class IsImportBootImagesRunning(amp.Command): """Check if the import boot images task is running on the cluster. :since: 1.7 """ arguments = [] response = [ (b"running", amp.Boolean()), ] errors = {} maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/clusterservice.py0000644000000000000000000010311413056115004023617 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""RPC implementation for clusters.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "ClusterClientService", ] from functools import partial import json from os import urandom import random import re from urlparse import urlparse from apiclient.creds import convert_string_to_tuple from apiclient.utils import ascii_url from provisioningserver import concurrency from provisioningserver.config import ClusterConfiguration from provisioningserver.drivers import ( ArchitectureRegistry, gen_power_types, ) from provisioningserver.drivers.hardware.mscm import probe_and_enlist_mscm from provisioningserver.drivers.hardware.msftocs import ( probe_and_enlist_msftocs, ) from provisioningserver.drivers.hardware.seamicro import ( probe_seamicro15k_and_enlist, ) from provisioningserver.drivers.hardware.ucsm import probe_and_enlist_ucsm from provisioningserver.drivers.hardware.virsh import probe_virsh_and_enlist from provisioningserver.drivers.hardware.vmware import probe_vmware_and_enlist from provisioningserver.drivers.power import power_drivers_by_name from provisioningserver.logger.log import get_maas_logger from provisioningserver.network import ( discover_networks, get_ip_addr_json, ) from provisioningserver.power.change import maybe_change_power_state from provisioningserver.power.poweraction import UnknownPowerType from provisioningserver.power.query import get_power_state from provisioningserver.rpc import ( cluster, common, dhcp, exceptions, region, ) from provisioningserver.rpc.boot_images import ( import_boot_images, is_import_boot_images_running, list_boot_images, ) from provisioningserver.rpc.common import RPCProtocol from provisioningserver.rpc.dhcp import ( create_host_maps, remove_host_maps, ) from provisioningserver.rpc.interfaces import IConnection from provisioningserver.rpc.monitors import ( cancel_monitor, start_monitors, ) from provisioningserver.rpc.osystems import ( gen_operating_systems, get_os_release_title, get_preseed_data, validate_license_key, ) from provisioningserver.rpc.tags import evaluate_tag from provisioningserver.security import ( calculate_digest, get_shared_secret_from_filesystem, ) from provisioningserver.utils.network import find_ip_via_arp from provisioningserver.utils.shell import ExternalProcessError from provisioningserver.utils.twisted import DeferredValue from twisted import web from twisted.application.internet import TimerService from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.internet.endpoints import ( connectProtocol, TCP4ClientEndpoint, ) from twisted.internet.error import ( ConnectError, ConnectionClosed, ) from twisted.internet.threads import deferToThread from twisted.protocols import amp from twisted.python import log from twisted.python.reflect import fullyQualifiedName from twisted.web import http import twisted.web.client from twisted.web.client import getPage from zope.interface import implementer # From python-twisted 15+ changes the name of _URI to URI. try: from twisted.web.client import _URI as URI except ImportError: from twisted.web.client import URI maaslog = get_maas_logger("rpc.cluster") def catch_probe_and_enlist_error(name, failure): """Logs any errors when trying to probe and enlist a chassis.""" maaslog.error( "Failed to probe and enlist %s nodes: %s", name, failure.getErrorMessage()) return None class Cluster(RPCProtocol): """The RPC protocol supported by a cluster controller. 
This can be used on the client or server end of a connection; once a connection is established, AMP is symmetric. """ @cluster.Identify.responder def identify(self): """identify() Implementation of :py:class:`~provisioningserver.rpc.cluster.Identify`. """ with ClusterConfiguration.open() as config: return {b"ident": config.cluster_uuid} @cluster.Authenticate.responder def authenticate(self, message): secret = get_shared_secret_from_filesystem() salt = urandom(16) # 16 bytes of high grade noise. digest = calculate_digest(secret, message, salt) return {"digest": digest, "salt": salt} @cluster.ListBootImages.responder def list_boot_images(self): """list_boot_images() Implementation of :py:class:`~provisioningserver.rpc.cluster.ListBootImages`. """ return {"images": list_boot_images()} @cluster.ListBootImagesV2.responder def list_boot_images_v2(self): """list_boot_images_v2() Implementation of :py:class:`~provisioningserver.rpc.cluster.ListBootImagesV2`. """ return {"images": list_boot_images()} @cluster.ImportBootImages.responder def import_boot_images(self, sources, http_proxy=None, https_proxy=None): """import_boot_images() Implementation of :py:class:`~provisioningserver.rpc.cluster.ImportBootImages`. """ get_proxy_url = lambda url: None if url is None else url.geturl() import_boot_images( sources, http_proxy=get_proxy_url(http_proxy), https_proxy=get_proxy_url(https_proxy)) return {} @cluster.IsImportBootImagesRunning.responder def is_import_boot_images_running(self): """is_import_boot_images_running() Implementation of :py:class:`~provisioningserver.rpc.cluster.IsImportBootImagesRunning`. """ return {"running": is_import_boot_images_running()} @cluster.DescribePowerTypes.responder def describe_power_types(self): """describe_power_types() Implementation of :py:class:`~provisioningserver.rpc.cluster.DescribePowerTypes`. """ return { 'power_types': list(gen_power_types()), } @cluster.ListSupportedArchitectures.responder def list_supported_architectures(self): return { 'architectures': [ {'name': arch.name, 'description': arch.description} for _, arch in ArchitectureRegistry ], } @cluster.ListOperatingSystems.responder def list_operating_systems(self): """list_operating_systems() Implementation of :py:class:`~provisioningserver.rpc.cluster.ListOperatingSystems`. """ return {"osystems": gen_operating_systems()} @cluster.GetOSReleaseTitle.responder def get_os_release_title(self, osystem, release): """get_os_release_title() Implementation of :py:class:`~provisioningserver.rpc.cluster.GetOSReleaseTitle`. """ return {"title": get_os_release_title(osystem, release)} @cluster.ValidateLicenseKey.responder def validate_license_key(self, osystem, release, key): """validate_license_key() Implementation of :py:class:`~provisioningserver.rpc.cluster.ValidateLicenseKey`. """ return {"is_valid": validate_license_key(osystem, release, key)} @cluster.GetPreseedData.responder def get_preseed_data( self, osystem, preseed_type, node_system_id, node_hostname, consumer_key, token_key, token_secret, metadata_url): """get_preseed_data() Implementation of :py:class:`~provisioningserver.rpc.cluster.GetPreseedData`. 
""" return { "data": get_preseed_data( osystem, preseed_type, node_system_id, node_hostname, consumer_key, token_key, token_secret, metadata_url), } @cluster.PowerOn.responder def power_on(self, system_id, hostname, power_type, context): """Turn a node on.""" d = maybe_change_power_state( system_id, hostname, power_type, power_change='on', context=context) d.addCallback(lambda _: {}) return d @cluster.PowerOff.responder def power_off(self, system_id, hostname, power_type, context): """Turn a node off.""" d = maybe_change_power_state( system_id, hostname, power_type, power_change='off', context=context) d.addCallback(lambda _: {}) return d @cluster.PowerQuery.responder def power_query(self, system_id, hostname, power_type, context): d = get_power_state( system_id, hostname, power_type, context=context) d.addCallback(lambda x: {'state': x}) return d @cluster.PowerDriverCheck.responder def power_driver_check(self, power_type): """Return a list of missing power driver packages, if any.""" driver = power_drivers_by_name.get(power_type) if driver is None: raise UnknownPowerType( "No driver found for power type '%s'" % power_type) return {"missing_packages": driver.detect_missing_packages()} @cluster.ConfigureDHCPv4.responder def configure_dhcpv4(self, omapi_key, subnet_configs): server = dhcp.DHCPv4Server(omapi_key) d = concurrency.dhcp.run( deferToThread, dhcp.configure, server, subnet_configs) d.addCallback(lambda _: {}) return d @cluster.ConfigureDHCPv6.responder def configure_dhcpv6(self, omapi_key, subnet_configs): server = dhcp.DHCPv6Server(omapi_key) d = concurrency.dhcp.run( deferToThread, dhcp.configure, server, subnet_configs) d.addCallback(lambda _: {}) return d @cluster.CreateHostMaps.responder def create_host_maps(self, mappings, shared_key): d = concurrency.dhcp.run( deferToThread, create_host_maps, mappings, shared_key) d.addCallback(lambda _: {}) return d @cluster.RemoveHostMaps.responder def remove_host_maps(self, ip_addresses, shared_key): # Note that the `ip_addresses` parameter is now a list of # IPs *and* MAC addresses. Prior to MAAS 1.9 it was a list of # IP addresses because that's what was used as the key for the host # mappings but we now use the MAC as the key in order to be able # to assign the same IP to two NICs. As a result, this code (the host # map removal code) has to deal with legacy host maps using the IP # as the key and host maps with the MAC as the key. d = concurrency.dhcp.run( deferToThread, remove_host_maps, ip_addresses, shared_key) d.addCallback(lambda _: {}) return d @cluster.StartMonitors.responder def start_monitors(self, monitors): start_monitors(monitors) return {} @cluster.CancelMonitor.responder def cancel_timer(self, id): cancel_monitor(id) return {} @amp.StartTLS.responder def get_tls_parameters(self): """get_tls_parameters() Implementation of :py:class:`~twisted.protocols.amp.StartTLS`. """ try: from provisioningserver.rpc.testing import tls except ImportError: # This is not a development/test environment. # XXX: Return production TLS parameters. return {} else: return tls.get_tls_parameters_for_cluster() @cluster.EvaluateTag.responder def evaluate_tag(self, tag_name, tag_definition, tag_nsmap, credentials): """evaluate_tag() Implementation of :py:class:`~provisioningserver.rpc.cluster.EvaluateTag`. """ # It's got to run in a thread because it does blocking IO. d = deferToThread( evaluate_tag, tag_name, tag_definition, # Transform tag_nsmap into a format that LXML likes. 
{entry["prefix"]: entry["uri"] for entry in tag_nsmap}, # Parse the credential string into a 3-tuple. convert_string_to_tuple(credentials)) return d.addCallback(lambda _: {}) @cluster.AddVirsh.responder def add_virsh(self, user, poweraddr, password, prefix_filter, accept_all): """add_virsh() Implementation of :py:class:`~provisioningserver.rpc.cluster.AddVirsh`. """ d = deferToThread( probe_virsh_and_enlist, user, poweraddr, password, prefix_filter, accept_all) d.addErrback(partial(catch_probe_and_enlist_error, "virsh")) return {} @cluster.AddSeaMicro15k.responder def add_seamicro15k(self, user, mac, username, password, power_control, accept_all): """add_virsh() Implementation of :py:class:`~provisioningserver.rpc.cluster.AddSeaMicro15k`. """ ip = find_ip_via_arp(mac) if ip is not None: d = deferToThread( probe_seamicro15k_and_enlist, user, ip, username, password, power_control=power_control, accept_all=accept_all) d.addErrback( partial(catch_probe_and_enlist_error, "SeaMicro 15000")) else: message = "Couldn't find IP address for MAC %s" % mac maaslog.warning(message) raise exceptions.NoIPFoundForMACAddress(message) return {} @cluster.AddVMware.responder def add_vmware(self, user, host, username, password, port, protocol, prefix_filter, accept_all): """add_vmware() Implementation of :py:class:`~provisioningserver.rpc.cluster.AddVMware`. """ d = deferToThread( probe_vmware_and_enlist, user, host, username, password, port=port, protocol=protocol, prefix_filter=prefix_filter, accept_all=accept_all) d.addErrback( partial(catch_probe_and_enlist_error, "VMware")) return {} @cluster.EnlistNodesFromMSCM.responder def enlist_nodes_from_mscm(self, user, host, username, password, accept_all): """enlist_nodes_from_mscm() Implemention of :py:class:`~provisioningserver.rpc.cluster.EnlistNodesFromMSCM`. """ d = deferToThread( probe_and_enlist_mscm, user, host, username, password, accept_all) d.addErrback(partial(catch_probe_and_enlist_error, "Moonshot")) return {} @cluster.EnlistNodesFromUCSM.responder def enlist_nodes_from_ucsm(self, user, url, username, password, accept_all): """enlist_nodes_from_ucsm() Implemention of :py:class:`~provisioningserver.rpc.cluster.EnlistNodesFromUCSM`. """ d = deferToThread( probe_and_enlist_ucsm, user, url, username, password, accept_all) d.addErrback(partial(catch_probe_and_enlist_error, "UCS")) return {} @cluster.EnlistNodesFromMicrosoftOCS.responder def enlist_nodes_from_msftocs(self, user, ip, port, username, password, accept_all): """enlist_nodes_from_msftocs() Implemention of :py:class: `~provisioningserver.rpc.cluster.EnlistNodesFromMicrosoftOCS`. """ d = deferToThread( probe_and_enlist_msftocs, user, ip, port, username, password, accept_all) d.addErrback(partial(catch_probe_and_enlist_error, "MicrosoftOCS")) return {} @implementer(IConnection) class ClusterClient(Cluster): """The RPC protocol supported by a cluster controller, client version. This works hand-in-hand with ``ClusterClientService``, maintaining the latter's `connections` map. :ivar address: The `(host, port)` of the remote endpoint. :ivar eventloop: The event-loop this client is related to. :ivar service: A reference to the :class:`ClusterClientService` that made self. :ivar authenticated: A py:class:`DeferredValue` that will be set when the region has been authenticated. If the region has been authenticated, this will be ``True``, otherwise it will be ``False``. If there was an error, it will return a :py:class:`twisted.python.failure.Failure` via errback. 
    :ivar ready: A py:class:`DeferredValue` that will be set when this
        connection is up and has performed authentication on the region.
        If everything has gone smoothly it will be set to the name of
        the event-loop connected to, otherwise it will be set to:
        `RuntimeError` if the client service is not running; `KeyError`
        if there's already a live connection for this event-loop; or
        `AuthenticationFailed` if the authentication failed.
    """

    address = None
    eventloop = None
    service = None

    def __init__(self, address, eventloop, service):
        super(ClusterClient, self).__init__()
        self.address = address
        self.eventloop = eventloop
        self.service = service
        # Events for this protocol's life-cycle.
        self.authenticated = DeferredValue()
        self.ready = DeferredValue()

    @property
    def ident(self):
        """The ident of the remote event-loop."""
        return self.eventloop

    @inlineCallbacks
    def authenticateRegion(self):
        """Authenticate the region."""
        secret = get_shared_secret_from_filesystem()
        message = urandom(16)  # 16 bytes of the finest.
        response = yield self.callRemote(
            region.Authenticate, message=message)
        salt, digest = response["salt"], response["digest"]
        digest_local = calculate_digest(secret, message, salt)
        returnValue(digest == digest_local)

    def registerWithRegion(self):
        with ClusterConfiguration.open() as config:
            uuid = config.cluster_uuid
            url = config.maas_url

        networks = discover_networks()
        ip_addr_json = None
        try:
            ip_addr_json = get_ip_addr_json()
        except ExternalProcessError as epe:
            log.msg(
                "Warning: Could not gather IP address information: %s"
                % epe)

        def cb_register(_):
            log.msg(
                "Cluster '%s' registered (via %s)."
                % (uuid, self.eventloop))
            return True

        def eb_register(failure):
            failure.trap(exceptions.CannotRegisterCluster)
            log.msg(
                "Cluster '%s' REJECTED by the region (via %s)."
                % (uuid, self.eventloop))
            return False

        d = self.callRemote(
            region.Register, uuid=uuid, networks=networks,
            url=urlparse(url), ip_addr_json=ip_addr_json)
        return d.addCallbacks(cb_register, eb_register)

    @inlineCallbacks
    def performHandshake(self):
        d_authenticate = self.authenticateRegion()
        self.authenticated.observe(d_authenticate)
        authenticated = yield d_authenticate

        if authenticated:
            log.msg("Event-loop '%s' authenticated." % self.ident)
            registered = yield self.registerWithRegion()
            if registered:
                self.service.connections[self.eventloop] = self
                self.ready.set(self.eventloop)
            else:
                self.transport.loseConnection()
                self.ready.fail(
                    exceptions.RegistrationFailed(
                        "Event-loop '%s' rejected registration."
                        % self.ident))
        else:
            log.msg(
                "Event-loop '%s' FAILED authentication; "
                "dropping connection." % self.ident)
            self.transport.loseConnection()
            self.ready.fail(
                exceptions.AuthenticationFailed(
                    "Event-loop '%s' failed authentication."
                    % self.eventloop))

    def handshakeSucceeded(self, result):
        """The handshake (identify and authenticate) succeeded.

        This does *NOT* mean that the region was successfully
        authenticated, merely that the process of authentication did not
        encounter an error.
        """

    def handshakeFailed(self, failure):
        """The handshake (identify and authenticate) failed."""
        if failure.check(ConnectionClosed):
            # There has been a disconnection, clean or otherwise. There's
            # nothing we can do now, so do nothing. The reason will have
            # been logged elsewhere.
            self.ready.fail(failure)
        else:
            log.err(
                failure,
                "Event-loop '%s' handshake failed; "
                "dropping connection."
                % self.ident)
            self.transport.loseConnection()
            self.ready.fail(failure)

    def connectionMade(self):
        super(ClusterClient, self).connectionMade()

        if not self.service.running:
            log.msg(
                "Event-loop '%s' will be disconnected; the cluster's "
                "client service is not running." % self.ident)
            self.transport.loseConnection()
            self.authenticated.set(None)
            self.ready.fail(RuntimeError("Service not running."))
        elif self.eventloop in self.service.connections:
            log.msg(
                "Event-loop '%s' is already connected; "
                "dropping connection." % self.ident)
            self.transport.loseConnection()
            self.authenticated.set(None)
            self.ready.fail(KeyError(
                "Event-loop '%s' already connected." % self.eventloop))
        else:
            return self.performHandshake().addCallbacks(
                self.handshakeSucceeded, self.handshakeFailed)

    def connectionLost(self, reason):
        if self.eventloop in self.service.connections:
            if self.service.connections[self.eventloop] is self:
                del self.service.connections[self.eventloop]
        super(ClusterClient, self).connectionLost(reason)

    @inlineCallbacks
    def secureConnection(self):
        yield self.callRemote(amp.StartTLS, **self.get_tls_parameters())

        # For some weird reason (it's mentioned in Twisted's source),
        # TLS negotiation does not complete until we do something with
        # the connection. Here we check that the remote event-loop is
        # who we expected it to be.
        response = yield self.callRemote(region.Identify)
        remote_name = response.get("ident")
        if remote_name != self.eventloop:
            log.msg(
                "The remote event-loop identifies itself as %s, but "
                "%s was expected." % (remote_name, self.eventloop))
            self.transport.loseConnection()
            return

        # We should now have a full set of parameters for the transport.
        log.msg("Host certificate: %r" % self.hostCertificate)
        log.msg("Peer certificate: %r" % self.peerCertificate)


class PatchedURI(URI):

    @classmethod
    def fromBytes(cls, uri, defaultPort=None):
        """Patched replacement for `twisted.web.client._URI.fromBytes`.

        The Twisted version of this function breaks when you give it a
        URL whose netloc is based on an IPv6 address.
        """
        uri = uri.strip()
        scheme, netloc, path, params, query, fragment = http.urlparse(uri)

        if defaultPort is None:
            scheme_ports = {
                'https': 443,
                'http': 80,
                }
            defaultPort = scheme_ports.get(scheme, 80)

        if '[' in netloc:
            # IPv6 address. This is complicated.
            parsed_netloc = re.match(
                '\\[(?P<host>[0-9A-Fa-f:.]+)\\]([:](?P<port>[0-9]+))?$',
                netloc)
            host, port = parsed_netloc.group('host', 'port')
        elif ':' in netloc:
            # IPv4 address or hostname, with port spec. This is easy.
            host, port = netloc.split(':')
        else:
            # IPv4 address or hostname, without port spec. This is trivial.
            host = netloc
            port = None

        if port is None:
            port = defaultPort
        try:
            port = int(port)
        except ValueError:
            port = defaultPort

        return cls(scheme, netloc, host, port, path, params, query, fragment)


class ClusterClientService(TimerService, object):
    """A cluster controller RPC client service.

    This is a service - in the Twisted sense - that connects to a set of
    remote AMP endpoints. The endpoints are obtained from a view in the
    region controller and periodically refreshed; this list is used to
    update the connections maintained in this service.

    :ivar connections: A mapping of eventloop names to protocol
        instances connected to it.

    :ivar time_started: Records the time that `startService` was last
        called, or `None` if it hasn't yet.
    """

    INTERVAL_LOW = 2  # seconds.
    INTERVAL_MID = 10  # seconds.
    INTERVAL_HIGH = 30  # seconds.
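    # In brief (see _calculate_interval below): no connections, or the
    # region unavailable or only just started -> INTERVAL_LOW; connected
    # to some but not all advertised event-loops -> INTERVAL_MID; fully
    # connected -> INTERVAL_HIGH.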
    time_started = None

    def __init__(self, reactor):
        super(ClusterClientService, self).__init__(
            self._calculate_interval(None, None), self.update)
        self.connections = {}
        self.clock = reactor

        # XXX jtv 2014-09-23, bug=1372767: Fix
        # twisted.web.client._URI.fromBytes to handle IPv6 addresses.
        # A `getPage` call on Twisted's web client breaks if you give it
        # a URL with an IPv6 address, at the point where
        # `_makeGetterFactory` calls `fromBytes`. That last function
        # assumes that a colon can only occur in the URL's netloc
        # portion as part of a port specification.
        if hasattr(twisted.web.client, "_URI"):
            twisted.web.client._URI = PatchedURI
        else:
            twisted.web.client.URI = PatchedURI

    def startService(self):
        self.time_started = self.clock.seconds()
        super(ClusterClientService, self).startService()

    def getClient(self):
        """Returns a :class:`common.Client` connected to a region.

        The client is chosen at random.

        :raises: :py:class:`~.exceptions.NoConnectionsAvailable` when
            there are no open connections to a region controller.
        """
        conns = list(self.connections.viewvalues())
        if len(conns) == 0:
            raise exceptions.NoConnectionsAvailable()
        else:
            return common.Client(random.choice(conns))

    @inlineCallbacks
    def update(self):
        """Refresh outgoing connections.

        This obtains a list of endpoints from the region then connects
        to new ones and drops connections to those no longer used.
        """
        try:
            info_url = self._get_rpc_info_url()
            info = yield self._fetch_rpc_info(info_url)
            eventloops = info["eventloops"]
            if eventloops is None:
                # This means that the region process we've just asked
                # about RPC event-loop endpoints is not running the RPC
                # advertising service. It could be just starting up for
                # example.
                log.msg("Region is not advertising RPC endpoints.")
            else:
                yield self._update_connections(eventloops)
        except ConnectError as error:
            self._update_interval(None, len(self.connections))
            log.msg(
                "Region not available: %s "
                "(While requesting RPC info at %s)." % (error, info_url))
        except:
            self._update_interval(None, len(self.connections))
            log.err()
        else:
            if eventloops is None:
                # The advertising service on the region was not running
                # yet.
                self._update_interval(None, len(self.connections))
            else:
                self._update_interval(
                    len(eventloops), len(self.connections))

    @staticmethod
    def _get_rpc_info_url():
        """Return the URL to the RPC information page on the region."""
        with ClusterConfiguration.open() as config:
            url = urlparse(config.maas_url)
            url = url._replace(path="%s/rpc/" % url.path.rstrip("/"))
            url = url.geturl()
        return ascii_url(url)

    @classmethod
    def _fetch_rpc_info(cls, url):

        def catch_503_error(failure):
            # Catch `twisted.web.error.Error` if it has a 503 status
            # code. That means the region is not all the way up. Ignore
            # the error as this service will try again after the
            # calculated interval.
            failure.trap(web.error.Error)
            if failure.value.status != "503":
                failure.raiseException()
            else:
                return {"eventloops": None}

        d = getPage(url, agent=fullyQualifiedName(cls))
        d.addCallback(json.loads)
        d.addErrback(catch_503_error)
        return d

    def _calculate_interval(self, num_eventloops, num_connections):
        """Calculate the update interval.

        The interval is `INTERVAL_LOW` seconds when there are no
        connections, so that this can quickly obtain its first
        connection.

        The interval is also `INTERVAL_LOW` for a time after the service
        starts. This helps to get everything connected quickly when the
        cluster is started at a similar time to the region.

        The interval changes to `INTERVAL_MID` seconds when there are
        some connections, but fewer than there are event-loops.
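
        For example, one connection to a region advertising three
        event-loops yields `INTERVAL_MID`, i.e. an update every 10
        seconds.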

        After that it backs off to `INTERVAL_HIGH` seconds.
        """
        if self.time_started is not None:
            time_running = self.clock.seconds() - self.time_started
            if time_running < self.INTERVAL_HIGH:
                # This service has recently started; keep trying
                # regularly.
                return self.INTERVAL_LOW

        if num_eventloops is None:
            # The region is not available; keep trying regularly.
            return self.INTERVAL_LOW
        elif num_eventloops == 0:
            # The region is coming up; keep trying regularly.
            return self.INTERVAL_LOW
        elif num_connections == 0:
            # No connections to the region; keep trying regularly.
            return self.INTERVAL_LOW
        elif num_connections < num_eventloops:
            # Some connections to the region, but not to all event
            # loops; keep updating reasonably frequently.
            return self.INTERVAL_MID
        else:
            # Fully connected to the region; update every so often.
            return self.INTERVAL_HIGH

    def _update_interval(self, num_eventloops, num_connections):
        """Change the update interval."""
        self._loop.interval = self.step = self._calculate_interval(
            num_eventloops, num_connections)

    @inlineCallbacks
    def _update_connections(self, eventloops):
        """Update the persistent connections to the region.

        For each event-loop, ensure that there is (a) a connection
        established and that (b) that connection corresponds to one of
        the endpoints declared. If not (a), attempt to connect to each
        endpoint in turn. If not (b), immediately drop the connection
        and proceed as if not (a).

        For each established connection to an event-loop, check that
        it's still in the list of event-loops to which this cluster
        should connect. If not, immediately drop the connection.
        """
        # Ensure that the event-loop addresses are tuples so that
        # they'll work as dictionary keys.
        eventloops = {
            name: [tuple(address) for address in addresses]
            for name, addresses in eventloops.iteritems()
        }
        # Drop connections to event-loops that no longer include one of
        # this cluster's established connections among their advertised
        # endpoints. This is most likely to have happened because of
        # network reconfiguration on the machine hosting the event-loop,
        # and so the connection may have dropped already, but there's
        # nothing wrong with a bit of belt-and-braces engineering
        # between consenting adults.
        for eventloop, addresses in eventloops.iteritems():
            if eventloop in self.connections:
                connection = self.connections[eventloop]
                if connection.address not in addresses:
                    yield self._drop_connection(connection)
        # Create new connections to event-loops that the cluster does
        # not yet have a connection to. Try each advertised endpoint
        # (address) in turn until one of them bites.
        for eventloop, addresses in eventloops.iteritems():
            if eventloop not in self.connections:
                for address in addresses:
                    try:
                        yield self._make_connection(eventloop, address)
                    except ConnectError as error:
                        host, port = address
                        log.msg("Event-loop %s (%s:%d): %s" % (
                            eventloop, host, port, error))
                    except:
                        log.err()
                    else:
                        break
        # Remove connections to event-loops that are no longer
        # advertised by the RPC info view. Most likely this means that
        # the process hosting the event-loop is no longer running, but
        # it could be an indicator of a heavily loaded machine, or a
        # fault. In any case, it seems to make sense to disconnect.
        for eventloop in self.connections:
            if eventloop not in eventloops:
                connection = self.connections[eventloop]
                yield self._drop_connection(connection)

    def _make_connection(self, eventloop, address):
        """Connect to `eventloop` at `address`."""
        endpoint = TCP4ClientEndpoint(self.clock, *address)
        protocol = ClusterClient(address, eventloop, self)
        return connectProtocol(endpoint, protocol)

    def _drop_connection(self, connection):
        """Drop the given `connection`."""
        return connection.transport.loseConnection()
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/common.py0000644000000000000000000002102713056115004022047 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Common RPC classes and utilities."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "Authenticate",
    "Client",
    "Identify",
    "RPCProtocol",
]

from os import getpid
from socket import gethostname

from provisioningserver.rpc.interfaces import IConnection
from provisioningserver.utils.twisted import asynchronous
from twisted.internet.defer import Deferred
from twisted.protocols import amp
from twisted.python import log
from twisted.python.failure import Failure


class Identify(amp.Command):
    """Request the identity of the remote side, e.g. its UUID.

    :since: 1.5
    """

    response = [(b"ident", amp.Unicode())]


class Authenticate(amp.Command):
    """Authenticate the remote side.

    The procedure is as follows:

    - When establishing a new connection, the region and the cluster
      call `Authenticate` on each other, passing a random chunk of data
      in `message`. This message must be unique to avoid replay attacks.

    - The remote side adds some salt to the message, and calculates an
      HMAC digest, keyed with the shared secret. The salt is intended to
      prevent replay attacks: it prevents an intruder from
      authenticating itself by calling `Authenticate` on the caller (or
      another endpoint in the same MAAS installation) and sending the
      same message, receiving the digest and passing it back to the
      caller.

    - The remote side returns this digest and the salt. The caller
      performs the same calculation, and compares the digests.

    - If the digests match, the connection is put into rotation.

    - If the digests do not match, the connection is closed immediately,
      and an error is logged.

    :since: 1.7
    """

    arguments = [
        (b"message", amp.String()),
    ]
    response = [
        (b"digest", amp.String()),
        (b"salt", amp.String()),  # Is 'salt' the right term here?
    ]
    errors = []


class Client:
    """Wrapper around an :class:`amp.AMP` instance.

    Limits the API to a subset of :class:`amp.AMP`'s behaviour, with
    alterations to make it suitable for use from a thread outside of
    the reactor.
    """

    def __init__(self, conn):
        super(Client, self).__init__()
        assert IConnection.providedBy(conn), (
            "%r does not provide IConnection" % (conn,))
        self._conn = conn

    @property
    def ident(self):
        """Something that identifies the far end of the connection."""
        return self._conn.ident

    @asynchronous
    def __call__(self, cmd, *args, **kwargs):
        """Call a remote RPC method.

        This is how the client is normally used.

        :note: Though the call signature shows positional arguments,
            their use is an error. They're in the signature so this
            method can detect them and provide a better error message
            than that from Python.
Python's error message when arguments don't match the call's signature is not great at best, but it also makes it hard to figure out the receiver when the `TypeError` is raised in a different stack from the caller's, e.g. when calling into the Twisted reactor from a thread. :param cmd: The `amp.Command` child class representing the remote method to be invoked. :param kwargs: Any parameters to the remote method. Only keyword arguments are accepted. :return: A deferred result. Call its `wait` method (with a timeout in seconds) to block on the call's completion. """ if len(args) != 0: receiver_name = "%s.%s" % ( self.__module__, self.__class__.__name__) raise TypeError( "%s called with %d positional arguments, %r, but positional " "arguments are not supported. Usage: client(command, arg1=" "value1, ...)" % (receiver_name, len(args), args)) return self._conn.callRemote(cmd, **kwargs) @asynchronous def getHostCertificate(self): return self._conn.hostCertificate @asynchronous def getPeerCertificate(self): return self._conn.peerCertificate @asynchronous def isSecure(self): return self._conn.peerCertificate is not None def __eq__(self, other): return type(other) is type(self) and other._conn is self._conn def __hash__(self): return hash(self._conn) def make_command_ref(box): """Make a textual description of an AMP command box. This is intended to help correlating exceptions between distributed parts of MAAS. The reference takes the form:: $hostname:pid=$pid:cmd=$command_name:ask=$ask_sequence where: * ``hostname`` is the hostname of the machine on which the error occurred. * ``pid`` is the process ID of where the error originated. * ``command_name`` is the AMP command name. * ``ask_sequence`` is the sequence number used for RPC calls that expect a reply; see http://amp-protocol.net/ for details. An extended variant might be valuable: a ``make_box_ref`` function that returns unambiguous references for command, answer, and errors boxes. """ return "%s:pid=%d:cmd=%s:ask=%s" % ( gethostname(), getpid(), box[amp.COMMAND], box.get(amp.ASK, "none")) class RPCProtocol(amp.AMP, object): """A specialisation of `amp.AMP`. It's hard to track exactly when an `amp.AMP` protocol is connected to its transport, or disconnected, from the "outside". It's necessary to subclass and override `connectionMade` and `connectionLost` and signal from there, which is what this class does. :ivar onConnectionMade: A `Deferred` that fires when `connectionMade` has been called, i.e. this protocol is now connected. :ivar onConnectionLost: A `Deferred` that fires when `connectionLost` has been called, i.e. this protocol is no longer connected. """ def __init__(self): super(RPCProtocol, self).__init__() self.onConnectionMade = Deferred() self.onConnectionLost = Deferred() def connectionMade(self): super(RPCProtocol, self).connectionMade() self.onConnectionMade.callback(None) def connectionLost(self, reason): super(RPCProtocol, self).connectionLost(reason) self.onConnectionLost.callback(None) def dispatchCommand(self, box): """Call up, but coerce errors into non-fatal failures. This is called by `_commandReceived`, which is responsible for capturing unhandled errors and transmitting them back to the remote side. It does this within a :class:`amp.QuitBox` which immediately disconnects the transport after being transmitted. Here we capture all errors before `_commandReceived` sees them and wrap them with :class:`amp.RemoteAmpError`. This prevents the disconnecting behaviour. 
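
        The wrapped error carries a reference from `make_command_ref`,
        e.g. something of the form
        ``myhost:pid=4321:cmd=Identify:ask=17`` (the values here are
        illustrative), so log entries on both ends of the connection
        can be correlated.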
""" d = super(RPCProtocol, self).dispatchCommand(box) def coerce_error(failure): if failure.check(amp.RemoteAmpError): return failure else: command = box[amp.COMMAND] command_ref = make_command_ref(box) log.err(failure, ( "Unhandled failure dispatching AMP command. This is " "probably a bug. Please ensure that this error is handled " "within application code or declared in the signature of " "the %s command. [%s]") % (command, command_ref)) return Failure(amp.RemoteAmpError( amp.UNHANDLED_ERROR_CODE, b"Unknown Error [%s]" % command_ref.encode("ascii"), fatal=False, local=failure)) return d.addErrback(coerce_error) def unhandledError(self, failure): """Terminal errback, after application code has seen the failure. `amp.BoxDispatcher.unhandledError` calls the `amp.IBoxSender`'s `unhandledError`. In the default implementation this disconnects the transport. Here we instead log the failure but do *not* disconnect because it's too disruptive to the running of MAAS. """ log.err(failure, ( "Unhandled failure during AMP request. This is probably a bug. " "Please ensure that this error is handled within application " "code.")) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/dhcp.py0000644000000000000000000002334213056115004021477 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC helpers relating to DHCP.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "configure", "create_host_maps", "DHCPv4Server", "DHCPv6Server", "remove_host_maps", ] import os import time from provisioningserver.dhcp import ( DHCPv4Server, DHCPv6Server, ) from provisioningserver.dhcp.config import get_config from provisioningserver.dhcp.omshell import Omshell from provisioningserver.drivers.service import ( SERVICE_STATE, ServiceRegistry, ) from provisioningserver.logger import get_maas_logger from provisioningserver.rpc.exceptions import ( CannotConfigureDHCP, CannotCreateHostMap, CannotRemoveHostMap, ) from provisioningserver.service_monitor import ( service_monitor, ServiceActionError, ) from provisioningserver.utils.fs import ( sudo_delete_file, sudo_write_file, ) from provisioningserver.utils.shell import ExternalProcessError from provisioningserver.utils.twisted import synchronous maaslog = get_maas_logger("dhcp") @synchronous def configure(server, subnet_configs): """Configure the DHCPv6/DHCPv4 server, and restart it as appropriate. :param server: A `DHCPServer` instance. :param subnet_configs: List of dicts with subnet parameters for each subnet for which the DHCP server should serve DHCP. If no subnets are defined, the DHCP server will be stopped. """ stopping = len(subnet_configs) == 0 if stopping: if os.path.exists(server.config_filename): sudo_delete_file(server.config_filename) service = ServiceRegistry.get_item(server.dhcp_service) service.off() try: service_monitor.ensure_service(server.dhcp_service) except ServiceActionError as e: # Error is already logged by the service monitor, nothing to # log for this exception. 
raise CannotConfigureDHCP( "%s server failed to stop: %s" % ( server.descriptive_name, e)) except Exception as e: maaslog.error( "%s server failed to stop: %s", server.descriptive_name, e) raise CannotConfigureDHCP( "%s server failed to stop: %s" % ( server.descriptive_name, e)) else: dhcpd_config = get_config( server.template_basename, omapi_key=server.omapi_key, dhcp_subnets=subnet_configs) interfaces = {subnet['interface'] for subnet in subnet_configs} interfaces_config = ' '.join(sorted(interfaces)) try: sudo_write_file(server.config_filename, dhcpd_config) sudo_write_file(server.interfaces_filename, interfaces_config) except ExternalProcessError as e: # ExternalProcessError.__unicode__ contains a generic failure # message as well as the command and its error output. On the # other hand, ExternalProcessError.output_as_unicode contains just # the error output which is probably the best information on what # went wrong. Log the full error information, but keep the # exception message short and to the point. maaslog.error( "Could not rewrite %s server configuration (for network " "interfaces %s): %s", server.descriptive_name, interfaces_config, unicode(e)) raise CannotConfigureDHCP( "Could not rewrite %s server configuration: %s" % ( server.descriptive_name, e.output_as_unicode)) service = ServiceRegistry.get_item(server.dhcp_service) service.on() try: service_monitor.restart_service(server.dhcp_service) except ServiceActionError as e: # Error is already logged by the service monitor, nothing to # log for this exception. raise CannotConfigureDHCP( "%s server failed to restart: %s" % ( server.descriptive_name, e)) except Exception as e: maaslog.error( "%s server failed to restart (for network interfaces " "%s): %s", server.descriptive_name, interfaces_config, e) raise CannotConfigureDHCP( "%s server failed to restart: %s" % ( server.descriptive_name, e)) def _try_omshell_connection(): """Try to connect to the DHCP server using Omshell. Tries a maximum of 3 times for a total of 1.5 seconds. """ omshell = Omshell( server_address='127.0.0.1', shared_key="") for _ in range(3): connectable = omshell.try_connection() if connectable: return True else: # Not able to connect. Wait half a second and # try again. time.sleep(0.5) return False def _is_dhcpv4_managed_and_active(exception): """Ensure that the DHCPv4 server is accessible (if necessary), and return its status. Returns True if the DHCPv4 server is accessible and should be accessible. Returns False if the DHCPv4 server is not accessible, and should not be accessible. Raise `exception` if the DHCP server should be accessible, but cannot be started. :return:bool """ service = ServiceRegistry.get_item("dhcp4") if service.is_on(): if service_monitor.get_service_state("dhcp4") != SERVICE_STATE.ON: try: service_monitor.ensure_service("dhcp4") if not _try_omshell_connection(): raise exception( "DHCPv4 server started but was unable to connect " "to omshell.") else: return True except ServiceActionError as e: # Error is already logged by the service monitor, nothing to # log for this exception. raise exception("DHCPv4 server failed to start: %s" % e) except Exception as e: error_msg = "DHCPv4 server failed to start: %s" % e maaslog.error(error_msg) raise exception(error_msg) else: # Service should be on and is already on, nothing needs to be done. return True else: # This is not an error; it just needs to be distinguished here so it # can be handled differently. 
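        # Callers such as create_host_maps() and remove_host_maps()
        # treat False as a no-op: the server is off and is meant to be
        # off.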
        return False


@synchronous
def create_host_maps(mappings, shared_key):
    """Create DHCP host maps for the given mappings.

    :param mappings: A list of dicts containing ``ip_address`` and
        ``mac_address`` keys.
    :param shared_key: The key used to access the DHCP server via OMAPI.
    """
    if not _is_dhcpv4_managed_and_active(CannotCreateHostMap):
        # This will raise if DHCP is offline, but should be online.
        # If the server is offline *and* should be offline, we'll get
        # False back, which means this is a no-op.
        return

    # See bug 1039362 regarding server_address.
    omshell = Omshell(server_address='127.0.0.1', shared_key=shared_key)
    for mapping in mappings:
        ip_address = mapping["ip_address"]
        mac_address = mapping["mac_address"]
        try:
            omshell.create(ip_address, mac_address)
        except ExternalProcessError as e:
            maaslog.error(
                "Could not create host map for %s with address %s: %s",
                mac_address, ip_address, unicode(e))
            if 'not connected.' in e.output_as_unicode:
                raise CannotCreateHostMap(
                    "The DHCP server could not be reached.")
            else:
                raise CannotCreateHostMap("%s -> %s: %s" % (
                    mac_address, ip_address, e.output_as_unicode))


@synchronous
def remove_host_maps(identifiers, shared_key):
    """Remove DHCP host maps for the given identifiers.

    Additionally, this will ensure that any lease present for the MAC
    address(es) supplied is also forcefully expired. Generally, host
    maps don't create leases unless the host map is inside the dynamic
    range, however this is still safe to call and can be called to
    guarantee that any IP address is left expired regardless of whether
    it's in the dynamic range or not.

    :param identifiers: A list of MAC or IP addresses whose host maps
        should be removed.
    :param shared_key: The key used to access the DHCP server via OMAPI.
    """
    if not _is_dhcpv4_managed_and_active(CannotRemoveHostMap):
        # This will raise if DHCP is offline, but should be online.
        # If the server is offline *and* should be offline, we'll get
        # False back, which means this is a no-op.
        return

    # See bug 1039362 regarding server_address.
    omshell = Omshell(server_address='127.0.0.1', shared_key=shared_key)
    for identifier in identifiers:
        try:
            # Note: identifiers can be either MAC addresses or IP
            # addresses; the region will send us a set of both, due to
            # backward compatibility issues (MAAS 1.8 used the IP
            # address as the identifier; now we use the MAC.)
            omshell.remove(identifier)
            omshell.nullify_lease(identifier)
        except ExternalProcessError as e:
            maaslog.error(
                "Could not remove host map for %s: %s.",
                identifier, unicode(e))
            if 'not connected.' in e.output_as_unicode:
                raise CannotRemoveHostMap(
                    "The DHCP server could not be reached.")
            else:
                raise CannotRemoveHostMap("%s: %s." % (
                    identifier, e.output_as_unicode))
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/exceptions.py0000644000000000000000000000620713056115004022743 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Errors arising from the RPC system.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "AuthenticationFailed", "CannotConfigureDHCP", "CannotCreateHostMap", "CannotRegisterCluster", "CannotRemoveHostMap", "CommissionNodeFailed", "NoConnectionsAvailable", "NodeAlreadyExists", "NodeStateViolation", "NoIPFoundForMACAddress", "NoSuchCluster", "NoSuchEventType", "NoSuchNode", "NoSuchOperatingSystem", "PowerActionAlreadyInProgress", "RegistrationFailed", ] class NoConnectionsAvailable(Exception): """There is no connection available.""" def __init__(self, message='', uuid=None): super(NoConnectionsAvailable, self).__init__(message) self.uuid = uuid class NoSuchEventType(Exception): """The specified event type was not found.""" @classmethod def from_name(cls, name): return cls( "Event type with name=%s could not be found." % name ) class NoSuchNode(Exception): """The specified node was not found.""" @classmethod def from_system_id(cls, system_id): return cls( "Node with system_id=%s could not be found." % system_id ) @classmethod def from_mac_address(cls, mac_address): return cls( "Node with mac_address=%s could not be found." % mac_address ) class NodeStateViolation(Exception): """The specified state transition cannot be performed.""" class NoSuchCluster(Exception): """The specified cluster was not found.""" @classmethod def from_uuid(cls, uuid): return cls( "The cluster with UUID %s could not " "be found." % uuid ) class NoSuchOperatingSystem(Exception): """The specified OS was not found.""" class CommissionNodeFailed(Exception): """Failure to commission node.""" class CannotConfigureDHCP(Exception): """Failure while configuring a DHCP server.""" class CannotCreateHostMap(Exception): """The host map could not be created.""" class CannotRemoveHostMap(Exception): """The host map could not be removed.""" class NodeAlreadyExists(Exception): """A node already exists with a given MAC address.""" class NoIPFoundForMACAddress(Exception): """No IP was found for a given MAC address.""" class PowerActionAlreadyInProgress(Exception): """A power action was requested on a node where a power action is already in progress. """ class CannotRegisterCluster(Exception): """The cluster could not be registered.""" @classmethod def from_uuid(cls, uuid, message): return cls( "The cluster with UUID %s could not " "be registered:\n%s" % (uuid, message) ) class AuthenticationFailed(Exception): """One or both sides of the connection failed to authenticate.""" class RegistrationFailed(Exception): """The region did not or was not able to register the cluster.""" maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/interfaces.py0000644000000000000000000000146113056115004022702 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Interfaces for the RPC implementation.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from zope import interface class IConnection(interface.Interface): ident = interface.Attribute( "ident", "An identifier far end of the connection.") hostCertificate = interface.Attribute( "hostCertificate", "The certificate used locally for TLS.") peerCertificate = interface.Attribute( "peerCertificate", "The certificate used remotely for TLS.") def callRemote(cmd, **arguments): """Call a remote method with the given arguments.""" maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/monitors.py0000644000000000000000000000444713056115004022440 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC helpers for monitors.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "cancel_monitor", "start_monitors", ] from datetime import datetime from provisioningserver.logger import get_maas_logger from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import NoConnectionsAvailable from provisioningserver.rpc.region import MonitorExpired from twisted.internet import reactor from twisted.protocols import amp maaslog = get_maas_logger("monitors") # Currently running timers; contains dict with keys of ID mapping to a # (delayed_call, context) pair. running_monitors = dict() def start_monitors(monitors, clock=reactor): """RPC responder to start monitors as specified. :param monitors: a `StartMonitors` message. Right now the monitors only implement a timer. Will create one delayed callback for each of the monitors and if it reaches its deadline, call `MonitorExpired` in the region passing back the monitor ID. """ for monitor in monitors: delay = monitor["deadline"] - datetime.now(amp.utc) monitor_id = monitor["id"] if monitor_id in running_monitors: dc, _ = running_monitors.pop(monitor_id) dc.cancel() call = clock.callLater( delay.total_seconds(), monitor_expired, monitor_id) running_monitors[monitor_id] = (call, monitor["context"]) def monitor_expired(monitor_id): """Called when a monitor hits its deadline. Call MonitorExpired with the context for the monitor. """ _, context = running_monitors.pop(monitor_id) try: client = getRegionClient() except NoConnectionsAvailable: maaslog.error( "Lost connection to the region, unable to fire timer with ID: %s", monitor_id) return None return client(MonitorExpired, id=monitor_id, context=context) def cancel_monitor(monitor_id): """Called from the region to cancel a running timer.""" try: dc, _ = running_monitors.pop(monitor_id) except KeyError: return dc.cancel() maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/osystems.py0000644000000000000000000000720013056115004022442 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC helpers relating to operating systems.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "gen_operating_systems", "validate_license_key", ] from provisioningserver.drivers.osystem import ( Node, OperatingSystemRegistry, Token, ) from provisioningserver.rpc import exceptions def gen_operating_system_releases(osystem): """Yield operating system release dicts. 

    Each dict adheres to the response specification of an operating
    system release in the ``ListOperatingSystems`` RPC call.
    """
    releases_for_commissioning = set(
        osystem.get_supported_commissioning_releases())
    for release in osystem.get_supported_releases():
        requires_license_key = osystem.requires_license_key(release)
        can_commission = release in releases_for_commissioning
        yield {
            "name": release,
            "title": osystem.get_release_title(release),
            "requires_license_key": requires_license_key,
            "can_commission": can_commission,
        }


def gen_operating_systems():
    """Yield operating system dicts.

    Each dict adheres to the response specification of an operating
    system in the ``ListOperatingSystems`` RPC call.
    """
    for _, os in sorted(OperatingSystemRegistry):
        default_release = os.get_default_release()
        default_commissioning_release = os.get_default_commissioning_release()
        yield {
            "name": os.name,
            "title": os.title,
            "releases": gen_operating_system_releases(os),
            "default_release": default_release,
            "default_commissioning_release": default_commissioning_release,
        }


def get_os_release_title(osystem, release):
    """Get the title for the operating system's release.

    :raises NoSuchOperatingSystem: If ``osystem`` is not found.
    """
    try:
        osystem = OperatingSystemRegistry[osystem]
    except KeyError:
        raise exceptions.NoSuchOperatingSystem(osystem)
    else:
        title = osystem.get_release_title(release)
        if title is None:
            return ""
        return title


def validate_license_key(osystem, release, key):
    """Validate a license key.

    :raises NoSuchOperatingSystem: If ``osystem`` is not found.
    """
    try:
        osystem = OperatingSystemRegistry[osystem]
    except KeyError:
        raise exceptions.NoSuchOperatingSystem(osystem)
    else:
        return osystem.validate_license_key(release, key)


def get_preseed_data(
        osystem, preseed_type, node_system_id, node_hostname,
        consumer_key, token_key, token_secret, metadata_url):
    """Compose preseed data for the given node.

    :param osystem: The name of the node's operating system.
    :param preseed_type: The preseed type being composed.
    :param node_system_id: The system ID of the node for which a
        preseed is being composed.
    :param node_hostname: The hostname of that node.
    :param consumer_key: OAuth consumer key for the metadata URL.
    :param token_key: OAuth token key for the metadata URL.
    :param token_secret: OAuth token secret for the metadata URL.
    :param metadata_url: The metadata URL for the node.
    :type metadata_url: :py:class:`urlparse.ParseResult`
    :return: Preseed data for the given node.
    :raise NotImplementedError: when the specified operating system does
        not require custom preseed data.
    """
    try:
        osystem = OperatingSystemRegistry[osystem]
    except KeyError:
        raise exceptions.NoSuchOperatingSystem(osystem)
    else:
        return osystem.compose_preseed(
            preseed_type, Node(node_system_id, node_hostname),
            Token(consumer_key, token_key, token_secret),
            metadata_url.geturl())
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/region.py0000644000000000000000000002546213056115004022051 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""RPC declarations for the region.

These are commands that a region controller ought to respond to.
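
A cluster invokes these through a connected RPC client. A minimal
sketch, assuming a connection is available and ``system_id`` names an
existing node::

    client = getRegionClient()
    d = client(
        MarkNodeFailed, system_id=system_id,
        error_description="Power query failed.")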
""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "Authenticate", "CreateNode", "CommissionNode", "GetArchiveMirrors", "GetBootSources", "GetBootSourcesV2", "GetClusterInterfaces", "GetProxies", "Identify", "ListNodePowerParameters", "MarkNodeFailed", "MonitorExpired", "Register", "RegisterEventType", "ReloadCluster", "ReportBootImages", "ReportForeignDHCPServer", "RequestNodeInfoByMACAddress", "SendEvent", "SendEventMACAddress", "UpdateLeases", "UpdateNodePowerState", ] from provisioningserver.rpc.arguments import ( Bytes, CompressedAmpList, ParsedURL, StructureAsJSON, ) from provisioningserver.rpc.common import ( Authenticate, Identify, ) from provisioningserver.rpc.exceptions import ( CannotRegisterCluster, CommissionNodeFailed, NodeAlreadyExists, NodeStateViolation, NoSuchCluster, NoSuchEventType, NoSuchNode, ) from twisted.protocols import amp class Register(amp.Command): """Register a cluster with the region controller. This is the last part of the Authenticate and Register two-step. See cluster-bootstrap_ for an explanation. :since: 1.7 """ arguments = [ (b"uuid", amp.Unicode()), (b"networks", amp.AmpList([ (b"interface", amp.Unicode()), (b"ip", amp.Unicode()), (b"subnet_mask", amp.Unicode()), ], optional=True)), # The URL for the region as seen by the cluster. (b"url", ParsedURL(optional=True)), (b"ip_addr_json", amp.Unicode(optional=True)), ] response = [] errors = { CannotRegisterCluster: b"CannotRegisterCluster", } class ReportBootImages(amp.Command): """Report boot images available on the invoking cluster controller. :since: 1.5 """ arguments = [ # The cluster UUID. (b"uuid", amp.Unicode()), (b"images", amp.AmpList( [(b"architecture", amp.Unicode()), (b"subarchitecture", amp.Unicode()), (b"release", amp.Unicode()), (b"purpose", amp.Unicode())])), ] response = [] errors = [] class GetBootSources(amp.Command): """Report boot sources and selections for the given cluster. :since: 1.6 :deprecated: 1.7 """ arguments = [ # The cluster UUID. (b"uuid", amp.Unicode()), ] response = [ (b"sources", amp.AmpList( [(b"url", amp.Unicode()), (b"keyring_data", Bytes()), (b"selections", amp.AmpList( [(b"release", amp.Unicode()), (b"arches", amp.ListOf(amp.Unicode())), (b"subarches", amp.ListOf(amp.Unicode())), (b"labels", amp.ListOf(amp.Unicode()))]))])), ] errors = [] class GetBootSourcesV2(amp.Command): """Report boot sources and selections for the given cluster. Includes the new os field for the selections. :since: 1.7 """ arguments = [ # The cluster UUID. (b"uuid", amp.Unicode()), ] response = [ (b"sources", amp.AmpList( [(b"url", amp.Unicode()), (b"keyring_data", Bytes()), (b"selections", amp.AmpList( [(b"os", amp.Unicode()), (b"release", amp.Unicode()), (b"arches", amp.ListOf(amp.Unicode())), (b"subarches", amp.ListOf(amp.Unicode())), (b"labels", amp.ListOf(amp.Unicode()))]))])), ] errors = [] class UpdateLeases(amp.Command): """Report DHCP leases on the invoking cluster controller. :since: 1.7 """ arguments = [ # The cluster UUID. (b"uuid", amp.Unicode()), (b"mappings", CompressedAmpList( [(b"ip", amp.Unicode()), (b"mac", amp.Unicode())])) ] response = [] errors = { NoSuchCluster: b"NoSuchCluster", } class GetArchiveMirrors(amp.Command): """Return the Main and Port mirrors to use. :since: 1.7 """ arguments = [] response = [ (b"main", ParsedURL()), (b"ports", ParsedURL()), ] errors = [] class GetProxies(amp.Command): """Return the HTTP and HTTPS proxies to use. 
:since: 1.6 """ arguments = [] response = [ (b"http", ParsedURL(optional=True)), (b"https", ParsedURL(optional=True)), ] errors = [] class MarkNodeFailed(amp.Command): """Mark a node as 'broken'. :since: 1.7 """ arguments = [ # The node's system_id. (b"system_id", amp.Unicode()), # The error description. (b"error_description", amp.Unicode()), ] response = [] errors = { NodeStateViolation: b"NodeStateViolation", NoSuchNode: b"NoSuchNode", } class ListNodePowerParameters(amp.Command): """Return power parameters for the nodes in the specified cluster. This will only return power parameters for nodes that have power types for which MAAS has a query capability. It will return nodes in priority order. Those nodes at the beginning of the list should be queried first. It may return an empty list. This means that all nodes have been recently queried. Take a break before asking again. :since: 1.7 """ arguments = [ # The cluster UUID. (b"uuid", amp.Unicode()), ] response = [ (b"nodes", amp.AmpList( [(b"system_id", amp.Unicode()), (b"hostname", amp.Unicode()), (b"power_state", amp.Unicode()), (b"power_type", amp.Unicode()), # We can't define a tighter schema here because this is a highly # variable bag of arguments from a variety of sources. (b"context", StructureAsJSON())])), ] errors = { NoSuchCluster: b"NoSuchCluster", } class UpdateNodePowerState(amp.Command): """Update Node Power State. :since: 1.7 """ arguments = [ # The node's system_id. (b"system_id", amp.Unicode()), # The node's power_state. (b"power_state", amp.Unicode()), ] response = [] errors = {NoSuchNode: b"NoSuchNode"} class RegisterEventType(amp.Command): """Register an event type. :since: 1.7 """ arguments = [ (b"name", amp.Unicode()), (b"description", amp.Unicode()), (b"level", amp.Integer()), ] response = [] errors = [] class SendEvent(amp.Command): """Send an event. :since: 1.7 """ arguments = [ (b"system_id", amp.Unicode()), (b"type_name", amp.Unicode()), (b"description", amp.Unicode()), ] response = [] errors = { # In practice, neither NoSuchNode nor NoSuchEventType will be returned # by the region controller as of MAAS 1.9 because the region no longer # waits for the database work to complete. NoSuchNode: b"NoSuchNode", NoSuchEventType: b"NoSuchEventType" } class SendEventMACAddress(amp.Command): """Send an event. :since: 1.7 """ arguments = [ (b"mac_address", amp.Unicode()), (b"type_name", amp.Unicode()), (b"description", amp.Unicode()), ] response = [] errors = { # In practice, neither NoSuchNode nor NoSuchEventType will be returned # by the region controller as of MAAS 1.9 because the region no longer # waits for the database work to complete. NoSuchNode: b"NoSuchNode", NoSuchEventType: b"NoSuchEventType" } class ReportForeignDHCPServer(amp.Command): """Report a foreign DHCP server on the cluster's network. :since: 1.7 """ arguments = [ (b"cluster_uuid", amp.Unicode()), (b"interface_name", amp.Unicode()), (b"foreign_dhcp_ip", amp.Unicode(optional=True)), ] response = [] errors = [] class GetClusterInterfaces(amp.Command): """Fetch the known interfaces for a cluster from the region. :since: 1.7 """ arguments = [ (b"cluster_uuid", amp.Unicode()), ] response = [ (b"interfaces", amp.AmpList( [(b"name", amp.Unicode()), (b"interface", amp.Unicode()), (b"ip", amp.Unicode())])) ] errors = [] class CreateNode(amp.Command): """Create a node on a given cluster. 
:since: 1.7 """ arguments = [ (b'cluster_uuid', amp.Unicode()), (b'architecture', amp.Unicode()), (b'power_type', amp.Unicode()), (b'power_parameters', amp.Unicode()), (b'mac_addresses', amp.ListOf(amp.Unicode())), (b'hostname', amp.Unicode(optional=True)), ] response = [ (b'system_id', amp.Unicode()), ] errors = { NodeAlreadyExists: b"NodeAlreadyExists", } class CommissionNode(amp.Command): """Commission node. :since: 1.7 """ arguments = [ (b'system_id', amp.Unicode()), (b'user', amp.Unicode()), ] response = [] errors = { CommissionNodeFailed: b"CommissionNodeFailed", } class MonitorExpired(amp.Command): """Called by a cluster when a running monitor hits its deadline. The original context parameter from the StartMonitors call is returned. :since: 1.7 """ arguments = [ (b"id", amp.Unicode()), (b"context", StructureAsJSON()), ] response = [] errors = [] class ReloadCluster(amp.Command): """Called by a cluster when it wants to reload its state. The region may respond with many different calls to the cluster that will give it all the information it needs to restore state (for example when it got restarted). For example, the region will restore all the timers in the cluster, so none or many StartTimer calls may be received after the cluster issues this command. :since: 1.7 """ arguments = [ (b"cluster_uuid", amp.Unicode()), ] response = [] errors = [] class RequestNodeInfoByMACAddress(amp.Command): """Request Node information by mac address. :since: 1.7 """ arguments = [ (b"mac_address", amp.Unicode()), ] response = [ (b"system_id", amp.Unicode()), (b"hostname", amp.Unicode()), (b"status", amp.Integer()), (b"boot_type", amp.Unicode()), (b"osystem", amp.Unicode()), (b"distro_series", amp.Unicode()), (b"architecture", amp.Unicode()), (b"purpose", amp.Unicode()), ] errors = { NoSuchNode: b"NoSuchNode", } maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tags.py0000644000000000000000000000261513056115004021517 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """RPC helpers for dealing with tags.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "evaluate_tag", ] from apiclient.maas_client import ( MAASClient, MAASDispatcher, MAASOAuth, ) from provisioningserver.config import ClusterConfiguration from provisioningserver.tags import process_node_tags from provisioningserver.utils.twisted import synchronous @synchronous def evaluate_tag(tag_name, tag_definition, tag_nsmap, credentials): """Evaluate `tag_definition` against this cluster's nodes' details. :param tag_name: The name of the tag, used for logging. :param tag_definition: The XPath expression of the tag. :param tag_nsmap: The namespace map as used by LXML's ETree library. :param credentials: A 3-tuple of OAuth credentials. 
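
    A minimal sketch (the tag name, XPath, and credentials here are
    illustrative)::

        evaluate_tag(
            "has-display", "//node[@class='display']", {},
            ("consumer-key", "token-key", "token-secret"))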
""" with ClusterConfiguration.open() as config: cluster_uuid = config.cluster_uuid maas_url = config.maas_url client = MAASClient( auth=MAASOAuth(*credentials), dispatcher=MAASDispatcher(), base_url=maas_url) process_node_tags( tag_name=tag_name, tag_definition=tag_definition, tag_nsmap=tag_nsmap, client=client, nodegroup_uuid=cluster_uuid) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/0000755000000000000000000000000013056115004021660 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/0000755000000000000000000000000013056115004021345 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/__init__.py0000644000000000000000000004176713056115004024010 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Testing helpers for RPC implementations.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "are_valid_tls_parameters", "call_responder", "make_amp_protocol_factory", "MockClusterToRegionRPCFixture", "MockLiveClusterToRegionRPCFixture", ] from abc import ( ABCMeta, abstractmethod, ) import collections import itertools from os import path import fixtures from maastesting.factory import factory from maastesting.fixtures import TempDirectory from maastesting.twisted import always_succeed_with from mock import ( Mock, sentinel, ) import provisioningserver from provisioningserver.rpc import region from provisioningserver.rpc.clusterservice import ( Cluster, ClusterClient, ClusterClientService, ) from provisioningserver.rpc.common import RPCProtocol from provisioningserver.rpc.testing.tls import get_tls_parameters_for_region from provisioningserver.security import ( get_shared_secret_from_filesystem, set_shared_secret_on_filesystem, ) from provisioningserver.utils.twisted import ( asynchronous, callOut, ) from testtools.matchers import ( AllMatch, IsInstance, MatchesAll, MatchesDict, ) from twisted.internet import ( defer, endpoints, reactor, ssl, ) from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.internet.protocol import Factory from twisted.internet.task import Clock from twisted.protocols import amp from twisted.python import reflect from twisted.python.failure import Failure from twisted.test import iosim def call_responder(protocol, command, arguments): """Call `command` responder in `protocol` with given `arguments`. Serialises the arguments and deserialises the response too. """ responder = protocol.locateResponder(command.commandName) arguments = command.makeArguments(arguments, protocol) d = responder(arguments) d.addCallback(command.parseResponse, protocol) def eb_massage_error(error): if error.check(amp.RemoteAmpError): # Convert remote errors back into local errors using the # command's error map if possible. error_type = command.reverseErrors.get( error.value.errorCode, amp.UnknownRemoteError) return Failure(error_type(error.value.description)) else: # Exceptions raised in responders that aren't declared in that # responder's schema can get through to here without being wrapped # in RemoteAmpError. This is because call_responder() bypasses the # network marshall/unmarshall steps, where these exceptions would # ordinarily get squashed. 
return Failure(amp.UnknownRemoteError("%s: %s" % ( reflect.qual(error.type), reflect.safe_str(error.value)))) d.addErrback(eb_massage_error) return d are_valid_tls_parameters = MatchesDict({ "tls_localCertificate": IsInstance(ssl.PrivateCertificate), "tls_verifyAuthorities": MatchesAll( IsInstance(collections.Sequence), AllMatch(IsInstance(ssl.Certificate)), ), }) class MockClusterToRegionRPCFixtureBase(fixtures.Fixture): """Patch in a stub region RPC implementation to enable end-to-end testing. This is an abstract base class. Derive concrete fixtures from this by implementing the `connect` method. """ __metaclass__ = ABCMeta starting = None stopping = None def checkServicesClean(self): # If services are running, what do we do with any existing RPC # service? Do we shut it down and patch in? Do we just patch in and # move the running service aside? If it's not running, do we patch # into it without moving it aside? For now, keep it simple and avoid # these questions by requiring that services are stopped and that no # RPC service is globally registered. if provisioningserver.services.running: raise AssertionError( "Please ensure that cluster services are *not* running " "before using this fixture.") if "rpc" in provisioningserver.services.namedServices: raise AssertionError( "Please ensure that no RPC service is registered globally " "before using this fixture.") def asyncStart(self): # Check that no cluster services are running and that there's no RPC # service already registered. self.checkServicesClean() # Patch it into the global services object. self.rpc_service.setName("rpc") self.rpc_service.setServiceParent(provisioningserver.services) # Pretend event-loops only exist for those connections that already # exist. The chicken-and-egg will be resolved by injecting a # connection later on. self.rpc_service._get_rpc_info_url = self._get_rpc_info_url self.rpc_service._fetch_rpc_info = self._fetch_rpc_info # Finally, start the service. If the clock is advanced, this will do # its usual update() calls, but we've patched out _get_rpc_info_url # and _fetch_rpc_info so no traffic will result. self.starting = defer.maybeDeferred(self.rpc_service.startService) def asyncStop(self): if self.starting is None: # Nothing to do; it never started. self.stopping = defer.succeed(None) else: self.starting.cancel() self.stopping = defer.maybeDeferred( self.rpc_service.disownServiceParent) # Ensure the cluster's services will be left in a consistent state. self.stopping.addCallback(callOut, self.checkServicesClean) @asynchronous(timeout=15) def setUp(self): super(MockClusterToRegionRPCFixtureBase, self).setUp() # Use an inert clock with ClusterClientService so it doesn't update # itself except when we ask it to. self.rpc_service = ClusterClientService(Clock()) # Start up, but schedule stop first. self.addCleanup(self.asyncStop) self.asyncStart() # Return the Deferred so that callers from threads outside of the # reactor will block. In the reactor thread, a supporting test # framework may know how to handle this sanely. return self.starting @asynchronous(timeout=15) def cleanUp(self): super(MockClusterToRegionRPCFixtureBase, self).cleanUp() # Return the Deferred so that callers from threads outside of the # reactor will block. In the reactor thread, a supporting test # framework may know how to handle this sanely. return self.stopping def getEventLoopName(self, protocol): """Return `protocol`'s event-loop name. If one has not been set already, one is generated and saved as `protocol.ident`. 
""" try: return protocol.ident except AttributeError: protocol.ident = factory.make_name("eventloop") return protocol.ident def ensureSharedSecret(self): """Make sure the shared-secret is set.""" if get_shared_secret_from_filesystem() is None: set_shared_secret_on_filesystem(factory.make_bytes()) @asynchronous(timeout=5) def addEventLoop(self, protocol): """Add a new stub event-loop using the given `protocol`. The `protocol` should be an instance of `amp.AMP`. :return: py:class:`twisted.test.iosim.IOPump` """ self.ensureSharedSecret() eventloop = self.getEventLoopName(protocol) address = factory.make_ipv4_address(), factory.pick_port() client = ClusterClient(address, eventloop, self.rpc_service) return self.connect(client, protocol) def makeEventLoop(self, *commands): """Make and add a new stub event-loop for the given `commands`. See `make_amp_protocol_factory` for details. """ if region.Identify not in commands: commands = commands + (region.Identify,) if region.Authenticate not in commands: commands = commands + (region.Authenticate,) if region.Register not in commands: commands = commands + (region.Register,) if amp.StartTLS not in commands: commands = commands + (amp.StartTLS,) protocol_factory = make_amp_protocol_factory(*commands) protocol = protocol_factory() eventloop = self.getEventLoopName(protocol) protocol.Identify.return_value = {"ident": eventloop} protocol.Authenticate.side_effect = self._authenticate_with_cluster_key protocol.Register.side_effect = always_succeed_with({}) protocol.StartTLS.return_value = get_tls_parameters_for_region() return protocol, self.addEventLoop(protocol) @abstractmethod def connect(self, cluster, region): """Wire up a connection between cluster and region. :type cluster: `twisted.internet.interfaces.IProtocol` :type region: `twisted.internet.interfaces.IProtocol` :return: ... """ def _get_rpc_info_url(self): """Patch-in for `ClusterClientService._get_rpc_info_url`. Returns a dummy value. """ return sentinel.url def _fetch_rpc_info(self, url): """Patch-in for `ClusterClientService._fetch_rpc_info`. Describes event-loops only for those event-loops already known to the service, thus new connections must be injected into the service. """ connections = self.rpc_service.connections.viewitems() return { "eventloops": { eventloop: [client.address] for eventloop, client in connections }, } def _authenticate_with_cluster_key(self, protocol, message): """Patch-in for `Authenticate` calls. This ought to always return the correct digest because it'll be using the same shared-secret as the cluster. """ return Cluster().authenticate(message) class MockClusterToRegionRPCFixture(MockClusterToRegionRPCFixtureBase): """Patch in a stub region RPC implementation to enable end-to-end testing. Use this in *cluster* tests when you're not running with a reactor, or when you need fine-grained control over IO. This has low overhead and is useful for writing tests where there are obvious points where you can pump IO "by hand". Example usage (assuming `inlineCallbacks`):: fixture = self.useFixture(MockClusterToRegionRPCFixture()) yield fixture.starting # Wait for the fixture to start. protocol, io = fixture.makeEventLoop(region.Identify) protocol.Identify.return_value = defer.succeed({"ident": "foobar"}) client = getRegionClient() result = client(region.Identify) io.flush() # Call this in the reactor thread. self.assertThat(result, ...) """ def connect(self, cluster, region): """Wire up a connection between cluster and region. 
:type cluster: `twisted.internet.interfaces.IProtocol` :type region: `twisted.internet.interfaces.IProtocol` :return: py:class:`twisted.test.iosim.IOPump` """ return iosim.connect( region, iosim.makeFakeServer(region), cluster, iosim.makeFakeClient(cluster), debug=False, # Debugging is useful, but too noisy by default. ) class MockLiveClusterToRegionRPCFixture(MockClusterToRegionRPCFixtureBase): """Patch in a stub region RPC implementation to enable end-to-end testing. This differs from `MockClusterToRegionRPCFixture` in that the connections between the region and the cluster are _live_, by which I mean that they're connected by reactor-managed IO, rather than by an `IOPump`. This means that the reactor must be running in order to use this fixture. Use this in *cluster* tests where the reactor is running, for example when using `MAASTwistedRunTest` or its siblings. There's a slightly greater overhead than when using `MockClusterToRegionRPCFixture`, but it's not huge. You must be careful to follow the usage instructions otherwise you'll be plagued by dirty reactor errors. Example usage (assuming `inlineCallbacks`):: fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop(region.Identify) protocol.Identify.return_value = defer.succeed({"ident": "foobar"}) # This allows the connections to get established via IO through the # reactor. The result of `connecting` is a callable that arranges for # the correct shutdown of the connections being established. self.addCleanup((yield connecting)) client = getRegionClient() result = yield client(region.Identify) self.assertThat(result, ...) """ def setUp(self): self.sockdir = TempDirectory() # Place for UNIX sockets. self.socknames = itertools.imap(unicode, itertools.count(1)) return super(MockLiveClusterToRegionRPCFixture, self).setUp() def asyncStart(self): super(MockLiveClusterToRegionRPCFixture, self).asyncStart() def started(result): self.sockdir.setUp() return result self.starting.addCallback(started) def asyncStop(self): super(MockLiveClusterToRegionRPCFixture, self).asyncStop() def stopped(result): self.sockdir.cleanUp() return result self.stopping.addCallback(stopped) @inlineCallbacks def connect(self, cluster, region): """Wire up a connection between cluster and region. Uses a UNIX socket to very rapidly connect the two ends. :type cluster: `twisted.internet.interfaces.IProtocol` :type region: `twisted.internet.interfaces.IProtocol` """ # Wire up the region and cluster protocols via the sockfile. sockfile = path.join(self.sockdir.path, next(self.socknames)) class RegionFactory(Factory): def buildProtocol(self, addr): return region endpoint_region = endpoints.UNIXServerEndpoint(reactor, sockfile) port = yield endpoint_region.listen(RegionFactory()) endpoint_cluster = endpoints.UNIXClientEndpoint(reactor, sockfile) client = yield endpoints.connectProtocol(endpoint_cluster, cluster) # Wait for the client to be fully connected. Because onReady will have # been capped-off by now (see ClusterClient.connectionMade) this will # not raise any exceptions. In some ways this is convenient because it # allows the resulting issues to be encountered within test code. yield client.ready.get() @inlineCallbacks def shutdown(): # We need to make sure that everything is shutdown correctly. TLS # seems to make this even more important: it complains loudly if # connections are not closed cleanly. An interesting article to # read now is Jono Lange's "How to Disconnect in Twisted, Really" # . 
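            # The steps below: stop listening on the UNIX socket, then
            # close the region's transport, then the client's, waiting
            # on each onConnectionLost so the reactor is left clean.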
yield port.loseConnection() yield port.deferred if region.transport is not None: yield region.transport.loseConnection() yield region.onConnectionLost if client.transport is not None: yield client.transport.loseConnection() yield client.onConnectionLost # Fixtures don't wait for deferred work in clean-up tasks (or anywhere # else), so we can't use `self.addCleanup(shutdown)` here. We need to # get the user to add `shutdown` to the clean-up tasks for the *test*, # on the assumption they're using a test framework that accommodates # deferred work (like testtools with `MAASTwistedRunTest`). returnValue(shutdown) # An iterable of names for new dynamically-created AMP protocol factories. amp_protocol_factory_names = ( "AMPTestProtocol#%d".encode("ascii") % seq for seq in itertools.count(1)) def make_amp_protocol_factory(*commands): """Make a new protocol factory based on `RPCProtocol`.""" def __init__(self): super(cls, self).__init__() self._commandDispatch = self._commandDispatch.copy() for command in commands: # Get a class-level responder, if set. responder = getattr(self, command.commandName, None) if responder is None: # There's no class-level responder, so create an # instance-level responder using a Mock. responder = Mock(name=command.commandName) setattr(self, command.commandName, responder) # Register whichever responder we've found. self._commandDispatch[command.commandName] = (command, responder) name = next(amp_protocol_factory_names) cls = type(name, (RPCProtocol,), {"__init__": __init__}) return cls maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/cluster.crt0000644000000000000000000000522113056115004024053 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIICtTCCAZ0CAXswDQYJKoZIhvcNAQEEBQAwIDEeMBwGA1UEAxQVTUFBUyBSZWdp b24gKlRFU1RJTkcqMB4XDTE0MDIyNjE3NDEyMFoXDTE5MDIyNTE3NDEyMFowITEf MB0GA1UEAxQWTUFBUyBDbHVzdGVyICpURVNUSU5HKjCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBALUhs5iNeoFoLsr5Q/GlgVAOaceE2JII6t8c4wdoWDbQ CuZlXu8ERFfgFREl+xMbo+uM9x4eJasCBl9+2jAkinkpMJM/JyNSIWlw39riH6ZO pMxr4PStt9q1jU8mVxh2lsP+s84LTLpgC/52fvXGm6tQtPyK8U7KTfXGyDl7cXc0 WDHLoaMk9hXimrAUK+BF18WZrdpyF8fhBfdIgveQDFfoGI45hrYnV+iIdskEDgMs Ysk0nfq0SLIsJ6qqfkSzHeXbVK9eLLiAKckEhJT8rZKp/Ponb1QfI3i/2aT0zvk4 qCeGjKDUQNtX/2GqBwA6yA5Hf53CFPscI8xLMNtnYzMCAwEAATANBgkqhkiG9w0B AQQFAAOCAQEAvl56VhC1FEsnXjk4Auj53b+fnVlgKPMfIcW4ATYpA5xi5uWPu8s6 8+4hdDYzIU50o5WjcIQuJLEa6u4Mo4pEggs+c0TZwMW5CqhUvwLgMTRE7hCfLvAe tIo9aKHGczeU2piC1XEGoqCofQ6j/NvKZGWlkyZQPoDszohpT5GvpI6wr8mssysP ZeLhtbl8IOK83YR8wLu7ZDil0hFi0RuYx5nQSCRqxeMdMOIAOHVzQHnndwX6lCXx WaahSErkuaHpeUek1Uq5RoP+wfRBY+VY9gLKIBLGBGNXEvvfsJjmtZ4N8eUnjMsp FUzNelQK//B28o0GmPzTZ2Ro6LwsyAc25A== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC1IbOYjXqBaC7K +UPxpYFQDmnHhNiSCOrfHOMHaFg20ArmZV7vBERX4BURJfsTG6PrjPceHiWrAgZf ftowJIp5KTCTPycjUiFpcN/a4h+mTqTMa+D0rbfatY1PJlcYdpbD/rPOC0y6YAv+ dn71xpurULT8ivFOyk31xsg5e3F3NFgxy6GjJPYV4pqwFCvgRdfFma3achfH4QX3 SIL3kAxX6BiOOYa2J1foiHbJBA4DLGLJNJ36tEiyLCeqqn5Esx3l21SvXiy4gCnJ BISU/K2Sqfz6J29UHyN4v9mk9M75OKgnhoyg1EDbV/9hqgcAOsgOR3+dwhT7HCPM SzDbZ2MzAgMBAAECggEAIzgdWrFPu6/NQWw8WNUskqtqx/+9uI+BU8ajIkKYvHex VuEpF2KGA2wvzHjNHAcI1XlAZ5vHCTxIjvL0a93/CtYGeusRqh2DH6Rx8nZ7XvP1 uUXUpN7OW/lnM5ep4bjVUeDJh2vNDIQ2cNELmLB4flQgDObbSs2tYPe44OxuwJWa PqccZt9ROxXlqvkq5oUQme3Oxm9q826PMQumu+qhLxCFEF3gV5RdOFQbJsWQIelK 1fHTPn7c3FqD1z17XAjKC9g7FHKoI0vfLCWZllDv1oufatx7o5ym2iyeX4gg9KTs sU4c7utEMMJTrs+7qHdl9TGK0q7FugnLQdrSa+/+SQKBgQDX/rmwqB+JOGADeMBW BsV9rONvBSSL2YriVxTAK5noDN4gZ9g8hOBdWbDJIhe+m8y/aE97hWoAxftKHrhK 
xGxX91bVQQZKI6f4Jf3O3pkp5Is1i1H10VBljG2S/CNeq2EPEqFb+AqnMldE0zOs +U+iixOf3n8NbzpL6ihzV2MCZwKBgQDWrfM+WGw4kHbnUL5z4S0eblMwtJfo9+OJ TnYZyvMgV3XOlky0NytcGTZPZQ4z+N9+G5fd4uqMKYQ2covp5yvzvjeJXJbp/tDz x+9kK8fwQQZzkd81Pr0YEWrLU8T0Dik29cfVp0bbF02+K3kiecTT0R6eeND0Cxkp 6mk1ZPNRVQKBgQC9UNSs1A8gFLuDD3l1bWX4K2sEyfsoYLpsfGsBrLWvQGgI+zv6 f3SwHYboI+uv85rYQbM4zs0EdIM2igp57gQQbgg7zkyW+v2kcJgvk8englvJyJ1D 2fyfpR/9buYJeikOLwQjOhsMRQFcn8lxWOvFyiSfQUMXIWB1XX2q6vMrDQKBgDX/ +XCUQ1yTEiWrTMzPNk8yRyC4PIHwYL64CsMAJtnaxhpZ+h9WxURcXynnbIQsNON+ w1uf6YmqI8SiWYlTid282jFmJdvkKeE4SII09Qz8/kdxyOiWyG/WabuNbbxKF5no hwS6F/uiR/7u5oCCir0RHBeWolDQvdqqSR6SLxYlAoGBAKDPau2bOh3r4A+M26CY 8hrsc567JNjKRPVfY6tB3VwlL1b7WaVsnjca96p51EAoIPe+Fs+BXat0iBiq3e00 r0TA7O3atpSejiseCnXmYyVVi0VJD1oKYnfkRYmC3qkFGMpHV5NnJLa/PT9tKuVQ lW8A/d8EkOMLmiEOH0j4BtgI -----END PRIVATE KEY----- maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/doubles.py0000644000000000000000000000471413056115004023675 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test doubles for the region's RPC implementation.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "DummyConnection", "StubOS", ] from provisioningserver.drivers.osystem import OperatingSystem from provisioningserver.rpc.interfaces import IConnection from zope.interface import implementer @implementer(IConnection) class DummyConnection: """A dummy connection. Implements `IConnection`. """ class StubOS(OperatingSystem): """An :py:class:`OperatingSystem` subclass that has canned answers. - The name is capitalised to derive the title. - The first release is the default. - Odd releases (in the order they're specified) require license keys. """ name = title = None def __init__(self, name, releases): """ :param name: A string name, usually all lowercase. :param releases: A list of (name, title) tuples. 
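For example (the names are purely illustrative):: StubOS("fancyos", [("fancy1", "Fancy 1"), ("fancy2", "Fancy 2")]) describes an OS titled "Fancyos" whose default release is "fancy1" and where "fancy2", being at an odd index, requires a license key.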
""" super(StubOS, self).__init__() self.name = name self.title = name.capitalize() self.releases = releases def is_release_supported(self, release): return release in self.releases def get_supported_releases(self): return [name for name, _ in self.releases] def get_default_release(self): if len(self.releases) == 0: return None else: name, _ = self.releases[0] return name def get_release_title(self, release): for name, title in self.releases: if name == release: return title else: return None def format_release_choices(self): raise NotImplementedError() def get_boot_image_purposes(self, arch, subarch, release, label): raise NotImplementedError() def requires_license_key(self, release): for index, (name, _) in enumerate(self.releases): if name == release: return index % 2 == 1 else: return False def get_default_commissioning_release(self): if len(self.releases) >= 2: name, _ = self.releases[1] return name else: return None def get_supported_commissioning_releases(self): return [name for name, _ in self.releases[1:3]] maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/region.crt0000644000000000000000000000521513056115004023660 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIICtDCCAZwCAQEwDQYJKoZIhvcNAQEEBQAwIDEeMBwGA1UEAxQVTUFBUyBSZWdp b24gKlRFU1RJTkcqMB4XDTE0MDIyNjE3NDEyMFoXDTE1MDIyNjE3NDEyMFowIDEe MBwGA1UEAxQVTUFBUyBSZWdpb24gKlRFU1RJTkcqMIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEA1q+LGY7CWiHjOGTBMvvhSK7/ke/fX0TXwxb8nO/1tscO iaWjVfIgLVnPMDZOF38BoYjjcGIjNH2/W4kypqpuqoyST7fjN0plin7zmNZwFGKy BRBKbbPi+jTYy3FXm81pHmJLnJwbfUlKFO9M/sZUDu3QFYrMyo1m8p3/wjFD3+vy S3HXdbw7FuT7tSoIY7SPYAYjMuH0goT914d2O4ZblO1DDRsnvixyAIqF+gX0nF5/ rOlv/QU1rEE9uCUxYhSoB97qoUeETd2axeQa8NSUCw49cAjRlXAz0+y426c8II6O 5xuX42JXYA1tFfiBdSByWOYs+fPUNOLlk+oUN/MoGwIDAQABMA0GCSqGSIb3DQEB BAUAA4IBAQBA3N9gZWIymXqlBMEmN4rV7CWrgfeG6kioDtE2F5HMLVEQ9BuFTLz4 QK+G+N52lUjkuS2GUvA+7nlzGyoVjAdLqj5gPgAPe4kWheueyRsaxrH5rU+0KB1K o3RWS9pSPbugkFU27jM4vumOz2ua1+xxZFsC+mPsYccf1LnhZhp/iGLnueQ1CkSJ bQuopk66GlookZqvN+wUo5bjh3/8NccNnmtuuR5rv/Xy1k/+Vk2lIDF8dE65MR1V 4arGvtmK1i8iHIKIJ0nmk0Y6SppN+3KjeB4iPpQKBdc/s8EiL3WZqVARx47lRGxD Bj9FjbyUAlkl9PQHoZW/2lPpTQXTqFj4 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWr4sZjsJaIeM4 ZMEy++FIrv+R799fRNfDFvyc7/W2xw6JpaNV8iAtWc8wNk4XfwGhiONwYiM0fb9b iTKmqm6qjJJPt+M3SmWKfvOY1nAUYrIFEEpts+L6NNjLcVebzWkeYkucnBt9SUoU 70z+xlQO7dAViszKjWbynf/CMUPf6/JLcdd1vDsW5Pu1KghjtI9gBiMy4fSChP3X h3Y7hluU7UMNGye+LHIAioX6BfScXn+s6W/9BTWsQT24JTFiFKgH3uqhR4RN3ZrF 5Brw1JQLDj1wCNGVcDPT7Ljbpzwgjo7nG5fjYldgDW0V+IF1IHJY5iz589Q04uWT 6hQ38ygbAgMBAAECggEAW1ARBw7chX+yaJMaRbgyqzqbw5PWW2wppXYKfinmRhbG jS9hmLXCj+eKI4SFlKLVq8JQksV7GeF+Wc2yOId3SJ0/wh6By0uegtjafaB/zXvp IhQ6xAxmN2vw5h9QVxl4Y48FgBg57QEWPG9IPXlX2X23KuJ9lo2sQveHCC7yIrQu Ns/ywgtrEBwcK9nSsdNR8OWlwtMRzg8ZuBCBIADiWdw0H3FRfjcyI9+qEas59Vaj fGh/NIg0rYLTmYdbGHY37v+Scsmn578yj6qKfVQCHpsXn9IfV4jyyjcHMKy54DAB mcJSEgCjPe2OGrqHZgDJIo4/BAywXNSOKLP7UA6ZCQKBgQDxltc43LoKhi88rFFo 6XrM//F19nMuCQoPX8aYFNJiD38E8Lhb3I2e2k7p/5oX2nQe0pfYrUlEoIcHWN3k nKR0H5MYJHJO+pbEO2Qnpy7radROKhkMl8HcUCr95NOk71naIEY6RMVbdbp5BQii 4tcGPF1Ti1xKoJXszk9tldNjTwKBgQDjfeA6WMZpNzPWhvfX0tmGFyoriY0V3ieN pmodhjlnWBgSFjN9r2DeA2HEzCHJ7OZSGAsniNOPf1tQiKJCn/AOiUsxkJYb3Svf FVAXgpBnNSFf+nA9sDZoKduPkGGh2v+D5Kd3YvLJXLhTEg+PzkiDB/73g/435LAO djex2K+rdQKBgQDf3Y/4AAlhITB6f488sUX62pysW8Alg3jQAEctu/WrqoJgWJPt g7Wz4sXHbfQjcGmz+h99SC4hqsMVGtMbOc/mTy/l6jgGWY0Fr6dCW1hOCxYRjE8D 7UhV1+/Or5EzQBw51TheXEWpV7GA3RpAngT0oVE1zmiqOp5S/mZ5vKFSXwKBgHqE 
8J578V/5OxHu3sx94mk1UKDiE/oTC3pehgggognmiBA1pMWsDp+DcsRqrEf0LpdI G7nPFyHRlXxqGfmH6eHqT3UCSdX36AjdkaUXzG3JT5BBcPHIVYUEBhvrxqlFKRf/ rbG+uMN6DEuxDnCEyMjJJahojiHMKIWhZZ2C9hmBAoGAAQKXcOPdU3HGUBOihCf1 +Sg9sabFnjSb7TZlFZ6zuf+XxX4+gOB/01tSu048SBsc8kiVXHW0YzuhRM2dkbsd 2G0tmDhM1aqEvVCJJIimZ4FOZRKK/WDub0R//j9iM7IpNvuPIju1MsU+CIo2Ip9B gdiTpWUJj/Qw7vvpREmQPI4= -----END PRIVATE KEY----- maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/tls.py0000644000000000000000000000245513056115004023042 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test helpers for TLS negotiation with AMP.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "get_tls_parameters_for_cluster", "get_tls_parameters_for_region", ] from functools import partial from twisted.internet import ssl from twisted.python import filepath def get_tls_parameters(private_cert_name, trust_cert_name): """get_tls_parameters() Implementation of :py:class:`~twisted.protocols.amp.StartTLS`. """ testing = filepath.FilePath(__file__).parent() with testing.child(private_cert_name).open() as fin: tls_localCertificate = ssl.PrivateCertificate.loadPEM(fin.read()) with testing.child(trust_cert_name).open() as fin: tls_verifyAuthorities = [ ssl.Certificate.loadPEM(fin.read()), ] return { "tls_localCertificate": tls_localCertificate, "tls_verifyAuthorities": tls_verifyAuthorities, } get_tls_parameters_for_cluster = partial( get_tls_parameters, "cluster.crt", "trust.crt") get_tls_parameters_for_region = partial( get_tls_parameters, "region.crt", "trust.crt") maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/testing/trust.crt0000644000000000000000000000174513056115004023562 0ustar 00000000000000-----BEGIN CERTIFICATE----- MIICtDCCAZwCAQEwDQYJKoZIhvcNAQEEBQAwIDEeMBwGA1UEAxQVTUFBUyBSZWdp b24gKlRFU1RJTkcqMB4XDTE0MDIyNjE3NDEyMFoXDTE1MDIyNjE3NDEyMFowIDEe MBwGA1UEAxQVTUFBUyBSZWdpb24gKlRFU1RJTkcqMIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEA1q+LGY7CWiHjOGTBMvvhSK7/ke/fX0TXwxb8nO/1tscO iaWjVfIgLVnPMDZOF38BoYjjcGIjNH2/W4kypqpuqoyST7fjN0plin7zmNZwFGKy BRBKbbPi+jTYy3FXm81pHmJLnJwbfUlKFO9M/sZUDu3QFYrMyo1m8p3/wjFD3+vy S3HXdbw7FuT7tSoIY7SPYAYjMuH0goT914d2O4ZblO1DDRsnvixyAIqF+gX0nF5/ rOlv/QU1rEE9uCUxYhSoB97qoUeETd2axeQa8NSUCw49cAjRlXAz0+y426c8II6O 5xuX42JXYA1tFfiBdSByWOYs+fPUNOLlk+oUN/MoGwIDAQABMA0GCSqGSIb3DQEB BAUAA4IBAQBA3N9gZWIymXqlBMEmN4rV7CWrgfeG6kioDtE2F5HMLVEQ9BuFTLz4 QK+G+N52lUjkuS2GUvA+7nlzGyoVjAdLqj5gPgAPe4kWheueyRsaxrH5rU+0KB1K o3RWS9pSPbugkFU27jM4vumOz2ua1+xxZFsC+mPsYccf1LnhZhp/iGLnueQ1CkSJ bQuopk66GlookZqvN+wUo5bjh3/8NccNnmtuuR5rv/Xy1k/+Vk2lIDF8dE65MR1V 4arGvtmK1i8iHIKIJ0nmk0Y6SppN+3KjeB4iPpQKBdc/s8EiL3WZqVARx47lRGxD Bj9FjbyUAlkl9PQHoZW/2lPpTQXTqFj4 -----END CERTIFICATE----- maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/__init__.py0000644000000000000000000000000013056115004023444 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_arguments.py0000644000000000000000000001210413056115004024761 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
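# A rough sketch of the round-trip contract the tests below exercise,
# assuming the argument classes follow the usual AMP Argument API of
# toString/fromString (byte values here are illustrative):
#
#   argument = arguments.Bytes()
#   encoded = argument.toString(b"\x00\x01")  # serialised for the wire
#   assert isinstance(encoded, bytes)
#   assert argument.fromString(encoded) == b"\x00\x01"  # and back again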
"""Test AMP argument classes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random import zlib from maastesting.factory import factory from maastesting.testcase import MAASTestCase from provisioningserver.rpc import arguments from testtools import ExpectedException from testtools.matchers import ( Equals, IsInstance, LessThan, ) from twisted.protocols import amp class TestBytes(MAASTestCase): def test_round_trip(self): argument = arguments.Bytes() example = factory.make_bytes() encoded = argument.toString(example) self.assertThat(encoded, IsInstance(bytes)) decoded = argument.fromString(encoded) self.assertThat(decoded, Equals(example)) def test_error_when_input_is_not_a_byte_string(self): with ExpectedException(TypeError, "^Not a byte string: <.*"): arguments.Bytes().toString(object()) class TestChoice(MAASTestCase): def test_round_trip(self): choices = { factory.make_name("name"): factory.make_bytes() for _ in xrange(10) } argument = arguments.Choice(choices) choice = random.choice(list(choices)) encoded = argument.toString(choice) self.assertThat(encoded, IsInstance(bytes)) decoded = argument.fromString(encoded) self.assertThat(decoded, Equals(choice)) def test_error_when_input_is_not_in_choices(self): with ExpectedException(KeyError, "^ 1. "num_connections": 0, "expected": ClusterClientService.INTERVAL_LOW, }), ("fewer-connections-than-event-loops", { "time_running": 1000, "num_eventloops": 2, # anything > num_connections. "num_connections": 1, # anything > 0. "expected": ClusterClientService.INTERVAL_MID, }), ("default", { "time_running": 1000, "num_eventloops": 3, # same as num_connections. "num_connections": 3, # same as num_eventloops. "expected": ClusterClientService.INTERVAL_HIGH, }), ) def make_inert_client_service(self): service = ClusterClientService(Clock()) # ClusterClientService's superclass, TimerService, creates a # LoopingCall with now=True. We neuter it here to allow # observation of the behaviour of _update_interval() for # example. 
service.call = (lambda: None, (), {}) return service def test__calculate_interval(self): service = self.make_inert_client_service() service.startService() service.clock.advance(self.time_running) self.assertEqual( self.expected, service._calculate_interval( self.num_eventloops, self.num_connections)) class TestClusterClient(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestClusterClient, self).setUp() self.useFixture(ClusterConfigurationFixture( maas_url=factory.make_simple_http_url(), cluster_uuid=factory.make_UUID())) def make_running_client(self): client = clusterservice.ClusterClient( address=("example.com", 1234), eventloop="eventloop:pid=12345", service=ClusterClientService(Clock())) client.service.running = True return client def patch_authenticate_for_success(self, client): authenticate = self.patch_autospec(client, "authenticateRegion") authenticate.side_effect = always_succeed_with(True) def patch_authenticate_for_failure(self, client): authenticate = self.patch_autospec(client, "authenticateRegion") authenticate.side_effect = always_succeed_with(False) def patch_authenticate_for_error(self, client, exception): authenticate = self.patch_autospec(client, "authenticateRegion") authenticate.side_effect = always_fail_with(exception) def patch_register_for_success(self, client): register = self.patch_autospec(client, "registerWithRegion") register.side_effect = always_succeed_with(True) def patch_register_for_failure(self, client): register = self.patch_autospec(client, "registerWithRegion") register.side_effect = always_succeed_with(False) def patch_register_for_error(self, client, exception): register = self.patch_autospec(client, "registerWithRegion") register.side_effect = always_fail_with(exception) def test_interfaces(self): client = self.make_running_client() # transport.getHandle() is used by AMP._getPeerCertificate, which we # call indirectly via the peerCertificate attribute in IConnection. self.patch(client, "transport") verifyObject(IConnection, client) def test_ident(self): client = self.make_running_client() client.eventloop = self.getUniqueString() self.assertThat(client.ident, Equals(client.eventloop)) def test_connecting(self): client = self.make_running_client() self.patch_authenticate_for_success(client) self.patch_register_for_success(client) self.assertEqual(client.service.connections, {}) wait_for_authenticated = client.authenticated.get() self.assertThat(wait_for_authenticated, IsUnfiredDeferred()) wait_for_ready = client.ready.get() self.assertThat(wait_for_ready, IsUnfiredDeferred()) client.connectionMade() # authenticated has been set to True, denoting a successfully # authenticated region. self.assertTrue(extract_result(wait_for_authenticated)) # ready has been set with the name of the event-loop. self.assertEqual(client.eventloop, extract_result(wait_for_ready)) self.assertEqual( client.service.connections, {client.eventloop: client}) def test_disconnects_when_there_is_an_existing_connection(self): client = self.make_running_client() # Pretend that a connection already exists for this address. client.service.connections[client.eventloop] = sentinel.connection # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated was set to None to signify that authentication was not # attempted. 
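# (`authenticated` and `ready` are DeferredValue holders: `.get()` returns a Deferred that has already fired by this point, which is why extract_result() can read them synchronously.)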
self.assertIsNone(extract_result(client.authenticated.get())) # ready was set with KeyError to signify that a connection to the # same event-loop already existed. self.assertRaises(KeyError, extract_result, client.ready.get()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual( client.service.connections, {client.eventloop: sentinel.connection}) self.assertFalse(client.connected) self.assertIsNone(client.transport) def test_disconnects_when_service_is_not_running(self): client = self.make_running_client() client.service.running = False # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated was set to None to signify that authentication was not # attempted. self.assertIsNone(extract_result(client.authenticated.get())) # ready was set with RuntimeError to signify that the client # service was not running. self.assertRaises(RuntimeError, extract_result, client.ready.get()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) def test_disconnects_when_authentication_fails(self): client = self.make_running_client() self.patch_authenticate_for_failure(client) self.patch_register_for_success(client) # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated was set to False. self.assertIs(False, extract_result(client.authenticated.get())) # ready was set with AuthenticationFailed. self.assertRaises( exceptions.AuthenticationFailed, extract_result, client.ready.get()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) def test_disconnects_when_authentication_errors(self): client = self.make_running_client() exception_type = factory.make_exception_type() self.patch_authenticate_for_error(client, exception_type()) self.patch_register_for_success(client) logger = self.useFixture(TwistedLoggerFixture()) # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated errbacks with the error. self.assertRaises( exception_type, extract_result, client.authenticated.get()) # ready also errbacks with the same error. self.assertRaises( exception_type, extract_result, client.ready.get()) # The log was written to. self.assertDocTestMatches( """... Event-loop 'eventloop:pid=12345' handshake failed; dropping connection. Traceback (most recent call last):... """, logger.dump()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) def test_disconnects_when_registration_fails(self): client = self.make_running_client() self.patch_authenticate_for_success(client) self.patch_register_for_failure(client) # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated was set to True because it succeeded. self.assertIs(True, extract_result(client.authenticated.get())) # ready was set with RegistrationFailed.
self.assertRaises( exceptions.RegistrationFailed, extract_result, client.ready.get()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) def test_disconnects_when_registration_errors(self): client = self.make_running_client() exception_type = factory.make_exception_type() self.patch_authenticate_for_success(client) self.patch_register_for_error(client, exception_type()) logger = self.useFixture(TwistedLoggerFixture()) # Connect via an in-memory transport. transport = StringTransportWithDisconnection() transport.protocol = client client.makeConnection(transport) # authenticated was set to True because it succeeded. self.assertIs(True, extract_result(client.authenticated.get())) # ready was set with the exception we made. self.assertRaises(exception_type, extract_result, client.ready.get()) # The log was written to. self.assertDocTestMatches( """... Event-loop 'eventloop:pid=12345' handshake failed; dropping connection. Traceback (most recent call last):... """, logger.dump()) # The connections list is unchanged because the new connection # immediately disconnects. self.assertEqual(client.service.connections, {}) self.assertFalse(client.connected) def test_handshakeFailed_does_not_log_when_connection_is_closed(self): client = self.make_running_client() with TwistedLoggerFixture() as logger: client.handshakeFailed(Failure(ConnectionClosed())) # ready was set with ConnectionClosed. self.assertRaises( ConnectionClosed, extract_result, client.ready.get()) # Nothing was logged. self.assertEqual("", logger.output) @inlineCallbacks def test_secureConnection_calls_StartTLS_and_Identify(self): client = self.make_running_client() callRemote = self.patch(client, "callRemote") callRemote_return_values = [ {}, # In response to a StartTLS call. {"ident": client.eventloop}, # Identify. ] callRemote.side_effect = lambda cmd, **kwargs: ( callRemote_return_values.pop(0)) transport = self.patch(client, "transport") logger = self.useFixture(TwistedLoggerFixture()) yield client.secureConnection() self.assertThat( callRemote, MockCallsMatch( call(amp.StartTLS, **client.get_tls_parameters()), call(region.Identify), )) # The connection is not dropped. self.assertThat(transport.loseConnection, MockNotCalled()) # The certificates used are echoed to the log. self.assertDocTestMatches( """\ Host certificate: ... --- Peer certificate: ... """, logger.dump()) @inlineCallbacks def test_secureConnection_disconnects_if_ident_does_not_match(self): client = self.make_running_client() callRemote = self.patch(client, "callRemote") callRemote.side_effect = [ {}, # In response to a StartTLS call. {"ident": "bogus-name"}, # Identify. ] transport = self.patch(client, "transport") logger = self.useFixture(TwistedLoggerFixture()) yield client.secureConnection() # The connection is dropped. self.assertThat( transport.loseConnection, MockCalledOnceWith()) # The log explains why. self.assertDocTestMatches( """\ The remote event-loop identifies itself as bogus-name, but eventloop:pid=12345 was expected. """, logger.dump()) # XXX: blake_r 2015-02-26 bug=1426089: Failing for an unknown # reason. This is commented out instead of using @skip because # running MAASTwistedRunTest will cause twisted to complain.
# @inlineCallbacks # def test_secureConnection_end_to_end(self): # fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) # protocol, connecting = fixture.makeEventLoop() # self.addCleanup((yield connecting)) # client = yield getRegionClient() # # XXX: Expose secureConnection() in the client. # yield client._conn.secureConnection() # self.assertTrue(client.isSecure()) def test_authenticateRegion_accepts_matching_digests(self): client = self.make_running_client() def calculate_digest(_, message): # Use the cluster's own authentication responder. response = Cluster().authenticate(message) return succeed(response) callRemote = self.patch_autospec(client, "callRemote") callRemote.side_effect = calculate_digest d = client.authenticateRegion() self.assertTrue(extract_result(d)) def test_authenticateRegion_rejects_non_matching_digests(self): client = self.make_running_client() def calculate_digest(_, message): # Return some nonsense. response = { "digest": factory.make_bytes(), "salt": factory.make_bytes(), } return succeed(response) callRemote = self.patch_autospec(client, "callRemote") callRemote.side_effect = calculate_digest d = client.authenticateRegion() self.assertFalse(extract_result(d)) def test_authenticateRegion_propagates_errors(self): client = self.make_running_client() exception_type = factory.make_exception_type() callRemote = self.patch_autospec(client, "callRemote") callRemote.return_value = fail(exception_type()) d = client.authenticateRegion() self.assertRaises(exception_type, extract_result, d) @inlineCallbacks def test_authenticateRegion_end_to_end(self): fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop() self.addCleanup((yield connecting)) yield getRegionClient() self.assertThat( protocol.Authenticate, MockCalledOnceWith(protocol, message=ANY)) def test_registerWithRegion_returns_True_when_accepted(self): client = self.make_running_client() callRemote = self.patch_autospec(client, "callRemote") callRemote.side_effect = always_succeed_with({}) logger = self.useFixture(TwistedLoggerFixture()) d = client.registerWithRegion() self.assertTrue(extract_result(d)) self.assertDocTestMatches( "Cluster '...' registered (via ...).", logger.output) def test_registerWithRegion_returns_False_when_rejected(self): client = self.make_running_client() callRemote = self.patch_autospec(client, "callRemote") callRemote.return_value = fail(exceptions.CannotRegisterCluster()) logger = self.useFixture(TwistedLoggerFixture()) d = client.registerWithRegion() self.assertFalse(extract_result(d)) self.assertDocTestMatches( "Cluster '...' 
REJECTED by the region (via ...).", logger.output) def test_registerWithRegion_propagates_errors(self): client = self.make_running_client() exception_type = factory.make_exception_type() callRemote = self.patch_autospec(client, "callRemote") callRemote.return_value = fail(exception_type()) d = client.registerWithRegion() self.assertRaises(exception_type, extract_result, d) @inlineCallbacks def test_registerWithRegion_end_to_end(self): maas_url = factory.make_simple_http_url() cluster_uuid = factory.make_UUID() self.useFixture(ClusterConfigurationFixture( maas_url=maas_url, cluster_uuid=cluster_uuid)) fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop() self.addCleanup((yield connecting)) yield getRegionClient() self.assertThat( protocol.Register, MockCalledOnceWith( protocol, uuid=cluster_uuid, networks=discover_networks(), url=urlparse(maas_url), ip_addr_json=get_ip_addr_json())) class TestClusterProtocol_ListSupportedArchitectures(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.ListSupportedArchitectures.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_returns_architectures(self): architectures = yield call_responder( Cluster(), cluster.ListSupportedArchitectures, {}) # Assert that one of the built-in architectures is in the data # returned by ListSupportedArchitectures. self.assertIn( { 'name': 'i386/generic', 'description': 'i386', }, architectures['architectures']) class TestClusterProtocol_ListOperatingSystems(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.ListOperatingSystems.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_returns_oses(self): # Patch in some operating systems with some randomised data. See # StubOS for details of the rules that are used to populate the # non-random elements. operating_systems = [ StubOS(factory.make_name("os"), releases=[ (factory.make_name("name"), factory.make_name("title")) for _ in range(randint(2, 5)) ]) for _ in range(randint(2, 5)) ] self.patch( osystems_rpc_module, "OperatingSystemRegistry", [(os.name, os) for os in operating_systems]) osystems = yield call_responder( Cluster(), cluster.ListOperatingSystems, {}) # The fully-populated output from gen_operating_systems() sent # back over the wire. 
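# (gen_operating_systems() yields its results, and their "releases" values, lazily; both are listified below so the comparison matches the plain lists that arrive in the AMP response.)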
expected_osystems = list(gen_operating_systems()) for expected_osystem in expected_osystems: expected_osystem["releases"] = list(expected_osystem["releases"]) expected = {"osystems": expected_osystems} self.assertEqual(expected, osystems) class TestClusterProtocol_GetOSReleaseTitle(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.GetOSReleaseTitle.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_calls_get_os_release_title(self): title = factory.make_name('title') get_os_release_title = self.patch( clusterservice, "get_os_release_title") get_os_release_title.return_value = title arguments = { "osystem": factory.make_name("osystem"), "release": factory.make_name("release"), } observed = yield call_responder( Cluster(), cluster.GetOSReleaseTitle, arguments) expected = {"title": title} self.assertEqual(expected, observed) # The arguments are passed to the responder positionally. self.assertThat(get_os_release_title, MockCalledOnceWith( arguments["osystem"], arguments["release"])) @inlineCallbacks def test_exception_when_os_does_not_exist(self): # A remote NoSuchOperatingSystem exception is re-raised locally. get_os_release_title = self.patch( clusterservice, "get_os_release_title") get_os_release_title.side_effect = exceptions.NoSuchOperatingSystem() arguments = { "osystem": factory.make_name("osystem"), "release": factory.make_name("release"), } with ExpectedException(exceptions.NoSuchOperatingSystem): yield call_responder( Cluster(), cluster.GetOSReleaseTitle, arguments) class TestClusterProtocol_ValidateLicenseKey(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.ValidateLicenseKey.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_calls_validate_license_key(self): validate_license_key = self.patch( clusterservice, "validate_license_key") validate_license_key.return_value = factory.pick_bool() arguments = { "osystem": factory.make_name("osystem"), "release": factory.make_name("release"), "key": factory.make_name("key"), } observed = yield call_responder( Cluster(), cluster.ValidateLicenseKey, arguments) expected = {"is_valid": validate_license_key.return_value} self.assertEqual(expected, observed) # The arguments are passed to the responder positionally. self.assertThat(validate_license_key, MockCalledOnceWith( arguments["osystem"], arguments["release"], arguments["key"])) @inlineCallbacks def test_exception_when_os_does_not_exist(self): # A remote NoSuchOperatingSystem exception is re-raised locally. 
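# (AMP transports only exception types declared in a command's `errors` mapping; NoSuchOperatingSystem is evidently declared for this command, which is why the remote failure surfaces locally as the same class.)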
validate_license_key = self.patch( clusterservice, "validate_license_key") validate_license_key.side_effect = exceptions.NoSuchOperatingSystem() arguments = { "osystem": factory.make_name("osystem"), "release": factory.make_name("release"), "key": factory.make_name("key"), } with ExpectedException(exceptions.NoSuchOperatingSystem): yield call_responder( Cluster(), cluster.ValidateLicenseKey, arguments) class TestClusterProtocol_GetPreseedData(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def make_arguments(self): return { "osystem": factory.make_name("osystem"), "preseed_type": factory.make_name("preseed_type"), "node_system_id": factory.make_name("system_id"), "node_hostname": factory.make_name("hostname"), "consumer_key": factory.make_name("consumer_key"), "token_key": factory.make_name("token_key"), "token_secret": factory.make_name("token_secret"), "metadata_url": urlparse( "https://%s/path/to/metadata" % factory.make_hostname()), } def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.GetPreseedData.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_calls_get_preseed_data(self): get_preseed_data = self.patch(clusterservice, "get_preseed_data") get_preseed_data.return_value = factory.make_name("data") arguments = self.make_arguments() observed = yield call_responder( Cluster(), cluster.GetPreseedData, arguments) expected = {"data": get_preseed_data.return_value} self.assertEqual(expected, observed) # The arguments are passed to the responder positionally. self.assertThat(get_preseed_data, MockCalledOnceWith( arguments["osystem"], arguments["preseed_type"], arguments["node_system_id"], arguments["node_hostname"], arguments["consumer_key"], arguments["token_key"], arguments["token_secret"], arguments["metadata_url"])) @inlineCallbacks def test_exception_when_os_does_not_exist(self): # A remote NoSuchOperatingSystem exception is re-raised locally. get_preseed_data = self.patch( clusterservice, "get_preseed_data") get_preseed_data.side_effect = exceptions.NoSuchOperatingSystem() arguments = self.make_arguments() with ExpectedException(exceptions.NoSuchOperatingSystem): yield call_responder( Cluster(), cluster.GetPreseedData, arguments) @inlineCallbacks def test_exception_when_preseed_not_implemented(self): # A remote NotImplementedError exception is re-raised locally. # Choose an operating system which has not overridden the # default compose_preseed. 
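# (The next() call below scans the registry with a generator expression and returns the first such driver; a StopIteration here would mean every registered OS overrides compose_preseed.)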
osystem_name = next( osystem_name for osystem_name, osystem in OperatingSystemRegistry if osystem.compose_preseed == OperatingSystem.compose_preseed) arguments = self.make_arguments() arguments["osystem"] = osystem_name with ExpectedException(exceptions.NoSuchOperatingSystem): yield call_responder( Cluster(), cluster.GetPreseedData, arguments) class TestClusterProtocol_PowerOn_PowerOff(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) scenarios = ( ("power-on", { "command": cluster.PowerOn, "expected_power_change": "on", }), ("power-off", { "command": cluster.PowerOff, "expected_power_change": "off", }), ) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder(self.command.commandName) self.assertIsNotNone(responder) def test_executes_maybe_change_power_state(self): maybe_change_power_state = self.patch( clusterservice, "maybe_change_power_state") system_id = factory.make_name("system_id") hostname = factory.make_name("hostname") power_type = factory.make_name("power_type") context = { factory.make_name("name"): factory.make_name("value"), } d = call_responder(Cluster(), self.command, { "system_id": system_id, "hostname": hostname, "power_type": power_type, "context": context, }) def check(response): self.assertThat( maybe_change_power_state, MockCalledOnceWith( system_id, hostname, power_type, power_change=self.expected_power_change, context=context)) return d.addCallback(check) def test_power_on_can_propagate_UnknownPowerType(self): self.patch(clusterservice, "maybe_change_power_state").side_effect = ( UnknownPowerType) d = call_responder(Cluster(), self.command, { "system_id": "id", "hostname": "hostname", "power_type": "type", "context": {}, }) # If the call doesn't fail then we have a test failure; we're # *expecting* UnknownPowerType to be raised. d.addCallback(self.fail) def check(failure): failure.trap(UnknownPowerType) return d.addErrback(check) def test_power_on_can_propagate_NotImplementedError(self): self.patch(clusterservice, "maybe_change_power_state").side_effect = ( NotImplementedError) d = call_responder(Cluster(), self.command, { "system_id": "id", "hostname": "hostname", "power_type": "type", "context": {}, }) # If the call doesn't fail then we have a test failure; we're # *expecting* NotImplementedError to be raised. d.addCallback(self.fail) def check(failure): failure.trap(NotImplementedError) return d.addErrback(check) def test_power_on_can_propagate_PowerActionFail(self): self.patch(clusterservice, "maybe_change_power_state").side_effect = ( PowerActionFail) d = call_responder(Cluster(), self.command, { "system_id": "id", "hostname": "hostname", "power_type": "type", "context": {}, }) # If the call doesn't fail then we have a test failure; we're # *expecting* PowerActionFail to be raised. d.addCallback(self.fail) def check(failure): failure.trap(PowerActionFail) return d.addErrback(check) def test_power_on_can_propagate_PowerActionAlreadyInProgress(self): self.patch(clusterservice, "maybe_change_power_state").side_effect = ( exceptions.PowerActionAlreadyInProgress) d = call_responder(Cluster(), self.command, { "system_id": "id", "hostname": "hostname", "power_type": "type", "context": {}, }) # If the call doesn't fail then we have a test failure; we're # *expecting* PowerActionAlreadyInProgress to be raised.
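# (Adding self.fail as the callback turns an unexpected success into a test failure, while failure.trap() in the errback lets only the expected exception type pass and re-raises anything else.)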
d.addCallback(self.fail) def check(failure): failure.trap(exceptions.PowerActionAlreadyInProgress) return d.addErrback(check) class TestClusterProtocol_PowerQuery(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.PowerQuery.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_returns_power_state(self): state = random.choice(['on', 'off']) perform_power_query = self.patch( power_module.query, "perform_power_query") perform_power_query.return_value = state # During the transition from template-based power drivers to Python # drivers, alias perform_power_driver_query to perform_power_query. self.patch( power_module.query, "perform_power_driver_query", perform_power_query) # Intercept calls to report the status. report_power_state = self.patch( power_module.query, "report_power_state") power_type = random.choice(QUERY_POWER_TYPES) arguments = { 'system_id': factory.make_name('system'), 'hostname': factory.make_name('hostname'), 'power_type': power_type, 'context': factory.make_name('context'), } # Make sure power driver doesn't check for installed packages. power_driver = power_drivers_by_name.get(power_type) if power_driver: self.patch_autospec( power_driver, "detect_missing_packages").return_value = [] observed = yield call_responder( Cluster(), cluster.PowerQuery, arguments) self.assertEqual({'state': state}, observed) self.assertThat( perform_power_query, MockCalledOnceWith( arguments['system_id'], arguments['hostname'], arguments['power_type'], arguments['context'])) # The region is NOT told about the change. self.assertThat(report_power_state, MockNotCalled()) class TestClusterProtocol_ConfigureDHCP(MAASTestCase): scenarios = ( ("DHCPv4", { "dhcp_server": (dhcp, "DHCPv4Server"), "command": cluster.ConfigureDHCPv4, "make_network": factory.make_ipv4_network, }), ("DHCPv6", { "dhcp_server": (dhcp, "DHCPv6Server"), "command": cluster.ConfigureDHCPv6, "make_network": factory.make_ipv6_network, }), ) run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test__is_registered(self): self.assertIsNotNone( Cluster().locateResponder(self.command.commandName)) @inlineCallbacks def test__executes_configure_dhcp(self): DHCPServer = self.patch_autospec(*self.dhcp_server) configure = self.patch_autospec(dhcp, "configure") omapi_key = factory.make_name('key') subnet_configs = [make_subnet_config()] yield call_responder(Cluster(), self.command, { 'omapi_key': omapi_key, 'subnet_configs': subnet_configs, }) self.assertThat(DHCPServer, MockCalledOnceWith(omapi_key)) self.assertThat(configure, MockCalledOnceWith( DHCPServer.return_value, subnet_configs)) @inlineCallbacks def test__limits_concurrency(self): self.patch_autospec(*self.dhcp_server) def check_dhcp_locked(server, subnet_configs): self.assertTrue(concurrency.dhcp.locked) # While we're here, check this is *not* the IO thread. 
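# (dhcp.configure is expected to run in a worker thread while holding the `concurrency.dhcp` lock, hence the pair of checks here.)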
self.expectThat(isInIOThread(), Is(False)) self.patch(dhcp, "configure", check_dhcp_locked) self.assertFalse(concurrency.dhcp.locked) yield call_responder(Cluster(), self.command, { 'omapi_key': factory.make_name('key'), 'subnet_configs': [], }) self.assertFalse(concurrency.dhcp.locked) @inlineCallbacks def test__propagates_CannotConfigureDHCP(self): configure = self.patch_autospec(dhcp, "configure") configure.side_effect = ( exceptions.CannotConfigureDHCP("Deliberate failure")) omapi_key = factory.make_name('key') network = self.make_network() ip_low, ip_high = factory.make_ip_range(network) subnet_configs = [make_subnet_config()] with ExpectedException(exceptions.CannotConfigureDHCP): yield call_responder(Cluster(), self.command, { 'omapi_key': omapi_key, 'subnet_configs': subnet_configs, }) class TestClusterProtocol_CreateHostMaps(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.CreateHostMaps.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_executes_create_host_maps(self): create_host_maps = self.patch(clusterservice, "create_host_maps") mappings = [ {"ip_address": factory.make_ipv4_address(), "mac_address": factory.make_mac_address()} for _ in range(2) ] shared_key = factory.make_name("shared_key") yield call_responder(Cluster(), cluster.CreateHostMaps, { "mappings": mappings, "shared_key": shared_key, }) self.assertThat( create_host_maps, MockCalledOnceWith( mappings, shared_key)) @inlineCallbacks def test__limits_concurrency(self): def check_dhcp_locked(mappings, shared_key): self.assertTrue(concurrency.dhcp.locked) # While we're here, check this is *not* the IO thread. self.expectThat(isInIOThread(), Is(False)) self.patch(clusterservice, "create_host_maps", check_dhcp_locked) self.assertFalse(concurrency.dhcp.locked) yield call_responder(Cluster(), cluster.CreateHostMaps, { "mappings": {}, "shared_key": factory.make_name("key"), }) self.assertFalse(concurrency.dhcp.locked) class TestClusterProtocol_RemoveHostMaps(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test_is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.RemoveHostMaps.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_executes_remove_host_maps(self): remove_host_maps = self.patch(clusterservice, "remove_host_maps") ip_addresses = [factory.make_ipv4_address() for _ in range(2)] shared_key = factory.make_name("shared_key") yield call_responder(Cluster(), cluster.RemoveHostMaps, { "ip_addresses": ip_addresses, "shared_key": shared_key, }) self.assertThat( remove_host_maps, MockCalledOnceWith( ip_addresses, shared_key)) @inlineCallbacks def test__limits_concurrency(self): def check_dhcp_locked(ip_addresses, shared_key): self.assertTrue(concurrency.dhcp.locked) # While we're here, check this is *not* the IO thread. 
self.expectThat(isInIOThread(), Is(False)) self.patch(clusterservice, "remove_host_maps", check_dhcp_locked) self.assertFalse(concurrency.dhcp.locked) yield call_responder(Cluster(), cluster.RemoveHostMaps, { "ip_addresses": [], "shared_key": factory.make_name("key"), }) self.assertFalse(concurrency.dhcp.locked) class TestClusterProtocol_StartMonitors(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.StartMonitors.commandName) self.assertIsNotNone(responder) def test__executes_start_monitors(self): deadline = datetime.now(amp.utc) + timedelta(seconds=10) monitors = [{ "deadline": deadline, "context": factory.make_name("ctx"), "id": factory.make_name("id")}] d = call_responder( Cluster(), cluster.StartMonitors, {"monitors": monitors}) self.addCleanup(cancel_monitor, monitors[0]["id"]) self.assertTrue(d.called) self.assertThat(running_monitors, Contains(monitors[0]["id"])) class TestClusterProtocol_CancelMonitor(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.CancelMonitor.commandName) self.assertIsNotNone(responder) def test__executes_cancel_monitor(self): deadline = datetime.now(amp.utc) + timedelta(seconds=10) monitors = [{ "deadline": deadline, "context": factory.make_name("ctx"), "id": factory.make_name("id")}] call_responder( Cluster(), cluster.StartMonitors, {"monitors": monitors}) call_responder( Cluster(), cluster.CancelMonitor, {"id": monitors[0]["id"]}) self.assertThat(running_monitors, Not(Contains(monitors[0]["id"]))) class TestClusterProtocol_EvaluateTag(MAASTestCase): run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.EvaluateTag.commandName) self.assertIsNotNone(responder) @inlineCallbacks def test_happy_path(self): self.useFixture(ClusterConfigurationFixture()) # Prevent real work being done, which would involve HTTP calls. 
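# (patch_autospec installs a mock that still enforces the real function's signature, so the responder runs end-to-end without any region API traffic.)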
self.patch_autospec(tags, "process_node_tags") response = yield call_responder( Cluster(), cluster.EvaluateTag, { "tag_name": "all-nodes", "tag_definition": "//*", "tag_nsmap": [ {"prefix": "foo", "uri": "http://foo.example.com/"}, ], "credentials": "abc:def:ghi", }) self.assertEqual({}, response) @inlineCallbacks def test__calls_through_to_evaluate_tag_helper(self): evaluate_tag = self.patch_autospec(clusterservice, "evaluate_tag") tag_name = factory.make_name("tag-name") tag_definition = factory.make_name("tag-definition") tag_ns_prefix = factory.make_name("tag-ns-prefix") tag_ns_uri = factory.make_name("tag-ns-uri") consumer_key = factory.make_name("ckey") resource_token = factory.make_name("rtok") resource_secret = factory.make_name("rsec") credentials = convert_tuple_to_string( (consumer_key, resource_token, resource_secret)) yield call_responder( Cluster(), cluster.EvaluateTag, { "tag_name": tag_name, "tag_definition": tag_definition, "tag_nsmap": [ {"prefix": tag_ns_prefix, "uri": tag_ns_uri}, ], "credentials": credentials, }) self.assertThat(evaluate_tag, MockCalledOnceWith( tag_name, tag_definition, {tag_ns_prefix: tag_ns_uri}, (consumer_key, resource_token, resource_secret), )) class TestClusterProtocol_AddVirsh(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.AddVirsh.commandName) self.assertIsNotNone(responder) def test__calls_deferToThread_with_probe_virsh_and_enlist(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') poweraddr = factory.make_name('poweraddr') password = factory.make_name('password') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVirsh, { "user": user, "poweraddr": poweraddr, "password": password, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_virsh_and_enlist, user, poweraddr, password, prefix_filter, True)) def test__password_is_optional(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') poweraddr = factory.make_name('poweraddr') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVirsh, { "user": user, "poweraddr": poweraddr, "password": None, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_virsh_and_enlist, user, poweraddr, None, prefix_filter, True)) def test__can_be_called_without_password_key(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') poweraddr = factory.make_name('poweraddr') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVirsh, { "user": user, "poweraddr": poweraddr, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_virsh_and_enlist, user, poweraddr, None, prefix_filter, True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) user = factory.make_name('user') poweraddr = factory.make_name('poweraddr') password = factory.make_name('password') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), 
cluster.AddVirsh, { "user": user, "poweraddr": poweraddr, "password": password, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "virsh", fake_error)) class TestClusterProtocol_AddVMware(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.AddVMware.commandName) self.assertIsNotNone(responder) def test__calls_deferToThread_with_probe_vmware_and_enlist(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') host = factory.make_ip_address() username = factory.make_name('username') password = factory.make_name('password') port = random.choice([80, 443, 8080, 8443]) protocol = random.choice(["http", "https"]) prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVMware, { "user": user, "host": host, "username": username, "password": password, "port": port, "protocol": protocol, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_vmware_and_enlist, user, host, username, password, port=port, protocol=protocol, prefix_filter=prefix_filter, accept_all=True)) def test__port_and_protocol_are_optional(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') host = factory.make_ip_address() username = factory.make_name('username') password = factory.make_name('password') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVMware, { "user": user, "host": host, "username": username, "password": password, "port": None, "protocol": None, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_vmware_and_enlist, user, host, username, password, port=None, protocol=None, prefix_filter=prefix_filter, accept_all=True)) def test__can_be_called_without_port_or_protocol_key(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') host = factory.make_ip_address() username = factory.make_name('username') password = factory.make_name('password') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVMware, { "user": user, "host": host, "username": username, "password": password, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_vmware_and_enlist, user, host, username, password, port=None, protocol=None, prefix_filter=prefix_filter, accept_all=True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) user = factory.make_name('user') host = factory.make_ip_address() username = factory.make_name('username') password = factory.make_name('password') prefix_filter = factory.make_name('prefix_filter') call_responder(Cluster(), cluster.AddVMware, { "user": user, "host": host, "username": username, "password": password, "prefix_filter": prefix_filter, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "VMware", fake_error)) class 
TestClusterProtocol_AddSeaMicro15k(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.AddSeaMicro15k.commandName) self.assertIsNotNone(responder) def test__calls_find_ip_via_arp(self): # Prevent any actual probing from happening. self.patch_autospec( clusterservice, 'deferToThread') find_ip_via_arp = self.patch_autospec( clusterservice, 'find_ip_via_arp') find_ip_via_arp.return_value = factory.make_ipv4_address() user = factory.make_name('user') mac = factory.make_mac_address() username = factory.make_name('user') password = factory.make_name('password') power_control = factory.make_name('power_control') call_responder(Cluster(), cluster.AddSeaMicro15k, { "user": user, "mac": mac, "username": username, "password": password, "power_control": power_control, "accept_all": True, }) self.assertThat( find_ip_via_arp, MockCalledOnceWith(mac)) @inlineCallbacks def test__raises_and_logs_warning_if_no_ip_found_for_mac(self): maaslog = self.patch(clusterservice, 'maaslog') find_ip_via_arp = self.patch_autospec( clusterservice, 'find_ip_via_arp') find_ip_via_arp.return_value = None user = factory.make_name('user') mac = factory.make_mac_address() username = factory.make_name('user') password = factory.make_name('password') power_control = factory.make_name('power_control') with ExpectedException(exceptions.NoIPFoundForMACAddress): yield call_responder(Cluster(), cluster.AddSeaMicro15k, { "user": user, "mac": mac, "username": username, "password": password, "power_control": power_control, "accept_all": True, }) self.assertThat( maaslog.warning, MockCalledOnceWith( "Couldn't find IP address for MAC %s" % mac)) def test__calls_deferToThread_with_probe_seamicro15k_and_enlist(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') find_ip_via_arp = self.patch_autospec( clusterservice, 'find_ip_via_arp') find_ip_via_arp.return_value = factory.make_ipv4_address() user = factory.make_name('user') mac = factory.make_mac_address() username = factory.make_name('user') password = factory.make_name('password') power_control = factory.make_name('power_control') call_responder(Cluster(), cluster.AddSeaMicro15k, { "user": user, "mac": mac, "username": username, "password": password, "power_control": power_control, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_seamicro15k_and_enlist, user, find_ip_via_arp.return_value, username, password, power_control=power_control, accept_all=True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) find_ip_via_arp = self.patch_autospec( clusterservice, 'find_ip_via_arp') find_ip_via_arp.return_value = factory.make_ipv4_address() user = factory.make_name('user') mac = factory.make_mac_address() username = factory.make_name('user') password = factory.make_name('password') power_control = factory.make_name('power_control') call_responder(Cluster(), cluster.AddSeaMicro15k, { "user": user, "mac": mac, "username": username, "password": password, "power_control": power_control, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "SeaMicro 15000", fake_error)) class TestClusterProtocol_EnlistNodesFromMSCM(MAASTestCase): def test__is_registered(self): protocol = Cluster()
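# (locateResponder() returns None for a command with no registered responder, so the not-None assertion below is the whole registration check.)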
responder = protocol.locateResponder( cluster.EnlistNodesFromMSCM.commandName) self.assertIsNotNone(responder) def test__deferToThread_with_probe_and_enlist_mscm(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') host = factory.make_name('host') username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromMSCM, { "user": user, "host": host, "username": username, "password": password, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_and_enlist_mscm, user, host, username, password, True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) user = factory.make_name('user') host = factory.make_name('host') username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromMSCM, { "user": user, "host": host, "username": username, "password": password, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "Moonshot", fake_error)) class TestClusterProtocol_EnlistNodesFromUCSM(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.EnlistNodesFromUCSM.commandName) self.assertIsNotNone(responder) def test__calls_deferToThread_with_probe_and_enlist_ucsm(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') url = factory.make_url() username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromUCSM, { "user": user, "url": url, "username": username, "password": password, "accept_all": True, }) self.assertThat( mock_deferToThread, MockCalledOnceWith( clusterservice.probe_and_enlist_ucsm, user, url, username, password, True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) user = factory.make_name('user') url = factory.make_url() username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromUCSM, { "user": user, "url": url, "username": username, "password": password, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "UCS", fake_error)) class TestClusterProtocol_EnlistNodesFromMicrosoftOCS(MAASTestCase): def test__is_registered(self): protocol = Cluster() responder = protocol.locateResponder( cluster.EnlistNodesFromMicrosoftOCS.commandName) self.assertIsNotNone(responder) def test__defers_probe_and_enlist_msftocs_to_thread(self): mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') user = factory.make_name('user') ip = factory.make_ipv4_address() port = '%d' % randint(2000, 4000) username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromMicrosoftOCS, { "user": user, "ip": ip, "port": port, "username": username, "password": password, "accept_all": True, }) self.assertThat( 
mock_deferToThread, MockCalledOnceWith( clusterservice.probe_and_enlist_msftocs, user, ip, port, username, password, True)) def test__logs_error_to_maaslog(self): fake_error = factory.make_name('error') self.patch(clusterservice, 'maaslog') mock_deferToThread = self.patch_autospec( clusterservice, 'deferToThread') mock_deferToThread.return_value = fail(Exception(fake_error)) user = factory.make_name('user') ip = factory.make_ipv4_address() port = '%d' % randint(2000, 4000) username = factory.make_name('user') password = factory.make_name('password') call_responder(Cluster(), cluster.EnlistNodesFromMicrosoftOCS, { "user": user, "ip": ip, "port": port, "username": username, "password": password, "accept_all": True, }) self.assertThat( clusterservice.maaslog.error, MockAnyCall( "Failed to probe and enlist %s nodes: %s", "MicrosoftOCS", fake_error)) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_common.py0000644000000000000000000002224713056115004024255 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for common RPC code.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import random import re from maastesting.factory import factory from maastesting.matchers import ( IsFiredDeferred, IsUnfiredDeferred, MockCalledOnceWith, ) from maastesting.testcase import MAASTestCase from maastesting.twisted import ( always_fail_with, TwistedLoggerFixture, ) from mock import ( create_autospec, sentinel, ) from provisioningserver.rpc import common from provisioningserver.rpc.testing.doubles import DummyConnection from testtools import ExpectedException from testtools.deferredruntest import extract_result from testtools.matchers import ( Equals, Is, IsInstance, Not, ) from twisted.internet.defer import Deferred from twisted.internet.protocol import connectionDone from twisted.protocols import amp from twisted.test.proto_helpers import StringTransport class TestClient(MAASTestCase): def test_init(self): conn = DummyConnection() client = common.Client(conn) self.assertThat(client._conn, Is(conn)) def make_connection_and_client(self): conn = create_autospec(common.RPCProtocol()) client = common.Client(conn) return conn, client def test_ident(self): conn, client = self.make_connection_and_client() conn.ident = self.getUniqueString() self.assertThat(client.ident, Equals(conn.ident)) def test_call(self): conn, client = self.make_connection_and_client() conn.callRemote.return_value = sentinel.response response = client(sentinel.command, foo=sentinel.foo, bar=sentinel.bar) self.assertThat(response, Is(sentinel.response)) self.assertThat(conn.callRemote, MockCalledOnceWith( sentinel.command, foo=sentinel.foo, bar=sentinel.bar)) def test_call_with_keyword_arguments_raises_useful_error(self): conn = DummyConnection() client = common.Client(conn) expected_message = re.escape( "provisioningserver.rpc.common.Client called with 3 positional " "arguments, (1, 2, 3), but positional arguments are not " "supported. 
            "supported. Usage: client(command, arg1=value1, ...)")
        with ExpectedException(TypeError, expected_message):
            client(sentinel.command, 1, 2, 3)

    def test_getHostCertificate(self):
        conn, client = self.make_connection_and_client()
        conn.hostCertificate = sentinel.hostCertificate
        self.assertThat(
            client.getHostCertificate(), Is(sentinel.hostCertificate))

    def test_getPeerCertificate(self):
        conn, client = self.make_connection_and_client()
        conn.peerCertificate = sentinel.peerCertificate
        self.assertThat(
            client.getPeerCertificate(), Is(sentinel.peerCertificate))

    def test_isSecure(self):
        conn, client = self.make_connection_and_client()
        conn.peerCertificate = sentinel.peerCertificate
        self.assertTrue(client.isSecure())

    def test_isSecure_not(self):
        conn, client = self.make_connection_and_client()
        conn.peerCertificate = None
        self.assertFalse(client.isSecure())

    def test___eq__(self):
        conn, client = self.make_connection_and_client()
        self.assertThat(client, Equals(client))
        client_for_same_connection = common.Client(conn)
        self.assertThat(client, Equals(client_for_same_connection))
        _, client_for_another_connection = self.make_connection_and_client()
        self.assertThat(client, Not(Equals(client_for_another_connection)))

    def test___hash__(self):
        conn, client = self.make_connection_and_client()
        # The hash of a common.Client object is that of its connection.
        self.assertThat(hash(conn), Equals(hash(client)))


class TestRPCProtocol(MAASTestCase):

    def test_init(self):
        protocol = common.RPCProtocol()
        self.assertThat(protocol.onConnectionMade, IsUnfiredDeferred())
        self.assertThat(protocol.onConnectionLost, IsUnfiredDeferred())
        self.assertThat(protocol, IsInstance(amp.AMP))

    def test_onConnectionMade_fires_when_connection_is_made(self):
        protocol = common.RPCProtocol()
        protocol.connectionMade()
        self.assertThat(protocol.onConnectionMade, IsFiredDeferred())

    def test_onConnectionLost_fires_when_connection_is_lost(self):
        protocol = common.RPCProtocol()
        protocol.makeConnection(StringTransport())
        protocol.connectionLost(connectionDone)
        self.assertThat(protocol.onConnectionLost, IsFiredDeferred())


class TestRPCProtocol_UnhandledErrorsWhenHandlingResponses(MAASTestCase):

    answer_seq = b"%d" % random.randrange(0, 2 ** 32)
    answer_box = amp.AmpBox(_answer=answer_seq)

    error_seq = b"%d" % random.randrange(0, 2 ** 32)
    error_box = amp.AmpBox(
        _error=error_seq, _error_code=amp.UNHANDLED_ERROR_CODE,
        _error_description=factory.make_string())

    scenarios = (
        ("_answerReceived", {"seq": answer_seq, "box": answer_box}),
        ("_errorReceived", {"seq": error_seq, "box": error_box}),
    )

    def test_unhandled_errors_logged_and_do_not_cause_disconnection(self):
        protocol = common.RPCProtocol()
        protocol.makeConnection(StringTransport())
        # Poke a request into the dispatcher that will always fail.
        d = Deferred().addCallback(lambda _: 0 / 0)
        protocol._outstandingRequests[self.seq] = d
        # Push a box in response to the request.
        with TwistedLoggerFixture() as logger:
            protocol.ampBoxReceived(self.box)
        # The Deferred does not have a dangling error.
        self.assertThat(extract_result(d), Is(None))
        # The transport is still connected.
        self.assertThat(protocol.transport.disconnecting, Is(False))
        # The error has been logged.
        self.assertDocTestMatches(
            """\
            Unhandled failure during AMP request. This is probably a bug.
            Please ensure that this error is handled within application code.
            Traceback (most recent call last):
            ...
""", logger.output) class TestRPCProtocol_UnhandledErrorsWhenHandlingCommands(MAASTestCase): def test_unhandled_errors_do_not_cause_disconnection(self): protocol = common.RPCProtocol() protocol.makeConnection(StringTransport()) # Ensure that the superclass dispatchCommand() will fail. dispatchCommand = self.patch(amp.AMP, "dispatchCommand") dispatchCommand.side_effect = always_fail_with(ZeroDivisionError()) # Push a command box into the protocol. seq = b"%d" % random.randrange(0, 2 ** 32) cmd = factory.make_string() box = amp.AmpBox(_ask=seq, _command=cmd) with TwistedLoggerFixture() as logger: protocol.ampBoxReceived(box) # The transport is still connected. self.expectThat(protocol.transport.disconnecting, Is(False)) # The error has been logged on the originating side of the AMP # session, along with an explanatory message. The message includes a # command reference. cmd_ref = common.make_command_ref(box) self.assertDocTestMatches( """\ Unhandled failure dispatching AMP command. This is probably a bug. Please ensure that this error is handled within application code or declared in the signature of the %s command. [%s] Traceback (most recent call last): ... """ % (cmd, cmd_ref), logger.output) # A simpler error message has been transmitted over the wire. It # includes the same command reference as logged locally. protocol.transport.io.seek(0) observed_boxes_sent = amp.parse(protocol.transport.io) expected_boxes_sent = [ amp.AmpBox( _error=seq, _error_code=amp.UNHANDLED_ERROR_CODE, _error_description="Unknown Error [%s]" % cmd_ref), ] self.assertThat(observed_boxes_sent, Equals(expected_boxes_sent)) class TestMakeCommandRef(MAASTestCase): """Tests for `common.make_command_ref`.""" def test__command_ref_includes_host_pid_command_and_ask_sequence(self): host = factory.make_name("hostname") pid = random.randint(99, 9999999) cmd = factory.make_name("command") ask = random.randint(99, 9999999) box = amp.AmpBox(_command=cmd, _ask=ask) self.patch(common, "gethostname").return_value = host self.patch(common, "getpid").return_value = pid self.assertThat(common.make_command_ref(box), Equals( "%s:pid=%s:cmd=%s:ask=%s" % (host, pid, cmd, ask))) def test__replaces_missing_ask_with_none(self): box = amp.AmpBox(_command="command") self.patch(common, "gethostname").return_value = "host" self.patch(common, "getpid").return_value = 1234 self.assertThat(common.make_command_ref(box), Equals( "host:pid=1234:cmd=command:ask=none")) maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_dhcp.py0000644000000000000000000004651113056115004023703 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for :py:module:`~provisioningserver.rpc.dhcp`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( MockAnyCall, MockCalledOnceWith, MockCalledWith, MockCallsMatch, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, call, sentinel, ) from provisioningserver.dhcp.omshell import Omshell from provisioningserver.dhcp.testing.config import make_subnet_config from provisioningserver.drivers.service import ( SERVICE_STATE, ServiceRegistry, ) from provisioningserver.drivers.service.dhcp import DHCPv4Service from provisioningserver.rpc import ( dhcp, exceptions, ) from provisioningserver.rpc.exceptions import ( CannotCreateHostMap, CannotRemoveHostMap, ) from provisioningserver.service_monitor import ServiceActionError from provisioningserver.utils.shell import ExternalProcessError from provisioningserver.utils.testing import RegistryFixture from testtools import ExpectedException from testtools.matchers import Equals class TestConfigureDHCP(MAASTestCase): scenarios = ( ("DHCPv4", {"server": dhcp.DHCPv4Server}), ("DHCPv6", {"server": dhcp.DHCPv6Server}), ) def configure(self, omapi_key, subnets): server = self.server(omapi_key) dhcp.configure(server, subnets) def patch_os_exists(self): return self.patch_autospec(dhcp.os.path, "exists") def patch_sudo_delete_file(self): return self.patch_autospec(dhcp, 'sudo_delete_file') def patch_sudo_write_file(self): return self.patch_autospec(dhcp, 'sudo_write_file') def patch_restart_service(self): return self.patch(dhcp.service_monitor, 'restart_service') def patch_ensure_service(self): return self.patch(dhcp.service_monitor, 'ensure_service') def patch_get_config(self): return self.patch_autospec(dhcp, 'get_config') def test__extracts_interfaces(self): write_file = self.patch_sudo_write_file() self.patch_restart_service() subnets = [make_subnet_config() for _ in range(3)] self.configure(factory.make_name('key'), subnets) self.assertThat( write_file, MockCalledWith( ANY, ' '.join(sorted(subnet['interface'] for subnet in subnets)))) def test__eliminates_duplicate_interfaces(self): write_file = self.patch_sudo_write_file() self.patch_restart_service() interface = factory.make_name('interface') subnets = [make_subnet_config() for _ in range(2)] for subnet in subnets: subnet['interface'] = interface self.configure(factory.make_name('key'), subnets) self.assertThat(write_file, MockCalledWith(ANY, interface)) def test__composes_dhcp_config(self): self.patch_sudo_write_file() self.patch_restart_service() get_config = self.patch_get_config() omapi_key = factory.make_name('key') subnet = make_subnet_config() self.configure(omapi_key, [subnet]) self.assertThat( get_config, MockCalledOnceWith( self.server.template_basename, omapi_key=omapi_key, dhcp_subnets=[subnet])) def test__writes_dhcp_config(self): write_file = self.patch_sudo_write_file() self.patch_restart_service() subnet = make_subnet_config() expected_config = factory.make_name('config') self.patch_get_config().return_value = expected_config self.configure(factory.make_name('key'), [subnet]) self.assertThat( write_file, MockAnyCall(self.server.config_filename, expected_config)) def test__writes_interfaces_file(self): write_file = self.patch_sudo_write_file() self.patch_restart_service() self.configure(factory.make_name('key'), [make_subnet_config()]) self.assertThat( write_file, 
            MockCalledWith(self.server.interfaces_filename, ANY))

    def test__restarts_dhcp_server_if_subnets_defined(self):
        self.patch_sudo_write_file()
        dhcp_service = ServiceRegistry[self.server.dhcp_service]
        on = self.patch_autospec(dhcp_service, "on")
        restart_service = self.patch_restart_service()
        self.configure(factory.make_name('key'), [make_subnet_config()])
        self.assertThat(on, MockCalledOnceWith())
        self.assertThat(
            restart_service, MockCalledOnceWith(self.server.dhcp_service))

    def test__deletes_dhcp_config_if_no_subnets_defined(self):
        mock_exists = self.patch_os_exists()
        mock_exists.return_value = True
        mock_sudo_delete = self.patch_sudo_delete_file()
        dhcp_service = ServiceRegistry[self.server.dhcp_service]
        self.patch_autospec(dhcp_service, "off")
        self.patch_restart_service()
        self.patch_ensure_service()
        self.configure(factory.make_name('key'), [])
        self.assertThat(
            mock_sudo_delete, MockCalledOnceWith(self.server.config_filename))

    def test__stops_dhcp_server_if_no_subnets_defined(self):
        mock_exists = self.patch_os_exists()
        mock_exists.return_value = False
        dhcp_service = ServiceRegistry[self.server.dhcp_service]
        off = self.patch_autospec(dhcp_service, "off")
        restart_service = self.patch_restart_service()
        ensure_service = self.patch_ensure_service()
        self.configure(factory.make_name('key'), [])
        self.assertThat(off, MockCalledOnceWith())
        self.assertThat(
            ensure_service, MockCalledOnceWith(self.server.dhcp_service))
        self.assertThat(restart_service, MockNotCalled())

    def test__converts_failure_writing_file_to_CannotConfigureDHCP(self):
        self.patch_sudo_write_file().side_effect = (
            ExternalProcessError(1, "sudo something"))
        self.patch_restart_service()
        self.assertRaises(
            exceptions.CannotConfigureDHCP, self.configure,
            factory.make_name('key'), [make_subnet_config()])

    def test__converts_dhcp_restart_failure_to_CannotConfigureDHCP(self):
        self.patch_sudo_write_file()
        self.patch_restart_service().side_effect = ServiceActionError()
        self.assertRaises(
            exceptions.CannotConfigureDHCP, self.configure,
            factory.make_name('key'), [make_subnet_config()])

    def test__converts_stop_dhcp_server_failure_to_CannotConfigureDHCP(self):
        self.patch_sudo_write_file()
        self.patch_ensure_service().side_effect = ServiceActionError()
        self.assertRaises(
            exceptions.CannotConfigureDHCP, self.configure,
            factory.make_name('key'), [])

    def test__does_not_log_ServiceActionError(self):
        self.patch_sudo_write_file()
        self.patch_ensure_service().side_effect = ServiceActionError()
        with FakeLogger("maas") as logger:
            self.assertRaises(
                exceptions.CannotConfigureDHCP, self.configure,
                factory.make_name('key'), [])
        self.assertDocTestMatches("", logger.output)

    def test__does_log_other_exceptions(self):
        self.patch_sudo_write_file()
        self.patch_ensure_service().side_effect = (
            factory.make_exception("DHCP is on strike today"))
        with FakeLogger("maas") as logger:
            self.assertRaises(
                exceptions.CannotConfigureDHCP, self.configure,
                factory.make_name('key'), [])
        self.assertDocTestMatches(
            "DHCPv... server failed to stop: DHCP is on strike today",
            logger.output)

    def test__does_not_log_ServiceActionError_when_restarting(self):
        self.patch_sudo_write_file()
        self.patch_restart_service().side_effect = ServiceActionError()
        with FakeLogger("maas") as logger:
            self.assertRaises(
                exceptions.CannotConfigureDHCP, self.configure,
                factory.make_name('key'), [make_subnet_config()])
        self.assertDocTestMatches("", logger.output)

    def test__does_log_other_exceptions_when_restarting(self):
        self.patch_sudo_write_file()
        self.patch_restart_service().side_effect = (
            factory.make_exception("DHCP is on strike today"))
        with FakeLogger("maas") as logger:
            self.assertRaises(
                exceptions.CannotConfigureDHCP, self.configure,
                factory.make_name('key'), [make_subnet_config()])
        self.assertDocTestMatches(
            "DHCPv... server failed to restart "
            "(for network interfaces ...): "
            "DHCP is on strike today", logger.output)


class TestEnsureDHCPv4IsAccessible(MAASTestCase):

    def setUp(self):
        super(TestEnsureDHCPv4IsAccessible, self).setUp()
        # Ensure the global registry is empty for each test run.
        self.useFixture(RegistryFixture())

    def make_dhcpv4_service(self):
        service = DHCPv4Service()
        ServiceRegistry.register_item(service.name, service)
        return service

    def test__returns_false_if_service_should_be_off(self):
        service = self.make_dhcpv4_service()
        service.off()
        exception_type = factory.make_exception_type()
        return_value = dhcp._is_dhcpv4_managed_and_active(exception_type)
        self.assertThat(return_value, Equals(False))

    def test__returns_true_if_service_already_on(self):
        service = self.make_dhcpv4_service()
        service.on()
        mock_get_state = self.patch(
            dhcp.service_monitor, "get_service_state")
        mock_get_state.return_value = SERVICE_STATE.ON
        mock_ensure_service = self.patch(
            dhcp.service_monitor, "ensure_service")
        return_value = dhcp._is_dhcpv4_managed_and_active(
            factory.make_exception_type())
        self.assertThat(mock_ensure_service, MockNotCalled())
        self.assertThat(return_value, Equals(True))

    def test__calls_try_connection_to_check_omshell(self):
        service = self.make_dhcpv4_service()
        service.on()
        mock_get_state = self.patch(
            dhcp.service_monitor, "get_service_state")
        mock_get_state.return_value = SERVICE_STATE.OFF
        mock_ensure_service = self.patch(
            dhcp.service_monitor, "ensure_service")
        mock_omshell = self.patch_autospec(dhcp, "Omshell")
        mock_try_connection = mock_omshell.return_value.try_connection
        mock_try_connection.return_value = True
        dhcp._is_dhcpv4_managed_and_active(factory.make_exception_type())
        self.assertThat(mock_ensure_service, MockCalledOnceWith("dhcp4"))
        self.assertThat(mock_try_connection, MockCalledOnceWith())

    def test__calls_try_connection_three_times_to_check_omshell(self):
        service = self.make_dhcpv4_service()
        service.on()
        mock_get_state = self.patch(
            dhcp.service_monitor, "get_service_state")
        mock_get_state.return_value = SERVICE_STATE.OFF
        mock_ensure_service = self.patch(
            dhcp.service_monitor, "ensure_service")
        mock_omshell = self.patch_autospec(dhcp, "Omshell")
        self.patch_autospec(dhcp.time, "sleep")
        mock_try_connection = mock_omshell.return_value.try_connection
        mock_try_connection.return_value = False
        fake_exception_type = factory.make_exception_type()
        with ExpectedException(fake_exception_type):
            dhcp._is_dhcpv4_managed_and_active(fake_exception_type)
        self.assertThat(mock_ensure_service, MockCalledOnceWith("dhcp4"))
        self.assertEquals(mock_try_connection.call_count, 3)

    def test__raises_exception_on_ServiceActionError(self):
        service = self.make_dhcpv4_service()
        service.on()
        mock_get_state = self.patch(
"get_service_state") mock_get_state.return_value = SERVICE_STATE.OFF mock_ensure_service = self.patch( dhcp.service_monitor, "ensure_service") mock_ensure_service.side_effect = ServiceActionError() exception_type = factory.make_exception_type() with ExpectedException(exception_type): dhcp._is_dhcpv4_managed_and_active(exception_type) def test__raises_exception_on_other_exceptions(self): service = self.make_dhcpv4_service() service.on() mock_get_state = self.patch( dhcp.service_monitor, "get_service_state") mock_get_state.return_value = SERVICE_STATE.OFF mock_ensure_service = self.patch( dhcp.service_monitor, "ensure_service") mock_ensure_service.side_effect = factory.make_exception() exception_type = factory.make_exception_type() with ExpectedException(exception_type): dhcp._is_dhcpv4_managed_and_active(exception_type) class TestCreateHostMaps(MAASTestCase): def setUp(self): super(TestCreateHostMaps, self).setUp() # Patch _is_dhcpv4_managed_and_active. self._is_dhcpv4_managed_and_active = self.patch_autospec( dhcp, "_is_dhcpv4_managed_and_active") def test_calls__is_dhcpv4_managed_and_active(self): self.patch(dhcp, "Omshell") dhcp.create_host_maps([], sentinel.shared_key) self.assertThat( self._is_dhcpv4_managed_and_active, MockCalledOnceWith(CannotCreateHostMap)) def test_creates_omshell(self): omshell = self.patch(dhcp, "Omshell") dhcp.create_host_maps([], sentinel.shared_key) self.assertThat(omshell, MockCallsMatch( call(server_address=ANY, shared_key=sentinel.shared_key), )) def test_calls_omshell_create(self): omshell_create = self.patch(Omshell, "create") mappings = [ {"ip_address": factory.make_ipv4_address(), "mac_address": factory.make_mac_address()} for _ in range(5) ] dhcp.create_host_maps(mappings, sentinel.shared_key) self.assertThat(omshell_create, MockCallsMatch(*( call(mapping["ip_address"], mapping["mac_address"]) for mapping in mappings ))) def test_raises_error_when_omshell_crashes(self): error_message = factory.make_name("error").encode("ascii") omshell_create = self.patch(Omshell, "create") omshell_create.side_effect = ExternalProcessError( returncode=2, cmd=("omshell",), output=error_message) ip_address = factory.make_ipv4_address() mac_address = factory.make_mac_address() mappings = [{"ip_address": ip_address, "mac_address": mac_address}] with FakeLogger("maas.dhcp") as logger: error = self.assertRaises( exceptions.CannotCreateHostMap, dhcp.create_host_maps, mappings, sentinel.shared_key) # The CannotCreateHostMap exception includes a message describing the # problematic mapping. self.assertDocTestMatches( "%s -> %s: ..." % (mac_address, ip_address), unicode(error)) # A message is also written to the maas.dhcp logger that describes the # problematic mapping. self.assertDocTestMatches( "Could not create host map for ... with address ...: ...", logger.output) class TestRemoveHostMaps(MAASTestCase): def setUp(self): super(TestRemoveHostMaps, self).setUp() self.patch(Omshell, "remove") self.patch(Omshell, "nullify_lease") # Patch _is_dhcpv4_managed_and_active. 
        self._is_dhcpv4_managed_and_active = self.patch_autospec(
            dhcp, "_is_dhcpv4_managed_and_active")

    def test_calls__is_dhcpv4_managed_and_active(self):
        self.patch(dhcp, "Omshell")
        dhcp.remove_host_maps([], sentinel.shared_key)
        self.assertThat(
            self._is_dhcpv4_managed_and_active,
            MockCalledOnceWith(CannotRemoveHostMap))

    def test_removes_omshell(self):
        omshell = self.patch(dhcp, "Omshell")
        dhcp.remove_host_maps([], sentinel.shared_key)
        self.assertThat(omshell, MockCallsMatch(
            call(server_address=ANY, shared_key=sentinel.shared_key),
        ))

    def test_calls_omshell_remove(self):
        mac_addresses = [factory.make_mac_address() for _ in range(5)]
        dhcp.remove_host_maps(mac_addresses, sentinel.shared_key)
        self.assertThat(Omshell.remove, MockCallsMatch(*(
            call(mac_address) for mac_address in mac_addresses
        )))

    def test_calls_omshell_nullify_lease(self):
        mac_addresses = [factory.make_mac_address() for _ in range(5)]
        dhcp.remove_host_maps(mac_addresses, sentinel.shared_key)
        self.assertThat(Omshell.nullify_lease, MockCallsMatch(*(
            call(mac_address) for mac_address in mac_addresses
        )))

    def test_raises_error_when_omshell_crashes(self):
        error_message = factory.make_name("error").encode("ascii")
        Omshell.remove.side_effect = ExternalProcessError(
            returncode=2, cmd=("omshell",), output=error_message)
        mac_address = factory.make_mac_address()
        with FakeLogger("maas.dhcp") as logger:
            error = self.assertRaises(
                exceptions.CannotRemoveHostMap, dhcp.remove_host_maps,
                [mac_address], sentinel.shared_key)
        # The CannotRemoveHostMap exception includes a message describing
        # the problematic mapping.
        self.assertDocTestMatches("%s: ..." % mac_address, unicode(error))
        # A message is also written to the maas.dhcp logger that describes
        # the problematic mapping.
        self.assertDocTestMatches(
            "Could not remove host map for ...: ...", logger.output)


class TestOmshellError(MAASTestCase):
    """Test omshell error reporting."""

    def setUp(self):
        super(TestOmshellError, self).setUp()
        # Patch _is_dhcpv4_managed_and_active.
        self._is_dhcpv4_managed_and_active = self.patch_autospec(
            dhcp, "_is_dhcpv4_managed_and_active")
        self.patch(ExternalProcessError, '__unicode__', lambda x: 'Error')

        def raise_ExternalProcessError(*args, **kwargs):
            raise ExternalProcessError(*args, **kwargs)

        self.patch(Omshell, "remove", raise_ExternalProcessError)

    def test__raises_CannotRemoveHostMap_if_omshell_offline(self):
        """If the DHCP server is offline, report a specific message."""
        omapi_key = factory.make_name('omapi-key')
        ip_address = factory.make_ipv4_address()
        self.patch(
            ExternalProcessError, 'output_as_unicode', 'not connected.')
        self.assertRaises(
            CannotRemoveHostMap,
            dhcp.remove_host_maps, [ip_address], omapi_key)
        try:
            dhcp.remove_host_maps([ip_address], omapi_key)
        except CannotRemoveHostMap as e:
            self.assertEqual(
                e.args[0], "The DHCP server could not be reached.")

    def test__raises_CannotRemoveHostMap_if_omshell_error(self):
        """Raise a CannotRemoveHostMap if omshell returns an error."""
        omapi_key = factory.make_name('omapi-key')
        ip_address = factory.make_ipv4_address()
        self.patch(ExternalProcessError, 'output_as_unicode', 'error.')
        self.assertRaises(
            CannotRemoveHostMap,
            dhcp.remove_host_maps, [ip_address], omapi_key)
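
# For illustration only -- the call pattern these tests exercise, with
# invented example values:
#
#   dhcp.create_host_maps(
#       [{"ip_address": "10.0.0.2", "mac_address": "00:16:3e:00:00:01"}],
#       "omapi-key")
#   # ...which drives Omshell(server_address=..., shared_key=...) and
#   # Omshell.create("10.0.0.2", "00:16:3e:00:00:01"); omshell failures
#   # surface as CannotCreateHostMap / CannotRemoveHostMap, as asserted
#   # above.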
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_docs.py0000644000000000000000000000427113056115004023712 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Test the documentation of defined commands.

Specifically, check :py:class:`~twisted.protocols.amp.Command`
subclasses in the MAAS codebase.
"""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from inspect import getdoc
from itertools import chain
import re

from maastesting.testcase import MAASTestCase
import provisioningserver.rpc.cluster
import provisioningserver.rpc.common
import provisioningserver.rpc.region
from testtools.matchers import (
    Annotate,
    Contains,
    MatchesAll,
    MatchesRegex,
)
from twisted.protocols import amp


def get_commands(module):
    """Return command classes from the given module."""
    for name, value in vars(module).iteritems():
        if isinstance(value, type):
            if issubclass(value, amp.Command):
                yield value


class TestDocs(MAASTestCase):

    scenarios = sorted(
        (command.__name__, {"command": command})
        for command in chain(
            get_commands(provisioningserver.rpc.common),
            get_commands(provisioningserver.rpc.cluster),
            get_commands(provisioningserver.rpc.region),
        )
    )

    since_clause_missing_message = (
        "Command class does not have a :since: clause. The version in "
        "which this command will be (or already has been) introduced "
        "must be recorded, 1.6 for example."
    )

    since_clause_version_not_recognised = (
        "Command's :since: clause does not contain a recognised version, "
        "1.6 for example."
    )

    def test_since_clause(self):
        contains_since_clause = Annotate(
            self.since_clause_missing_message, Contains(":since:"))
        since_clause_contains_version = Annotate(
            self.since_clause_version_not_recognised,
            MatchesRegex(
                ".*^:since: *[1-9][.][0-9]+([.][0-9]+)?$",
                re.DOTALL | re.MULTILINE))
        self.assertThat(getdoc(self.command), MatchesAll(
            contains_since_clause, since_clause_contains_version))
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_module.py0000644000000000000000000000207513056115004024247 0ustar 00000000000000# Copyright 2014 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for the top-level cluster RPC API."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    ]

from maastesting.testcase import MAASTestCase
import provisioningserver
from provisioningserver.rpc.exceptions import NoConnectionsAvailable


class TestUtilities(MAASTestCase):

    def test_get_rpc_client_returns_client(self):
        services = self.patch(provisioningserver, "services")
        client = provisioningserver.rpc.getRegionClient()
        self.assertEqual(
            services.getServiceNamed('rpc').getClient(),
            client,
        )

    def test_error_when_cluster_services_are_down(self):
        services = self.patch(provisioningserver, "services")
        services.getServiceNamed.side_effect = KeyError
        self.assertRaises(
            NoConnectionsAvailable, provisioningserver.rpc.getRegionClient)
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_monitors.py0000644000000000000000000001366213056115004024640 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for :py:module:`~provisioningserver.rpc.monitors`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from datetime import ( datetime, timedelta, ) from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from mock import ( Mock, sentinel, ) from provisioningserver.rpc import monitors as monitors_module from provisioningserver.rpc.monitors import ( cancel_monitor, running_monitors, start_monitors, ) from provisioningserver.rpc.region import MonitorExpired from provisioningserver.testing.testcase import PservTestCase from testtools.matchers import ( Contains, Equals, HasLength, IsInstance, Not, ) from twisted.internet.base import DelayedCall from twisted.internet.task import Clock from twisted.protocols import amp def make_monitors(time_now=None): """Make some StartMonitors, set to go off one second apart starting in one second""" if time_now is None: time_now = datetime.now(amp.utc) monitors = [] for i in xrange(2): monitors.append({ "deadline": time_now + timedelta(seconds=i + 1), "context": factory.make_name("context"), "id": factory.make_name("id"), }) return monitors class TestStartMonitors(PservTestCase): """Tests for `~provisioningserver.rpc.monitors.start_monitors`.""" def tearDown(self): super(TestStartMonitors, self).tearDown() for dc, _ in running_monitors.viewvalues(): if dc.active(): dc.cancel() running_monitors.clear() def test__sets_up_running_monitors(self): clock = Clock() monitors = make_monitors() start_monitors(monitors, clock) self.expectThat(running_monitors, HasLength(len(monitors))) for monitor in monitors: id = monitor["id"] self.expectThat(running_monitors[id], IsInstance(tuple)) delayed_call, context = running_monitors[id] self.expectThat(delayed_call, IsInstance(DelayedCall)) self.expectThat(context, Equals(monitor["context"])) def test__reschedules_existing_monitor(self): clock = Clock() monitor_expired = self.patch_autospec( monitors_module, "monitor_expired") monitor_id = factory.make_name("id") # The first monitor with the ID is scheduled as expected. monitor1 = { "deadline": datetime.now(amp.utc) + timedelta(seconds=10), "context": sentinel.context1, "id": monitor_id, } start_monitors([monitor1], clock) self.expectThat(running_monitors, HasLength(1)) dc1, context = running_monitors[monitor_id] self.assertAlmostEqual(dc1.getTime(), 10, delta=1) self.assertIs(sentinel.context1, context) self.assertTrue(dc1.active()) # The second monitor with the ID is also scheduled as expected, taking # the place of the previous monitor. monitor2 = { "deadline": monitor1["deadline"] + timedelta(seconds=10), "context": sentinel.context2, "id": monitor_id, } start_monitors([monitor2], clock) self.expectThat(running_monitors, HasLength(1)) dc2, context = running_monitors[monitor_id] self.assertAlmostEqual(dc2.getTime(), 20, delta=2) self.assertIs(sentinel.context2, context) self.assertTrue(dc2.active()) # However, the first monitor has been cancelled, without calling back # to the region. self.assertTrue(dc1.cancelled, "First monitor has not been cancelled") self.assertThat(monitor_expired, MockNotCalled()) def test__removes_from_running_monitors_when_monitor_expires(self): self.patch(monitors_module, "getRegionClient") clock = Clock() monitors = make_monitors() start_monitors(monitors, clock) # Expire the first monitor. 
        clock.advance(1)
        self.assertThat(running_monitors, Not(Contains(monitors[0]["id"])))
        self.assertThat(running_monitors, Contains(monitors[1]["id"]))
        # Expire the other monitor.
        clock.advance(1)
        self.assertThat(running_monitors, Not(Contains(monitors[1]["id"])))

    def test__calls_MonitorExpired_when_monitor_expires(self):
        getRegionClient = self.patch(monitors_module, "getRegionClient")
        client = Mock()
        getRegionClient.return_value = client
        clock = Clock()
        monitors = make_monitors()
        # Just use the first one for this test.
        monitor = monitors[0]
        start_monitors([monitor], clock)
        clock.advance(1)
        self.assertThat(
            client, MockCalledOnceWith(
                MonitorExpired, id=monitor["id"],
                context=monitor["context"]))


class TestCancelMonitor(PservTestCase):
    """Tests for `~provisioningserver.rpc.monitors.cancel_monitor`."""

    def test__cancels_running_monitor(self):
        monitors = make_monitors()
        clock = Clock()
        start_monitors(monitors, clock)
        dc, _ = running_monitors[monitors[0]["id"]]
        cancel_monitor(monitors[0]["id"])
        self.expectThat(running_monitors, Not(Contains(monitors[0]["id"])))
        self.expectThat(running_monitors, Contains(monitors[1]["id"]))
        self.assertTrue(dc.cancelled)

    def test__silently_ignores_already_cancelled_monitor(self):
        monitors = make_monitors()
        clock = Clock()
        self.addCleanup(running_monitors.clear)
        start_monitors(monitors, clock)
        cancel_monitor(factory.make_string())
        self.expectThat(running_monitors, Contains(monitors[0]["id"]))
        self.expectThat(running_monitors, Contains(monitors[1]["id"]))
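
# For illustration only -- the bookkeeping these tests assert, with an
# invented monitor definition:
#
#   clock = Clock()
#   start_monitors([{
#       "id": "mon-1", "context": {"node": "n1"},
#       "deadline": datetime.now(amp.utc) + timedelta(seconds=5),
#   }], clock)
#   # running_monitors["mon-1"] is now (<DelayedCall>, {"node": "n1"}).
#   clock.advance(5)  # fires MonitorExpired and removes the entry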
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_osystems.py0000644000000000000000000001627413056115004024656 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for :py:module:`~provisioningserver.rpc.osystems`."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from collections import Iterable
import random

from maastesting.factory import factory
from maastesting.matchers import MockCalledOnceWith
from maastesting.testcase import MAASTestCase
from mock import sentinel
from provisioningserver.drivers.osystem import (
    BOOT_IMAGE_PURPOSE,
    OperatingSystemRegistry,
)
from provisioningserver.rpc import (
    exceptions,
    osystems,
)
from provisioningserver.rpc.testing.doubles import StubOS
from provisioningserver.testing.os import make_osystem


class TestListOperatingSystemHelpers(MAASTestCase):

    def test_gen_operating_systems_returns_dicts_for_registered_oses(self):
        # Patch in some operating systems with some randomised data. See
        # StubOS for details of the rules that are used to populate the
        # non-random elements.
        os1 = StubOS("kermit", [
            ("statler", "Statler"),
            ("waldorf", "Waldorf"),
        ])
        os2 = StubOS("fozzie", [
            ("swedish-chef", "Swedish-Chef"),
            ("beaker", "Beaker"),
        ])
        self.patch(
            osystems, "OperatingSystemRegistry",
            [(os1.name, os1), (os2.name, os2)])
        # The `releases` field in the dict returned is populated by
        # gen_operating_system_releases. That's not under test, so we
        # mock it.
        gen_operating_system_releases = self.patch(
            osystems, "gen_operating_system_releases")
        gen_operating_system_releases.return_value = sentinel.releases
        # The operating systems are yielded in name order.
        expected = [
            {
                "name": "fozzie",
                "title": "Fozzie",
                "releases": sentinel.releases,
                "default_release": "swedish-chef",
                "default_commissioning_release": "beaker",
            },
            {
                "name": "kermit",
                "title": "Kermit",
                "releases": sentinel.releases,
                "default_release": "statler",
                "default_commissioning_release": "waldorf",
            },
        ]
        observed = osystems.gen_operating_systems()
        self.assertIsInstance(observed, Iterable)
        self.assertEqual(expected, list(observed))

    def test_gen_operating_system_releases_returns_dicts_for_releases(self):
        # Use an operating system with some randomised data. See StubOS
        # for details of the rules that are used to populate the
        # non-random elements.
        osystem = StubOS("fozzie", [
            ("swedish-chef", "I Am The Swedish-Chef"),
            ("beaker", "Beaker The Phreaker"),
        ])
        expected = [
            {
                "name": "swedish-chef",
                "title": "I Am The Swedish-Chef",
                "requires_license_key": False,
                "can_commission": False,
            },
            {
                "name": "beaker",
                "title": "Beaker The Phreaker",
                "requires_license_key": True,
                "can_commission": True,
            },
        ]
        observed = osystems.gen_operating_system_releases(osystem)
        self.assertIsInstance(observed, Iterable)
        self.assertEqual(expected, list(observed))


class TestGetOSReleaseTitle(MAASTestCase):

    def test_returns_release_title(self):
        os_name = factory.make_name('os')
        title = factory.make_name('title')
        purposes = [BOOT_IMAGE_PURPOSE.XINSTALL]
        osystem = make_osystem(self, os_name, purposes)
        release = random.choice(osystem.get_supported_releases())
        self.patch(osystem, 'get_release_title').return_value = title
        self.assertEqual(
            title, osystems.get_os_release_title(osystem.name, release))

    def test_returns_empty_release_title_when_None_returned(self):
        os_name = factory.make_name('os')
        purposes = [BOOT_IMAGE_PURPOSE.XINSTALL]
        osystem = make_osystem(self, os_name, purposes)
        release = random.choice(osystem.get_supported_releases())
        self.patch(osystem, 'get_release_title').return_value = None
        self.assertEqual(
            "", osystems.get_os_release_title(osystem.name, release))

    def test_throws_exception_when_os_does_not_exist(self):
        self.assertRaises(
            exceptions.NoSuchOperatingSystem,
            osystems.get_os_release_title,
            factory.make_name("no-such-os"),
            factory.make_name("bogus-release"))


class TestValidateLicenseKeyErrors(MAASTestCase):

    def test_throws_exception_when_os_does_not_exist(self):
        self.assertRaises(
            exceptions.NoSuchOperatingSystem,
            osystems.validate_license_key,
            factory.make_name("no-such-os"),
            factory.make_name("bogus-release"),
            factory.make_name("key-to-not-much"))


class TestValidateLicenseKey(MAASTestCase):

    def test_validates_key(self):
        os_name = factory.make_name('os')
        purposes = [BOOT_IMAGE_PURPOSE.XINSTALL]
        osystem = make_osystem(self, os_name, purposes)
        release = random.choice(osystem.get_supported_releases())
        os_specific_validate_license_key = self.patch(
            osystem, "validate_license_key")
        osystems.validate_license_key(osystem.name, release, sentinel.key)
        self.assertThat(
            os_specific_validate_license_key,
            MockCalledOnceWith(release, sentinel.key))


class TestGetPreseedDataErrors(MAASTestCase):

    def test_throws_exception_when_os_does_not_exist(self):
        self.assertRaises(
            exceptions.NoSuchOperatingSystem, osystems.get_preseed_data,
            factory.make_name("no-such-os"),
            sentinel.preseed_type, sentinel.node_system_id,
            sentinel.node_hostname, sentinel.consumer_key,
            sentinel.token_key, sentinel.token_secret,
            sentinel.metadata_url)


class TestGetPreseedData(MAASTestCase):

    # Check for every OS.
    scenarios = [
        (osystem.name, {"osystem": osystem})
        for _, osystem in OperatingSystemRegistry
    ]

    def test_get_preseed_data_calls_compose_preseed(self):
        # get_preseed_data() calls compose_preseed() on the
        # OperatingSystem instances.
        os_specific_compose_preseed = self.patch(
            self.osystem, "compose_preseed")
        metadata_url = factory.make_parsed_url()
        osystems.get_preseed_data(
            self.osystem.name, sentinel.preseed_type,
            sentinel.node_system_id, sentinel.node_hostname,
            sentinel.consumer_key, sentinel.token_key,
            sentinel.token_secret, metadata_url)
        self.assertThat(
            os_specific_compose_preseed,
            MockCalledOnceWith(
                sentinel.preseed_type,
                (sentinel.node_system_id, sentinel.node_hostname),
                (sentinel.consumer_key, sentinel.token_key,
                 sentinel.token_secret),
                metadata_url.geturl()))
maas-1.9.5+bzr4599.orig/src/provisioningserver/rpc/tests/test_tags.py0000644000000000000000000000504013056115004023713 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Tests for :py:module:`~provisioningserver.rpc.tags`."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = []

from apiclient.maas_client import (
    MAASClient,
    MAASDispatcher,
    MAASOAuth,
)
from maastesting.factory import factory
from maastesting.matchers import MockCalledOnceWith
from maastesting.testcase import MAASTestCase
from mock import (
    ANY,
    sentinel,
)
from provisioningserver.rpc import tags
from provisioningserver.testing.config import ClusterConfigurationFixture


class TestEvaluateTag(MAASTestCase):

    def setUp(self):
        super(TestEvaluateTag, self).setUp()
        self.mock_cluster_uuid = factory.make_UUID()
        self.mock_url = factory.make_simple_http_url()
        self.useFixture(ClusterConfigurationFixture(
            cluster_uuid=self.mock_cluster_uuid, maas_url=self.mock_url))

    def test__calls_process_node_tags(self):
        credentials = "aaa", "bbb", "ccc"
        process_node_tags = self.patch_autospec(tags, "process_node_tags")
        tags.evaluate_tag(
            sentinel.tag_name, sentinel.tag_definition, sentinel.tag_nsmap,
            credentials)
        self.assertThat(
            process_node_tags, MockCalledOnceWith(
                tag_name=sentinel.tag_name,
                tag_definition=sentinel.tag_definition,
                tag_nsmap=sentinel.tag_nsmap, client=ANY,
                nodegroup_uuid=self.mock_cluster_uuid))

    def test__constructs_client_with_credentials(self):
        consumer_key = factory.make_name("ckey")
        resource_token = factory.make_name("rtok")
        resource_secret = factory.make_name("rsec")
        credentials = consumer_key, resource_token, resource_secret
        self.patch_autospec(tags, "process_node_tags")
        self.patch_autospec(tags, "MAASOAuth").side_effect = MAASOAuth
        tags.evaluate_tag(
            sentinel.tag_name, sentinel.tag_definition, sentinel.tag_nsmap,
            credentials)
        client = tags.process_node_tags.call_args[1]["client"]
        self.assertIsInstance(client, MAASClient)
        self.assertEqual(self.mock_url, client.url)
        self.assertIsInstance(client.dispatcher, MAASDispatcher)
        self.assertIsInstance(client.auth, MAASOAuth)
        self.assertThat(tags.MAASOAuth, MockCalledOnceWith(
            consumer_key, resource_token, resource_secret))
maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/__init__.py0000644000000000000000000000000013056115004023173 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/bindfixture.py0000644000000000000000000003114113056115004023771 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Server fixture for BIND.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'BINDServer', ] import argparse import os from shutil import copy import signal import socket import subprocess from textwrap import dedent import time import fixtures from maastesting.fixtures import TempDirectory from provisioningserver.dns.config import generate_rndc from provisioningserver.utils.fs import ( atomic_write, ensure_dir, ) import tempita from testtools.content import Content from testtools.content_type import UTF8_TEXT GENERATED_HEADER = """ # This is a file generated by the bindfixture. # The bindfixture tries not to overwrite existing configuration files # so it's safe to edit this file if you need to but be aware that # these changes won't be persisted. """ def preexec_fn(): # Revert Python's handling of SIGPIPE. See # http://bugs.python.org/issue1652 for more info. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def get_port(socket): """Return the port to which a socket is bound.""" addr, port = socket.getsockname() return port def allocate_ports(*addrs): """Allocate `len(addrs)` unused ports. A port is allocated for each element in `addrs`. There is a small race condition here (between the time we allocate the port, and the time it actually gets used), but for the purposes for which this function gets used it isn't a problem in practice. """ sockets = [socket.socket() for addr in addrs] try: for addr, sock in zip(addrs, sockets): sock.bind((addr, 0)) return [get_port(sock) for sock in sockets] finally: for sock in sockets: sock.close() def should_write(path, overwrite_config=False): """Does the DNS config file at `path` need writing? :param path: File that may need to be written out. :param overwrite_config: Overwrite config files even if they already exist? :return: Whether the file should be written. :rtype: bool """ return overwrite_config or not os.path.exists(path) class BINDServerResources(fixtures.Fixture): """Allocate the resources a BIND server needs. :ivar port: A port that was free at the time setUp() was called. :ivar rndc_port: A port that was free at the time setUp() was called (used for rndc communication). :ivar homedir: A directory where to put all the files the BIND server needs (configuration files and executable). :ivar log_file: The log file allocated for the server. :ivar include_in_options: Name of a file under homedir to include inside the options block. """ # The full path where the 'named' executable can be # found. # Note that it will be copied over to a temporary # location in order to by-pass the limitations imposed by # apparmor if the executable is in /usr/sbin/named. NAMED_PATH = '/usr/sbin/named' # The configuration template for the BIND server. The goal here # is to override the defaults (default configuration files location, # default port) to avoid clashing with the system's BIND (if # running). 


class BINDServerResources(fixtures.Fixture):
    """Allocate the resources a BIND server needs.

    :ivar port: A port that was free at the time setUp() was called.
    :ivar rndc_port: A port that was free at the time setUp() was
        called (used for rndc communication).
    :ivar homedir: A directory where to put all the files the BIND
        server needs (configuration files and executable).
    :ivar log_file: The log file allocated for the server.
    :ivar include_in_options: Name of a file under homedir to include
        inside the options block.
    """

    # The full path where the 'named' executable can be found.
    # Note that it will be copied over to a temporary location in
    # order to by-pass the limitations imposed by apparmor if the
    # executable is in /usr/sbin/named.
    NAMED_PATH = '/usr/sbin/named'

    # The configuration template for the BIND server. The goal here
    # is to override the defaults (default configuration file
    # locations, default port) to avoid clashing with the system's
    # BIND (if running).
    NAMED_CONF_TEMPLATE = tempita.Template(dedent("""
      options {
        directory "{{homedir}}";
        listen-on port {{port}} {127.0.0.1;};
        listen-on-v6 port {{port}} {::1;};
        pid-file "{{homedir}}/named.pid";
        session-keyfile "{{homedir}}/session.key";
        {{if include_in_options}}
        include "{{homedir}}/{{include_in_options}}";
        {{endif}}
      };

      logging{
        channel simple_log {
          file "{{log_file}}";
          severity info;
          print-severity yes;
        };
        category default{
          simple_log;
        };
      };

      {{extra}}
    """))

    def __init__(self, port=None, rndc_port=None, homedir=None,
                 log_file=None, include_in_options=None):
        super(BINDServerResources, self).__init__()
        self._defaults = dict(
            port=port,
            rndc_port=rndc_port,
            homedir=homedir,
            log_file=log_file,
            include_in_options=include_in_options,
        )

    def setUp(self, overwrite_config=False):
        super(BINDServerResources, self).setUp()
        self.__dict__.update(self._defaults)
        self.set_up_config()
        self.set_up_named(overwrite_config=overwrite_config)

    def set_up_named(self, overwrite_config=True):
        """Set up an environment to run 'named'.

        - Creates the default configuration for 'named' and sets up
          rndc.
        - Copies the 'named' executable inside homedir. AppArmor won't
          let us run the installed version the way we want.
        """
        # Generate rndc configuration (rndc config and named snippet).
        # Disable remote administration for init scripts by suppressing
        # the "controls" statement.
        rndcconf, namedrndcconf = generate_rndc(
            port=self.rndc_port, key_name='dnsfixture-rndc-key',
            include_default_controls=False)
        # Write main BIND config file.
        if should_write(self.conf_file, overwrite_config):
            named_conf = (
                self.NAMED_CONF_TEMPLATE.substitute(
                    homedir=self.homedir, port=self.port,
                    log_file=self.log_file,
                    include_in_options=self.include_in_options,
                    extra=namedrndcconf))
            atomic_write(
                GENERATED_HEADER + named_conf, self.conf_file)
        # Write rndc config file.
        if should_write(self.rndcconf_file, overwrite_config):
            atomic_write(
                GENERATED_HEADER + rndcconf, self.rndcconf_file)
        # Copy named executable to home dir. This is done to avoid
        # the limitations imposed by apparmor if the executable
        # is in /usr/sbin/named.
        # named's apparmor profile prevents loading of zone and
        # configuration files from outside of a restricted set,
        # none of which an ordinary user has write access to.
        if should_write(self.named_file, overwrite_config):
            named_path = self.NAMED_PATH
            assert os.path.exists(named_path), (
                "'%s' executable not found. Install the package "
                "'bind9' or define an environment variable named "
                "NAMED_PATH with the path where the 'named' "
                "executable can be found." % named_path)
            copy(named_path, self.named_file)

    def set_up_config(self):
        if self.port is None:
            [self.port] = allocate_ports("localhost")
        if self.rndc_port is None:
            [self.rndc_port] = allocate_ports("localhost")
        if self.homedir is None:
            self.homedir = self.useFixture(TempDirectory()).path
        if self.log_file is None:
            self.log_file = os.path.join(self.homedir, 'named.log')
        self.named_file = os.path.join(
            self.homedir, os.path.basename(self.NAMED_PATH))
        self.conf_file = os.path.join(self.homedir, 'named.conf')
        self.rndcconf_file = os.path.join(self.homedir, 'rndc.conf')


class BINDServerRunner(fixtures.Fixture):
    """Run a BIND server."""

    # Where the executable 'rndc' can be found (belongs to the
    # package 'bind9utils').
    RNDC_PATH = "/usr/sbin/rndc"

    def __init__(self, config):
        """Create a `BINDServerRunner` instance.

        :param config: An object exporting the variables
            `BINDServerResources` exports.
""" super(BINDServerRunner, self).__init__() self.config = config self.process = None def setUp(self): super(BINDServerRunner, self).setUp() self._start() def is_running(self): """Is the BIND server process still running?""" if self.process is None: return False else: return self.process.poll() is None def _spawn(self): """Spawn the BIND server process.""" env = dict(os.environ, HOME=self.config.homedir) with open(self.config.log_file, "wb") as log_file: with open(os.devnull, "rb") as devnull: self.process = subprocess.Popen( [self.config.named_file, "-f", "-c", self.config.conf_file], stdin=devnull, stdout=log_file, stderr=log_file, close_fds=True, cwd=self.config.homedir, env=env, preexec_fn=preexec_fn) self.addCleanup(self._stop) # Keep the log_file open for reading so that we can still get the log # even if the log is deleted. open_log_file = open(self.config.log_file, "rb") self.addDetail( os.path.basename(self.config.log_file), Content(UTF8_TEXT, lambda: open_log_file)) def rndc(self, command): """Executes a ``rndc`` command and returns status.""" if isinstance(command, unicode): command = (command,) ctl = subprocess.Popen( (self.RNDC_PATH, "-c", self.config.rndcconf_file) + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=preexec_fn) outstr, errstr = ctl.communicate() return outstr, errstr def is_server_running(self): """Checks that the BIND server is up and running.""" outdata, errdata = self.rndc("status") return "server is up and running" in outdata def _start(self): """Start the BIND server.""" self._spawn() # Wait for the server to come up: stop when the process is dead, or # the timeout expires, or the server responds. timeout = time.time() + 15 while time.time() < timeout and self.is_running(): if self.is_server_running(): break time.sleep(0.3) else: raise Exception( "Timeout waiting for BIND server to start: log in %r." % (self.config.log_file,)) def _request_stop(self): outstr, errstr = self.rndc("stop") if outstr: self.addDetail('stop-out', Content(UTF8_TEXT, lambda: [outstr])) if errstr: self.addDetail('stop-err', Content(UTF8_TEXT, lambda: [errstr])) def _stop(self): """Stop the running server. Normally called by cleanups.""" self._request_stop() self.process.wait() class BINDServer(fixtures.Fixture): """A BIND server fixture. When setup a BIND instance will be running. :ivar config: The `BINDServerResources` used to start the server. 
""" def __init__(self, config=None): super(BINDServer, self).__init__() self.config = config def setUp(self): super(BINDServer, self).setUp() if self.config is None: self.config = BINDServerResources() self.useFixture(self.config) self.runner = BINDServerRunner(self.config) self.useFixture(self.runner) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Run a BIND server.') parser.add_argument( '--homedir', help=( "A directory where to put all the files the BIND" "server needs (configuration files and executable)" )) parser.add_argument( '--log-file', help="The log file allocated for the server") parser.add_argument( '--port', type=int, help="The port that will be used by BIND") parser.add_argument( '--rndc-port', type=int, help="The rndc port that will be used by BIND") parser.add_argument( '--overwrite-config', action='store_true', help="Whether or not to overwrite the configuration files " "if they already exist", default=False) parser.add_argument( '--create-config-only', action='store_true', help="If set, only create the config files instead of " "also running the service [default: %(default)s].", default=False) arguments = parser.parse_args() ensure_dir(arguments.homedir) # Create BINDServerResources with the provided options. resources = BINDServerResources( homedir=arguments.homedir, log_file=arguments.log_file, port=arguments.port, rndc_port=arguments.rndc_port) resources.setUp(overwrite_config=arguments.overwrite_config) # exec named. if not arguments.create_config_only: os.execlp( resources.named_file, resources.named_file, "-g", "-c", resources.conf_file) maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/boot_images.py0000644000000000000000000000460513056115004023743 0ustar 00000000000000# Copyright 2012-2014 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test helpers for boot-image parameters.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'make_boot_image_params', ] from maastesting.factory import factory def make_boot_image_params(): """Create an arbitrary dict of boot-image parameters. These are the parameters that together describe a kind of boot for which we may need a kernel and initrd: operating system, architecture, sub-architecture, Ubuntu release, boot purpose, and release label. """ return dict( osystem=factory.make_name('osystem'), architecture=factory.make_name('architecture'), subarchitecture=factory.make_name('subarchitecture'), release=factory.make_name('release'), label=factory.make_name('label'), purpose=factory.make_name('purpose'), supported_subarches=factory.make_name("sup_subarches"), ) def make_boot_image_storage_params(): """Create a dict of boot-image parameters as used to store the image. These are the parameters that together describe a path to store a boot image: operating system, architecture, sub-architecture, Ubuntu release, and release label. """ return dict( osystem=factory.make_name('osystem'), architecture=factory.make_name('architecture'), subarchitecture=factory.make_name('subarchitecture'), release=factory.make_name('release'), label=factory.make_name('label'), ) def make_image(params, purpose, metadata=None, xinstall_path=None, xinstall_type=None): """Describe an image as a dict similar to what `list_boot_images` returns. The `params` are as returned from `make_boot_image_storage_params`. 
""" image = params.copy() image['purpose'] = purpose if metadata is not None: image.update(metadata) if purpose == 'xinstall': if xinstall_path is None: xinstall_path = 'root-tgz' if xinstall_type is None: xinstall_type = 'tgz' image['xinstall_path'] = xinstall_path image['xinstall_type'] = xinstall_type else: image['xinstall_path'] = '' image['xinstall_type'] = '' return image maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/config.py0000644000000000000000000001006113056115004022711 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Fixtures for working with local configuration in the cluster.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "BootSourcesFixture", "ClusterConfigurationFixture", "ConfigFixtureBase", "ConfigurationFixtureBase", ] from os import path from fixtures import ( EnvironmentVariableFixture, Fixture, ) from maastesting.fixtures import TempDirectory from provisioningserver.config import ( BootSources, ClusterConfiguration, ) import yaml class ConfigFixtureBase(Fixture): """Base class for creating configuration testing fixtures. Subclass this to create a fixture class that'll help with testing configuration schemas. :cvar schema: A subclass of :class:`provisioningserver.config.ConfigBase`. """ schema = None # Customise this in subclasses. def __init__(self, config=None, name="config.yaml"): super(ConfigFixtureBase, self).__init__() self.config = {} if config is None else config self.name = name def setUp(self): super(ConfigFixtureBase, self).setUp() # Create a real configuration file, and populate it. self.dir = self.useFixture(TempDirectory()).path self.filename = path.join(self.dir, self.name) with open(self.filename, "wb") as stream: yaml.safe_dump(self.config, stream=stream) # Export this filename to the environment, so that subprocesses will # pick up this configuration. Define the new environment as an # instance variable so that users of this fixture can use this to # extend custom subprocess environments. self.environ = {self.schema.envvar: self.filename} for name, value in self.environ.items(): self.useFixture(EnvironmentVariableFixture(name, value)) class BootSourcesFixture(ConfigFixtureBase): """Fixture to substitute for :class:`BootSources` in tests. :ivar sources: A list of dicts defining boot sources. :ivar name: Base name for the file that will hold the YAML representation of `sources`. It will be in a temporary directory. :ivar filename: Full path to the YAML file. """ schema = BootSources def __init__(self, sources=None, name='sources.yaml'): super(BootSourcesFixture, self).__init__(config=sources, name=name) class ConfigurationFixtureBase(Fixture): """Base class for new-style configuration testing fixtures. Subclass this to create a fixture class that'll help with testing new-style configuration objects. :cvar configuration: A subclass of :class:`provisioningserver.config.Configuration`. """ configuration = None # Customise this in subclasses. def __init__(self, **options): super(ConfigurationFixtureBase, self).__init__() self.options = options def setUp(self): super(ConfigurationFixtureBase, self).setUp() # Create a real configuration file, and populate it. 
        self.path = path.join(
            self.useFixture(TempDirectory()).path,
            path.basename(self.configuration.DEFAULT_FILENAME))
        with self.configuration.open_for_update(self.path) as config:
            for key, value in self.options.viewitems():
                setattr(config, key, value)
        # Export this filename to the environment, so that subprocesses
        # will pick up this configuration. Define the new environment as
        # an instance variable so that users of this fixture can use
        # this to extend custom subprocess environments.
        self.environ = {self.configuration.envvar: self.path}
        for name, value in self.environ.items():
            self.useFixture(EnvironmentVariableFixture(name, value))


class ClusterConfigurationFixture(ConfigurationFixtureBase):
    """Fixture to configure local cluster settings in tests."""

    configuration = ClusterConfiguration
maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/events.py0000644000000000000000000000147513056115004022761 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Test helpers for `provisioningserver.events`."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

str = None

__metaclass__ = type
__all__ = [
    "EventTypesAllRegistered",
    ]

from fixtures import Fixture
from provisioningserver import events


class EventTypesAllRegistered(Fixture):
    """Pretend that all event types are registered.

    This prevents `RegisterEventType` calls.
    """

    def setUp(self):
        super(EventTypesAllRegistered, self).setUp()
        types_registered = events.nodeEventHub._types_registered
        types_registered.update(events.EVENT_DETAILS)
        self.addCleanup(types_registered.clear)
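
# For illustration only -- typical use in a test case:
#
#   self.useFixture(EventTypesAllRegistered())
#   # nodeEventHub now treats every event type as registered, so the
#   # code under test makes no RegisterEventType RPC calls.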
"""Utilities for testing operating systems-related code.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'FakeOS', ] from maastesting.factory import factory from provisioningserver.drivers.osystem import ( OperatingSystem, OperatingSystemRegistry, ) class FakeOS(OperatingSystem): name = "" title = "" def __init__(self, name, purpose, releases=None): self.name = name self.title = name self.purpose = purpose if releases is None: self.fake_list = [ factory.make_string() for _ in range(3) ] else: self.fake_list = releases def get_boot_image_purposes(self, *args): return self.purpose def is_release_supported(self, release): return release in self.fake_list def get_supported_releases(self): return self.fake_list def get_default_release(self): return self.fake_list[0] def get_release_title(self, release): return release def make_osystem(testcase, osystem, purpose): """Makes the operating system class and registers it.""" if osystem not in OperatingSystemRegistry: fake = FakeOS(osystem, purpose) OperatingSystemRegistry.register_item(fake.name, fake) testcase.addCleanup( OperatingSystemRegistry.unregister_item, osystem) return fake else: obj = OperatingSystemRegistry[osystem] old_func = obj.get_boot_image_purposes testcase.patch(obj, 'get_boot_image_purposes').return_value = purpose def reset_func(obj, old_func): obj.get_boot_image_purposes = old_func testcase.addCleanup(reset_func, obj, old_func) return obj maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/testcase.py0000644000000000000000000000230013056115004023254 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Provisioningserver-specific test-case classes.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'PservTestCase', ] from maastesting import testcase from twisted.internet import reactor from twisted.python import threadable class PservTestCase(testcase.MAASTestCase): def register_as_io_thread(self): """Make the current thread the IO thread. When pretending to be the reactor, by using clocks and suchlike, register the current thread as the reactor thread, a.k.a. the IO thread, to ensure correct operation from things like the `synchronous` and `asynchronous` decorators. Do not use this when the reactor is running. """ self.assertFalse( reactor.running, "Do not use this to change the IO thread " "while the reactor is running.") self.addCleanup(setattr, threadable, "ioThread", threadable.ioThread) threadable.ioThread = threadable.getThreadID() maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/tests/0000755000000000000000000000000013056115004022236 5ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/tests/__init__.py0000644000000000000000000000000013056115004024335 0ustar 00000000000000maas-1.9.5+bzr4599.orig/src/provisioningserver/testing/tests/test_bindfixture.py0000644000000000000000000001170213056115004026173 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Tests for the BIND fixture.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from subprocess import check_output from maastesting.testcase import MAASTestCase from provisioningserver.testing.bindfixture import ( BINDServer, BINDServerResources, ) from testtools.matchers import ( Contains, FileContains, FileExists, Not, ) from testtools.testcase import gather_details def dig_call(port=53, server='127.0.0.1', commands=None): """Call `dig` with the given command. Note that calling dig without a command will perform an NS query for "." (the root) which is useful to check if there is a running server. :param port: Port of the queried DNS server (defaults to 53). :param server: IP address of the queried DNS server (defaults to '127.0.0.1'). :param commands: List of dig commands to run (defaults to None which will perform an NS query for "." (the root)). :return: The output as a string. :rtype: unicode """ # The time and tries below are high so that tests pass in environments # that are much slower than the average developer's machine, so beware # before lowering. Many Bothans died to discover these parameters. cmd = [ 'dig', '+time=10', '+tries=5', '@%s' % server, '-p', '%d' % port] if commands is not None: if not isinstance(commands, list): commands = (commands, ) cmd.extend(commands) return check_output(cmd).strip() class TestBINDFixture(MAASTestCase): def test_start_check_shutdown(self): # The fixture correctly starts and stops BIND. with BINDServer() as fixture: try: result = dig_call(fixture.config.port) self.assertIn("Got answer", result) except Exception: # self.useFixture() is not being used because we want to # handle the fixture's lifecycle, so we must also be # responsible for propagating fixture details. gather_details(fixture.getDetails(), self.getDetails()) raise self.assertFalse(fixture.runner.is_running()) def test_config(self): # The configuration can be passed in. config = BINDServerResources() fixture = self.useFixture(BINDServer(config)) self.assertIs(config, fixture.config) class TestBINDServerResources(MAASTestCase): def test_defaults(self): with BINDServerResources() as resources: self.assertIsInstance(resources.port, int) self.assertIsInstance(resources.rndc_port, int) self.assertIsInstance(resources.homedir, unicode) self.assertIsInstance(resources.log_file, unicode) self.assertIs(resources.include_in_options, None) self.assertIsInstance(resources.named_file, unicode) self.assertIsInstance(resources.conf_file, unicode) self.assertIsInstance( resources.rndcconf_file, unicode) def test_setUp_copies_executable(self): with BINDServerResources() as resources: self.assertThat(resources.named_file, FileExists()) def test_setUp_creates_config_files(self): with BINDServerResources() as resources: self.assertThat( resources.conf_file, FileContains(matcher=Contains( b'listen-on port %s' % resources.port))) self.assertThat( resources.rndcconf_file, FileContains(matcher=Contains( b'default-port %s' % ( resources.rndc_port)))) # This should ideally be in its own test but it's here to cut # test run time. See test_setUp_honours_include_in_options() # as its counterpart. 
self.assertThat( resources.conf_file, Not(FileContains(matcher=Contains( "forwarders")))) def test_setUp_honours_include_in_options(self): forwarders = "forwarders { 1.2.3.4; };" with BINDServerResources(include_in_options=forwarders) as resources: expected_in_file = ( resources.homedir + '/' + forwarders).encode("ascii") self.assertThat( resources.conf_file, FileContains(matcher=Contains( expected_in_file))) def test_defaults_reallocated_after_teardown(self): seen_homedirs = set() resources = BINDServerResources() for _ in range(2): with resources: self.assertTrue(os.path.exists(resources.homedir)) self.assertNotIn(resources.homedir, seen_homedirs) seen_homedirs.add(resources.homedir) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/__init__.py0000644000000000000000000000050513056115004022672 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_cluster_config_command.py0000644000000000000000000002146113056115004026702 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for configuration update code.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from argparse import ArgumentParser from exceptions import SystemExit from itertools import combinations import pprint import StringIO import uuid from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import ( Mock, patch, ) from provisioningserver import cluster_config_command from provisioningserver.config import ( ClusterConfiguration, UUID_NOT_SET, ) from provisioningserver.testing.config import ClusterConfigurationFixture from testtools import ExpectedException class TestAddArguments(MAASTestCase): def test_accepts_all_args(self): all_test_arguments = cluster_config_command.all_arguments default_arg_values = { '--region-url': None, '--uuid': None, '--init': False, '--tftp-port': None, '--tftp-root': None, } failures = {} # Try all cardinalities of combinations of arguments for r in range(len(all_test_arguments) + 1): for test_arg_names in combinations(all_test_arguments, r): test_values = { '--region-url': factory.make_simple_http_url(), '--uuid': unicode(uuid.uuid4()), '--init': '', '--tftp-port': unicode(factory.pick_port()), '--tftp-root': factory.make_string(), } # Build a query dictionary for the given combination of args args_under_test = [] for param_name in test_arg_names: args_under_test.append(param_name) if param_name != '--init': args_under_test.append(test_values[param_name]) parser = ArgumentParser() cluster_config_command.add_arguments(parser) # If both init and uuid are passed, argparse will generate # a nice ArgumentError exception, which unfortunately, # gets caught and sent to exit. 
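# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the subset-testing
# pattern used by test_accepts_all_args below, reduced to a throwaway
# two-option parser. All names are hypothetical.
def example_combinations_pattern():
    from argparse import ArgumentParser
    from itertools import combinations
    options = {'--alpha': '1', '--beta': '2'}
    for r in range(len(options) + 1):
        for chosen in combinations(sorted(options), r):
            parser = ArgumentParser()
            for name in options:
                parser.add_argument(name)
            argv = [token for name in chosen
                    for token in (name, options[name])]
            args = parser.parse_args(argv)
            # Options left out of `chosen` fall back to their default.
            for name in options:
                expected = options[name] if name in chosen else None
                assert getattr(args, name.lstrip('-')) == expected
# ---------------------------------------------------------------------------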
if '--init' in test_arg_names and '--uuid' in test_arg_names: expected_exception = ExpectedException(SystemExit, '2') with expected_exception, patch('sys.stderr'): parser.parse_known_args(args_under_test) else: # Otherwise, parse args with defaults as usual observed_args = vars( parser.parse_args(args_under_test)) expected_args = {} for param_name in all_test_arguments: parsed_param_name = param_name[2:].replace('-', '_') if param_name not in test_arg_names: expected_args[parsed_param_name] = \ default_arg_values[param_name] else: expected_args[parsed_param_name] = \ observed_args[parsed_param_name] if expected_args != observed_args: failures[unicode(test_arg_names)] = { 'expected_args': expected_args, 'observed_args': observed_args, } error_message = StringIO.StringIO() error_message.write( "One or more key / value argument list(s) " "passed in the query string (expected_args) " "to the API do not match the values in " "the returned query string. This " "means that some arguments were " "dropped / added / changed by " "the function, which is incorrect " "behavior. The list of incorrect " "arguments is as follows: \n") pp = pprint.PrettyPrinter(depth=3, stream=error_message) pp.pprint(failures) self.assertDictEqual({}, failures, error_message.getvalue()) class TestUpdateMaasClusterConf(MAASTestCase): def setUp(self): super(TestUpdateMaasClusterConf, self).setUp() self.useFixture(ClusterConfigurationFixture()) def make_args(self, **kwargs): args = Mock() args.__dict__.update(kwargs) return args def test_config_set_maas_url_sets_url(self): expected = factory.make_simple_http_url() cluster_config_command.run(self.make_args(region_url=expected)) with ClusterConfiguration.open() as config: observed = config.maas_url self.assertEqual(expected, observed) def test_config_set_maas_url_without_setting_does_nothing(self): with ClusterConfiguration.open() as config: expected = config.maas_url cluster_config_command.run(self.make_args(region_url=None)) with ClusterConfiguration.open() as config: observed = config.maas_url self.assertEqual(expected, observed) def test_config_set_cluster_uuid_sets_cluster_uuid(self): expected = unicode(uuid.uuid4()) cluster_config_command.run(self.make_args(uuid=expected)) with ClusterConfiguration.open() as config: observed = config.cluster_uuid self.assertEqual(expected, observed) def get_parsed_uuid_from_config(self): with ClusterConfiguration.open() as config: observed = config.cluster_uuid try: parsed_observed = unicode(uuid.UUID(observed)) except ValueError: parsed_observed = None return (parsed_observed, observed) def test_config_set_cluster_uuid_without_setting_does_nothing(self): expected_previous_value = unicode(uuid.uuid4()) with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = expected_previous_value with ClusterConfiguration.open() as config: observed_previous_value = config.cluster_uuid self.assertEqual(expected_previous_value, observed_previous_value) cluster_config_command.run(self.make_args(uuid=None)) parsed_observed, observed = self.get_parsed_uuid_from_config() self.assertEqual(parsed_observed, observed) self.assertEqual(parsed_observed, expected_previous_value) def test_config_init_creates_initial_cluster_id(self): with ClusterConfiguration.open() as config: observed_default = config.cluster_uuid self.assertEqual(UUID_NOT_SET, observed_default) cluster_config_command.run(self.make_args(init=True)) expected, observed = self.get_parsed_uuid_from_config() self.assertEqual(expected, observed) def
test_config_init_when_already_configured_does_nothing(self): expected_previous_value = unicode(uuid.uuid4()) with ClusterConfiguration.open_for_update() as config: config.cluster_uuid = expected_previous_value with ClusterConfiguration.open() as config: observed_previous_value = config.cluster_uuid self.assertEqual(expected_previous_value, observed_previous_value) cluster_config_command.run(self.make_args(init=True)) parsed_observed, observed = self.get_parsed_uuid_from_config() self.assertEqual(parsed_observed, observed) self.assertEqual(parsed_observed, expected_previous_value) def test_config_set_tftp_port_sets_tftp_port(self): expected = factory.pick_port() cluster_config_command.run(self.make_args(tftp_port=expected)) with ClusterConfiguration.open() as config: observed = config.tftp_port self.assertEqual(expected, observed) def test_config_set_tftp_port_without_setting_does_nothing(self): with ClusterConfiguration.open() as config: expected = config.tftp_port cluster_config_command.run(self.make_args(tftp_port=None)) with ClusterConfiguration.open() as config: observed = config.tftp_port self.assertEqual(expected, observed) def test_config_set_tftp_port_sets_tftp_root(self): expected = self.make_dir() cluster_config_command.run(self.make_args(tftp_root=expected)) with ClusterConfiguration.open() as config: observed = config.tftp_root self.assertEqual(expected, observed) def test_config_set_tftp_root_without_setting_does_nothing(self): with ClusterConfiguration.open() as config: expected = config.tftp_root cluster_config_command.run(self.make_args(tftp_root=None)) with ClusterConfiguration.open() as config: observed = config.tftp_root self.assertEqual(expected, observed) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_config.py0000644000000000000000000010727513056115004023453 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
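# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): make_args() above
# uses a Mock as an argparse-style namespace so that unset options still
# resolve. An equivalent with argparse.Namespace makes the defaults
# explicit; the default values mirror those listed in TestAddArguments.
def example_make_args(**kwargs):
    from argparse import Namespace
    defaults = {'region_url': None, 'uuid': None, 'init': False,
                'tftp_port': None, 'tftp_root': None}
    defaults.update(kwargs)
    return Namespace(**defaults)
# ---------------------------------------------------------------------------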
"""Tests for provisioning configuration.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import contextlib from operator import ( delitem, methodcaller, setitem, ) import os.path import random import re import sqlite3 from uuid import uuid4 from fixtures import EnvironmentVariableFixture import formencode import formencode.validators from formencode.validators import Invalid from maastesting.factory import factory from maastesting.fixtures import ImportErrorFixture from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.config import ( ClusterConfiguration, Configuration, ConfigurationDatabase, ConfigurationFile, ConfigurationImmutable, ConfigurationMeta, ConfigurationOption, Directory, ExtendedURL, is_dev_environment, UUID, ) from provisioningserver.path import get_path from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.utils.fs import RunLock from testtools import ExpectedException from testtools.matchers import ( Equals, FileContains, FileExists, Is, MatchesStructure, ) from twisted.python.filepath import FilePath import yaml class TestUUID(MAASTestCase): """Tests for `Directory`.""" def test__validation_succeeds_when_uuid_is_good(self): uuid = unicode(uuid4()) validator = UUID(accept_python=False) self.assertEqual(uuid, validator.from_python(uuid)) self.assertEqual(uuid, validator.to_python(uuid)) def test__validation_fails_when_uuid_is_bad(self): uuid = unicode(uuid4()) + "can't-be-a-uuid" validator = UUID(accept_python=False) expected_exception = ExpectedException( formencode.validators.Invalid, "^%s$" % re.escape( "%r Failed to parse UUID" % uuid)) with expected_exception: validator.from_python(uuid) with expected_exception: validator.to_python(uuid) class TestDirectory(MAASTestCase): """Tests for `Directory`.""" def test__validation_succeeds_when_directory_exists(self): directory = self.make_dir() validator = Directory(accept_python=False) self.assertEqual(directory, validator.from_python(directory)) self.assertEqual(directory, validator.to_python(directory)) def test__validation_fails_when_directory_does_not_exist(self): directory = os.path.join(self.make_dir(), "not-here") validator = Directory(accept_python=False) expected_exception = ExpectedException( formencode.validators.Invalid, "^%s$" % re.escape( "%r does not exist or is not a directory" % directory)) with expected_exception: validator.from_python(directory) with expected_exception: validator.to_python(directory) class TestExtendedURL(MAASTestCase): def setUp(self): super(TestExtendedURL, self).setUp() self.validator = ExtendedURL( require_tld=False, accept_python=False) def test_takes_numbers_anywhere(self): # Could use factory.make_string() here, as it contains # digits, but this is a little bit more explicit and # clear to troubleshoot. 
hostname = '%dstart' % random.randint(0, 9) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) hostname = 'mid%ddle' % random.randint(0, 9) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) hostname = 'end%d' % random.randint(0, 9) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) def test_takes_hyphen_but_not_start_or_end(self): # Reject leading hyphen hostname = '-start' url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Allow hyphens in the middle hostname = 'mid-dle' url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Reject trailing hyphen hostname = 'end-' url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) def test_allows_hostnames_as_short_as_a_single_char(self): # Single digit hostname = unicode(random.randint(0, 9)) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Single char hostname = factory.make_string(1) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Reject single hyphen hostname = '-' url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) def test_allows_hostnames_up_to_63_chars_long(self): max_length = 63 # Allow 63 chars hostname = factory.make_string(max_length) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Reject 64 chars hostname = factory.make_string(max_length + 1) url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) def test_allows_domain_names_up_to_63_chars_long(self): max_length = 63 # Allow 63 chars without hyphen hostname = '%s.example.com' % factory.make_string(max_length) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Reject 64 chars without hyphen hostname = '%s.example.com' % factory.make_string(max_length + 1) url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Allow 63 chars with hyphen hyphen_loc = random.randint(1, max_length - 1) name = factory.make_string(max_length - 1) hname = name[:hyphen_loc] + '-' + name[hyphen_loc:] hostname = '%s.example.com' % (hname) url = factory.make_simple_http_url(netloc=hostname) self.assertEqual(url, self.validator.to_python(url), "url: %s" % url) # Reject 64 chars with hyphen hyphen_loc = random.randint(1, max_length) name = factory.make_string(max_length) hname = name[:hyphen_loc] + '-' + name[hyphen_loc:] hostname = '%s.example.com' % (hname) url = factory.make_simple_http_url(netloc=hostname) with ExpectedException(Invalid, 'That is not a valid URL'): self.assertEqual(url, self.validator.to_python(url), "url: %s" % url)
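# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the hostname rules
# pinned down by TestExtendedURL match the classic DNS label constraints
# (1-63 characters per dot-separated label, alphanumeric with interior
# hyphens only). A standalone checker for a single label:
import re as _re

def example_is_valid_label(label):
    # One alphanumeric char, or alphanumerics at both ends with up to 61
    # alphanumeric/hyphen chars between, i.e. 63 chars maximum.
    return _re.match(
        r'^[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?$', label) is not None

# example_is_valid_label('mid-dle') -> True
# example_is_valid_label('-start') and example_is_valid_label('a' * 64) -> False
# ---------------------------------------------------------------------------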
############################################################################### # New configuration API follows. ############################################################################### class ExampleConfiguration(Configuration): """An example configuration object. It derives from :class:`ConfigurationBase` and has a metaclass derived from :class:`ConfigurationMeta`, just as a "real" configuration object must. """ class __metaclass__(ConfigurationMeta): envvar = "MAAS_TESTING_SETTINGS" default = get_path("example.db") backend = None # Define this in sub-classes. something = ConfigurationOption( "something", "Something alright, don't know what, just something.", formencode.validators.IPAddress(if_missing=sentinel.missing)) class ExampleConfigurationForDatabase(ExampleConfiguration): """An example configuration object using an SQLite3 database.""" backend = ConfigurationDatabase class ExampleConfigurationForFile(ExampleConfiguration): """An example configuration object using a file.""" backend = ConfigurationFile class TestConfigurationMeta(MAASTestCase): """Tests for `ConfigurationMeta`.""" scenarios = ( ("db", dict(example_configuration=ExampleConfigurationForDatabase)), ("file", dict(example_configuration=ExampleConfigurationForFile)), ) def setUp(self): super(TestConfigurationMeta, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def set_envvar(self, filepath=None): """Set the env. variable named by `ExampleConfiguration.envvar".""" self.useFixture(EnvironmentVariableFixture( self.example_configuration.envvar, filepath)) def test_gets_filename_from_environment(self): dummy_filename = factory.make_name("config") self.set_envvar(dummy_filename) self.assertEqual( dummy_filename, self.example_configuration.DEFAULT_FILENAME) def test_falls_back_to_default(self): self.set_envvar(None) self.assertEqual( get_path(self.example_configuration.default), self.example_configuration.DEFAULT_FILENAME) def test_set(self): dummy_filename = factory.make_name("config") self.example_configuration.DEFAULT_FILENAME = dummy_filename self.assertEqual( dummy_filename, self.example_configuration.DEFAULT_FILENAME) def test_delete(self): self.set_envvar(None) example_file = factory.make_name("config") self.example_configuration.DEFAULT_FILENAME = example_file del self.example_configuration.DEFAULT_FILENAME self.assertEqual( get_path(self.example_configuration.default), self.example_configuration.DEFAULT_FILENAME) # The delete does not fail when called multiple times. del self.example_configuration.DEFAULT_FILENAME class TestConfiguration(MAASTestCase): """Tests for `Configuration`. The most interesting tests that exercise `Configuration` are actually in `TestConfigurationOption`. """ def test_create(self): config = Configuration({}) self.assertEqual({}, config.store) def test_cannot_set_attributes(self): config = Configuration({}) expected_exception = ExpectedException( AttributeError, "^'Configuration' object has no attribute 'foo'$") with expected_exception: config.foo = "bar" def test_open_uses_backend_as_context_manager(self): config_file = self.make_file() backend = self.patch(ExampleConfiguration, "backend") with ExampleConfiguration.open(config_file) as config: # The backend was opened using open() too. self.assertThat(backend.open, MockCalledOnceWith(config_file)) # The object returned from backend.open() has been used as the # context manager, providing `config`. 
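# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): defining a further
# option follows the ExampleConfiguration pattern above -- a name, a
# docstring, and a formencode validator that guards writes. The option
# below is hypothetical.
class ExampleConfigurationWithPort(ExampleConfigurationForFile):
    example_port = ConfigurationOption(
        "example_port", "A TCP port; writes that fail validation raise.",
        formencode.validators.Int(min=1, max=65535, if_missing=8080))
# ---------------------------------------------------------------------------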
backend_ctx = backend.open.return_value self.assertThat(config.store, Is( backend_ctx.__enter__.return_value)) # We're within the context, as expected. self.assertThat(backend_ctx.__exit__, MockNotCalled()) # The backend context has also been exited. self.assertThat( backend_ctx.__exit__, MockCalledOnceWith(None, None, None)) def test_open_for_update_uses_backend_as_context_manager(self): config_file = self.make_file() backend = self.patch(ExampleConfiguration, "backend") with ExampleConfiguration.open_for_update(config_file) as config: # The backend was opened using open_for_update() too. self.assertThat( backend.open_for_update, MockCalledOnceWith(config_file)) # The object returned from backend.open_for_update() has been used # as the context manager, providing `config`. backend_ctx = backend.open_for_update.return_value self.assertThat(config.store, Is( backend_ctx.__enter__.return_value)) # We're within the context, as expected. self.assertThat(backend_ctx.__exit__, MockNotCalled()) # The backend context has also been exited. self.assertThat( backend_ctx.__exit__, MockCalledOnceWith(None, None, None)) class TestConfigurationOption(MAASTestCase): """Tests for `ConfigurationOption`.""" scenarios = ( ("db", dict(make_store=methodcaller("make_database_store"))), ("file", dict(make_store=methodcaller("make_file_store"))), ) def make_database_store(self): database = sqlite3.connect(":memory:") self.addCleanup(database.close) return ConfigurationDatabase(database, mutable=True) def make_file_store(self): return ConfigurationFile(self.make_file(), mutable=True) def make_config(self): store = self.make_store(self) return ExampleConfiguration(store) def test_getting_something(self): config = self.make_config() self.assertIs(sentinel.missing, config.something) def test_getting_something_is_not_validated(self): # The value in the database is trusted. config = self.make_config() example_value = factory.make_name('not-an-ip-address') config.store[config.__class__.something.name] = example_value self.assertEqual(example_value, config.something) def test_setting_something(self): config = self.make_config() example_value = factory.make_ipv4_address() config.something = example_value self.assertEqual(example_value, config.something) def test_setting_something_is_validated(self): config = self.make_config() with ExpectedException(formencode.validators.Invalid): config.something = factory.make_name("not-an-ip-address") def test_deleting_something(self): config = self.make_config() config.something = factory.make_ipv4_address() del config.something self.assertIs(sentinel.missing, config.something) class TestConfigurationDatabase(MAASTestCase): """Tests for `ConfigurationDatabase`.""" def test_init(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database) with config.cursor() as cursor: # The "configuration" table has been created. self.assertEqual( cursor.execute( "SELECT COUNT(*) FROM sqlite_master" " WHERE type = 'table'" " AND name = 'configuration'").fetchone(), (1,)) def test_configuration_pristine(self): # A pristine configuration has no entries. 
database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database) self.assertSetEqual(set(), set(config)) def test_adding_configuration_option(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=True) config["alice"] = {"abc": 123} self.assertEqual({"alice"}, set(config)) self.assertEqual({"abc": 123}, config["alice"]) def test_replacing_configuration_option(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=True) config["alice"] = {"abc": 123} config["alice"] = {"def": 456} self.assertEqual({"alice"}, set(config)) self.assertEqual({"def": 456}, config["alice"]) def test_getting_configuration_option(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=True) config["alice"] = {"abc": 123} self.assertEqual({"abc": 123}, config["alice"]) def test_getting_non_existent_configuration_option(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database) self.assertRaises(KeyError, lambda: config["alice"]) def test_removing_configuration_option(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=True) config["alice"] = {"abc": 123} del config["alice"] self.assertEqual(set(), set(config)) def test_open_and_close(self): # ConfigurationDatabase.open() returns a context manager that closes # the database on exit. config_file = os.path.join(self.make_dir(), "config") config = ConfigurationDatabase.open_for_update(config_file) self.assertIsInstance(config, contextlib.GeneratorContextManager) with config as config: self.assertIsInstance(config, ConfigurationDatabase) with config.cursor() as cursor: self.assertEqual( (1,), cursor.execute("SELECT 1").fetchone()) self.assertRaises(sqlite3.ProgrammingError, config.cursor) def test_open_permissions_new_database(self): # ConfigurationDatabase.open() applies restrictive file permissions to # newly created configuration databases. config_file = os.path.join(self.make_dir(), "config") with ConfigurationDatabase.open(config_file): perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r-----", perms.shorthand()) def test_open_permissions_existing_database(self): # ConfigurationDatabase.open() leaves the file permissions of existing # configuration databases. config_file = os.path.join(self.make_dir(), "config") open(config_file, "wb").close() # touch. os.chmod(config_file, 0o644) # u=rw,go=r with ConfigurationDatabase.open(config_file): perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r--r--", perms.shorthand()) def test_opened_database_commits_on_exit(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") config_value = factory.make_name("value") with ConfigurationDatabase.open_for_update(config_file) as config: config[config_key] = config_value with ConfigurationDatabase.open(config_file) as config: self.assertEqual(config_value, config[config_key]) def test_opened_database_rolls_back_on_unclean_exit(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") config_value = factory.make_name("value") exception_type = factory.make_exception_type() # Set a configuration option, then crash. with ExpectedException(exception_type): with ConfigurationDatabase.open_for_update(config_file) as config: config[config_key] = config_value raise exception_type() # No value has been saved for `config_key`. 
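# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the dict-like
# SQLite persistence exercised above, reduced to its essentials. JSON is
# used for the value column here; the real backend's serialisation may
# differ.
import json as _json
import sqlite3 as _sqlite3

def example_store_roundtrip():
    db = _sqlite3.connect(":memory:")
    db.execute(
        "CREATE TABLE IF NOT EXISTS configuration "
        "(id INTEGER PRIMARY KEY, name TEXT NOT NULL UNIQUE, data BLOB)")
    db.execute(
        "INSERT OR REPLACE INTO configuration (name, data) VALUES (?, ?)",
        ("alice", _json.dumps({"abc": 123})))
    row = db.execute(
        "SELECT data FROM configuration WHERE name = ?",
        ("alice",)).fetchone()
    return _json.loads(row[0])  # -> {'abc': 123}
# ---------------------------------------------------------------------------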
with ConfigurationDatabase.open(config_file) as config: self.assertRaises(KeyError, lambda: config[config_key]) def test_as_string(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database) self.assertThat(unicode(config), Equals( "ConfigurationDatabase(main=:memory:)")) class TestConfigurationDatabaseMutability(MAASTestCase): """Tests for `ConfigurationDatabase` mutability.""" def test_immutable(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=False) self.assertRaises(ConfigurationImmutable, setitem, config, "alice", 1) self.assertRaises(ConfigurationImmutable, delitem, config, "alice") def test_mutable(self): database = sqlite3.connect(":memory:") config = ConfigurationDatabase(database, mutable=True) config["alice"] = 1234 del config["alice"] def test_open_yields_immutable_backend(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") with ConfigurationDatabase.open(config_file) as config: with ExpectedException(ConfigurationImmutable): config[config_key] = factory.make_name("value") with ExpectedException(ConfigurationImmutable): del config[config_key] def test_open_for_update_yields_mutable_backend(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") with ConfigurationDatabase.open_for_update(config_file) as config: config[config_key] = factory.make_name("value") del config[config_key] class TestConfigurationFile(MAASTestCase): """Tests for `ConfigurationFile`.""" def test_configuration_pristine(self): # A pristine configuration has no entries. config = ConfigurationFile(sentinel.filename) self.assertThat( config, MatchesStructure.byEquality( config={}, dirty=False, path=sentinel.filename)) def test_adding_configuration_option(self): config = ConfigurationFile(sentinel.filename, mutable=True) config["alice"] = {"abc": 123} self.assertEqual({"alice"}, set(config)) self.assertEqual({"abc": 123}, config["alice"]) self.assertTrue(config.dirty) def test_replacing_configuration_option(self): config = ConfigurationFile(sentinel.filename, mutable=True) config["alice"] = {"abc": 123} config["alice"] = {"def": 456} self.assertEqual({"alice"}, set(config)) self.assertEqual({"def": 456}, config["alice"]) self.assertTrue(config.dirty) def test_getting_configuration_option(self): config = ConfigurationFile(sentinel.filename, mutable=True) config["alice"] = {"abc": 123} self.assertEqual({"abc": 123}, config["alice"]) def test_getting_non_existent_configuration_option(self): config = ConfigurationFile(sentinel.filename) self.assertRaises(KeyError, lambda: config["alice"]) def test_removing_configuration_option(self): config = ConfigurationFile(sentinel.filename, mutable=True) config["alice"] = {"abc": 123} del config["alice"] self.assertEqual(set(), set(config)) self.assertTrue(config.dirty) def test_load_non_existent_file_crashes(self): config_file = os.path.join(self.make_dir(), "config") config = ConfigurationFile(config_file) self.assertRaises(IOError, config.load) def test_load_empty_file_results_in_empty_config(self): config_file = os.path.join(self.make_dir(), "config") with open(config_file, "wb"): pass # Write nothing to the file. 
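# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the mutability
# contract tested above is a simple guard flag checked before every write.
# A minimal rendition of the idea:
class ExampleGuardedStore(dict):
    """Raises ConfigurationImmutable on writes unless opened mutable."""

    def __init__(self, mutable=False):
        super(ExampleGuardedStore, self).__init__()
        self.mutable = mutable

    def __setitem__(self, name, value):
        if not self.mutable:
            raise ConfigurationImmutable(
                "store is read-only; use open_for_update()")
        super(ExampleGuardedStore, self).__setitem__(name, value)

    def __delitem__(self, name):
        if not self.mutable:
            raise ConfigurationImmutable(
                "store is read-only; use open_for_update()")
        super(ExampleGuardedStore, self).__delitem__(name)
# ---------------------------------------------------------------------------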
config = ConfigurationFile(config_file) config.load() self.assertItemsEqual(set(config), set()) def test_load_file_with_non_mapping_crashes(self): config_file = os.path.join(self.make_dir(), "config") with open(config_file, "wb") as fd: yaml.safe_dump([1, 2, 3], stream=fd) config = ConfigurationFile(config_file) error = self.assertRaises(ValueError, config.load) self.assertDocTestMatches( "Configuration in /.../config is not a mapping: [1, 2, 3]", unicode(error)) def test_open_and_close(self): # ConfigurationFile.open() returns a context manager. config_file = os.path.join(self.make_dir(), "config") config_ctx = ConfigurationFile.open(config_file) self.assertIsInstance(config_ctx, contextlib.GeneratorContextManager) with config_ctx as config: self.assertIsInstance(config, ConfigurationFile) self.assertThat(config_file, FileExists()) self.assertEqual({}, config.config) self.assertFalse(config.dirty) self.assertThat(config_file, FileContains("")) def test_open_permissions_new_database(self): # ConfigurationFile.open() applies restrictive file permissions to # newly created configuration databases. config_file = os.path.join(self.make_dir(), "config") with ConfigurationFile.open(config_file): perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r-----", perms.shorthand()) def test_unmodified_database_retains_permissions(self): # ConfigurationFile.open() leaves the file permissions of existing # configuration databases if they're not modified. config_file = os.path.join(self.make_dir(), "config") open(config_file, "wb").close() # touch. os.chmod(config_file, 0o644) # u=rw,go=r with ConfigurationFile.open_for_update(config_file): perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r--r--", perms.shorthand()) perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r--r--", perms.shorthand()) def test_modified_database_retains_permissions(self): # ConfigurationFile.open() leaves the file permissions of existing # configuration databases if they're modified. config_file = os.path.join(self.make_dir(), "config") open(config_file, "wb").close() # touch. os.chmod(config_file, 0o644) # u=rw,go=r with ConfigurationFile.open_for_update(config_file) as config: perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r--r--", perms.shorthand()) config["foobar"] = "I am a modification" perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r--r--", perms.shorthand()) def test_modified_database_uses_safe_permissions_if_file_missing(self): # ConfigurationFile.open() uses a sensible u=rw,g=r file mode when # saving if the database file has been inexplicably removed. This is # the same mode as used when opening a new database. config_file = os.path.join(self.make_dir(), "config") open(config_file, "wb").close() # touch. os.chmod(config_file, 0o644) # u=rw,go=r with ConfigurationFile.open_for_update(config_file) as config: config["foobar"] = "I am a modification" os.unlink(config_file) perms = FilePath(config_file).getPermissions() self.assertEqual("rw-r-----", perms.shorthand()) def test_opened_configuration_file_saves_on_exit(self): # ConfigurationFile.open() returns a context manager that will save an # updated configuration on a clean exit. 
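# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): "rw-r-----" in the
# permission assertions above is mode 0o640. Creating a file that starts
# life with that mode, instead of chmod-ing it afterwards (note the process
# umask may clear further bits):
import os as _os

def example_create_restricted(path):
    fd = _os.open(path, _os.O_WRONLY | _os.O_CREAT | _os.O_EXCL, 0o640)
    _os.close(fd)
    return path
# ---------------------------------------------------------------------------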
config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") config_value = factory.make_name("value") with ConfigurationFile.open_for_update(config_file) as config: config[config_key] = config_value self.assertEqual({config_key: config_value}, config.config) self.assertTrue(config.dirty) with ConfigurationFile.open(config_file) as config: self.assertEqual(config_value, config[config_key]) def test_opened_configuration_file_does_not_save_on_unclean_exit(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") config_value = factory.make_name("value") exception_type = factory.make_exception_type() # Set a configuration option, then crash. with ExpectedException(exception_type): with ConfigurationFile.open_for_update(config_file) as config: config[config_key] = config_value raise exception_type() # No value has been saved for `config_key`. with ConfigurationFile.open(config_file) as config: self.assertRaises(KeyError, lambda: config[config_key]) def test_open_takes_exclusive_lock(self): config_file = os.path.join(self.make_dir(), "config") config_lock = RunLock(config_file) self.assertFalse(config_lock.is_locked()) with ConfigurationFile.open_for_update(config_file): self.assertTrue(config_lock.is_locked()) self.assertFalse(config_lock.is_locked()) def test_as_string(self): config_file = os.path.join(self.make_dir(), "config") config = ConfigurationFile(config_file) self.assertThat(unicode(config), Equals( "ConfigurationFile(%r)" % config_file)) class TestConfigurationFileMutability(MAASTestCase): """Tests for `ConfigurationFile` mutability.""" def test_immutable(self): config_file = os.path.join(self.make_dir(), "config") config = ConfigurationFile(config_file, mutable=False) self.assertRaises(ConfigurationImmutable, setitem, config, "alice", 1) self.assertRaises(ConfigurationImmutable, delitem, config, "alice") def test_mutable(self): config_file = os.path.join(self.make_dir(), "config") config = ConfigurationFile(config_file, mutable=True) config["alice"] = 1234 del config["alice"] def test_open_yields_immutable_backend(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") with ConfigurationFile.open(config_file) as config: with ExpectedException(ConfigurationImmutable): config[config_key] = factory.make_name("value") with ExpectedException(ConfigurationImmutable): del config[config_key] def test_open_for_update_yields_mutable_backend(self): config_file = os.path.join(self.make_dir(), "config") config_key = factory.make_name("key") with ConfigurationFile.open_for_update(config_file) as config: config[config_key] = factory.make_name("value") del config[config_key] class TestClusterConfiguration(MAASTestCase): """Tests for `ClusterConfiguration`.""" def test_default_maas_url(self): config = ClusterConfiguration({}) self.assertEqual("http://localhost:5240/MAAS", config.maas_url) def test_set_and_get_maas_url(self): config = ClusterConfiguration({}) example_url = factory.make_simple_http_url() config.maas_url = example_url self.assertEqual(example_url, config.maas_url) # It's also stored in the configuration database. 
self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_hostnames(self): config = ClusterConfiguration({}) example_url = factory.make_simple_http_url() config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_very_short_hostnames(self): config = ClusterConfiguration({}) example_url = factory.make_simple_http_url( netloc=factory.make_string(size=1)) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_ipv6_addresses(self): config = ClusterConfiguration({}) example_url = factory.make_simple_http_url( netloc=factory.make_ipv6_address()) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_set_maas_url_accepts_ipv6_addresses_with_brackets(self): config = ClusterConfiguration({}) example_url = factory.make_simple_http_url( netloc="[%s]" % factory.make_ipv6_address()) config.maas_url = example_url self.assertEqual(example_url, config.maas_url) self.assertEqual({"maas_url": example_url}, config.store) def test_default_tftp_port(self): config = ClusterConfiguration({}) self.assertEqual(69, config.tftp_port) def test_set_and_get_tftp_port(self): config = ClusterConfiguration({}) example_port = factory.pick_port() config.tftp_port = example_port self.assertEqual(example_port, config.tftp_port) # It's also stored in the configuration database. self.assertEqual({"tftp_port": example_port}, config.store) def test_default_tftp_root(self): maas_root = os.getenv("MAAS_ROOT") self.assertIsNotNone(maas_root) config = ClusterConfiguration({}) self.assertEqual( os.path.join(maas_root, "var/lib/maas/boot-resources/current"), config.tftp_root) def test_set_and_get_tftp_root(self): config = ClusterConfiguration({}) example_dir = self.make_dir() config.tftp_root = example_dir self.assertEqual(example_dir, config.tftp_root) # It's also stored in the configuration database. self.assertEqual({"tftp_root": example_dir}, config.store) def test_default_cluster_uuid(self): config = ClusterConfiguration({}) self.assertEqual("** UUID NOT SET **", config.cluster_uuid) def test_set_and_get_cluster_uuid(self): example_uuid = uuid4() config = ClusterConfiguration({}) config.cluster_uuid = example_uuid self.assertEqual(unicode(example_uuid), config.cluster_uuid) # It's also stored in the configuration database. 
self.assertEqual({"cluster_uuid": unicode(example_uuid)}, config.store) class TestClusterConfigurationTFTPGeneratorURL(MAASTestCase): """Tests for `ClusterConfiguration.tftp_generator_url`.""" def test__is_relative_to_maas_url(self): random_url = factory.make_simple_http_url() self.useFixture(ClusterConfigurationFixture(maas_url=random_url)) with ClusterConfiguration.open() as configuration: self.assertEqual( random_url + "/api/1.0/pxeconfig/", configuration.tftp_generator_url) def test__strips_trailing_slashes_from_maas_url(self): random_url = factory.make_simple_http_url(path="foobar/") self.useFixture(ClusterConfigurationFixture(maas_url=random_url)) with ClusterConfiguration.open() as configuration: self.assertEqual( random_url.rstrip("/") + "/api/1.0/pxeconfig/", configuration.tftp_generator_url) class TestClusterConfigurationGRUBRoot(MAASTestCase): """Tests for `ClusterConfiguration.grub_root`.""" def test__is_relative_to_tftp_root_without_trailing_slash(self): random_dir = self.make_dir().rstrip("/") self.useFixture(ClusterConfigurationFixture(tftp_root=random_dir)) with ClusterConfiguration.open() as configuration: self.assertEqual(random_dir + "/grub", configuration.grub_root) def test__is_relative_to_tftp_root_with_trailing_slash(self): random_dir = self.make_dir().rstrip("/") + "/" self.useFixture(ClusterConfigurationFixture(tftp_root=random_dir)) with ClusterConfiguration.open() as configuration: self.assertEqual(random_dir + "grub", configuration.grub_root) class TestConfig(MAASTestCase): """Tests for `maasserver.config`.""" def test_is_dev_environment_returns_false(self): self.useFixture(ImportErrorFixture('maastesting', 'root')) self.assertFalse(is_dev_environment()) def test_is_dev_environment_returns_true(self): self.assertTrue(is_dev_environment()) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_diskless.py0000644000000000000000000004721313056115004024022 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for creating disks for diskless booting.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import os from textwrap import dedent from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver import diskless from provisioningserver.config import ClusterConfiguration from provisioningserver.diskless import ( compose_diskless_link_path, compose_diskless_tgt_config, compose_source_path, create_diskless_disk, create_diskless_link, delete_diskless_disk, delete_diskless_link, DisklessError, get_diskless_driver, get_diskless_store, get_diskless_target, get_diskless_tgt_path, read_diskless_link, reload_diskless_tgt, tgt_entry, update_diskless_tgt, ) from provisioningserver.drivers.diskless import DisklessDriverRegistry from provisioningserver.drivers.diskless.tests.test_base import ( make_diskless_driver, ) from provisioningserver.drivers.osystem import ( BOOT_IMAGE_PURPOSE, OperatingSystemRegistry, ) from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.testing.os import FakeOS from provisioningserver.utils.testing import RegistryFixture from testtools.matchers import ( FileExists, Not, ) class DisklessTestMixin: """Helper mixin for diskless tests. 
Uses the RegistryFixture so the provisioningserver registry is empty. """ def setUp(self): super(DisklessTestMixin, self).setUp() self.useFixture(ClusterConfigurationFixture()) # Ensure the global registry is empty for each test run. self.useFixture(RegistryFixture()) def configure_resource_storage(self): resource_dir = self.make_dir() os.mkdir(os.path.join(resource_dir, 'diskless')) current_dir = os.path.join(resource_dir, 'current') + '/' os.mkdir(current_dir) with ClusterConfiguration.open_for_update() as config: config.tftp_root = current_dir return resource_dir def configure_diskless_storage(self): storage_dir = self.make_dir() self.patch(diskless, 'get_diskless_store').return_value = storage_dir return storage_dir def configure_compose_source_path(self, path=None): if path is None: path = self.make_file() self.patch(diskless, 'compose_source_path').return_value = path return path def make_usable_osystem_with_release(self, purposes=None): os_name = factory.make_name('os') release_name = factory.make_name('release') if purposes is None: purposes = [BOOT_IMAGE_PURPOSE.DISKLESS] osystem = FakeOS( os_name, purposes, releases=[release_name]) OperatingSystemRegistry.register_item(os_name, osystem) return os_name, release_name def make_usable_diskless_driver(self, name=None, description=None, settings=None): driver = make_diskless_driver( name=name, description=description, settings=settings) DisklessDriverRegistry.register_item(driver.name, driver) return driver def patch_reload_diskless_tgt(self): """Stops `reload_diskless_tgt` from running.""" self.patch(diskless, 'reload_diskless_tgt') class TestHelpers(DisklessTestMixin, MAASTestCase): def test_get_diskless_store(self): storage_dir = self.make_dir() current_dir = os.path.join(storage_dir, 'current') + '/' os.mkdir(current_dir) with ClusterConfiguration.open_for_update() as config: config.tftp_root = current_dir self.assertEqual( os.path.join(storage_dir, 'diskless', 'store'), get_diskless_store()) def test_compose_diskless_link_path(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() self.assertEqual( os.path.join(storage_dir, system_id), compose_diskless_link_path(system_id)) def test_create_diskless_link_creates_link(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() link_path = factory.make_name('link_path') create_diskless_link(system_id, link_path) self.assertEqual( link_path, os.readlink(os.path.join(storage_dir, system_id))) def test_create_diskless_link_error_on_already_exists(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() factory.make_file(storage_dir, system_id) self.assertRaises( DisklessError, create_diskless_link, system_id, 'link_path') def test_create_diskless_link_uses_lexists(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() mock_lexists = self.patch(os.path, 'lexists') mock_lexists.return_value = False create_diskless_link(system_id, factory.make_name('link_path')) self.assertThat( mock_lexists, MockCalledOnceWith(os.path.join(storage_dir, system_id))) def test_delete_diskless_link_deletes_link(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() factory.make_file(storage_dir, system_id) delete_diskless_link(system_id) self.assertThat( os.path.join(storage_dir, system_id), Not(FileExists())) def test_delete_diskless_link_uses_lexists(self): system_id = factory.make_name('system_id') 
storage_dir = self.configure_diskless_storage() mock_lexists = self.patch(os.path, 'lexists') mock_lexists.return_value = False delete_diskless_link(system_id) self.assertThat( mock_lexists, MockCalledOnceWith(os.path.join(storage_dir, system_id))) def test_read_diskless_link_returns_link_path(self): system_id = factory.make_name('system_id') self.configure_diskless_storage() link_path = factory.make_name('link_path') create_diskless_link(system_id, link_path) self.assertEqual(link_path, read_diskless_link(system_id)) def test_read_diskless_link_uses_lexists(self): system_id = factory.make_name('system_id') storage_dir = self.configure_diskless_storage() mock_lexists = self.patch(os.path, 'lexists') mock_lexists.return_value = False read_diskless_link(system_id) self.assertThat( mock_lexists, MockCalledOnceWith(os.path.join(storage_dir, system_id))) def test_get_diskless_driver_returns_driver(self): driver = self.make_usable_diskless_driver() self.assertEqual(driver, get_diskless_driver(driver.name)) def test_get_diskless_driver_errors_on_missing_driver(self): invalid_name = factory.make_name('invalid_driver') self.assertRaises(DisklessError, get_diskless_driver, invalid_name) class TestTgtHelpers(DisklessTestMixin, MAASTestCase): def test_get_diskless_target(self): system_id = factory.make_name('system_id') self.assertEqual( 'iqn.2004-05.com.ubuntu:maas:root-diskless-%s' % system_id, get_diskless_target(system_id)) def test_get_diskless_tgt_path(self): storage_dir = self.configure_resource_storage() self.assertEqual( os.path.join(storage_dir, 'diskless', 'maas-diskless.tgt'), get_diskless_tgt_path()) def test_tgt_entry(self): system_id = factory.make_name('system_id') image_path = factory.make_name('image_path') expected_entry = dedent("""\ <target iqn.2004-05.com.ubuntu:maas:root-diskless-{system_id}> readonly 0 backing-store "{image}" driver iscsi </target> """).format(system_id=system_id, image=image_path) self.assertEqual( expected_entry, tgt_entry(system_id, image_path)) def test_compose_diskless_tgt_config(self): storage_dir = self.configure_diskless_storage() system_ids = [factory.make_name('system_id') for _ in range(3)] tgt_entries = [] for system_id in system_ids: factory.make_file(storage_dir, system_id) tgt_entries.append( tgt_entry(system_id, os.path.join(storage_dir, system_id))) tgt_output = compose_diskless_tgt_config() self.assertItemsEqual( tgt_entries, [ '%s\n' % entry for entry in tgt_output.split('\n') if entry != "" ]) def test_reload_diskless_tgt(self): tgt_path = factory.make_name('tgt_path') self.patch(diskless, 'get_diskless_tgt_path').return_value = tgt_path mock_call = self.patch(diskless, 'call_and_check') reload_diskless_tgt() self.assertThat( mock_call, MockCalledOnceWith([ 'sudo', '/usr/sbin/tgt-admin', '--conf', tgt_path, '--update', 'ALL', ])) def test_update_diskless_tgt_calls_atomic_write(self): tgt_path = factory.make_name('tgt_path') self.patch( diskless, 'get_diskless_tgt_path').return_value = tgt_path tgt_config = factory.make_name('tgt_config') self.patch( diskless, 'compose_diskless_tgt_config').return_value = tgt_config mock_write = self.patch(diskless, 'atomic_write') self.patch_reload_diskless_tgt() update_diskless_tgt() self.assertThat( mock_write, MockCalledOnceWith(tgt_config, tgt_path, mode=0o644)) class TestComposeSourcePath(DisklessTestMixin, MAASTestCase): def test__raises_error_on_missing_os_from_registry(self): self.assertRaises( DisklessError, compose_source_path, factory.make_name('osystem'), sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) def
test__raises_error_when_os_doesnt_support_diskless(self): osystem, release = self.make_usable_osystem_with_release( purposes=[BOOT_IMAGE_PURPOSE.XINSTALL]) self.assertRaises( DisklessError, compose_source_path, osystem, sentinel.arch, sentinel.subarch, release, sentinel.label) def test__returns_valid_path(self): os_name, release = self.make_usable_osystem_with_release() arch = factory.make_name('arch') subarch = factory.make_name('subarch') label = factory.make_name('label') root_path = factory.make_name('root_path') osystem = OperatingSystemRegistry[os_name] mock_xi_params = self.patch(osystem, 'get_xinstall_parameters') mock_xi_params.return_value = (root_path, 'tgz') with ClusterConfiguration.open_for_update() as config: tftp_root = config.tftp_root self.assertEqual( os.path.join( tftp_root, os_name, arch, subarch, release, label, root_path), compose_source_path(os_name, arch, subarch, release, label)) class TestCreateDisklessDisk(DisklessTestMixin, MAASTestCase): def test__raises_error_on_doesnt_exist_source_path(self): self.configure_compose_source_path(factory.make_name('invalid_path')) self.assertRaises( DisklessError, create_diskless_disk, sentinel.driver, sentinel.driver_options, sentinel.system_id, sentinel.osystem, sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) def test__raises_error_on_link_already_exists(self): self.configure_diskless_storage() self.configure_compose_source_path() system_id = factory.make_name('system_id') create_diskless_link(system_id, factory.make_name('link_path')) self.assertRaises( DisklessError, create_diskless_disk, sentinel.driver, sentinel.driver_options, system_id, sentinel.osystem, sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) def test__calls_create_disk_on_driver(self): self.patch_reload_diskless_tgt() self.configure_resource_storage() self.configure_diskless_storage() source_path = self.configure_compose_source_path() driver = self.make_usable_diskless_driver() mock_create = self.patch(driver, 'create_disk') mock_create.return_value = self.make_file() system_id = factory.make_name('system_id') driver_options = { factory.make_name('arg'): factory.make_name('value') for _ in range(3) } create_diskless_disk( driver.name, driver_options, system_id, sentinel.osystem, sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) self.assertThat( mock_create, MockCalledOnceWith(system_id, source_path, **driver_options)) def test__errors_when_driver_create_disk_returns_None(self): self.patch_reload_diskless_tgt() self.configure_resource_storage() self.configure_diskless_storage() self.configure_compose_source_path() driver = self.make_usable_diskless_driver() mock_create = self.patch(driver, 'create_disk') mock_create.return_value = None system_id = factory.make_name('system_id') self.assertRaises( DisklessError, create_diskless_disk, driver.name, {}, system_id, sentinel.osystem, sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) def test__errors_when_driver_create_disk_returns_invalid_path(self): self.patch_reload_diskless_tgt() self.configure_resource_storage() self.configure_diskless_storage() self.configure_compose_source_path() driver = self.make_usable_diskless_driver() mock_create = self.patch(driver, 'create_disk') mock_create.return_value = factory.make_name('invalid_path') system_id = factory.make_name('system_id') self.assertRaises( DisklessError, create_diskless_disk, driver.name, {}, system_id, sentinel.osystem, sentinel.arch, sentinel.subarch, sentinel.release, sentinel.label) 
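# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source):
# test__returns_valid_path above pins down the on-disk layout that
# compose_source_path produces; the same join written out by hand, with a
# hypothetical example result in the comment:
import os as _os

def example_source_path(tftp_root, osystem, arch, subarch, release, label,
                        root_path):
    # e.g. <tftp_root>/ubuntu/amd64/generic/trusty/release/root-tgz
    return _os.path.join(
        tftp_root, osystem, arch, subarch, release, label, root_path)
# ---------------------------------------------------------------------------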
def test__creates_diskless_link(self):
        self.patch_reload_diskless_tgt()
        self.configure_resource_storage()
        self.configure_diskless_storage()
        self.configure_compose_source_path()
        driver = self.make_usable_diskless_driver()
        create_file = self.make_file()
        mock_create = self.patch(driver, 'create_disk')
        mock_create.return_value = create_file
        system_id = factory.make_name('system_id')
        create_diskless_disk(
            driver.name, {}, system_id, sentinel.osystem, sentinel.arch,
            sentinel.subarch, sentinel.release, sentinel.label)
        self.assertEqual(create_file, read_diskless_link(system_id))

    def test__calls_update_diskless_tgt(self):
        self.configure_resource_storage()
        self.configure_diskless_storage()
        self.configure_compose_source_path()
        driver = self.make_usable_diskless_driver()
        mock_create = self.patch(driver, 'create_disk')
        mock_create.return_value = self.make_file()
        system_id = factory.make_name('system_id')
        mock_update_tgt = self.patch(diskless, 'update_diskless_tgt')
        create_diskless_disk(
            driver.name, {}, system_id, sentinel.osystem, sentinel.arch,
            sentinel.subarch, sentinel.release, sentinel.label)
        self.assertThat(mock_update_tgt, MockCalledOnceWith())


class TestDeleteDisklessDisk(DisklessTestMixin, MAASTestCase):

    def test__exits_early_on_missing_link(self):
        self.configure_diskless_storage()
        system_id = factory.make_name('system_id')
        # If read_diskless_link were called, delete did not exit early.
        mock_read_link = self.patch(diskless, 'read_diskless_link')
        delete_diskless_disk(
            sentinel.driver, sentinel.driver_options, system_id)
        self.assertThat(mock_read_link, MockNotCalled())

    def test__checks_for_link_using_lexists(self):
        self.configure_diskless_storage()
        system_id = factory.make_name('system_id')
        mock_lexists = self.patch(os.path, 'lexists')
        mock_lexists.return_value = False
        delete_diskless_disk(
            sentinel.driver, sentinel.driver_options, system_id)
        self.assertThat(
            mock_lexists,
            MockCalledOnceWith(compose_diskless_link_path(system_id)))

    def test__raises_error_if_read_diskless_link_returns_None(self):
        self.configure_diskless_storage()
        system_id = factory.make_name('system_id')
        create_diskless_link(system_id, factory.make_name('link'))
        self.patch(diskless, 'read_diskless_link').return_value = None
        self.assertRaises(
            DisklessError, delete_diskless_disk,
            sentinel.driver, sentinel.driver_options, system_id)

    def test__calls_delete_disk_on_driver_when_link_points_to_valid_path(self):
        self.patch_reload_diskless_tgt()
        self.configure_resource_storage()
        self.configure_diskless_storage()
        system_id = factory.make_name('system_id')
        link_path = self.make_file()
        create_diskless_link(system_id, link_path)
        driver = self.make_usable_diskless_driver()
        mock_delete = self.patch(driver, 'delete_disk')
        driver_options = {
            factory.make_name('arg'): factory.make_name('value')
            for _ in range(3)
        }
        delete_diskless_disk(driver.name, driver_options, system_id)
        self.assertThat(
            mock_delete,
            MockCalledOnceWith(system_id, link_path, **driver_options))

    def test__doesnt_call_delete_disk_on_driver_when_link_is_invalid(self):
        self.patch_reload_diskless_tgt()
        self.configure_resource_storage()
        self.configure_diskless_storage()
        system_id = factory.make_name('system_id')
        create_diskless_link(system_id, factory.make_name('link'))
        driver = self.make_usable_diskless_driver()
        mock_delete = self.patch(driver, 'delete_disk')
        delete_diskless_disk(driver.name, {}, system_id)
        self.assertThat(mock_delete, MockNotCalled())

    def test__deletes_diskless_link(self):
        self.patch_reload_diskless_tgt()
        self.configure_resource_storage()
        storage_dir = 
self.configure_diskless_storage() system_id = factory.make_name('system_id') create_diskless_link(system_id, self.make_file()) driver = self.make_usable_diskless_driver() self.patch(driver, 'delete_disk') delete_diskless_disk(driver.name, {}, system_id) self.assertThat( os.path.join(storage_dir, system_id), Not(FileExists())) def test__calls_update_diskless_tgt(self): self.configure_resource_storage() self.configure_diskless_storage() system_id = factory.make_name('system_id') create_diskless_link(system_id, self.make_file()) driver = self.make_usable_diskless_driver() self.patch(driver, 'delete_disk') mock_update_tgt = self.patch(diskless, 'update_diskless_tgt') delete_diskless_disk(driver.name, {}, system_id) self.assertThat(mock_update_tgt, MockCalledOnceWith()) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_events.py0000644000000000000000000002374613056115004023512 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test event catalog.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ ] import random from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnce, MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) from mock import ( ANY, sentinel, ) from provisioningserver.events import ( EVENT_DETAILS, EVENT_TYPES, EventDetail, NodeEventHub, nodeEventHub, send_event_node, send_event_node_mac_address, ) from provisioningserver.rpc import region from provisioningserver.rpc.exceptions import ( NoSuchEventType, NoSuchNode, ) from provisioningserver.rpc.testing import MockLiveClusterToRegionRPCFixture from provisioningserver.utils.enum import map_enum from testtools import ExpectedException from testtools.matchers import ( AllMatch, Equals, HasLength, Is, IsInstance, ) from twisted.internet.defer import ( fail, inlineCallbacks, succeed, ) class TestEvents(MAASTestCase): def test_every_event_has_details(self): all_events = map_enum(EVENT_TYPES) self.assertItemsEqual(all_events.values(), EVENT_DETAILS) self.assertThat( EVENT_DETAILS.values(), AllMatch(IsInstance(EventDetail))) class TestSendEventNode(MAASTestCase): """Tests for `send_event_node`.""" def test__calls_singleton_hub_logByID_directly(self): self.patch_autospec(nodeEventHub, "logByID").return_value = sentinel.d result = send_event_node( sentinel.event_type, sentinel.system_id, sentinel.hostname, sentinel.description) self.assertThat(result, Is(sentinel.d)) self.assertThat(nodeEventHub.logByID, MockCalledOnceWith( sentinel.event_type, sentinel.system_id, sentinel.description)) class TestSendEventNodeMACAddress(MAASTestCase): """Tests for `send_event_node_mac_address`.""" def test__calls_singleton_hub_logByMAC_directly(self): self.patch_autospec(nodeEventHub, "logByMAC").return_value = sentinel.d result = send_event_node_mac_address( sentinel.event_type, sentinel.mac_address, sentinel.description) self.assertThat(result, Is(sentinel.d)) self.assertThat(nodeEventHub.logByMAC, MockCalledOnceWith( sentinel.event_type, sentinel.mac_address, sentinel.description)) class TestNodeEventHubLogByID(MAASTestCase): """Tests for `NodeEventHub.logByID`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def patch_rpc_methods(self, side_effect=None): fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop( 
region.SendEvent, region.RegisterEventType) protocol.SendEvent.side_effect = side_effect return protocol, connecting @inlineCallbacks def test__event_is_sent_to_region(self): protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) system_id = factory.make_name('system_id') description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) yield NodeEventHub().logByID(event_name, system_id, description) self.assertThat( protocol.SendEvent, MockCalledOnceWith( ANY, type_name=event_name, system_id=system_id, description=description)) @inlineCallbacks def test__event_type_is_registered_on_first_call_only(self): protocol, connecting = self.patch_rpc_methods( side_effect=[succeed({}), succeed({})]) self.addCleanup((yield connecting)) system_id = factory.make_name('system_id') description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) event_detail = EVENT_DETAILS[event_name] event_hub = NodeEventHub() # On the first call, the event type is registered before the log is # sent to the region. yield event_hub.logByID(event_name, system_id, description) self.assertThat( protocol.RegisterEventType, MockCalledOnceWith( ANY, name=event_name, description=event_detail.description, level=event_detail.level)) self.assertThat(protocol.SendEvent, MockCalledOnce()) # Reset RPC call handlers. protocol.RegisterEventType.reset_mock() protocol.SendEvent.reset_mock() # On the second call, the event type is known to be registered, so the # log is sent to the region immediately. yield event_hub.logByID(event_name, system_id, description) self.assertThat(protocol.RegisterEventType, MockNotCalled()) self.assertThat(protocol.SendEvent, MockCalledOnce()) @inlineCallbacks def test__updates_cache_if_event_type_not_found(self): protocol, connecting = self.patch_rpc_methods( side_effect=[succeed({}), fail(NoSuchEventType())]) self.addCleanup((yield connecting)) system_id = factory.make_name('system_id') description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) event_hub = NodeEventHub() # Fine the first time. yield event_hub.logByID(event_name, system_id, description) # The cache has been populated with the event name. self.assertThat(event_hub._types_registered, Equals({event_name})) # Second time it crashes. with ExpectedException(NoSuchEventType): yield event_hub.logByID(event_name, system_id, description) # The event has been removed from the cache. 
self.assertThat(event_hub._types_registered, HasLength(0)) class TestSendEventMACAddress(MAASTestCase): """Tests for `NodeEventHub.logByMAC`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def patch_rpc_methods(self, side_effect=None): fixture = self.useFixture(MockLiveClusterToRegionRPCFixture()) protocol, connecting = fixture.makeEventLoop( region.SendEventMACAddress, region.RegisterEventType) protocol.SendEventMACAddress.side_effect = side_effect return protocol, connecting @inlineCallbacks def test__event_is_sent_to_region(self): protocol, connecting = self.patch_rpc_methods() self.addCleanup((yield connecting)) mac_address = factory.make_mac_address() description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) yield NodeEventHub().logByMAC(event_name, mac_address, description) self.assertThat( protocol.SendEventMACAddress, MockCalledOnceWith( ANY, type_name=event_name, mac_address=mac_address, description=description)) @inlineCallbacks def test__failure_is_suppressed_if_node_not_found(self): protocol, connecting = self.patch_rpc_methods( side_effect=[fail(NoSuchNode())]) self.addCleanup((yield connecting)) mac_address = factory.make_mac_address() description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) yield NodeEventHub().logByMAC(event_name, mac_address, description) self.assertThat( protocol.SendEventMACAddress, MockCalledOnceWith( ANY, type_name=event_name, mac_address=mac_address, description=description)) @inlineCallbacks def test__event_type_is_registered_on_first_call_only(self): protocol, connecting = self.patch_rpc_methods(side_effect=[{}, {}]) self.addCleanup((yield connecting)) mac_address = factory.make_mac_address() description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) event_detail = EVENT_DETAILS[event_name] event_hub = NodeEventHub() # On the first call, the event type is registered before the log is # sent to the region. yield event_hub.logByMAC(event_name, mac_address, description) self.assertThat( protocol.RegisterEventType, MockCalledOnceWith( ANY, name=event_name, description=event_detail.description, level=event_detail.level)) self.assertThat(protocol.SendEventMACAddress, MockCalledOnce()) # Reset RPC call handlers. protocol.RegisterEventType.reset_mock() protocol.SendEventMACAddress.reset_mock() # On the second call, the event type is known to be registered, so the # log is sent to the region immediately. yield event_hub.logByMAC(event_name, mac_address, description) self.assertThat(protocol.RegisterEventType, MockNotCalled()) self.assertThat(protocol.SendEventMACAddress, MockCalledOnce()) @inlineCallbacks def test__updates_cache_if_event_type_not_found(self): protocol, connecting = self.patch_rpc_methods( side_effect=[succeed({}), fail(NoSuchEventType())]) self.addCleanup((yield connecting)) mac_address = factory.make_mac_address() description = factory.make_name('description') event_name = random.choice(map_enum(EVENT_TYPES).keys()) event_hub = NodeEventHub() # Fine the first time. yield event_hub.logByMAC(event_name, mac_address, description) # The cache has been populated with the event name. self.assertThat(event_hub._types_registered, Equals({event_name})) # Second time it crashes. with ExpectedException(NoSuchEventType): yield event_hub.logByMAC(event_name, mac_address, description) # The event has been removed from the cache. 
self.assertThat(event_hub._types_registered, HasLength(0)) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_kernel_opts.py0000644000000000000000000003042413056115004024522 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test composition of kernel command lines.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "make_kernel_parameters", ] import os from maastesting.factory import factory from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver import kernel_opts from provisioningserver.drivers import ( Architecture, ArchitectureRegistry, ) from provisioningserver.kernel_opts import ( compose_arch_opts, compose_kernel_command_line, compose_preseed_opt, CURTIN_KERNEL_CMDLINE_NAME, get_curtin_kernel_cmdline_sep, get_ephemeral_name, get_last_directory, ISCSI_TARGET_NAME_PREFIX, KernelParameters, prefix_target_name, ) from testtools.matchers import ( Contains, ContainsAll, Not, ) def make_kernel_parameters(testcase=None, **parms): """Make a randomly populated `KernelParameters` instance. If testcase is passed, we poke the generated arch/subarch into the ArchitectureRegistry and call addCleanup on the testcase to make sure it is removed after the test completes. """ parms.update( {field: factory.make_name(field) for field in KernelParameters._fields if field not in parms}) params = KernelParameters(**parms) if testcase is not None: name = "%s/%s" % (params.arch, params.subarch) if name in ArchitectureRegistry: # It's already there, no need to patch and risk overwriting # preset kernel options. return params resource = Architecture(name, name) ArchitectureRegistry.register_item(name, resource) testcase.addCleanup( ArchitectureRegistry.unregister_item, name) return params class TestUtilitiesKernelOpts(MAASTestCase): def test_get_last_directory(self): root = self.make_dir() dir1 = os.path.join(root, '20120405') dir2 = os.path.join(root, '20120105') dir3 = os.path.join(root, '20120403') os.makedirs(dir1) os.makedirs(dir2) os.makedirs(dir3) self.assertEqual(dir1, get_last_directory(root)) def test_kernel_parameters_callable(self): # KernelParameters instances are callable; an alias for _replace(). 
params = make_kernel_parameters()
        self.assertTrue(callable(params))
        self.assertIs(params._replace.im_func, params.__call__.im_func)

    def test_prefix_target_name_adds_prefix(self):
        prefix = factory.make_name('prefix')
        target = factory.make_name('tgt')
        self.patch(kernel_opts, 'ISCSI_TARGET_NAME_PREFIX', prefix)
        self.assertEqual(
            '%s:%s' % (prefix, target), prefix_target_name(target))

    def test_prefix_target_name_produces_exactly_one_separating_colon(self):
        target = factory.make_name('tgt')
        full_name = prefix_target_name(target)
        self.assertIn(':' + target, full_name)
        self.assertNotIn('::' + target, full_name)


class TestGetCurtinKernelCmdlineSep(MAASTestCase):

    def test_get_curtin_kernel_cmdline_sep_returns_curtin_value(self):
        sep = factory.make_name('separator')
        self.patch(
            kernel_opts.curtin, CURTIN_KERNEL_CMDLINE_NAME, sep)
        self.assertEqual(sep, get_curtin_kernel_cmdline_sep())

    def test_get_curtin_kernel_cmdline_sep_returns_default(self):
        original_sep = getattr(
            kernel_opts.curtin, CURTIN_KERNEL_CMDLINE_NAME,
            sentinel.missing)
        if original_sep != sentinel.missing:
            def restore_sep():
                setattr(
                    kernel_opts.curtin,
                    CURTIN_KERNEL_CMDLINE_NAME, original_sep)
            self.addCleanup(restore_sep)
            delattr(kernel_opts.curtin, CURTIN_KERNEL_CMDLINE_NAME)
        self.assertEqual('--', get_curtin_kernel_cmdline_sep())


class TestKernelOpts(MAASTestCase):

    def make_kernel_parameters(self, *args, **kwargs):
        return make_kernel_parameters(self, *args, **kwargs)

    def test_compose_kernel_command_line_includes_preseed_url(self):
        params = self.make_kernel_parameters()
        self.assertIn(
            "auto url=%s" % params.preseed_url,
            compose_kernel_command_line(params))

    def test_install_compose_kernel_command_line_includes_name_domain(self):
        params = self.make_kernel_parameters(purpose="install")
        self.assertThat(
            compose_kernel_command_line(params),
            ContainsAll([
                "hostname=%s" % params.hostname,
                "domain=%s" % params.domain,
            ]))

    def test_install_compose_kernel_command_line_omits_domain_if_omitted(self):
        params = self.make_kernel_parameters(purpose="install", domain=None)
        kernel_command_line = compose_kernel_command_line(params)
        self.assertIn("hostname=%s" % params.hostname, kernel_command_line)
        self.assertNotIn('domain=', kernel_command_line)

    def test_install_compose_kernel_command_line_includes_locale(self):
        params = self.make_kernel_parameters(purpose="install")
        locale = "en_US"
        self.assertIn(
            "locale=%s" % locale,
            compose_kernel_command_line(params))

    def test_install_compose_kernel_command_line_includes_log_settings(self):
        params = self.make_kernel_parameters(purpose="install")
        # Port 514 (UDP) is syslog.
        log_port = "514"
        self.assertThat(
            compose_kernel_command_line(params),
            ContainsAll([
                "log_host=%s" % params.log_host,
                "log_port=%s" % log_port,
            ]))

    def test_install_compose_kernel_command_line_includes_di_settings(self):
        params = self.make_kernel_parameters(purpose="install")
        self.assertThat(
            compose_kernel_command_line(params),
            Contains("text priority=critical"))

    def test_install_compose_kernel_command_line_inc_purpose_opts(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for an "install" node.
        params = self.make_kernel_parameters(purpose="install")
        self.assertIn(
            "netcfg/choose_interface=auto",
            compose_kernel_command_line(params))

    def test_xinstall_compose_kernel_command_line_inc_purpose_opts(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for a "xinstall" node.
        params = self.make_kernel_parameters(purpose="xinstall")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(
            cmdline,
            ContainsAll([
                "root=/dev/disk/by-path/ip-",
                "iscsi_initiator=",
                "overlayroot=tmpfs",
                "ip=::::%s:BOOTIF" % params.hostname]))

    def test_commissioning_compose_kernel_command_line_inc_purpose_opts(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for a "commissioning" node.
        params = self.make_kernel_parameters(purpose="commissioning")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(
            cmdline,
            ContainsAll([
                "root=/dev/disk/by-path/ip-",
                "iscsi_initiator=",
                "overlayroot=tmpfs",
                "ip=::::%s:BOOTIF" % params.hostname]))

    def test_enlist_compose_kernel_command_line_inc_purpose_opts(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for an "enlist" node.
        params = self.make_kernel_parameters(purpose="enlist")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(
            cmdline,
            ContainsAll([
                "root=/dev/disk/by-path/ip-",
                "iscsi_initiator=",
                "overlayroot=tmpfs",
                "ip=::::%s:BOOTIF" % params.hostname]))

    def test_commissioning_compose_kernel_command_line_inc_extra_opts(self):
        mock_get_curtin_sep = self.patch(
            kernel_opts, 'get_curtin_kernel_cmdline_sep')
        sep = factory.make_name('sep')
        mock_get_curtin_sep.return_value = sep
        extra_opts = "special console=ABCD -- options to pass"
        params = self.make_kernel_parameters(extra_opts=extra_opts)
        cmdline = compose_kernel_command_line(params)
        # The curtin kernel cmdline separator should appear surrounded by
        # spaces before the options, which are otherwise added verbatim.
        self.assertThat(cmdline, Contains(' %s ' % sep + extra_opts))

    def test_commissioning_compose_kernel_handles_extra_opts_None(self):
        params = self.make_kernel_parameters(extra_opts=None)
        cmdline = compose_kernel_command_line(params)
        self.assertNotIn("None", cmdline)

    def test_compose_kernel_command_line_inc_common_opts(self):
        # Test that some kernel arguments appear on commissioning, install
        # and xinstall command lines.
        expected = ["nomodeset"]

        params = self.make_kernel_parameters(
            purpose="commissioning", arch="i386")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(cmdline, ContainsAll(expected))

        params = self.make_kernel_parameters(
            purpose="xinstall", arch="i386")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(cmdline, ContainsAll(expected))

        params = self.make_kernel_parameters(
            purpose="install", arch="i386")
        cmdline = compose_kernel_command_line(params)
        self.assertThat(cmdline, ContainsAll(expected))

    def test_compose_kernel_command_line_inc_purpose_opts_xinstall_node(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for a "xinstall" node.
        params = self.make_kernel_parameters(purpose="xinstall")
        ephemeral_name = get_ephemeral_name(
            params.osystem, params.arch, params.subarch,
            params.release, params.label)
        self.assertThat(
            compose_kernel_command_line(params),
            ContainsAll([
                "iscsi_target_name=%s:%s" % (
                    ISCSI_TARGET_NAME_PREFIX, ephemeral_name),
                "iscsi_target_port=3260",
                "iscsi_target_ip=%s" % params.fs_host,
            ]))

    def test_compose_kernel_command_line_inc_purpose_opts_comm_node(self):
        # The result of compose_kernel_command_line includes the purpose
        # options for a "commissioning" node.
params = self.make_kernel_parameters(purpose="commissioning") ephemeral_name = get_ephemeral_name( params.osystem, params.arch, params.subarch, params.release, params.label) self.assertThat( compose_kernel_command_line(params), ContainsAll([ "iscsi_target_name=%s:%s" % ( ISCSI_TARGET_NAME_PREFIX, ephemeral_name), "iscsi_target_port=3260", "iscsi_target_ip=%s" % params.fs_host, ])) def test_compose_preseed_kernel_opt_returns_kernel_option(self): dummy_preseed_url = factory.make_name("url") self.assertEqual( "auto url=%s" % dummy_preseed_url, compose_preseed_opt(dummy_preseed_url)) def test_compose_kernel_command_line_inc_arm_specific_option(self): params = self.make_kernel_parameters(arch="armhf", subarch="highbank") self.assertThat( compose_kernel_command_line(params), Contains("console=ttyAMA0")) def test_compose_kernel_command_line_not_inc_arm_specific_option(self): params = self.make_kernel_parameters(arch="i386") self.assertThat( compose_kernel_command_line(params), Not(Contains("console=ttyAMA0"))) def test_compose_arch_opts_copes_with_unknown_subarch(self): # Pass a None testcase so that the architecture doesn't get # registered. params = make_kernel_parameters( testcase=None, arch=factory.make_name("arch"), subarch=factory.make_name("subarch")) self.assertEquals([], compose_arch_opts(params)) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_monkey.py0000644000000000000000000000434213056115004023477 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Test monkey patches.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ ] import sys from maastesting.testcase import MAASTestCase from mock import sentinel from provisioningserver.monkey import ( add_term_error_code_to_tftp, force_simplestreams_to_use_urllib2, ) from simplestreams import contentsource import tftp.datagram if sys.version_info > (3, 0): import urllib.request as urllib_request import urllib.error as urllib_error else: import urllib2 as urllib_request urllib_error = urllib_request class TestForceSimplestreamsToUseUrllib2Events(MAASTestCase): scenarios = ( ('URL_READER', { 'value': contentsource.Urllib2UrlReader, 'key': 'URL_READER', }), ('URL_READER_CLASSNAME', { 'value': 'Urllib2UrlReader', 'key': 'URL_READER_CLASSNAME', }), ('urllib_error', { 'value': urllib_error, 'key': 'urllib_error', }), ('urllib_request', { 'value': urllib_request, 'key': 'urllib_request', }), ) def test_replaces_urlreader_object(self): self.patch(contentsource, self.key, sentinel.pre_value) force_simplestreams_to_use_urllib2() self.assertEqual( self.value, getattr(contentsource, self.key)) class TestAddTermErrorCodeToTFT(MAASTestCase): def test_adds_error_code_8(self): self.patch(tftp.datagram, 'errors', {}) add_term_error_code_to_tftp() self.assertIn(8, tftp.datagram.errors) self.assertEqual( "Terminate transfer due to option negotiation", tftp.datagram.errors.get(8)) def test_skips_adding_error_code_if_already_present(self): self.patch(tftp.datagram, 'errors', {8: sentinel.error_8}) add_term_error_code_to_tftp() self.assertEqual( sentinel.error_8, tftp.datagram.errors.get(8)) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_network.py0000644000000000000000000002204613056115004023667 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
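# A minimal sketch (not from the MAAS source) of the data shape that the
# fakes in this module mimic; the real netifaces API returns structures of
# this form, though the exact keys can vary by platform.

from netifaces import AF_INET, AF_INET6, ifaddresses, interfaces


def collect_addresses():
    """Map each interface name to its IPv4/IPv6 address dicts.

    ifaddresses() returns a dict keyed by address family; an AF_INET entry
    looks like {'addr': '10.0.0.2', 'netmask': '255.255.255.0',
    'broadcast': '10.0.0.255'}, while AF_INET6 entries carry 'addr' and
    'netmask'. discover_networks() walks exactly this mapping, skipping
    loopback and link-local addresses.
    """
    return {
        name: {
            family: ifaddresses(name).get(family, [])
            for family in (AF_INET, AF_INET6)
        }
        for name in interfaces()
    }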
"""Tests for the `network` module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.factory import factory from maastesting.testcase import MAASTestCase from netaddr import ( IPAddress, IPNetwork, ) from netifaces import ( AF_INET, AF_INET6, ) from provisioningserver import network from provisioningserver.network import sort_networks_by_priority from testtools.matchers import ( Equals, HasLength, ) def make_inet_address(subnet=None): """Fake an `AF_INET` or `AF_INET6` address.""" if subnet is None: subnet = factory.make_ipv4_network() subnet = IPNetwork(subnet) addr = { 'netmask': unicode(subnet.netmask), 'addr': factory.pick_ip_in_network(subnet), } if subnet.version == 4: # IPv4 addresses also have a broadcast field. addr['broadcast'] = subnet.broadcast return addr def make_loopback(): """Fake a loopback AF_INET address.""" return make_inet_address(IPNetwork('127.0.0.0/8')) def make_interface(inet_address=None): """Minimally fake up an interface definition as returned by netifaces.""" if inet_address is None: inet_address = make_inet_address() addr = inet_address.get('addr') if addr is None or IPAddress(addr).version == 4: address_family = AF_INET else: address_family = AF_INET6 return {address_family: [inet_address]} class TestNetworks(MAASTestCase): def patch_netifaces(self, interfaces): """Patch up netifaces to pretend we have given `interfaces`. :param interfaces: A dict mapping each interface's name to its definition as `netifaces` would return it. """ self.patch(network, 'interfaces').return_value = interfaces.keys() self.patch( network, 'ifaddresses', lambda interface: interfaces[interface]) def test_discover_networks_ignores_interface_without_IP_address(self): self.patch_netifaces({factory.make_name('eth'): {}}) self.assertEqual([], network.discover_networks()) def test_discover_networks_ignores_IPv4_loopback(self): self.patch_netifaces({'lo': make_interface(make_loopback())}) self.assertEqual([], network.discover_networks()) def test_discover_networks_ignores_IPv6_loopback(self): self.patch_netifaces( {'lo': make_interface(make_inet_address('::1/128'))}) self.assertEqual([], network.discover_networks()) def test_discover_networks_discovers_IPv4_network(self): eth = factory.make_name('eth') interface = make_interface() self.patch_netifaces({eth: interface}) self.assertEqual([{ 'interface': eth, 'ip': interface[AF_INET][0]['addr'], 'subnet_mask': interface[AF_INET][0]['netmask'], }], network.discover_networks()) def test_discover_networks_discovers_IPv6_network(self): eth = factory.make_name('eth') addr = make_inet_address(factory.make_ipv6_network()) interface = make_interface(addr) self.patch_netifaces({eth: interface}) self.assertEqual([{ 'interface': eth, 'ip': addr['addr'], 'subnet_mask': addr['netmask'], }], network.discover_networks()) def test_discover_networks_returns_suitable_interfaces(self): eth = factory.make_name('eth') self.patch_netifaces({ eth: make_interface(), 'lo': make_interface(make_loopback()), factory.make_name('dummy'): make_interface({}), }) self.assertEqual( [eth], [ interface['interface'] for interface in network.discover_networks()]) def test_discover_networks_coalesces_networks_on_interface(self): eth = factory.make_name('eth') net = factory.make_ipv6_network() self.patch_netifaces({ eth: { AF_INET6: [ make_inet_address(net), make_inet_address(net), ], }, }) interfaces = network.discover_networks() self.assertThat(interfaces, HasLength(1)) [interface] = 
interfaces self.assertEqual(eth, interface['interface']) self.assertIn(IPAddress(interface['ip']), net) def test_discover_networks_discovers_multiple_networks_per_interface(self): eth = factory.make_name('eth') net1 = factory.make_ipv6_network() net2 = factory.make_ipv6_network(disjoint_from=[net1]) addr1 = factory.pick_ip_in_network(net1) addr2 = factory.pick_ip_in_network(net2) self.patch_netifaces({ eth: { AF_INET6: [ make_inet_address(addr1), make_inet_address(addr2), ], }, }) interfaces = network.discover_networks() self.assertThat(interfaces, HasLength(2)) self.assertEqual( [eth, eth], [interface['interface'] for interface in interfaces]) self.assertItemsEqual( [addr1, addr2], [interface['ip'] for interface in interfaces]) def test_discover_networks_discovers_IPv4_and_IPv6_on_same_interface(self): eth = factory.make_name('eth') ipv4_net = factory.make_ipv4_network() ipv6_net = factory.make_ipv6_network() ipv4_addr = factory.pick_ip_in_network(ipv4_net) ipv6_addr = factory.pick_ip_in_network(ipv6_net) self.patch_netifaces({ eth: { AF_INET: [make_inet_address(ipv4_addr)], AF_INET6: [make_inet_address(ipv6_addr)], }, }) interfaces = network.discover_networks() self.assertThat(interfaces, HasLength(2)) self.assertEqual( [eth, eth], [interface['interface'] for interface in interfaces]) self.assertItemsEqual( [ipv4_addr, ipv6_addr], [interface['ip'] for interface in interfaces]) def test_discover_networks_ignores_link_local_IPv4_addresses(self): interface = factory.make_name('eth') ip = factory.pick_ip_in_network(IPNetwork('169.254.0.0/16')) self.patch_netifaces({interface: {AF_INET: [make_inet_address(ip)]}}) self.assertEqual([], network.discover_networks()) def test_discover_networks_ignores_link_local_IPv6_addresses(self): interface = factory.make_name('eth') ip = factory.pick_ip_in_network(IPNetwork('fe80::/10')) self.patch_netifaces({interface: {AF_INET6: [make_inet_address(ip)]}}) self.assertEqual([], network.discover_networks()) def test_discover_networks_runs_in_real_life(self): interfaces = network.discover_networks() self.assertIsInstance(interfaces, list) def test_filter_unique_networks_returns_networks(self): net = network.AttachedNetwork('eth0', '10.1.1.1', '255.255.255.0') self.assertEqual([net], network.filter_unique_networks([net])) def test_filter_unique_networks_drops_redundant_networks(self): entry1 = network.AttachedNetwork('eth0', '10.1.1.1', '255.255.255.0') entry2 = network.AttachedNetwork('eth0', '10.1.1.2', '255.255.255.0') networks = network.filter_unique_networks([entry1, entry2]) self.assertThat(networks, HasLength(1)) self.assertIn(networks[0], [entry1, entry2]) def test_filter_unique_networks_orders_consistently(self): networks = [ network.AttachedNetwork('eth1', '10.1.1.1', '255.255.255.0'), network.AttachedNetwork('eth2', '10.2.2.2', '255.255.255.0'), ] self.assertEqual( network.filter_unique_networks(networks), network.filter_unique_networks(reversed(networks))) class TestSortNetworksByPriority(MAASTestCase): def test__sorts_by_type_then_ip_version(self): interfaces = [ {'ip': "2001:db8::1", 'type': "ethernet.vlan", 'interface': 'vlan40'}, {'ip': "10.0.0.1", 'type': "ethernet.vlan", 'interface': 'vlan40'}, {'ip': "2001:db8:1::1", 'type': "ethernet.physical", 'interface': 'eth1'}, {'ip': "10.0.1.1", 'type': "ethernet.physical", 'interface': 'eth1'}, {'ip': "10.0.2.1", 'type': "ethernet.bridge", 'interface': 'br0'}, ] sorted_interfaces = sort_networks_by_priority(interfaces) self.expectThat(sorted_interfaces[0], Equals(interfaces[3])) 
self.expectThat(sorted_interfaces[1], Equals(interfaces[2])) self.expectThat(sorted_interfaces[2], Equals(interfaces[4])) self.expectThat(sorted_interfaces[3], Equals(interfaces[1])) self.expectThat(sorted_interfaces[4], Equals(interfaces[0])) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_path.py0000644000000000000000000000663713056115004023142 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for filesystem paths.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from os import getcwdu import os.path from fixtures import EnvironmentVariableFixture from maastesting.factory import factory from maastesting.testcase import MAASTestCase import provisioningserver.path from provisioningserver.path import ( get_path, get_tentative_path, ) from testtools.matchers import ( DirExists, Not, StartsWith, ) class TestGetPathFunctions(MAASTestCase): """Tests for `get_path` and `get_tentative_path`.""" scenarios = ( ("get_path", { "get_path_function": get_path, "ensures_directory": True, }), ("get_tentative_path", { "get_path_function": get_tentative_path, "ensures_directory": False, }), ) def set_root(self, root_path=None): """For the duration of this test, set the `MAAS_ROOT` variable`.""" self.useFixture(EnvironmentVariableFixture('MAAS_ROOT', root_path)) def test__defaults_to_root(self): self.set_root() self.patch(provisioningserver.path, 'ensure_dir') self.assertEqual('/', self.get_path_function()) def test__appends_path_elements(self): self.set_root('/') self.patch(provisioningserver.path, 'ensure_dir') part1 = factory.make_name('dir') part2 = factory.make_name('file') self.assertEqual( os.path.join('/', part1, part2), self.get_path_function(part1, part2)) def test__obeys_MAAS_ROOT_variable(self): root = factory.make_name('/root') self.set_root(root) self.patch(provisioningserver.path, 'ensure_dir') path = factory.make_name('path') self.assertEqual( os.path.join(root, path), self.get_path_function(path)) def test__assumes_MAAS_ROOT_is_unset_if_empty(self): self.set_root("") self.patch(provisioningserver.path, 'ensure_dir') path = factory.make_name('path') self.assertEqual( os.path.join("/", path), self.get_path_function(path)) def test__returns_absolute_path(self): self.set_root('.') self.patch(provisioningserver.path, 'ensure_dir') self.assertThat(self.get_path_function(), StartsWith('/')) self.assertEqual(getcwdu(), self.get_path_function()) def test__concatenates_despite_leading_slash(self): root = self.make_dir() self.set_root(root) self.patch(provisioningserver.path, 'ensure_dir') filename = factory.make_name('file') self.assertEqual( os.path.join(root, filename), self.get_path_function('/' + filename)) def test__normalises(self): self.set_root() self.patch(provisioningserver.path, 'ensure_dir') self.assertEqual( '/foo/bar', self.get_path_function('foo///szut//..///bar//')) def test__maybe_creates_dirpath_if_not_exists(self): root_path = self.make_dir() self.set_root(root_path) self.assertThat( os.path.dirname(self.get_path_function('/foo/bar')), DirExists() if self.ensures_directory else Not(DirExists())) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_plugin.py0000644000000000000000000001610613056115004023474 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
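# A minimal sketch (not from the MAAS source) of the pattern the tests below
# use to exercise the "maasclusterd" service maker. The names are taken from
# this module's imports; outside a test environment that provides a
# ClusterConfigurationFixture, makeService() will read the real cluster
# configuration.

from provisioningserver.plugin import Options, ProvisioningServiceMaker


def build_cluster_services():
    # Parse the minimal (empty) command line that the tap accepts.
    options = Options()
    options.parseOptions([])
    # makeService() returns a Twisted MultiService whose children are
    # registered by name: dhcp_probe, image_download, lease_upload,
    # node_monitor, rpc, tftp, image_service, service_monitor.
    service_maker = ProvisioningServiceMaker("maas-pserv", "sketch")
    service = service_maker.makeService(options)
    # Individual child services can then be looked up by name.
    return service.getServiceNamed("tftp")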
"""Tests for the ``maasclusterd`` TAP.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import ( MAASTestCase, MAASTwistedRunTest, ) import provisioningserver from provisioningserver import plugin as plugin_module from provisioningserver.config import ClusterConfiguration from provisioningserver.plugin import ( Options, ProvisioningServiceMaker, ) from provisioningserver.pserv_services.dhcp_probe_service import ( DHCPProbeService, ) from provisioningserver.pserv_services.image import BootImageEndpointService from provisioningserver.pserv_services.image_download_service import ( ImageDownloadService, ) from provisioningserver.pserv_services.node_power_monitor_service import ( NodePowerMonitorService, ) from provisioningserver.pserv_services.service_monitor_service import ( ServiceMonitorService, ) from provisioningserver.pserv_services.tftp import ( TFTPBackend, TFTPService, ) from provisioningserver.testing.config import ClusterConfigurationFixture from testtools.matchers import ( AfterPreprocessing, Equals, IsInstance, MatchesAll, MatchesStructure, ) from twisted.application.service import MultiService from twisted.python.filepath import FilePath from twisted.web.server import Site class TestOptions(MAASTestCase): """Tests for `provisioningserver.plugin.Options`.""" def test_defaults(self): options = Options() expected = {"introspect": None} self.assertEqual(expected, options.defaults) def test_parse_minimal_options(self): options = Options() # The minimal set of options that must be provided. arguments = [] options.parseOptions(arguments) # No error. class TestProvisioningServiceMaker(MAASTestCase): """Tests for `provisioningserver.plugin.ProvisioningServiceMaker`.""" run_tests_with = MAASTwistedRunTest.make_factory(timeout=5) def setUp(self): super(TestProvisioningServiceMaker, self).setUp() self.useFixture(ClusterConfigurationFixture()) self.patch(provisioningserver, "services", MultiService()) self.tempdir = self.make_dir() def test_init(self): service_maker = ProvisioningServiceMaker("Harry", "Hill") self.assertEqual("Harry", service_maker.tapname) self.assertEqual("Hill", service_maker.description) def test_makeService(self): """ Only the site service is created when no options are given. 
""" options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) self.assertIsInstance(service, MultiService) expected_services = [ "dhcp_probe", "image_download", "lease_upload", "node_monitor", "rpc", "tftp", "image_service", "service_monitor", ] self.assertItemsEqual(expected_services, service.namedServices) self.assertEqual( len(service.namedServices), len(service.services), "Not all services are named.") self.assertEqual(service, provisioningserver.services) def test_makeService_patches_simplestreams(self): mock_simplestreams_patch = ( self.patch(plugin_module, 'force_simplestreams_to_use_urllib2')) options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service_maker.makeService(options) self.assertThat(mock_simplestreams_patch, MockCalledOnceWith()) def test_makeService_patches_tftp_service(self): mock_tftp_patch = ( self.patch(plugin_module, 'add_term_error_code_to_tftp')) options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service_maker.makeService(options) self.assertThat(mock_tftp_patch, MockCalledOnceWith()) def test_image_download_service(self): options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) image_service = service.getServiceNamed("image_download") self.assertIsInstance(image_service, ImageDownloadService) def test_node_monitor_service(self): options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) node_monitor = service.getServiceNamed("node_monitor") self.assertIsInstance(node_monitor, NodePowerMonitorService) def test_dhcp_probe_service(self): options = Options() service_maker = ProvisioningServiceMaker("Spike", "Milligan") service = service_maker.makeService(options) dhcp_probe = service.getServiceNamed("dhcp_probe") self.assertIsInstance(dhcp_probe, DHCPProbeService) def test_service_monitor_service(self): options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) service_monitor = service.getServiceNamed("service_monitor") self.assertIsInstance(service_monitor, ServiceMonitorService) def test_tftp_service(self): # A TFTP service is configured and added to the top-level service. 
options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) tftp_service = service.getServiceNamed("tftp") self.assertIsInstance(tftp_service, TFTPService) with ClusterConfiguration.open() as config: tftp_generator_url = config.tftp_generator_url tftp_root = config.tftp_root tftp_port = config.tftp_port expected_backend = MatchesAll( IsInstance(TFTPBackend), AfterPreprocessing( lambda backend: backend.base.path, Equals(tftp_root)), AfterPreprocessing( lambda backend: backend.generator_url.geturl(), Equals(tftp_generator_url))) self.assertThat( tftp_service, MatchesStructure( backend=expected_backend, port=Equals(tftp_port), )) def test_image_service(self): options = Options() service_maker = ProvisioningServiceMaker("Harry", "Hill") service = service_maker.makeService(options) image_service = service.getServiceNamed("image_service") self.assertIsInstance(image_service, BootImageEndpointService) self.assertIsInstance(image_service.site, Site) resource = image_service.site.resource root = resource.getChildWithDefault("images", request=None) self.assertThat(root, IsInstance(FilePath)) with ClusterConfiguration.open() as config: resource_root = FilePath(config.tftp_root) self.assertEqual(resource_root, root) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_security.py0000644000000000000000000002437413056115004024053 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for MAAS's cluster security module.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from os import ( chmod, stat, ) from os.path import dirname from fixtures import EnvironmentVariableFixture from maastesting.factory import factory from maastesting.matchers import MockCalledOnceWith from maastesting.testcase import MAASTestCase from mock import ( ANY, sentinel, ) from provisioningserver import security from provisioningserver.utils.fs import ( ensure_dir, FileLock, read_text_file, write_text_file, ) class TestGetSharedSecretFromFilesystem(MAASTestCase): def setUp(self): super(TestGetSharedSecretFromFilesystem, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def write_secret(self): secret = factory.make_bytes() secret_path = security.get_shared_secret_filesystem_path() ensure_dir(dirname(secret_path)) write_text_file(secret_path, security.to_hex(secret)) return secret def test__returns_None_when_no_secret_exists(self): self.assertIsNone(security.get_shared_secret_from_filesystem()) def test__returns_secret_when_one_exists(self): secret = self.write_secret() self.assertEqual( secret, security.get_shared_secret_from_filesystem()) def test__same_secret_is_returned_on_subsequent_calls(self): self.write_secret() self.assertEqual( security.get_shared_secret_from_filesystem(), security.get_shared_secret_from_filesystem()) def test__errors_reading_file_are_raised(self): self.write_secret() secret_path = security.get_shared_secret_filesystem_path() self.addCleanup(chmod, secret_path, 0o600) chmod(secret_path, 0o000) self.assertRaises(IOError, security.get_shared_secret_from_filesystem) def test__errors_when_filesystem_value_cannot_be_decoded(self): self.write_secret() write_text_file(security.get_shared_secret_filesystem_path(), "_") self.assertRaises( TypeError, security.get_shared_secret_from_filesystem) def 
test__deals_fine_with_whitespace_in_filesystem_value(self): secret = self.write_secret() write_text_file( security.get_shared_secret_filesystem_path(), " %s\n" % security.to_hex(secret)) self.assertEqual(secret, security.get_shared_secret_from_filesystem()) def test__reads_with_lock(self): lock = FileLock(security.get_shared_secret_filesystem_path()) self.assertFalse(lock.is_locked()) def check_lock(path): self.assertTrue(lock.is_locked()) return "12" # Two arbitrary hex characters. read_text_file = self.patch_autospec(security, "read_text_file") read_text_file.side_effect = check_lock security.get_shared_secret_from_filesystem() self.assertThat(read_text_file, MockCalledOnceWith(ANY)) self.assertFalse(lock.is_locked()) class TestSetSharedSecretOnFilesystem(MAASTestCase): def setUp(self): super(TestSetSharedSecretOnFilesystem, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def read_secret(self): secret_path = security.get_shared_secret_filesystem_path() secret_hex = read_text_file(secret_path) return security.to_bin(secret_hex) def test__writes_secret(self): secret = factory.make_bytes() security.set_shared_secret_on_filesystem(secret) self.assertEqual(secret, self.read_secret()) def test__writes_with_lock(self): lock = FileLock(security.get_shared_secret_filesystem_path()) self.assertFalse(lock.is_locked()) def check_lock(path, data): self.assertTrue(lock.is_locked()) write_text_file = self.patch_autospec(security, "write_text_file") write_text_file.side_effect = check_lock security.set_shared_secret_on_filesystem(b"foo") self.assertThat(write_text_file, MockCalledOnceWith(ANY, ANY)) self.assertFalse(lock.is_locked()) def test__writes_with_secure_permissions(self): secret = factory.make_bytes() security.set_shared_secret_on_filesystem(secret) secret_path = security.get_shared_secret_filesystem_path() perms_observed = stat(secret_path).st_mode & 0o777 perms_expected = 0o640 self.assertEqual( perms_expected, perms_observed, "Expected %04o, got %04o." % (perms_expected, perms_observed)) class TestInstallSharedSecretScript(MAASTestCase): def setUp(self): super(TestInstallSharedSecretScript, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def test__has_add_arguments(self): # It doesn't do anything, but it's there to fulfil the contract with # ActionScript/MainScript. 
security.InstallSharedSecretScript.add_arguments(sentinel.parser) self.assertIsNotNone("Obligatory assertion.") def installAndCheckExitCode(self, code): error = self.assertRaises( SystemExit, security.InstallSharedSecretScript.run, sentinel.args) self.assertEqual(code, error.code) def test__reads_secret_from_stdin(self): secret = factory.make_bytes() stdin = self.patch_autospec(security, "stdin") stdin.readline.return_value = secret.encode("hex") stdin.isatty.return_value = False self.installAndCheckExitCode(0) self.assertEqual( secret, security.get_shared_secret_from_filesystem()) def test__ignores_surrounding_whitespace_from_stdin(self): secret = factory.make_bytes() stdin = self.patch_autospec(security, "stdin") stdin.readline.return_value = " " + secret.encode("hex") + " \n" stdin.isatty.return_value = False self.installAndCheckExitCode(0) self.assertEqual( secret, security.get_shared_secret_from_filesystem()) def test__reads_secret_from_tty(self): secret = factory.make_bytes() stdin = self.patch_autospec(security, "stdin") stdin.isatty.return_value = True raw_input = self.patch(security, "raw_input") raw_input.return_value = secret.encode("hex") self.installAndCheckExitCode(0) self.assertThat( raw_input, MockCalledOnceWith("Secret (hex/base16 encoded): ")) self.assertEqual( secret, security.get_shared_secret_from_filesystem()) def test__ignores_surrounding_whitespace_from_tty(self): secret = factory.make_bytes() stdin = self.patch_autospec(security, "stdin") stdin.isatty.return_value = True raw_input = self.patch(security, "raw_input") raw_input.return_value = " " + secret.encode("hex") + " \n" self.installAndCheckExitCode(0) self.assertEqual( secret, security.get_shared_secret_from_filesystem()) def test__deals_gracefully_with_eof_from_tty(self): stdin = self.patch_autospec(security, "stdin") stdin.isatty.return_value = True raw_input = self.patch(security, "raw_input") raw_input.side_effect = EOFError() self.installAndCheckExitCode(1) self.assertIsNone( security.get_shared_secret_from_filesystem()) def test__deals_gracefully_with_interrupt_from_tty(self): stdin = self.patch_autospec(security, "stdin") stdin.isatty.return_value = True raw_input = self.patch(security, "raw_input") raw_input.side_effect = KeyboardInterrupt() self.assertRaises( KeyboardInterrupt, security.InstallSharedSecretScript.run, sentinel.args) self.assertIsNone( security.get_shared_secret_from_filesystem()) def test__prints_error_message_when_secret_cannot_be_decoded(self): stdin = self.patch_autospec(security, "stdin") stdin.readline.return_value = "garbage" stdin.isatty.return_value = False print = self.patch(security, "print") self.installAndCheckExitCode(1) self.assertThat( print, MockCalledOnceWith( "Secret could not be decoded:", "Odd-length string", file=security.stderr)) def test__prints_message_when_secret_is_installed(self): stdin = self.patch_autospec(security, "stdin") stdin.readline.return_value = factory.make_bytes().encode("hex") stdin.isatty.return_value = False print = self.patch(security, "print") self.installAndCheckExitCode(0) shared_secret_path = security.get_shared_secret_filesystem_path() self.assertThat( print, MockCalledOnceWith( "Secret installed to %s." 
% shared_secret_path)) class TestCheckForSharedSecretScript(MAASTestCase): def setUp(self): super(TestCheckForSharedSecretScript, self).setUp() self.useFixture(EnvironmentVariableFixture( "MAAS_ROOT", self.make_dir())) def test__has_add_arguments(self): # It doesn't do anything, but it's there to fulfil the contract with # ActionScript/MainScript. security.CheckForSharedSecretScript.add_arguments(sentinel.parser) self.assertIsNotNone("Obligatory assertion.") def test__exits_non_zero_if_secret_does_not_exist(self): print = self.patch(security, "print") error = self.assertRaises( SystemExit, security.CheckForSharedSecretScript.run, sentinel.args) self.assertEqual(1, error.code) self.assertThat( print, MockCalledOnceWith("Shared-secret is NOT installed.")) def test__exits_zero_if_secret_exists(self): security.set_shared_secret_on_filesystem(factory.make_bytes()) print = self.patch(security, "print") error = self.assertRaises( SystemExit, security.CheckForSharedSecretScript.run, sentinel.args) self.assertEqual(0, error.code) self.assertThat( print, MockCalledOnceWith("Shared-secret is installed.")) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_service_monitor.py0000644000000000000000000007342113056115004025410 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for `provisioningserver.service_monitor`.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import logging from textwrap import dedent from fixtures import FakeLogger from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockCallsMatch, ) from maastesting.testcase import MAASTestCase from mock import ( ANY, call, sentinel, ) from provisioningserver import service_monitor as service_monitor_module from provisioningserver.drivers.service import ( Service, SERVICE_STATE, ServiceRegistry, ) from provisioningserver.service_monitor import ( ServiceActionError, ServiceMonitor, ServiceNotOnError, ServiceParsingError, UnknownServiceError, ) from provisioningserver.utils.testing import RegistryFixture from testtools import ExpectedException from testtools.matchers import Equals class TestServiceMonitor(MAASTestCase): """Tests for `ServiceMonitor`.""" def setUp(self): super(TestServiceMonitor, self).setUp() # Ensure the global registry is empty for each test run. 
self.useFixture(RegistryFixture()) def make_service_driver(self, expected_state=None): fake_name = factory.make_name("name") fake_service_name = factory.make_name("service") if expected_state is None: if factory.pick_bool(): expected_state = SERVICE_STATE.ON else: expected_state = SERVICE_STATE.OFF class FakeService(Service): name = fake_name service_name = fake_service_name def get_expected_state(self): return expected_state service = FakeService() ServiceRegistry.register_item(service.name, service) return service def test_init_determines_init_system(self): mock_has_cmd = self.patch( service_monitor_module, "get_init_system") mock_has_cmd.return_value = sentinel.init_system service_monitor = ServiceMonitor() self.assertEquals(sentinel.init_system, service_monitor.init_system) def test__get_service_lock_adds_lock_to_service_locks(self): service_monitor = ServiceMonitor() service_name = factory.make_name("service") service_lock = service_monitor._get_service_lock(service_name) self.assertIs( service_lock, service_monitor.service_locks[service_name]) def test__get_service_lock_uses_shared_lock(self): service_monitor = ServiceMonitor() service_shared_lock = self.patch(service_monitor, "_lock") service_name = factory.make_name("service") service_monitor._get_service_lock(service_name) self.assertThat( service_shared_lock.__enter__, MockCalledOnceWith()) self.assertThat( service_shared_lock.__exit__, MockCalledOnceWith(None, None, None)) def test__lock_service_acquires_lock_for_service(self): service_monitor = ServiceMonitor() service_name = factory.make_name("service") service_lock = service_monitor._get_service_lock(service_name) with service_lock: self.assertTrue( service_lock.locked(), "Service lock was not acquired.") self.assertFalse( service_lock.locked(), "Service lock was not released.") def test_get_service_state_raises_UnknownServiceError(self): service_monitor = ServiceMonitor() with ExpectedException(UnknownServiceError): service_monitor.get_service_state(factory.make_name("service")) def test_get_service_state_returns_state_from__get_service_status(self): service = self.make_service_driver() service_monitor = ServiceMonitor() mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.return_value = ( sentinel.state, sentinel.process_state) self.assertEquals( sentinel.state, service_monitor.get_service_state(service.name)) def test_ensure_all_services_calls_ensure_service_for_all_services(self): service_names = sorted([ self.make_service_driver().name for _ in range(3) ]) service_calls = [ call(name) for name in service_names ] service_monitor = ServiceMonitor() mock_ensure_service = self.patch(service_monitor, "ensure_service") service_monitor.ensure_all_services() self.assertThat(mock_ensure_service, MockCallsMatch(*service_calls)) def test_ensure_all_services_log_unknown_errors(self): service = self.make_service_driver() service_monitor = ServiceMonitor() raised_exception = factory.make_exception() mock_ensure_service = self.patch(service_monitor, "ensure_service") mock_ensure_service.side_effect = raised_exception with FakeLogger( "maas.service_monitor", level=logging.ERROR) as maaslog: service_monitor.ensure_all_services() self.assertDocTestMatches( "While monitoring service '%s' an error was encountered: %s" % ( service.service_name, raised_exception), maaslog.output) def test_ensure_service_raises_UnknownServiceError(self): service_monitor = ServiceMonitor() with ExpectedException(UnknownServiceError): 
service_monitor.ensure_service(factory.make_name("service")) def test_ensure_service_calls_lock_and_unlock_even_with_exception(self): service = self.make_service_driver() service_monitor = ServiceMonitor() exception_type = factory.make_exception_type() mock_ensure_service = self.patch(service_monitor, "_ensure_service") mock_ensure_service.side_effect = exception_type get_service_lock = self.patch(service_monitor, "_get_service_lock") self.assertRaises( exception_type, service_monitor.ensure_service, service.name) self.expectThat(get_service_lock, MockCalledOnceWith(service.name)) lock = get_service_lock.return_value self.expectThat( lock.__enter__, MockCalledOnceWith()) self.expectThat( lock.__exit__, MockCalledOnceWith(exception_type, ANY, ANY)) def test_async_ensure_service_defers_to_a_thread(self): service_monitor = ServiceMonitor() mock_deferToThread = self.patch( service_monitor_module, "deferToThread") mock_deferToThread.return_value = sentinel.defer service_name = factory.make_name("service") self.assertEquals( sentinel.defer, service_monitor.async_ensure_service(service_name)) self.assertThat( mock_deferToThread, MockCalledOnceWith(service_monitor.ensure_service, service_name)) def test_restart_service_raises_UnknownServiceError(self): service_monitor = ServiceMonitor() with ExpectedException(UnknownServiceError): service_monitor.restart_service(factory.make_name("service")) def test_restart_service_raises_ServiceNotOnError(self): service = self.make_service_driver(SERVICE_STATE.OFF) service_monitor = ServiceMonitor() with ExpectedException(ServiceNotOnError): service_monitor.restart_service(service.name) def test_restart_service_calls_lock_and_unlock_even_with_exception(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() exception_type = factory.make_exception_type() mock_service_action = self.patch(service_monitor, "_service_action") mock_service_action.side_effect = exception_type get_service_lock = self.patch(service_monitor, "_get_service_lock") self.assertRaises( exception_type, service_monitor.restart_service, service.name) self.expectThat(get_service_lock, MockCalledOnceWith(service.name)) lock = get_service_lock.return_value self.expectThat( lock.__enter__, MockCalledOnceWith()) self.expectThat( lock.__exit__, MockCalledOnceWith(exception_type, ANY, ANY)) def test_restart_service_calls__service_action_with_restart(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_service_action = self.patch(service_monitor, "_service_action") mock_service_action.side_effect = factory.make_exception() try: service_monitor.restart_service(service.name) except: pass self.assertThat( mock_service_action, MockCalledOnceWith(service, "restart")) def test_restart_service_raised_ServiceActionError_if_service_off(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() self.patch(service_monitor, "_service_action") mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.return_value = ( SERVICE_STATE.OFF, "dead") with ExpectedException(ServiceActionError): service_monitor.restart_service(service.name) def test_restart_service_logs_error_if_service_off(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() self.patch(service_monitor, "_service_action") mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.return_value = ( 
SERVICE_STATE.OFF, "dead") with FakeLogger( "maas.service_monitor", level=logging.ERROR) as maaslog: with ExpectedException(ServiceActionError): service_monitor.restart_service(service.name) self.assertDocTestMatches( "Service '%s' failed to restart. Its current state " "is 'off' and 'dead'." % service.service_name, maaslog.output) def test_restart_service_logs_info_if_service_on(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() self.patch(service_monitor, "_service_action") mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.return_value = ( SERVICE_STATE.ON, "running") with FakeLogger( "maas.service_monitor", level=logging.INFO) as maaslog: service_monitor.restart_service(service.name) self.assertDocTestMatches( "Service '%s' has been restarted. Its current state " "is 'on' and 'running'." % service.service_name, maaslog.output) def test_async_restart_service_defers_to_a_thread(self): service_monitor = ServiceMonitor() mock_deferToThread = self.patch( service_monitor_module, "deferToThread") mock_deferToThread.return_value = sentinel.defer service_name = factory.make_name("service") self.assertEquals( sentinel.defer, service_monitor.async_restart_service(service_name)) self.assertThat( mock_deferToThread, MockCalledOnceWith( service_monitor.restart_service, service_name)) def test__exec_service_action_calls_service_with_name_and_action(self): service_monitor = ServiceMonitor() service_name = factory.make_name("service") action = factory.make_name("action") mock_popen = self.patch(service_monitor_module, "Popen") mock_popen.return_value.communicate.return_value = (b"", b"") service_monitor._exec_service_action(service_name, action) self.assertEquals( ["sudo", "service", service_name, action], mock_popen.call_args[0][0]) def test__exec_service_action_calls_service_with_LC_ALL_in_env(self): service_monitor = ServiceMonitor() service_name = factory.make_name("service") action = factory.make_name("action") mock_popen = self.patch(service_monitor_module, "Popen") mock_popen.return_value.communicate.return_value = (b"", b"") service_monitor._exec_service_action(service_name, action) self.assertEquals( "C.UTF-8", mock_popen.call_args[1]['env']['LC_ALL']) def test__exec_service_action_decodes_stdout(self): # From https://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-demo.txt. 
example_text = ( '\u16bb\u16d6 \u16b3\u16b9\u16ab\u16a6 \u16a6\u16ab\u16cf ' '\u16bb\u16d6 \u16d2\u16a2\u16de\u16d6 \u16a9\u16be \u16a6' '\u16ab\u16d7 \u16da\u16aa\u16be\u16de\u16d6 \u16be\u16a9' '\u16b1\u16a6\u16b9\u16d6\u16aa\u16b1\u16de\u16a2\u16d7 ' '\u16b9\u16c1\u16a6 \u16a6\u16aa \u16b9\u16d6\u16e5\u16ab' ) service_monitor = ServiceMonitor() service_name = factory.make_name("service") action = factory.make_name("action") mock_popen = self.patch(service_monitor_module, "Popen") mock_popen.return_value.communicate.return_value = ( example_text.encode("utf-8"), b"") _, output = service_monitor._exec_service_action(service_name, action) self.assertThat(output, Equals(example_text)) def test__service_action_calls__exec_service_action(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (0, "") action = factory.make_name("action") service_monitor._service_action(service, action) self.assertThat( mock_exec_service_action, MockCalledOnceWith(service.service_name, action)) def test__service_action_raises_ServiceActionError_if_action_fails(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (1, "") action = factory.make_name("action") with ExpectedException(ServiceActionError): service_monitor._service_action(service, action) def test__service_action_logs_error_if_action_fails(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") error_output = factory.make_name("error") mock_exec_service_action.return_value = (1, error_output) action = factory.make_name("action") with FakeLogger( "maas.service_monitor", level=logging.ERROR) as maaslog: with ExpectedException(ServiceActionError): service_monitor._service_action(service, action) self.assertDocTestMatches( "Service '%s' failed to %s: %s" % ( service.service_name, action, error_output), maaslog.output) def test__get_service_status_uses__get_systemd_service_status(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() service_monitor.init_system = "systemd" mock_get_systemd_service_status = self.patch( service_monitor, "_get_systemd_service_status") service_monitor._get_service_status(service) self.assertThat( mock_get_systemd_service_status, MockCalledOnceWith(service.service_name)) def test__get_service_status_uses__get_upstart_service_status(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() service_monitor.init_system = "upstart" mock_get_upstart_service_status = self.patch( service_monitor, "_get_upstart_service_status") service_monitor._get_service_status(service) self.assertThat( mock_get_upstart_service_status, MockCalledOnceWith(service.service_name)) def test__get_systemd_service_status_calls__exec_service_action(self): service_monitor = ServiceMonitor() service_name = factory.make_name("service") mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.side_effect = factory.make_exception() try: service_monitor._get_systemd_service_status(service_name) except: pass self.assertThat( mock_exec_service_action, MockCalledOnceWith(service_name, "status")) def 
test__get_systemd_service_status_raises_UnknownServiceError(self): systemd_status_output = dedent("""\ missing.service Loaded: not-found (Reason: No such file or directory) Active: inactive (dead) """) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (3, systemd_status_output) with ExpectedException(UnknownServiceError): service_monitor._get_systemd_service_status("missing") def test__get_systemd_service_status_returns_off_and_dead(self): systemd_status_output = dedent("""\ tgt.service - LSB: iscsi target daemon Loaded: loaded (/etc/init.d/tgt) Active: inactive (dead) Docs: man:systemd-sysv-generator(8) """) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (3, systemd_status_output) active_state, process_state = ( service_monitor._get_systemd_service_status("tgt")) self.assertEquals(SERVICE_STATE.OFF, active_state) self.assertEquals("dead", process_state) def test__get_systemd_service_status_returns_on_and_running(self): systemd_status_output = dedent("""\ tgt.service - LSB: iscsi target daemon Loaded: loaded (/etc/init.d/tgt) Active: active (running) since Fri 2015-05-15 15:08:26 UTC; Docs: man:systemd-sysv-generator(8) """) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (0, systemd_status_output) active_state, process_state = ( service_monitor._get_systemd_service_status("tgt")) self.assertEquals(SERVICE_STATE.ON, active_state) self.assertEquals("running", process_state) def test__get_systemd_service_status_ignores_sudo_output(self): systemd_status_output = dedent("""\ sudo: unable to resolve host sub-etha-sens-o-matic tgt.service - LSB: iscsi target daemon Loaded: loaded (/etc/init.d/tgt) Active: active (running) since Fri 2015-05-15 15:08:26 UTC; Docs: man:systemd-sysv-generator(8) """) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (0, systemd_status_output) active_state, process_state = ( service_monitor._get_systemd_service_status("tgt")) self.assertEquals(SERVICE_STATE.ON, active_state) self.assertEquals("running", process_state) def test__get_systemd_service_status_raise_error_for_invalid_active(self): systemd_status_output = dedent("""\ tgt.service - LSB: iscsi target daemon Loaded: loaded (/etc/init.d/tgt) Active: unknown (running) since Fri 2015-05-15 15:08:26 UTC; Docs: man:systemd-sysv-generator(8) """) service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (0, systemd_status_output) with ExpectedException(ServiceParsingError): service_monitor._get_systemd_service_status("tgt") def test__get_systemd_service_status_raise_error_for_invalid_output(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 3, factory.make_name("invalid")) with ExpectedException(ServiceParsingError): service_monitor._get_systemd_service_status("tgt") def test__get_upstart_service_status_raises_UnknownServiceError(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 1, 
"missing: unrecognized service") with ExpectedException(UnknownServiceError): service_monitor._get_upstart_service_status("missing") def test__get_upstart_service_status_returns_off_and_waiting(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 0, "tgt stop/waiting") active_state, process_state = ( service_monitor._get_upstart_service_status("tgt")) self.assertEquals(SERVICE_STATE.OFF, active_state) self.assertEquals("waiting", process_state) def test__get_upstart_service_status_returns_on_and_running(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 0, "tgt start/running, process 23239") active_state, process_state = ( service_monitor._get_upstart_service_status("tgt")) self.assertEquals(SERVICE_STATE.ON, active_state) self.assertEquals("running", process_state) def test__get_upstart_service_status_parsing_ignores_sudo_output(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = (0, dedent("""\ sudo: unable to resolve host infinite-improbability tgt start/running, process 23239""")) active_state, process_state = ( service_monitor._get_upstart_service_status("tgt")) self.assertEquals(SERVICE_STATE.ON, active_state) self.assertEquals("running", process_state) def test__get_upstart_service_status_raise_error_for_invalid_active(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 0, "tgt unknown/running, process 23239") with ExpectedException(ServiceParsingError): service_monitor._get_upstart_service_status("tgt") def test__get_upstart_service_status_raise_error_for_invalid_output(self): service_monitor = ServiceMonitor() mock_exec_service_action = self.patch( service_monitor, "_exec_service_action") mock_exec_service_action.return_value = ( 0, factory.make_name("invalid")) with ExpectedException(ServiceParsingError): service_monitor._get_upstart_service_status("tgt") def test__get_expected_process_state_returns_upstart_running_for_on(self): service_monitor = ServiceMonitor() service_monitor.init_system = "upstart" self.assertEquals( "running", service_monitor._get_expected_process_state(SERVICE_STATE.ON)) def test__get_expected_process_state_returns_upstart_waiting_for_off(self): service_monitor = ServiceMonitor() service_monitor.init_system = "upstart" self.assertEquals( "waiting", service_monitor._get_expected_process_state(SERVICE_STATE.OFF)) def test__get_expected_process_state_returns_systemd_running_for_on(self): service_monitor = ServiceMonitor() service_monitor.init_system = "systemd" self.assertEquals( "running", service_monitor._get_expected_process_state(SERVICE_STATE.ON)) def test__get_expected_process_state_returns_systemd_dead_for_off(self): service_monitor = ServiceMonitor() service_monitor.init_system = "systemd" self.assertEquals( "dead", service_monitor._get_expected_process_state(SERVICE_STATE.OFF)) def test__ensure_service_logs_warning_in_mismatch_process_state(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() expected_state = service.get_expected_state() invalid_process_state = factory.make_name("invalid_state") mock_get_service_status = self.patch( service_monitor, "_get_service_status") 
mock_get_service_status.return_value = ( expected_state, invalid_process_state) with FakeLogger( "maas.service_monitor", level=logging.WARNING) as maaslog: service_monitor._ensure_service(service) self.assertDocTestMatches( "Service '%s' is %s but not in the expected state of " "'%s', its current state is '%s'." % ( service.service_name, expected_state, service_monitor._get_expected_process_state(expected_state), invalid_process_state), maaslog.output) def test__ensure_service_logs_debug_in_expected_states(self): service = self.make_service_driver() service_monitor = ServiceMonitor() expected_state = service.get_expected_state() expected_process_state = service_monitor._get_expected_process_state( expected_state) mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.return_value = ( expected_state, expected_process_state) with FakeLogger( "maas.service_monitor", level=logging.DEBUG) as maaslog: service_monitor._ensure_service(service) self.assertDocTestMatches( "Service '%s' is %s and '%s'." % ( service.service_name, expected_state, expected_process_state), maaslog.output) def test__ensure_service_performs_start_for_off_service(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.side_effect = [ (SERVICE_STATE.OFF, "waiting"), (SERVICE_STATE.ON, "running"), ] mock_service_action = self.patch(service_monitor, "_service_action") with FakeLogger( "maas.service_monitor", level=logging.INFO) as maaslog: service_monitor._ensure_service(service) self.assertThat( mock_service_action, MockCalledOnceWith(service, "start")) self.assertDocTestMatches( """\ Service '%s' is not on, it will be started. Service '%s' has been started and is 'running'. """ % (service.service_name, service.service_name), maaslog.output) def test__ensure_service_performs_stop_for_on_service(self): service = self.make_service_driver(SERVICE_STATE.OFF) service_monitor = ServiceMonitor() mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.side_effect = [ (SERVICE_STATE.ON, "running"), (SERVICE_STATE.OFF, "waiting"), ] mock_service_action = self.patch(service_monitor, "_service_action") with FakeLogger( "maas.service_monitor", level=logging.INFO) as maaslog: service_monitor._ensure_service(service) self.assertThat( mock_service_action, MockCalledOnceWith(service, "stop")) self.assertDocTestMatches( """\ Service '%s' is not off, it will be stopped. Service '%s' has been stopped and is 'waiting'. """ % (service.service_name, service.service_name), maaslog.output) def test__ensure_service_performs_raises_ServiceActionError(self): service = self.make_service_driver(SERVICE_STATE.ON) service_monitor = ServiceMonitor() mock_get_service_status = self.patch( service_monitor, "_get_service_status") mock_get_service_status.side_effect = [ (SERVICE_STATE.OFF, "waiting"), (SERVICE_STATE.OFF, "waiting"), ] self.patch(service_monitor, "_service_action") with ExpectedException(ServiceActionError): with FakeLogger( "maas.service_monitor", level=logging.INFO) as maaslog: service_monitor._ensure_service(service) lint_sucks = ( service.service_name, service.service_name, SERVICE_STATE.OFF, "waiting", ) self.assertDocTestMatches("""\ Service '%s' is not on, it will be started. Service '%s' failed to start. Its current state is '%s' and '%s'. 
""" % lint_sucks, maaslog.output) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_tags.py0000644000000000000000000006114013056115004023132 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for tag updating.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] import doctest import httplib from itertools import chain import json from textwrap import dedent import urllib2 from apiclient.maas_client import MAASClient import bson from fixtures import FakeLogger from lxml import etree from maastesting.factory import factory from maastesting.fakemethod import ( FakeMethod, MultiFakeMethod, ) from maastesting.matchers import IsCallable from mock import ( call, MagicMock, sentinel, ) from provisioningserver import tags from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.testing.testcase import PservTestCase from testtools.matchers import ( DocTestMatches, Equals, MatchesStructure, ) class TestProcessResponse(PservTestCase): def setUp(self): super(TestProcessResponse, self).setUp() self.useFixture(FakeLogger()) def test_process_OK_response_with_JSON_content(self): data = {"abc": 123} response = factory.make_response( httplib.OK, json.dumps(data), "application/json") self.assertEqual(data, tags.process_response(response)) def test_process_OK_response_with_BSON_content(self): data = {"abc": 123} response = factory.make_response( httplib.OK, bson.BSON.encode(data), "application/bson") self.assertEqual(data, tags.process_response(response)) def test_process_OK_response_with_other_content(self): data = factory.make_bytes() response = factory.make_response( httplib.OK, data, "application/octet-stream") self.assertEqual(data, tags.process_response(response)) def test_process_not_OK_response(self): response = factory.make_response( httplib.NOT_FOUND, b"", "application/json" ) response.url = factory.make_string() error = self.assertRaises( urllib2.HTTPError, tags.process_response, response) self.assertThat( error, MatchesStructure.byEquality( url=response.url, code=response.code, msg="Not Found, expected 200 OK", headers=response.headers, fp=response.fp)) class EqualsXML(Equals): @staticmethod def normalise(xml): if isinstance(xml, (bytes, unicode)): xml = etree.fromstring(dedent(xml)) return etree.tostring(xml, pretty_print=True) def __init__(self, tree): super(EqualsXML, self).__init__(self.normalise(tree)) def match(self, other): return super(EqualsXML, self).match(self.normalise(other)) class TestMergeDetailsCleanly(PservTestCase): do_merge_details = staticmethod(tags.merge_details_cleanly) def setUp(self): super(TestMergeDetailsCleanly, self).setUp() self.logger = self.useFixture(FakeLogger("maas")) def test_merge_with_no_details(self): xml = self.do_merge_details({}) self.assertThat(xml, EqualsXML("")) def test_merge_with_only_lshw_details(self): xml = self.do_merge_details( {"lshw": b"Hello"}) expected = """\ Hello """ self.assertThat(xml, EqualsXML(expected)) def test_merge_with_only_lldp_details(self): xml = self.do_merge_details( {"lldp": b"Hello"}) expected = """\ Hello """ self.assertThat(xml, EqualsXML(expected)) def test_merge_with_multiple_details(self): xml = self.do_merge_details({ "lshw": b"Hello", "lldp": b"Hello", "zoom": b"zoom", }) expected = """\ Hello Hello zoom """ self.assertThat(xml, EqualsXML(expected)) def 
test_merges_into_new_tree(self): xml = self.do_merge_details({ "lshw": b"Hello", "lldp": b"Hello", }) # The presence of a getroot() method indicates that this is a # tree object, not an element. self.assertThat(xml, MatchesStructure(getroot=IsCallable())) # The list tag can be obtained using an XPath expression # starting from the root of the tree. self.assertSequenceEqual( ["list"], [elem.tag for elem in xml.xpath("/list")]) def assertDocTestMatches(self, expected, observed): return self.assertThat(observed, DocTestMatches( dedent(expected), doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)) def test_merge_with_invalid_lshw_details(self): # The lshw details cannot be parsed, but merge_details_cleanly() still # returns a usable tree, albeit without any lshw details. xml = self.do_merge_details({"lshw": b"well"}) self.assertThat(xml, EqualsXML('')) # The error is logged however. self.assertDocTestMatches( """\ Invalid lshw details: ... """, self.logger.output) def test_merge_with_invalid_lshw_details_and_others_valid(self): # The lshw details cannot be parsed, but merge_details_cleanly() still # returns a usable tree, albeit without any lshw details. xml = self.do_merge_details({ "lshw": b"well", "lldp": b"Hello", "zoom": b"zoom", }) expected = """\ Hello zoom """ self.assertThat(xml, EqualsXML(expected)) # The error is logged however. self.assertDocTestMatches( """\ Invalid lshw details: ... """, self.logger.output) def test_merge_with_invalid_other_details(self): xml = self.do_merge_details({ "lshw": b"Hello", "foom": b"well", "zoom": b"zoom", "oops": None, }) expected = """\ Hello zoom """ self.assertThat(xml, EqualsXML(expected)) # The error is logged however. self.assertDocTestMatches( """\ Invalid foom details: ... """, self.logger.output) def test_merge_with_all_invalid_details(self): xml = self.do_merge_details({ "lshw": b"", "foom": b"well", "zoom": b"<>" + factory.make_bytes(), "oops": None, }) expected = """\ """ self.assertThat(xml, EqualsXML(expected)) # The error is logged however. self.assertDocTestMatches( """\ Invalid foom details: ... Invalid lshw details: ... Invalid zoom details: ... """, self.logger.output) class TestMergeDetails(TestMergeDetailsCleanly): # merge_details() differs from merge_details_cleanly() in a few # small ways, hence why this test case subclasses that for # merge_details_cleanly(), overriding tests where they produce # different results. do_merge_details = staticmethod(tags.merge_details) def test_merge_with_only_lshw_details(self): # merge_details() differs from merge_details_cleanly() in that # the lshw details are in the result twice: once as a # namespaced child of the root element, but they're also there # *as* the root element, without namespace. xml = self.do_merge_details({"lshw": b"Hello"}) expected = """\ Hello Hello """ self.assertThat(xml, EqualsXML(expected)) def test_merge_with_multiple_details(self): # merge_details() differs from merge_details_cleanly() in that # the lshw details are in the result twice: once as a # namespaced child of the root element, but they're also there # *as* the root element, without namespace. 
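# Schematically, and keeping in mind that the XML literals in these tests
# have lost their markup in this rendering: merge_details_cleanly() builds
# a fresh list root holding each detail as a namespaced child, whereas
# merge_details() reuses the parsed lshw tree itself as the root and then
# grafts the same namespaced children onto it -- which is why the lshw
# details appear twice in its output.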
xml = self.do_merge_details({ "lshw": b"Hello", "lldp": b"Hello", "zoom": b"zoom", }) expected = """\ Hello Hello Hello zoom """ self.assertThat(xml, EqualsXML(expected)) def test_merge_with_invalid_other_details(self): # merge_details() differs from merge_details_cleanly() in that # the lshw details are in the result twice: once as a # namespaced child of the root element, but they're also there # *as* the root element, without namespace. xml = self.do_merge_details({ "lshw": b"Hello", "foom": b"well", "zoom": b"zoom", "oops": None, }) expected = """\ Hello Hello zoom """ self.assertThat(xml, EqualsXML(expected)) # The error is logged however. self.assertDocTestMatches( """\ Invalid foom details: ... """, self.logger.output) def test_merge_with_all_invalid_details(self): # merge_details() differs from merge_details_cleanly() in that # it first attempts to use the lshw details as the root # # element. If they're invalid the log message is therefore # printed first. xml = self.do_merge_details({ "lshw": b"", "foom": b"well", "zoom": b"<>" + factory.make_bytes(), "oops": None, }) expected = """\ """ self.assertThat(xml, EqualsXML(expected)) # The error is logged however. self.assertDocTestMatches( """\ Invalid lshw details: ... Invalid foom details: ... Invalid zoom details: ... """, self.logger.output) class TestGenBatchSlices(PservTestCase): def test_batch_of_1_no_things(self): self.assertSequenceEqual( [], list(tags.gen_batch_slices(0, 1))) def test_batch_of_1_one_thing(self): self.assertSequenceEqual( [slice(0, None, 1)], list(tags.gen_batch_slices(1, 1))) def test_batch_of_1_more_things(self): self.assertSequenceEqual( [slice(0, None, 3), slice(1, None, 3), slice(2, None, 3)], list(tags.gen_batch_slices(3, 1))) def test_no_things(self): self.assertSequenceEqual( [], list(tags.gen_batch_slices(0, 4))) def test_one_thing(self): self.assertSequenceEqual( [slice(0, None, 1)], list(tags.gen_batch_slices(1, 4))) def test_more_things(self): self.assertSequenceEqual( [slice(0, None, 3), slice(1, None, 3), slice(2, None, 3)], list(tags.gen_batch_slices(10, 4))) def test_batches_by_brute_force(self): expected = range(99) for size in xrange(1, len(expected) // 2): slices = tags.gen_batch_slices(len(expected), size) batches = list(expected[sl] for sl in slices) # Every element in the original list is present in the # reconsolidated list. observed = sorted(chain.from_iterable(batches)) self.assertSequenceEqual(expected, observed) # The longest batch is never more than 1 element longer # than the shortest batch. lens = [len(batch) for batch in batches] self.assertIn(max(lens) - min(lens), (0, 1)) class TestGenBatches(PservTestCase): def test_batch_of_1_no_things(self): self.assertSequenceEqual( [], list(tags.gen_batches([], 1))) def test_batch_of_1_one_thing(self): self.assertSequenceEqual( [[1]], list(tags.gen_batches([1], 1))) def test_batch_of_1_more_things(self): self.assertSequenceEqual( [[1], [2], [3]], list(tags.gen_batches([1, 2, 3], 1))) def test_no_things(self): self.assertSequenceEqual( [], list(tags.gen_batches([], 4))) def test_one_thing(self): self.assertSequenceEqual( [[1]], list(tags.gen_batches([1], 4))) def test_more_things(self): self.assertSequenceEqual( [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]], list(tags.gen_batches(range(10), 4))) def test_brute(self): expected = range(99) for size in xrange(1, len(expected) // 2): batches = list(tags.gen_batches(expected, size)) # Every element in the original list is present in the # reconsolidated list. 
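# The expected slices and batches above imply a round-robin split: the
# number of batches is ceil(len(things) / size), and batch i takes every
# n-th item starting at offset i. A hypothetical reconstruction consistent
# with these tests (not necessarily the real implementation):
#
#     def gen_batch_slices(length, size):
#         nbatches = (length + size - 1) // size  # ceil(length / size)
#         return (slice(i, None, nbatches) for i in xrange(nbatches))
#
#     def gen_batches(things, size):
#         return (things[sl] for sl in gen_batch_slices(len(things), size))
#
# Reconsolidating the batches should therefore recover every element: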
observed = sorted(chain.from_iterable(batches)) self.assertSequenceEqual(expected, observed) # The longest batch is never more than 1 element longer # than the shortest batch. lens = [len(batch) for batch in batches] self.assertIn(max(lens) - min(lens), (0, 1)) class TestGenNodeDetails(PservTestCase): def fake_merge_details(self): """Modify `merge_details` to return a simple textual token. Specifically, it will return `merged:n1+n2+...`, where `n1`, `n2` and `...` are the names of the details passed into `merge_details`. This means we can test code that uses `merge_details` without having to come up with example XML and match on it later. """ self.patch( tags, "merge_details", lambda mapping: "merged:" + "+".join(mapping)) def test__generates_node_details(self): batches = [["s1", "s2"], ["s3"]] responses = [ {"s1": {"foo": "s1"}, "s2": {"bar": "s2"}}, {"s3": {"cob": "s3"}}, ] get_details_for_nodes = self.patch(tags, "get_details_for_nodes") get_details_for_nodes.side_effect = lambda *args: responses.pop(0) self.fake_merge_details() node_details = tags.gen_node_details( sentinel.client, sentinel.uuid, batches) self.assertItemsEqual( [('s1', 'merged:foo'), ('s2', 'merged:bar'), ('s3', 'merged:cob')], node_details) self.assertSequenceEqual( [call(sentinel.client, sentinel.uuid, batch) for batch in batches], get_details_for_nodes.mock_calls) class TestTagUpdating(PservTestCase): def setUp(self): super(TestTagUpdating, self).setUp() self.useFixture(FakeLogger()) def fake_client(self): return MAASClient(None, None, factory.make_simple_http_url()) def fake_cached_knowledge(self): nodegroup_uuid = factory.make_name('nodegroupuuid') return self.fake_client(), nodegroup_uuid def test_get_nodes_calls_correct_api_and_parses_result(self): client, uuid = self.fake_cached_knowledge() response = factory.make_response( httplib.OK, b'["system-id1", "system-id2"]', 'application/json', ) mock = MagicMock(return_value=response) self.patch(client, 'get', mock) result = tags.get_nodes_for_node_group(client, uuid) self.assertEqual(['system-id1', 'system-id2'], result) url = '/api/1.0/nodegroups/%s/' % (uuid,) mock.assert_called_once_with(url, op='list_nodes') def test_get_details_calls_correct_api_and_parses_result(self): client, uuid = self.fake_cached_knowledge() data = { "system-1": { "lshw": bson.binary.Binary(b""), "lldp": bson.binary.Binary(b""), }, "system-2": { "lshw": bson.binary.Binary(b""), "lldp": bson.binary.Binary(b""), }, } content = bson.BSON.encode(data) response = factory.make_response( httplib.OK, content, 'application/bson' ) post = self.patch(client, 'post') post.return_value = response result = tags.get_details_for_nodes( client, uuid, ['system-1', 'system-2']) self.assertEqual(data, result) url = '/api/1.0/nodegroups/%s/' % (uuid,) post.assert_called_once_with( url, op='details', system_ids=["system-1", "system-2"]) def test_post_updated_nodes_calls_correct_api_and_parses_result(self): client, uuid = self.fake_cached_knowledge() content = b'{"added": 1, "removed": 2}' response = factory.make_response( httplib.OK, content, 'application/json' ) post_mock = MagicMock(return_value=response) self.patch(client, 'post', post_mock) name = factory.make_name('tag') tag_definition = factory.make_name('//') result = tags.post_updated_nodes( client, name, tag_definition, uuid, ['add-system-id'], ['remove-1', 'remove-2']) self.assertEqual({'added': 1, 'removed': 2}, result) url = '/api/1.0/tags/%s/' % (name,) post_mock.assert_called_once_with( url, op='update_nodes', as_json=True, nodegroup=uuid, 
definition=tag_definition, add=['add-system-id'], remove=['remove-1', 'remove-2']) def test_post_updated_nodes_handles_conflict(self): # If a worker started processing a node late, it might try to post # an updated list with an out-of-date definition. It gets a CONFLICT in # that case, which should be handled. client, uuid = self.fake_cached_knowledge() name = factory.make_name('tag') right_tag_definition = factory.make_name('//') wrong_tag_definition = factory.make_name('//') content = ("Definition supplied '%s' doesn't match" " current definition '%s'" % (wrong_tag_definition, right_tag_definition)) err = urllib2.HTTPError('url', httplib.CONFLICT, content, {}, None) post_mock = MagicMock(side_effect=err) self.patch(client, 'post', post_mock) result = tags.post_updated_nodes( client, name, wrong_tag_definition, uuid, ['add-system-id'], ['remove-1', 'remove-2']) url = '/api/1.0/tags/%s/' % (name,) self.assertEqual({}, result) post_mock.assert_called_once_with( url, op='update_nodes', as_json=True, nodegroup=uuid, definition=wrong_tag_definition, add=['add-system-id'], remove=['remove-1', 'remove-2']) def test_classify_evaluates_xpath(self): # Yay, something that doesn't need patching... xpath = etree.XPath('//node') xml = etree.fromstring node_details = [ ('a', xml('')), ('b', xml('')), ('c', xml('')), ] self.assertEqual( (['a', 'c'], ['b']), tags.classify(xpath, node_details)) def test_process_node_tags_integration(self): self.useFixture(ClusterConfigurationFixture( maas_url=factory.make_simple_http_url())) get_nodes = FakeMethod( result=factory.make_response( httplib.OK, b'["system-id1", "system-id2"]', 'application/json', )) post_hw_details = FakeMethod( result=factory.make_response( httplib.OK, bson.BSON.encode({ 'system-id1': {'lshw': b''}, 'system-id2': {'lshw': b''}, }), 'application/bson', )) get_fake = MultiFakeMethod([get_nodes]) post_update_fake = FakeMethod( result=factory.make_response( httplib.OK, b'{"added": 1, "removed": 1}', 'application/json', )) post_fake = MultiFakeMethod([post_hw_details, post_update_fake]) self.patch(MAASClient, 'get', get_fake) self.patch(MAASClient, 'post', post_fake) tag_name = factory.make_name('tag') nodegroup_uuid = factory.make_name("nodegroup-uuid") tag_definition = '//lshw:node' tag_nsmap = {"lshw": "lshw"} tags.process_node_tags( tag_name, tag_definition, tag_nsmap, self.fake_client(), nodegroup_uuid) nodegroup_url = '/api/1.0/nodegroups/%s/' % (nodegroup_uuid,) tag_url = '/api/1.0/tags/%s/' % (tag_name,) self.assertEqual( [((nodegroup_url,), {'op': 'list_nodes'})], get_nodes.calls) self.assertEqual( [ ((nodegroup_url,), { 'op': 'details', 'system_ids': ['system-id1', 'system-id2'], }), ], post_hw_details.calls) self.assertEqual( [ ((tag_url,), { 'as_json': True, 'op': 'update_nodes', 'nodegroup': nodegroup_uuid, 'definition': tag_definition, 'add': ['system-id1'], 'remove': ['system-id2'], }), ], post_update_fake.calls) def test_process_node_tags_requests_details_in_batches(self): client = object() uuid = factory.make_name('nodegroupuuid') self.patch( tags, 'get_nodes_for_node_group', MagicMock(return_value=['a', 'b', 'c'])) fake_first = FakeMethod(result={ 'a': {'lshw': b''}, 'c': {'lshw': b''}, }) fake_second = FakeMethod(result={ 'b': {'lshw': b''}, }) self.patch( tags, 'get_details_for_nodes', MultiFakeMethod([fake_first, fake_second])) self.patch(tags, 'post_updated_nodes') tag_name = factory.make_name('tag') tag_definition = '//node' tags.process_node_tags( tag_name, tag_definition,
tag_nsmap=None, client=client, nodegroup_uuid=uuid, batch_size=2) tags.get_nodes_for_node_group.assert_called_once_with(client, uuid) self.assertEqual([((client, uuid, ['a', 'c']), {})], fake_first.calls) self.assertEqual([((client, uuid, ['b']), {})], fake_second.calls) tags.post_updated_nodes.assert_called_once_with( client, tag_name, tag_definition, uuid, ['a', 'c'], ['b']) maas-1.9.5+bzr4599.orig/src/provisioningserver/tests/test_upgrade_cluster.py0000644000000000000000000003456313056115004025375 0ustar 00000000000000# Copyright 2014-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Tests for the `upgrade-cluster` command.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [] from argparse import ArgumentParser from itertools import product import os import os.path from maastesting.factory import factory from maastesting.matchers import ( MockCalledOnceWith, MockNotCalled, ) from maastesting.testcase import MAASTestCase from maastesting.utils import sample_binary_data from mock import Mock from provisioningserver import upgrade_cluster from provisioningserver.boot.tftppath import list_subdirs from provisioningserver.testing.config import ClusterConfigurationFixture from provisioningserver.utils.fs import read_text_file from testtools.matchers import ( DirExists, FileContains, FileExists, Not, ) class TestUpgradeCluster(MAASTestCase): """Tests for the `upgrade-cluster` command itself.""" def run_command(self): parser = ArgumentParser() upgrade_cluster.add_arguments(parser) upgrade_cluster.run(parser.parse_args(())) def patch_upgrade_hooks(self, hooks=None): """Temporarily replace the upgrade hooks.""" if hooks is None: hooks = [] self.patch(upgrade_cluster, 'UPGRADE_HOOKS', hooks) def test_calls_hooks(self): upgrade_hook = Mock() upgrade_hook.__name__ = "upgrade_hook" self.patch_upgrade_hooks([upgrade_hook]) self.run_command() self.assertThat(upgrade_hook, MockCalledOnceWith()) def test_calls_hooks_in_order(self): calls = [] # Define some hooks. They will be run in the order in which they are # listed (not in the order in which they are defined, or alphabetical # order, or any other order). 
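# run_command() above drives upgrade_cluster.run(); the ordering tests here
# only hold if run() iterates UPGRADE_HOOKS in list order. A minimal sketch
# of that loop (an assumption; the real run() may also log or wrap each
# hook):
#
#     def run(args):
#         for hook in UPGRADE_HOOKS:
#             hook()
#
# The hooks below are deliberately listed out of definition order: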
def last_hook(): calls.append('last') def first_hook(): calls.append('first') def middle_hook(): calls.append('middle') self.patch_upgrade_hooks([first_hook, middle_hook, last_hook]) self.run_command() self.assertEqual(['first', 'middle', 'last'], calls) class TestMakeMAASOwnBootResources(MAASTestCase): """Tests for the `make_maas_own_boot_resources` upgrade.""" def configure_storage(self, storage_dir): """Create a storage config.""" self.useFixture(ClusterConfigurationFixture(tftp_root=storage_dir)) def test__calls_chown_if_boot_resources_dir_exists(self): self.patch(upgrade_cluster, 'check_call') storage_dir = self.make_dir() self.configure_storage(storage_dir) upgrade_cluster.make_maas_own_boot_resources() self.assertThat( upgrade_cluster.check_call, MockCalledOnceWith(['chown', '-R', 'maas', storage_dir])) def test__skips_chown_if_boot_resources_dir_does_not_exist(self): self.patch(upgrade_cluster, 'check_call') storage_dir = os.path.join(self.make_dir(), factory.make_name('none')) os.mkdir(storage_dir) self.configure_storage(storage_dir) os.rmdir(storage_dir) upgrade_cluster.make_maas_own_boot_resources() self.assertThat(upgrade_cluster.check_call, MockNotCalled()) class TestCreateGNUPGHome(MAASTestCase): """Tests for `create_gnupg_home`.""" def make_nonexistent_path(self, parent_dir): """Return an as-yet nonexistent path, inside `parent_dir`.""" return os.path.join(parent_dir, factory.make_name('gpghome')) def patch_gnupg_home(self, gpghome): self.patch(upgrade_cluster, 'get_maas_user_gpghome').return_value = ( gpghome) def patch_call(self): return self.patch(upgrade_cluster, 'check_call') def test__succeeds_if_directory_exists(self): existing_home = self.make_dir() self.patch_gnupg_home(existing_home) self.patch_call() upgrade_cluster.create_gnupg_home() self.assertEqual([], os.listdir(existing_home)) def test__creates_directory(self): parent = self.make_dir() new_home = self.make_nonexistent_path(parent) self.patch_gnupg_home(new_home) self.patch_call() upgrade_cluster.create_gnupg_home() self.assertThat(new_home, DirExists()) def test__sets_ownership_to_maas_if_running_as_root(self): parent = self.make_dir() new_home = self.make_nonexistent_path(parent) self.patch_gnupg_home(new_home) call = self.patch_call() self.patch(os, 'geteuid').return_value = 0 upgrade_cluster.create_gnupg_home() self.assertThat( call, MockCalledOnceWith(['chown', 'maas:maas', new_home])) def test__does_not_set_ownership_if_not_running_as_root(self): parent = self.make_dir() new_home = self.make_nonexistent_path(parent) self.patch_gnupg_home(new_home) call = self.patch_call() self.patch(os, 'geteuid').return_value = 101 upgrade_cluster.create_gnupg_home() self.assertThat(call, MockNotCalled()) class TestRetireBootResourcesYAML(MAASTestCase): """Tests for `retire_bootresources_yaml`.""" def set_bootresources_yaml(self, contents): """Write a fake `bootresources.yaml`, and return its path.""" path = self.make_file('bootresources.yaml', contents=contents) self.patch(upgrade_cluster, 'BOOTRESOURCES_FILE', path) return path def test__does_nothing_if_file_not_present(self): path = self.set_bootresources_yaml('') os.remove(path) upgrade_cluster.retire_bootresources_yaml() self.assertThat(path, Not(FileExists())) def test__prefixes_header_to_file_if_present(self): content = factory.make_string() path = self.set_bootresources_yaml(content) upgrade_cluster.retire_bootresources_yaml() self.assertThat( path, FileContains(upgrade_cluster.BOOTRESOURCES_WARNING + content)) def test__is_idempotent(self): path = 
self.set_bootresources_yaml(factory.make_string()) upgrade_cluster.retire_bootresources_yaml() content_after_upgrade = read_text_file(path) upgrade_cluster.retire_bootresources_yaml() self.assertThat(path, FileContains(content_after_upgrade)) def test__survives_encoding_problems(self): path = os.path.join(self.make_dir(), 'bootresources.yaml') content = b'[[%s]]' % sample_binary_data with open(path, 'wb') as config: config.write(content) self.patch(upgrade_cluster, 'BOOTRESOURCES_FILE', path) upgrade_cluster.retire_bootresources_yaml() self.assertThat( path, FileContains( upgrade_cluster.BOOTRESOURCES_WARNING.encode('ascii') + content)) class TestMigrateArchitecturesIntoUbuntuDirectory(MAASTestCase): """Tests for the `migrate_architectures_into_ubuntu_directory` upgrade.""" def configure_storage(self, storage_dir, make_current_dir=True): """Create a storage config.""" current_dir = os.path.join(storage_dir, "current") os.makedirs(current_dir) self.useFixture(ClusterConfigurationFixture(tftp_root=current_dir)) if not make_current_dir: os.rmdir(current_dir) def test__list_subdirs_under_current_directory(self): self.patch(upgrade_cluster, 'list_subdirs').return_value = ['ubuntu'] storage_dir = self.make_dir() self.configure_storage(storage_dir) upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat( upgrade_cluster.list_subdirs, MockCalledOnceWith(os.path.join(storage_dir, "current"))) def test__exits_early_if_boot_resources_dir_does_not_exist(self): # Patch list_subdirs, if it gets called then the method did not # exit early. self.patch(upgrade_cluster, 'list_subdirs') storage_dir = os.path.join(self.make_dir(), factory.make_name('none')) self.configure_storage(storage_dir, make_current_dir=False) upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat(upgrade_cluster.list_subdirs, MockNotCalled()) def test__exits_early_if_current_dir_does_not_exist(self): # Patch list_subdirs, if it gets called then the method did not # exit early. self.patch(upgrade_cluster, 'list_subdirs') storage_dir = self.make_dir() self.configure_storage(storage_dir, make_current_dir=False) upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat(upgrade_cluster.list_subdirs, MockNotCalled()) def test__exits_early_if_ubuntu_dir_exist(self): # Patch drill_down, if it gets called then the method did not # exit early. 
self.patch(upgrade_cluster, 'drill_down') storage_dir = self.make_dir() self.configure_storage(storage_dir) os.mkdir(os.path.join(storage_dir, 'current', 'ubuntu')) upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat(upgrade_cluster.drill_down, MockNotCalled()) def test__doesnt_create_ubuntu_dir_when_no_valid_directories(self): storage_dir = self.make_dir() self.configure_storage(storage_dir) upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertFalse( os.path.exists(os.path.join(storage_dir, 'current', 'ubuntu'))) def test__moves_paths_with_correct_levels_into_ubuntu_dir(self): storage_dir = self.make_dir() self.configure_storage(storage_dir) arches = [factory.make_name('arch') for _ in range(3)] subarches = [factory.make_name('subarch') for _ in range(3)] releases = [factory.make_name('release') for _ in range(3)] labels = [factory.make_name('label') for _ in range(3)] for arch, subarch, release, label in product( arches, subarches, releases, labels): os.makedirs( os.path.join( storage_dir, 'current', arch, subarch, release, label)) self.patch(upgrade_cluster, 'update_targets_conf') upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertItemsEqual( arches, list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) def test__doesnt_move_paths_with_fewer_levels_into_ubuntu_dir(self): storage_dir = self.make_dir() self.configure_storage(storage_dir) arches = [factory.make_name('arch') for _ in range(3)] subarches = [factory.make_name('subarch') for _ in range(3)] releases = [factory.make_name('release') for _ in range(3)] # Labels directory is missing, causing none of the folders to move for arch, subarch, release in product( arches, subarches, releases): os.makedirs( os.path.join(storage_dir, 'current', arch, subarch, release)) move_arch = factory.make_name('arch') os.makedirs( os.path.join( storage_dir, 'current', move_arch, factory.make_name('subarch'), factory.make_name('release'), factory.make_name('label'))) self.patch(upgrade_cluster, 'update_targets_conf') upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertItemsEqual( [move_arch], list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) def test__doesnt_move_paths_with_more_levels_into_ubuntu_dir(self): storage_dir = self.make_dir() self.configure_storage(storage_dir) # Extra directory level, this is what it looks like after upgrade. 
osystems = [factory.make_name('arch') for _ in range(3)] arches = [factory.make_name('arch') for _ in range(3)] subarches = [factory.make_name('subarch') for _ in range(3)] releases = [factory.make_name('release') for _ in range(3)] labels = [factory.make_name('label') for _ in range(3)] for osystem, arch, subarch, release, label in product( osystems, arches, subarches, releases, labels): os.makedirs( os.path.join( storage_dir, 'current', osystem, arch, subarch, release, label)) move_arch = factory.make_name('arch') os.makedirs( os.path.join( storage_dir, 'current', move_arch, factory.make_name('subarch'), factory.make_name('release'), factory.make_name('label'))) self.patch(upgrade_cluster, 'update_targets_conf') upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertItemsEqual( [move_arch], list_subdirs(os.path.join(storage_dir, 'current', 'ubuntu'))) def setup_working_migration_scenario(self): storage_dir = self.make_dir() self.configure_storage(storage_dir) arches = [factory.make_name('arch') for _ in range(3)] subarches = [factory.make_name('subarch') for _ in range(3)] releases = [factory.make_name('release') for _ in range(3)] labels = [factory.make_name('label') for _ in range(3)] for arch, subarch, release, label in product( arches, subarches, releases, labels): os.makedirs( os.path.join( storage_dir, 'current', arch, subarch, release, label)) return storage_dir def test__calls_write_targets_conf_with_current_dir(self): storage_dir = self.setup_working_migration_scenario() mock_write = self.patch(upgrade_cluster, 'write_targets_conf') self.patch(upgrade_cluster, 'update_targets_conf') upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat( mock_write, MockCalledOnceWith(os.path.join(storage_dir, 'current'))) def test__calls_update_targets_conf_with_current_dir(self): storage_dir = self.setup_working_migration_scenario() mock_update = self.patch(upgrade_cluster, 'update_targets_conf') upgrade_cluster.migrate_architectures_into_ubuntu_directory() self.assertThat( mock_update, MockCalledOnceWith(os.path.join(storage_dir, 'current'))) maas-1.9.5+bzr4599.orig/src/provisioningserver/utils/__init__.py0000644000000000000000000003673413056115004022705 0ustar 00000000000000# Copyright 2012-2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). 
"""Utilities for the provisioning server.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "create_node", "coerce_to_valid_hostname", "commission_node", "filter_dict", "flatten", "import_settings", "locate_config", "parse_key_value_file", "ShellTemplate", "warn_deprecated", "write_custom_config_section", "in_develop_mode", "sudo", ] from collections import Iterable from itertools import ( chain, imap, ) import os from pipes import quote import re import sys from sys import _getframe as getframe from warnings import warn import bson from provisioningserver.logger.log import get_maas_logger from provisioningserver.rpc import getRegionClient from provisioningserver.rpc.exceptions import ( CommissionNodeFailed, NoConnectionsAvailable, NodeAlreadyExists, ) from provisioningserver.utils.twisted import ( asynchronous, pause, retries, ) import simplejson as json import tempita from twisted.internet import reactor from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.protocols.amp import ( UnhandledCommand, UnknownRemoteError, ) maaslog = get_maas_logger("utils") def node_exists(macs, url, client): decoders = { "application/json": lambda data: json.loads(data), "application/bson": lambda data: bson.BSON(data).decode(), } params = { 'mac_address': macs } response = client.get(url, op='list', **params) content = response.read() content_type = response.headers.gettype() decode = decoders[content_type] content = decode(content) return len(content) > 0 def coerce_to_valid_hostname(hostname): """Given a server name that may contain spaces and special characters, attempts to derive a valid hostname. :param hostname: the specified (possibly invalid) hostname :return: the resulting string, or None if the hostname could not be coerced """ hostname = hostname.lower() hostname = re.sub(r'[^a-z0-9-]+', '-', hostname) hostname = hostname.strip('-') if hostname == '' or len(hostname) > 64: return None return hostname @asynchronous @inlineCallbacks def create_node(macs, arch, power_type, power_parameters, hostname=None): """Create a Node on the region and return its system_id. :param macs: A list of MAC addresses belonging to the node. :param arch: The node's architecture, in the form 'arch/subarch'. :param power_type: The node's power type as a string. :param power_parameters: The power parameters for the node, as a dict. """ # Avoid circular dependencies. from provisioningserver.rpc.region import CreateNode from provisioningserver.config import ClusterConfiguration with ClusterConfiguration.open() as config: cluster_uuid = config.cluster_uuid if hostname is not None: hostname = coerce_to_valid_hostname(hostname) for elapsed, remaining, wait in retries(15, 5, reactor): try: client = getRegionClient() break except NoConnectionsAvailable: yield pause(wait, reactor) else: maaslog.error( "Can't create node, no RPC connection to region.") return # De-dupe the MAC addresses we pass. We sort here to avoid test # failures. macs = sorted(set(macs)) try: response = yield client( CreateNode, cluster_uuid=cluster_uuid, architecture=arch, power_type=power_type, power_parameters=json.dumps(power_parameters), mac_addresses=macs, hostname=hostname) except NodeAlreadyExists: # The node already exists on the region, so we log the error and # give up. 
maaslog.error( "A node with one of the mac addresses in %s already exists.", macs) returnValue(None) except UnhandledCommand: # The region hasn't been upgraded to support this method # yet, so give up. maaslog.error( "Unable to create node on region: Region does not " "support the CreateNode RPC method.") returnValue(None) except UnknownRemoteError as e: # This happens, for example, if a ValidationError occurs on the region. # (In particular, we see this if the hostname is a duplicate.) # We should probably create specific exceptions for these, so we can # act on them appropriately. maaslog.error( "Unknown error while creating node %s: %s (see regiond.log)", macs, e.description) returnValue(None) else: returnValue(response['system_id']) @asynchronous @inlineCallbacks def commission_node(system_id, user): """Commission a Node on the region. :param system_id: system_id of node to commission. :param user: user for the node. """ # Avoid circular dependencies. from provisioningserver.rpc.region import CommissionNode for elapsed, remaining, wait in retries(15, 5, reactor): try: client = getRegionClient() break except NoConnectionsAvailable: yield pause(wait, reactor) else: maaslog.error( "Can't commission node, no RPC connection to region.") return try: yield client( CommissionNode, system_id=system_id, user=user) except CommissionNodeFailed as e: # The node cannot be commissioned, give up. maaslog.error( "Could not commission with system_id %s because %s.", system_id, e.args[0]) except UnhandledCommand: # The region hasn't been upgraded to support this method # yet, so give up. maaslog.error( "Unable to commission node on region: Region does not " "support the CommissionNode RPC method.") finally: returnValue(None) def locate_config(*path): """Return the location of a given config file or directory. :param path: Path elements to resolve relative to `${MAAS_ROOT}/etc/maas`. """ # The `os.curdir` avoids a crash when `path` is empty. path = os.path.join(os.curdir, *path) if os.path.isabs(path): return path else: # Avoid circular imports. from provisioningserver.path import get_tentative_path return get_tentative_path("etc", "maas", path) setting_expression = r""" ^([A-Z0-9_]+) # Variable name is all caps, alphanumeric and _. = # Assignment operator. (?:"|\')? # Optional leading single or double quote. (.*) # Value (?:"|\')? # Optional trailing single or double quote. """ def find_settings(whence): """Return settings from `whence`, which is assumed to be a module.""" # XXX 2012-10-11 JeroenVermeulen, bug=1065456: Put this in a shared # location. It's currently duplicated from elsewhere. return { name: value for name, value in vars(whence).items() if not name.startswith("_") } def import_settings(whence): """Import settings from `whence` into the caller's global scope.""" # XXX 2012-10-11 JeroenVermeulen, bug=1065456: Put this in a shared # location. It's currently duplicated from elsewhere. source = find_settings(whence) target = sys._getframe(1).f_globals target.update(source) def filter_dict(dictionary, desired_keys): """Return a version of `dictionary` restricted to `desired_keys`. This is like a set union, except the values from `dictionary` come along. (Actually `desired_keys` can be a `dict`, but its values will be ignored). 
""" return { key: value for key, value in dictionary.items() if key in desired_keys } def dict_depth(d, depth=0): """Returns the max depth of a dictionary.""" if not isinstance(d, dict) or not d: return depth return max(dict_depth(v, depth + 1) for _, v in d.iteritems()) def split_lines(input, separator): """Split each item from `input` into a key/value pair.""" return (line.split(separator, 1) for line in input if line.strip() != '') def strip_pairs(input): """Strip whitespace of each key/value pair in input.""" return ((key.strip(), value.strip()) for (key, value) in input) def parse_key_value_file(file_name, separator=":"): """Parse a text file into a dict of key/value pairs. Use this for simple key:value or key=value files. There are no sections, as required for python's ConfigParse. Whitespace and empty lines are ignored. :param file_name: Name of file to parse. :param separator: The text that separates each key from its value. """ with open(file_name, 'rb') as input: return dict(strip_pairs(split_lines(input, separator))) # Header and footer comments for MAAS custom config sections, as managed # by write_custom_config_section. maas_custom_config_markers = ( "## Begin MAAS settings. Do not edit; MAAS will overwrite this section.", "## End MAAS settings.", ) def find_list_item(item, in_list, starting_at=0): """Return index of `item` in `in_list`, or None if not found.""" try: return in_list.index(item, starting_at) except ValueError: return None def write_custom_config_section(original_text, custom_section): """Insert or replace a custom section in a configuration file's text. This allows you to rewrite configuration files that are not owned by MAAS, but where MAAS will have one section for its own settings. It doesn't read or write any files; this is a pure text operation. Appends `custom_section` to the end of `original_text` if there was no custom MAAS section yet. Otherwise, replaces the existing custom MAAS section with `custom_section`. Returns the new text. Assumes that the configuration file's format accepts lines starting with hash marks (#) as comments. The custom section will be bracketed by special marker comments that make it clear that MAAS wrote the section and it should not be edited by hand. :param original_text: The config file's current text. :type original_text: unicode :param custom_section: Custom config section to insert. :type custom_section: unicode :return: New config file text. :rtype: unicode """ header, footer = maas_custom_config_markers lines = original_text.splitlines() header_index = find_list_item(header, lines) if header_index is not None: footer_index = find_list_item(footer, lines, header_index) if footer_index is None: # There's a header but no footer. Pretend we didn't see the # header; just append a new custom section at the end. Any # subsequent rewrite will replace the part starting at the # header and ending at the header we will add here. At that # point there will be no trace of the strange situation # left. header_index = None if header_index is None: # There was no MAAS custom section in this file. Append it at # the end. lines += [ header, custom_section, footer, ] else: # There is a MAAS custom section in the file. Replace it. 
lines = ( lines[:(header_index + 1)] + [custom_section] + lines[footer_index:]) return '\n'.join(lines) + '\n' class Safe: """An object that is safe to render as-is.""" __slots__ = ("value",) def __init__(self, value): self.value = value def __repr__(self): return "<%s %r>" % ( self.__class__.__name__, self.value) def escape_py_literal(string): """Escape and quote a string for use as a python literal.""" return repr(string).decode('ascii') class ShellTemplate(tempita.Template): """A Tempita template specialised for writing shell scripts. By default, substitutions will be escaped using `pipes.quote`, unless they're marked as safe. This can be done using Tempita's filter syntax:: {{foobar|safe}} or as a plain Python expression:: {{safe(foobar)}} """ default_namespace = dict( tempita.Template.default_namespace, safe=Safe) def _repr(self, value, pos): """Shell-quote the value by default.""" rep = super(ShellTemplate, self)._repr if isinstance(value, Safe): return rep(value.value, pos) else: return quote(rep(value, pos)) def classify(func, subjects): """Classify `subjects` according to `func`. Splits `subjects` into two lists: one for those for which `func` returns a truth-like value, and one for the others. :param subjects: An iterable of `(ident, subject)` tuples, where `subject` is an argument that can be passed to `func` for classification. :param func: A function that takes a single argument. :return: A ``(matched, other)`` tuple, where ``matched`` and ``other`` are `list`s of `ident` values; `subject` values are not returned. """ matched, other = [], [] for ident, subject in subjects: bucket = matched if func(subject) else other bucket.append(ident) return matched, other def warn_deprecated(alternative=None): """Issue a `DeprecationWarning` for the calling function. :param alternative: Text describing an alternative to using this deprecated function. """ target = getframe(1).f_code.co_name message = "%s is deprecated" % target if alternative is None: message = "%s." % (message,) else: message = "%s; %s" % (message, alternative) warn(message, DeprecationWarning, 1) def flatten(*things): """Recursively flatten iterable parts of `things`. For example:: >>> sorted(flatten([1, 2, {3, 4, (5, 6)}])) [1, 2, 3, 4, 5, 6] :return: An iterator. """ def _flatten(things): if isinstance(things, basestring): # String-like objects are treated as leaves; iterating through a # string yields more strings, each of which is also iterable, and # so on, until the heat-death of the universe. return iter((things,)) elif isinstance(things, Iterable): # Recurse and merge in order to flatten nested structures. return chain.from_iterable(imap(_flatten, things)) else: # This is a leaf; return a single-item iterator so that it can be # chained with any others.
return iter((things,)) return _flatten(things) def is_true(value): if value is None: return False return value.lower() in ("yes", "true", "t", "1") def in_develop_mode(): """Return True if `MAAS_CLUSTER_DEVELOP` environment variable is true.""" return is_true(os.getenv('MAAS_CLUSTER_DEVELOP', None)) def sudo(command_args): """Wrap the command arguments in a sudo command, if not in debug mode.""" if in_develop_mode(): return command_args else: return ['sudo', '-n'] + command_args SYSTEMD_RUN_PATH = '/run/systemd/system' def get_init_system(): """Returns 'upstart' or 'systemd'.""" if os.path.exists(SYSTEMD_RUN_PATH): return 'systemd' else: return 'upstart' maas-1.9.5+bzr4599.orig/src/provisioningserver/utils/backoff.py0000644000000000000000000000247613056115004022535 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Utilities related to back-off. Many or most of the following are inspired by `Exponential Backoff And Jitter`__ on the AWS Architecture Blog. .. __: http://www.awsarchitectureblog.com/2015/03/backoff.html. """ from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ "exponential_growth", "full_jitter", ] from itertools import count from random import random def exponential_growth(base, rate): """Generate successive values for an exponential growth curve. Intervals are discrete and fixed, starting at 1 (not 0) and increasing by 1 on each iteration. :param base: The starting value, i.e. where the interval is 0.0. :type base: float :param rate: The rate of growth. For a 5% growth rate, pass 1.05. :type rate: float. """ for attempt in count(1): yield base * (rate ** attempt) def full_jitter(values): """Apply "full jitter" to `values`. Each value in `values` will be multiplied by a random number in the interval [0.0, 1.0). :param values: An iterable of numbers. """ for value in values: yield random() * value maas-1.9.5+bzr4599.orig/src/provisioningserver/utils/constraints.py0000644000000000000000000001036613056115004023506 0ustar 00000000000000# Copyright 2015 Canonical Ltd. This software is licensed under the # GNU Affero General Public License version 3 (see the file LICENSE). """Generic helpers for working with constraint strings.""" from __future__ import ( absolute_import, print_function, unicode_literals, ) str = None __metaclass__ = type __all__ = [ 'parse_labeled_constraint_map', ] import re class LabeledConstraintMap(object): """Class to encapsulate a labeled constraint map, so that it only needs to be parsed once. """ def __init__(self, value): self.value = value self.map = None self.error = None try: self.map = parse_labeled_constraint_map(value) except ValueError as error: self.error = error def __repr__(self): return "%s(%s)" % (self.__class__.__name__, repr(self.value)) def __unicode__(self): return self.value def __iter__(self): if self.map is None: return iter([]) return iter(self.map) def __getitem__(self, item): return self.map[item] def validate(self, exception_type=ValueError): if self.error: # XXX mpontillo 2015-10-28 Need to re-raise this properly once we # get to Python 3. raise exception_type(self.error.message) def parse_labeled_constraint_map(value, exception_type=ValueError): """Returns a dictionary of constraints, given the specified constraint value. Validates that the following conditions hold true: - The constraint string is non-empty - The constraint string is in the format: