curtin-0.1.0~bzr399/LICENSE0000644000000000000000000010333012741214565013337 0ustar 00000000000000 GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. 
A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. 
This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. 
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. 
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. 
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see . 
curtin-0.1.0~bzr399/Makefile

TOP := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
CWD := $(shell pwd)
PYTHON ?= python3
COVERAGE ?= 1
DEFAULT_COVERAGEOPTS = --with-coverage --cover-erase --cover-branches --cover-package=curtin --cover-inclusive
ifeq ($(COVERAGE), 1)
	coverageopts ?= $(DEFAULT_COVERAGEOPTS)
endif
CURTIN_VMTEST_IMAGE_SYNC ?= False
export CURTIN_VMTEST_IMAGE_SYNC
noseopts ?= -vv --nologcapture

build:

bin/curtin: curtin/pack.py tools/write-curtin
	$(PYTHON) tools/write-curtin bin/curtin

check: pep8 pyflakes pyflakes3 unittest

coverage: coverageopts ?= $(DEFAULT_COVERAGEOPTS)
coverage: unittest

pep8:
	@$(CWD)/tools/run-pep8

pyflakes:
	@$(CWD)/tools/run-pyflakes

pyflakes3:
	@$(CWD)/tools/run-pyflakes3

unittest:
	nosetests $(coverageopts) $(noseopts) tests/unittests
	nosetests3 $(coverageopts) $(noseopts) tests/unittests

docs:
	@which sphinx-build || \
		{ echo "need sphinx-build. get it:"; \
		echo "  apt-get install -qy python3-sphinx"; exit 1; } 1>&2
	make -C doc html

# By default don't sync images when running all tests.
vmtest:
	nosetests3 $(noseopts) tests/vmtests

vmtest-deps:
	@$(CWD)/tools/vmtest-system-setup

sync-images:
	@$(CWD)/tools/vmtest-sync-images

.PHONY: all test pyflakes pyflakes3 pep8 build

curtin-0.1.0~bzr399/README

This is 'curtin', the curt installer. It is blunt, brief, snappish, snippety
and unceremonious. Its goal is to install an operating system as quickly as
possible.

curtin-0.1.0~bzr399/bin/
curtin-0.1.0~bzr399/curtin/
curtin-0.1.0~bzr399/debian/
curtin-0.1.0~bzr399/doc/
curtin-0.1.0~bzr399/examples/
curtin-0.1.0~bzr399/helpers/

curtin-0.1.0~bzr399/requirements.txt

pyyaml
oauthlib

curtin-0.1.0~bzr399/setup.py

from distutils.core import setup
from glob import glob
import os

VERSION = '0.1.0'


def is_f(p):
    return os.path.isfile(p)

setup(
    name="curtin",
    description='The curtin installer',
    version=VERSION,
    author='Scott Moser',
    author_email='scott.moser@canonical.com',
    license="AGPL",
    url='http://launchpad.net/curtin/',
    packages=[
        'curtin',
        'curtin.block',
        'curtin.deps',
        'curtin.commands',
        'curtin.net',
        'curtin.reporter',
        'curtin.reporter.legacy',
    ],
    scripts=glob('bin/*'),
    data_files=[
        ('/usr/share/doc/curtin',
         [f for f in glob('doc/*') if is_f(f)]),
        ('/usr/lib/curtin/helpers',
         [f for f in glob('helpers/*') if is_f(f)])
    ]
)

curtin-0.1.0~bzr399/test-requirements.txt

mock
nose
pyflakes
coverage

curtin-0.1.0~bzr399/tox.ini

[tox]
minversion = 1.6
skipsdist = True
envlist = py27, py3, py3-flake8, py3-pylint, py27-pylint, trusty-check, coverage27, coverage3
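# Example invocations (a sketch, assuming a reasonably recent tox): "-e"
# selects one or more environments from the envlist above, and arguments
# after "--" are passed through as {posargs} to an environment's commands.
#
#   tox -e py3
#   tox -e py3-flake8,py3-pylint
#   tox -e py3 -- tests/unittests/test_config.py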
[tox:jenkins]
downloadcache = ~/cache/pip

[testenv]
usedevelop = True
# LC_ALL see https://github.com/gabrielfalcao/HTTPretty/issues/223
setenv = VIRTUAL_ENV={envdir}
    LC_ALL = en_US.utf-8
deps = -r{toxinidir}/test-requirements.txt
    -r{toxinidir}/requirements.txt
commands = {envpython} {toxinidir}/tools/noproxy nosetests {posargs} tests/unittests

[testenv:py3]
basepython = python3
# tox uses '--pre' by default to pip install. We don't want that, and
# 'pip_pre=False' isn't available until tox version 1.9.
install_command = pip install {opts} {packages}

[testenv:py2-flake8]
basepython = python2
deps = {[testenv]deps}
    flake8
commands = {envpython} -m flake8 {posargs:curtin}

[testenv:py3-flake8]
basepython = python3
deps = {[testenv]deps}
    flake8
commands = {envpython} -m flake8 {posargs:curtin tests/vmtests}

[testenv:py3-pylint]
# set basepython because tox 1.6 (trusty) does not support generated environments
basepython = python3
deps = {[testenv]deps}
    pylint==1.5.4
    bzr+lp:simplestreams
commands = {envpython} -m pylint --errors-only {posargs:curtin tests/vmtests}

[testenv:py27-pylint]
# set basepython because tox 1.6 (trusty) does not support generated environments
basepython = python2.7
deps = {[testenv]deps}
    pylint==1.5.4
commands = {envpython} -m pylint --errors-only {posargs:curtin}

[testenv:coverage3]
envdir = {toxworkdir}/py34
commands = {envpython} {toxinidir}/tools/noproxy nosetests --with-coverage --cover-erase --cover-branches --cover-package=curtin --cover-inclusive {posargs} tests/unittests

[testenv:coverage27]
envdir = {toxworkdir}/py27
commands = {envpython} {toxinidir}/tools/noproxy nosetests --with-coverage --cover-erase --cover-branches --cover-package=curtin --cover-inclusive {posargs} tests/unittests

[testenv:docs]
deps = {[testenv]deps}
    sphinx
commands = sphinx-build -b html -d doc/_build/doctrees doc/ doc/_build/html

[testenv:trusty-check]
# this environment provides roughly a trusty build environment, where
# 'make check' is run during package build. This protects against
# package build errors on trusty, where pep8 and pyflakes have subtly
# different behavior. Note, we only run pyflakes3, though.
basepython = python3
deps = pyflakes==0.8.1
    pep8==1.4.6
commands = {toxinidir}/tools/run-pyflakes3 {posargs}
    {toxinidir}/tools/run-pep8 {posargs}

[flake8]
builtins = _
exclude = .venv,.bzr,.tox,dist,doc,*lib/python*,*egg,build

curtin-0.1.0~bzr399/bin/curtin

#!/bin/sh
PY3OR2_MAIN="curtin.commands.main"
PY3OR2_MCHECK="curtin.deps.check"
PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}
PYTHON=${PY3OR2_PYTHON}
PY3OR2_DEBUG=${PY3OR2_DEBUG:-0}

debug() {
    [ "${PY3OR2_DEBUG}" != "0" ] || return 0
    echo "$@" 1>&2
}
fail() { echo "$@" 1>&2; exit 1; }

# if $0 is in bin/ and dirname($0)/../module exists, then prepend PYTHONPATH
mydir=${0%/*}
updir=${mydir%/*}
if [ "${mydir#${updir}/}" = "bin" -a -d "$updir/${PY3OR2_MCHECK%%.*}" ]; then
    updir=$(cd "$mydir/.." && pwd)
    case "$PYTHONPATH" in
        *:$updir:*|$updir:*|*:$updir) :;;
        *) export PYTHONPATH="$updir${PYTHONPATH:+:$PYTHONPATH}"
           debug "adding '$updir' to PYTHONPATH"
           ;;
    esac
fi

if [ ! -n "$PYTHON" ]; then
    first_exe=""
    oifs="$IFS"; IFS=":"
    best=0
    best_exe=""
    [ "${PY3OR2_DEBUG}" = "0" ] && _v="" || _v="-v"
    for p in $PY3OR2_PYTHONS; do
        command -v "$p" >/dev/null 2>&1 ||
            { debug "$p: not in path"; continue; }
        [ -z "$PY3OR2_MCHECK" ] && PYTHON=$p && break
        out=$($p -m "$PY3OR2_MCHECK" $_v -- "$@" 2>&1) && PYTHON="$p" &&
            { debug "$p is good [$p -m $PY3OR2_MCHECK $_v -- $*]"; break; }
        ret=$?
        debug "$p [$ret]: $out"
        # exit code of 1 is unusable
        [ $ret -eq 1 ] && continue
        [ -n "$first_exe" ] || first_exe="$p"
        # higher non-zero exit values indicate more plausible usability
        [ $best -lt $ret ] && best_exe="$p" && best=$ret &&
            debug "current best: $best_exe"
    done
    IFS="$oifs"
    [ -z "$best_exe" -a -n "$first_exe" ] && best_exe="$first_exe"
    [ -n "$PYTHON" ] || PYTHON="$best_exe"
    [ -n "$PYTHON" ] || fail "no available python? [PY3OR2_DEBUG=1 for more info]"
fi

debug "executing: $PYTHON -m \"$PY3OR2_MAIN\" $*"
exec $PYTHON -m "$PY3OR2_MAIN" "$@"

curtin-0.1.0~bzr399/curtin/__init__.py

# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin.  If not, see <http://www.gnu.org/licenses/>.

# This constant is made available so a caller can read it;
# it must be kept the same as that used in helpers/common:get_carryover_params
KERNEL_CMDLINE_COPY_TO_INSTALL_SEP = "---"

# The 'FEATURES' variable is provided so that users of curtin
# can determine which features are supported. Each entry should have
# a consistent meaning.
FEATURES = [
    # install supports the 'network' config version 1
    'NETWORK_CONFIG_V1',
    # reporter supports 'webhook' type
    'REPORTING_EVENTS_WEBHOOK',
    # install supports the 'storage' config version 1
    'STORAGE_CONFIG_V1',
    # subcommand 'system-install' is present
    'SUBCOMMAND_SYSTEM_INSTALL',
    # subcommand 'system-upgrade' is present
    'SUBCOMMAND_SYSTEM_UPGRADE',
]

# vi: ts=4 expandtab syntax=python

curtin-0.1.0~bzr399/curtin/block/
curtin-0.1.0~bzr399/curtin/commands/

curtin-0.1.0~bzr399/curtin/config.py

# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
import yaml
import json

ARCHIVE_HEADER = "#curtin-config-archive"
ARCHIVE_TYPE = "text/curtin-config-archive"
CONFIG_HEADER = "#curtin-config"
CONFIG_TYPE = "text/curtin-config"

try:
    # python2
    _STRING_TYPES = (str, basestring, unicode)
except NameError:
    # python3
    _STRING_TYPES = (str,)


def merge_config_fp(cfgin, fp):
    merge_config_str(cfgin, fp.read())


def merge_config_str(cfgin, cfgstr):
    cfg2 = yaml.safe_load(cfgstr)
    if not isinstance(cfg2, dict):
        raise TypeError("Failed reading config. not a dictionary: %s" %
                        cfgstr)
    merge_config(cfgin, cfg2)


def merge_config(cfg, cfg2):
    # update cfg by merging cfg2 over the top
    for k, v in cfg2.items():
        if isinstance(v, dict) and isinstance(cfg.get(k, None), dict):
            merge_config(cfg[k], v)
        else:
            cfg[k] = v


def merge_cmdarg(cfg, cmdarg, delim="/"):
    merge_config(cfg, cmdarg2cfg(cmdarg, delim))


def cmdarg2cfg(cmdarg, delim="/"):
    if '=' not in cmdarg:
        raise ValueError('no "=" in "%s"' % cmdarg)

    key, val = cmdarg.split("=", 1)
    cfg = {}
    cur = cfg

    is_json = False
    if key.startswith("json:"):
        is_json = True
        key = key[5:]

    items = key.split(delim)
    for item in items[:-1]:
        cur[item] = {}
        cur = cur[item]

    if is_json:
        try:
            val = json.loads(val)
        except (ValueError, TypeError):
            raise ValueError("setting of key '%s' had invalid json: %s" %
                             (key, val))

    # this would occur if 'json:={"topkey": "topval"}'
    if items[-1] == "":
        cfg = val
    else:
        cur[items[-1]] = val

    return cfg


def load_config_archive(content):
    archive = yaml.load(content)
    config = {}
    for part in archive:
        if isinstance(part, (str,)):
            if part.startswith(ARCHIVE_HEADER):
                merge_config(config, load_config_archive(part))
            elif part.startswith(CONFIG_HEADER):
                merge_config_str(config, part)
        elif isinstance(part, dict) and isinstance(part.get('content'), str):
            payload = part.get('content')
            if (part.get('type') == ARCHIVE_TYPE or
                    payload.startswith(ARCHIVE_HEADER)):
                merge_config(config, load_config_archive(payload))
            elif (part.get('type') == CONFIG_TYPE or
                  payload.startswith(CONFIG_HEADER)):
                merge_config_str(config, payload)
    return config


def load_config(cfg_file):
    with open(cfg_file, "r") as fp:
        content = fp.read()
    if not content.startswith(ARCHIVE_HEADER):
        return yaml.safe_load(content)
    else:
        return load_config_archive(content)


def load_command_config(args, state):
    if hasattr(args, 'config') and args.config:
        return args.config
    else:
        # state 'config' points to a file with fully rendered config
        cfg_file = state.get('config')

    if not cfg_file:
        cfg = {}
    else:
        cfg = load_config(cfg_file)
    return cfg


def dump_config(config):
    return yaml.dump(config, default_flow_style=False, indent=2)


def value_as_boolean(value):
    if value in (False, None, '0', 0, 'False', 'false', ''):
        return False
    return True

curtin-0.1.0~bzr399/curtin/deps/

curtin-0.1.0~bzr399/curtin/futil.py

# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin.  If not, see <http://www.gnu.org/licenses/>.

import grp
import pwd
import os

from .util import write_file


def chownbyid(fname, uid=None, gid=None):
    if uid in [None, -1] and gid in [None, -1]:
        return
    os.chown(fname, uid, gid)


def decode_perms(perm, default=0o644):
    try:
        if perm is None:
            return default
        if isinstance(perm, (int, float)):
            # Just 'downcast' it (if a float)
            return int(perm)
        else:
            # Force to string and try octal conversion
            return int(str(perm), 8)
    except (TypeError, ValueError):
        return default


def chownbyname(fname, user=None, group=None):
    uid = -1
    gid = -1
    try:
        if user:
            uid = pwd.getpwnam(user).pw_uid
        if group:
            gid = grp.getgrnam(group).gr_gid
    except KeyError as e:
        raise OSError("Unknown user or group: %s" % (e))
    chownbyid(fname, uid, gid)


def extract_usergroup(ug_pair):
    if not ug_pair:
        return (None, None)
    ug_parted = ug_pair.split(':', 1)
    u = ug_parted[0].strip()
    if len(ug_parted) == 2:
        g = ug_parted[1].strip()
    else:
        g = None
    if not u or u == "-1" or u.lower() == "none":
        u = None
    if not g or g == "-1" or g.lower() == "none":
        g = None
    return (u, g)


def write_finfo(path, content, owner="-1:-1", perms="0644"):
    (u, g) = extract_usergroup(owner)
    omode = "w"
    if isinstance(content, bytes):
        omode = "wb"
    write_file(path, content, mode=decode_perms(perms), omode=omode)
    chownbyname(path, u, g)

curtin-0.1.0~bzr399/curtin/log.py

# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
# more details.
import logging # Logging items for easy access getLogger = logging.getLogger CRITICAL = logging.CRITICAL FATAL = logging.FATAL ERROR = logging.ERROR WARNING = logging.WARNING WARN = logging.WARN INFO = logging.INFO DEBUG = logging.DEBUG NOTSET = logging.NOTSET class NullHandler(logging.Handler): def emit(self, record): pass def basicConfig(**kwargs): # basically like logging.basicConfig but only output for our logger if kwargs.get('filename'): handler = logging.FileHandler(filename=kwargs['filename'], mode=kwargs.get('filemode', 'a')) elif kwargs.get('stream'): handler = logging.StreamHandler(stream=kwargs['stream']) else: handler = NullHandler() if 'verbosity' in kwargs: level = ((logging.ERROR, logging.INFO, logging.DEBUG) [min(kwargs['verbosity'], 2)]) else: level = kwargs.get('level', logging.NOTSET) handler.setFormatter(logging.Formatter(fmt=kwargs.get('format'), datefmt=kwargs.get('datefmt'))) handler.setLevel(level) logging.getLogger().setLevel(level) logger = _getLogger() for h in list(logger.handlers): logger.removeHandler(h) logger.setLevel(level) logger.addHandler(handler) def _getLogger(name='curtin'): return logging.getLogger(name) if not logging.getLogger().handlers: logging.getLogger().addHandler(NullHandler()) LOG = _getLogger() # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/net/0000755000000000000000000000000012741214565014424 5ustar 00000000000000curtin-0.1.0~bzr399/curtin/pack.py0000644000000000000000000001675712741214565015146 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import errno import os import shutil import tempfile from . import util CALL_ENTRY_POINT_SH_HEADER = """ #!/bin/sh PY3OR2_MAIN="%(ep_main)s" PY3OR2_MCHECK="%(ep_mcheck)s" PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"%(python_exe_list)s"} PYTHON=${PY3OR2_PYTHON} PY3OR2_DEBUG=${PY3OR2_DEBUG:-0} """.strip() CALL_ENTRY_POINT_SH_BODY = """ debug() { [ "${PY3OR2_DEBUG}" != "0" ] || return 0 echo "$@" 1>&2 } fail() { echo "$@" 1>&2; exit 1; } # if $0 is is bin/ and dirname($0)/../module exists, then prepend PYTHONPATH mydir=${0%/*} updir=${mydir%/*} if [ "${mydir#${updir}/}" = "bin" -a -d "$updir/${PY3OR2_MCHECK%%.*}" ]; then updir=$(cd "$mydir/.." && pwd) case "$PYTHONPATH" in *:$updir:*|$updir:*|*:$updir) :;; *) export PYTHONPATH="$updir${PYTHONPATH:+:$PYTHONPATH}" debug "adding '$updir' to PYTHONPATH" ;; esac fi if [ ! -n "$PYTHON" ]; then first_exe="" oifs="$IFS"; IFS=":" best=0 best_exe="" [ "${PY3OR2_DEBUG}" = "0" ] && _v="" || _v="-v" for p in $PY3OR2_PYTHONS; do command -v "$p" >/dev/null 2>&1 || { debug "$p: not in path"; continue; } [ -z "$PY3OR2_MCHECK" ] && PYTHON=$p && break out=$($p -m "$PY3OR2_MCHECK" $_v -- "$@" 2>&1) && PYTHON="$p" && { debug "$p is good [$p -m $PY3OR2_MCHECK $_v -- $*]"; break; } ret=$? 
debug "$p [$ret]: $out" # exit code of 1 is unuseable [ $ret -eq 1 ] && continue [ -n "$first_exe" ] || first_exe="$p" # higher non-zero exit values indicate more plausible usability [ $best -lt $ret ] && best_exe="$p" && best=$ret && debug "current best: $best_exe" done IFS="$oifs" [ -z "$best_exe" -a -n "$first_exe" ] && best_exe="$first_exe" [ -n "$PYTHON" ] || PYTHON="$best_exe" [ -n "$PYTHON" ] || fail "no availble python? [PY3OR2_DEBUG=1 for more info]" fi debug "executing: $PYTHON -m \\"$PY3OR2_MAIN\\" $*" exec $PYTHON -m "$PY3OR2_MAIN" "$@" """ def write_exe_wrapper(entrypoint, path=None, interpreter=None, deps_check_entry=None, mode=0o755): if not interpreter: interpreter = "python3:python" subs = { 'ep_main': entrypoint, 'ep_mcheck': deps_check_entry if deps_check_entry else "", 'python_exe_list': interpreter, } content = '\n'.join( (CALL_ENTRY_POINT_SH_HEADER % subs, CALL_ENTRY_POINT_SH_BODY)) if path is not None: with open(path, "w") as fp: fp.write(content) if mode is not None: os.chmod(path, mode) else: return content def pack(fdout=None, command=None, paths=None, copy_files=None, add_files=None): # write to 'fdout' a self extracting file to execute 'command' # if fdout is None, return content that would be written to fdout. # add_files is a list of (archive_path, file_content) tuples. # copy_files is a list of (archive_path, file_path) tuples. if paths is None: paths = util.get_paths() if add_files is None: add_files = [] if copy_files is None: copy_files = [] tmpd = None try: tmpd = tempfile.mkdtemp() exdir = os.path.join(tmpd, 'curtin') os.mkdir(exdir) bindir = os.path.join(exdir, 'bin') os.mkdir(bindir) def not_dot_py(input_d, flist): # include .py files and directories other than __pycache__ return [f for f in flist if not (f.endswith(".py") or (f != "__pycache__" and os.path.isdir(os.path.join(input_d, f))))] shutil.copytree(paths['helpers'], os.path.join(exdir, "helpers")) shutil.copytree(paths['lib'], os.path.join(exdir, "curtin"), ignore=not_dot_py) write_exe_wrapper(entrypoint='curtin.commands.main', path=os.path.join(bindir, 'curtin'), deps_check_entry="curtin.deps.check") for archpath, filepath in copy_files: target = os.path.abspath(os.path.join(exdir, archpath)) if not target.startswith(exdir + os.path.sep): raise ValueError("'%s' resulted in path outside archive" % archpath) try: os.mkdir(os.path.dirname(target)) except OSError as e: if e.errno == errno.EEXIST: pass if os.path.isfile(filepath): shutil.copy(filepath, target) else: shutil.copytree(filepath, target) for archpath, content in add_files: target = os.path.abspath(os.path.join(exdir, archpath)) if not target.startswith(exdir + os.path.sep): raise ValueError("'%s' resulted in path outside archive" % archpath) try: os.mkdir(os.path.dirname(target)) except OSError as e: if e.errno == errno.EEXIST: pass with open(target, "w") as fp: fp.write(content) archcmd = os.path.join(paths['helpers'], 'shell-archive') archout = None args = [archcmd] if fdout is not None: archout = os.path.join(tmpd, 'output') args.append("--output=%s" % archout) args.extend(["--bin-path=_pwd_/bin", "--python-path=_pwd_", exdir, "curtin", "--"]) if command is not None: args.extend(command) (out, _err) = util.subp(args, capture=True) if fdout is None: if isinstance(out, bytes): out = out.decode() return out else: with open(archout, "r") as fp: while True: buf = fp.read(4096) fdout.write(buf) if len(buf) != 4096: break finally: if tmpd: shutil.rmtree(tmpd) def pack_install(fdout=None, configs=None, paths=None, add_files=None, 
copy_files=None, args=None, install_deps=True): if configs is None: configs = [] if add_files is None: add_files = [] if args is None: args = [] if install_deps: dep_flags = ["--install-deps"] else: dep_flags = [] command = ["curtin"] + dep_flags + ["install"] my_files = [] for n, config in enumerate(configs): apath = "configs/config-%03d.cfg" % n my_files.append((apath, config),) command.append("--config=%s" % apath) command += args return pack(fdout=fdout, command=command, paths=paths, add_files=add_files + my_files, copy_files=copy_files) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/reporter/0000755000000000000000000000000012741214565015500 5ustar 00000000000000curtin-0.1.0~bzr399/curtin/swap.py0000644000000000000000000000645612741214565015175 0ustar 00000000000000# Copyright (C) 2014 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os from .log import LOG from . import util def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. if memsize is None: memsize = util.get_meminfo()['total'] GB = 2 ** 30 sugg_max = 8 * GB if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given maxsize = sugg_max elif fsys: avail = util.get_fs_use_info(fsys)[1] if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) elif maxsize > ((avail * .9)): # set to 90% of available disk space maxsize = int(avail * .9) formulas = [ # < 1G: swap = double memory (1 * GB, lambda x: x * 2), # < 2G: swap = 2G (2 * GB, lambda x: 2 * GB), # < 4G: swap = memory (4 * GB, lambda x: x), # < 16G: 4G (16 * GB, lambda x: 4 * GB), # < 64G: 1/2 M up to max (64 * GB, lambda x: x / 2), ] size = None for top, func in formulas: if memsize <= top: size = min(func(memsize), maxsize) if size < (memsize / 2) and size < 4 * GB: return 0 return size return maxsize def setup_swapfile(target, fstab=None, swapfile=None, size=None, maxsize=None): if size is None: size = suggested_swapsize(fsys=target, maxsize=maxsize) if size == 0: LOG.debug("Not creating swap: suggested size was 0") return if swapfile is None: swapfile = "/swap.img" if not swapfile.startswith("/"): swapfile = "/" + swapfile mbsize = str(int(size / (2 ** 20))) msg = "creating swap file '%s' of %sMB" % (swapfile, mbsize) fpath = os.path.sep.join([target, swapfile]) try: util.ensure_dir(os.path.dirname(fpath)) with util.LogTimer(LOG.debug, msg): util.subp( ['sh', '-c', ('rm -f "$1" && umask 0066 && ' '{ fallocate -l "${2}M" "$1" || ' ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), 'setup_swap', fpath, mbsize]) except Exception: LOG.warn("failed %s" % msg) raise if fstab is None: return try: line = '\t'.join([swapfile, 'none', 'swap', 'sw', '0', '0']) with open(fstab, "a") as fp: fp.write(line + "\n") except Exception: os.unlink(fpath) raise 
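The tier table in suggested_swapsize() above reads: under 1 GiB of memory,
suggest double the memory; under 2 GiB, suggest 2 GiB; under 4 GiB, suggest
memory-sized swap; under 16 GiB, suggest 4 GiB; under 64 GiB, suggest half
the memory; above that, fall through to maxsize. The result is always capped
by maxsize (8 GiB unless a filesystem size dictates less). A small sketch of
how that plays out, assuming the curtin package is importable:

from curtin.swap import suggested_swapsize

GB = 2 ** 30
# memsize selects the first tier whose threshold it does not exceed,
# then the tier's suggestion is capped by maxsize.
for mem in (0.5, 1.5, 3, 8, 32):
    swap = suggested_swapsize(memsize=int(mem * GB), maxsize=8 * GB)
    print("%4.1f GiB RAM -> %.1f GiB swap" % (mem, swap / GB))
# prints 1.0, 2.0, 3.0, 4.0 and 8.0 (the 16 GiB half-memory
# suggestion for 32 GiB of RAM is capped to the 8 GiB maxsize)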
curtin-0.1.0~bzr399/curtin/udev.py0000644000000000000000000000446712741214565015166 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Ryan Harper # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os from curtin import util def compose_udev_equality(key, value): """Return a udev comparison clause, like `ACTION=="add"`.""" assert key == key.upper() return '%s=="%s"' % (key, value) def compose_udev_attr_equality(attribute, value): """Return a udev attribute comparison clause, like `ATTR{type}=="1"`.""" assert attribute == attribute.lower() return 'ATTR{%s}=="%s"' % (attribute, value) def compose_udev_setting(key, value): """Return a udev assignment clause, like `NAME="eth0"`.""" assert key == key.upper() return '%s="%s"' % (key, value) def generate_udev_rule(interface, mac): """Return a udev rule to set the name of network interface with `mac`. The rule ends up as a single line looking something like: SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0" """ rule = ', '.join([ compose_udev_equality('SUBSYSTEM', 'net'), compose_udev_equality('ACTION', 'add'), compose_udev_equality('DRIVERS', '?*'), compose_udev_attr_equality('address', mac), compose_udev_setting('NAME', interface), ]) return '%s\n' % rule def udevadm_settle(exists=None, timeout=None): settle_cmd = ["udevadm", "settle"] if exists: # skip the settle if the requested path already exists if os.path.exists(exists): return settle_cmd.extend(['--exit-if-exists=%s' % exists]) if timeout: settle_cmd.extend(['--timeout=%s' % timeout]) util.subp(settle_cmd) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/url_helper.py0000644000000000000000000002436212741214565016360 0ustar 00000000000000from email.utils import parsedate import json import os import socket import sys import time import uuid from functools import partial try: from urllib import request as _u_re # pylint: disable=no-name-in-module from urllib import error as _u_e # pylint: disable=no-name-in-module from urllib.parse import urlparse # pylint: disable=no-name-in-module urllib_request = _u_re urllib_error = _u_e except ImportError: # python2 import urllib2 as urllib_request import urllib2 as urllib_error from urlparse import urlparse # pylint: disable=import-error from .log import LOG error = urllib_error class _ReRaisedException(Exception): exc = None """this exists only as an exception type that was re-raised by an exception_cb, so code can know to handle it specially""" def __init__(self, exc): self.exc = exc def _geturl(url, headers=None, headers_cb=None, exception_cb=None, data=None): def_headers = {'User-Agent': 'Curtin/0.1'} if headers is not None: def_headers.update(headers) headers = def_headers if headers_cb: headers.update(headers_cb(url)) if data and isinstance(data, dict): data = json.dumps(data).encode() try: req = urllib_request.Request(url=url, data=data, headers=headers) r = urllib_request.urlopen(req).read() # python2, we want to 
        # return bytes, which is what python3 does
        if isinstance(r, str):
            return r.decode()
        return r
    except urllib_error.HTTPError as exc:
        myexc = UrlError(exc, code=exc.code, headers=exc.headers, url=url,
                         reason=exc.reason)
    except Exception as exc:
        myexc = UrlError(exc, code=None, headers=None, url=url,
                         reason="unknown")

    if exception_cb:
        try:
            exception_cb(myexc)
        except Exception as e:
            myexc = _ReRaisedException(e)

    raise myexc


def geturl(url, headers=None, headers_cb=None, exception_cb=None,
           data=None, retries=None, log=LOG.warn):
    """return the content of the url in binary_type. (py3: bytes, py2: str)"""
    if retries is None:
        retries = []

    curexc = None
    for trynum, naptime in enumerate(retries):
        try:
            return _geturl(url=url, headers=headers, headers_cb=headers_cb,
                           exception_cb=exception_cb, data=data)
        except _ReRaisedException as e:
            # an exception_cb re-raised; do not retry, surface the original
            raise e.exc
        except Exception as e:
            curexc = e

        if log:
            msg = ("try %d of request to %s failed. sleeping %d: %s" %
                   (trynum, url, naptime, curexc))
            log(msg)
        time.sleep(naptime)

    try:
        return _geturl(url=url, headers=headers, headers_cb=headers_cb,
                       exception_cb=exception_cb, data=data)
    except _ReRaisedException as e:
        raise e.exc


class UrlError(IOError):
    def __init__(self, cause, code=None, headers=None, url=None,
                 reason=None):
        IOError.__init__(self, str(cause))
        self.cause = cause
        self.code = code
        self.headers = headers
        if self.headers is None:
            self.headers = {}
        self.url = url
        self.reason = reason

    def __str__(self):
        if isinstance(self.cause, urllib_error.HTTPError):
            msg = "http error: %s" % self.cause.code
        elif isinstance(self.cause, urllib_error.URLError):
            msg = "url error: %s" % self.cause.reason
        elif isinstance(self.cause, socket.timeout):
            msg = "socket timeout: %s" % self.cause
        else:
            msg = "Unknown Exception: %s" % self.cause
        return "[%s] " % self.url + msg


class OauthUrlHelper(object):
    def __init__(self, consumer_key=None, token_key=None, token_secret=None,
                 consumer_secret=None,
                 skew_data_file="/run/oauth_skew.json"):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret or ""
        self.token_key = token_key
        self.token_secret = token_secret
        self.skew_data_file = skew_data_file
        self._do_oauth = True
        self.skew_change_limit = 5
        required = (self.token_key, self.token_secret, self.consumer_key)
        if not any(required):
            self._do_oauth = False
        elif not all(required):
            raise ValueError("all or none of token_key, token_secret, or "
                             "consumer_key must be set")

        old = self.read_skew_file()
        self.skew_data = old or {}

    def __str__(self):
        fields = ['consumer_key', 'consumer_secret',
                  'token_key', 'token_secret']
        masked = fields

        def r(name):
            if not hasattr(self, name):
                rval = "_unset"
            else:
                val = getattr(self, name)
                if val is None:
                    rval = "None"
                elif name in masked:
                    rval = '"%s"' % ("*" * len(val))
                else:
                    rval = '"%s"' % val
            return '%s=%s' % (name, rval)

        return ("OauthUrlHelper(" + ','.join([r(f) for f in fields]) + ")")

    def read_skew_file(self):
        if self.skew_data_file and os.path.isfile(self.skew_data_file):
            with open(self.skew_data_file, mode="r") as fp:
                return json.load(fp)
        return None

    def update_skew_file(self, host, value):
        # this is not atomic
        if not self.skew_data_file:
            return
        cur = self.read_skew_file()
        if cur is None:
            cur = {}
        cur[host] = value
        with open(self.skew_data_file, mode="w") as fp:
            fp.write(json.dumps(cur))

    def exception_cb(self, exception):
        if not (isinstance(exception, UrlError) and
                (exception.code == 403 or exception.code == 401)):
            return

        if 'date' not in exception.headers:
            LOG.warn("Missing header 'date' in %s response", exception.code)
            return

        date =
exception.headers['date'] try: remote_time = time.mktime(parsedate(date)) except Exception as e: LOG.warn("Failed to convert datetime '%s': %s", date, e) return skew = int(remote_time - time.time()) host = urlparse(exception.url).netloc old_skew = self.skew_data.get(host, 0) if abs(old_skew - skew) > self.skew_change_limit: self.update_skew_file(host, skew) LOG.warn("Setting oauth clockskew for %s to %d", host, skew) self.skew_data[host] = skew return def headers_cb(self, url): if not self._do_oauth: return {} host = urlparse(url).netloc clockskew = None if self.skew_data and host in self.skew_data: clockskew = self.skew_data[host] return oauth_headers( url=url, consumer_key=self.consumer_key, token_key=self.token_key, token_secret=self.token_secret, consumer_secret=self.consumer_secret, clockskew=clockskew) def _wrapped(self, wrapped_func, args, kwargs): kwargs['headers_cb'] = partial( self._headers_cb, kwargs.get('headers_cb')) kwargs['exception_cb'] = partial( self._exception_cb, kwargs.get('exception_cb')) return wrapped_func(*args, **kwargs) def geturl(self, *args, **kwargs): return self._wrapped(geturl, args, kwargs) def _exception_cb(self, extra_exception_cb, exception): ret = None try: if extra_exception_cb: ret = extra_exception_cb(exception) finally: self.exception_cb(exception) return ret def _headers_cb(self, extra_headers_cb, url): headers = {} if extra_headers_cb: headers = extra_headers_cb(url) headers.update(self.headers_cb(url)) return headers def _oauth_headers_none(url, consumer_key, token_key, token_secret, consumer_secret, clockskew=0): """oauth_headers implementation when no oauth is available""" if not any([token_key, token_secret, consumer_key]): return {} pkg = "'python3-oauthlib'" if sys.version_info[0] == 2: pkg = "'python-oauthlib' or 'python-oauth'" raise ValueError( "Oauth was necessary but no oauth library is available. " "Please install package " + pkg + ".") def _oauth_headers_oauth(url, consumer_key, token_key, token_secret, consumer_secret, clockskew=0): """Build OAuth headers with oauth using given credentials.""" consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) token = oauth.OAuthToken(token_key, token_secret) if clockskew is None: clockskew = 0 timestamp = int(time.time()) + clockskew params = { 'oauth_version': "1.0", 'oauth_nonce': uuid.uuid4().hex, 'oauth_timestamp': timestamp, 'oauth_token': token.key, 'oauth_consumer_key': consumer.key, } req = oauth.OAuthRequest(http_url=url, parameters=params) req.sign_request( oauth.OAuthSignatureMethod_PLAINTEXT(), consumer, token) return(req.to_header()) def _oauth_headers_oauthlib(url, consumer_key, token_key, token_secret, consumer_secret, clockskew=0): """Build OAuth headers with oauthlib using given credentials.""" if clockskew is None: clockskew = 0 timestamp = int(time.time()) + clockskew client = oauth1.Client( consumer_key, client_secret=consumer_secret, resource_owner_key=token_key, resource_owner_secret=token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, timestamp=str(timestamp)) uri, signed_headers, body = client.sign(url) return signed_headers oauth_headers = _oauth_headers_none try: # prefer to use oauthlib. 
(python-oauthlib) import oauthlib.oauth1 as oauth1 oauth_headers = _oauth_headers_oauthlib except ImportError: # no oauthlib was present, try using oauth (python-oauth) try: import oauth.oauth as oauth oauth_headers = _oauth_headers_oauth except ImportError: # we have no oauth libraries available, use oauth_headers_none pass # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/util.py0000644000000000000000000006554312741214565015202 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import argparse import errno import glob import json import os import platform import shutil import subprocess import stat import sys import tempfile import time from .log import LOG _INSTALLED_HELPERS_PATH = '/usr/lib/curtin/helpers' _INSTALLED_MAIN = '/usr/bin/curtin' _LSB_RELEASE = {} def _subp(args, data=None, rcs=None, env=None, capture=False, shell=False, logstring=False, decode="replace"): if rcs is None: rcs = [0] devnull_fp = None try: if not logstring: LOG.debug(("Running command %s with allowed return codes %s" " (shell=%s, capture=%s)"), args, rcs, shell, capture) else: LOG.debug(("Running hidden command to protect sensitive " "input/output logstring: %s"), logstring) stdin = None stdout = None stderr = None if capture: stdout = subprocess.PIPE stderr = subprocess.PIPE if data is None: devnull_fp = open(os.devnull) stdin = devnull_fp else: stdin = subprocess.PIPE sp = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) # Just ensure blank instead of none. if not out and capture: out = b'' if not err and capture: err = b'' if decode: def ldecode(data, m='utf-8'): if not isinstance(data, bytes): return data return data.decode(m, errors=decode) out = ldecode(out) err = ldecode(err) except OSError as e: raise ProcessExecutionError(cmd=args, reason=e) finally: if devnull_fp: devnull_fp.close() rc = sp.returncode # pylint: disable=E1101 if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, exit_code=rc, cmd=args) return (out, err) def subp(*args, **kwargs): """Run a subprocess. :param args: command to run in a list. [cmd, arg1, arg2...] :param data: input to the command, made available on its stdin. :param rcs: a list of allowed return codes. If subprocess exits with a value not in this list, a ProcessExecutionError will be raised. By default, data is returned as a string. See 'decode' parameter. :param env: a dictionary for the command's environment. :param capture: boolean indicating if output should be captured. If True, then stderr and stdout will be returned. If False, they will not be redirected. :param shell: boolean indicating if this should be run with a shell. :param logstring: the command will be logged to DEBUG. If it contains info that should not be logged, then logstring will be logged instead. 
:param decode: if False, no decoding will be done and returned stdout and stderr will be bytes. Other allowed values are 'strict', 'ignore', and 'replace'. These values are passed through to bytes().decode() as the 'errors' parameter. There is no support for decoding to other than utf-8. :param retries: a list of times to sleep in between retries. After each failure subp will sleep for N seconds and then try again. A value of [1, 3] means to run, sleep 1, run, sleep 3, run and then return exit code. """ retries = [] if "retries" in kwargs: retries = kwargs.pop("retries") if args: cmd = args[0] if 'args' in kwargs: cmd = kwargs['args'] # Retry with waits between the retried command. for num, wait in enumerate(retries): try: return _subp(*args, **kwargs) except ProcessExecutionError as e: LOG.debug("try %s: command %s failed, rc: %s", num, cmd, e.exit_code) time.sleep(wait) # Final try without needing to wait or catch the error. If this # errors here then it will be raised to the caller. return _subp(*args, **kwargs) def load_command_environment(env=os.environ, strict=False): mapping = {'scratch': 'WORKING_DIR', 'fstab': 'OUTPUT_FSTAB', 'interfaces': 'OUTPUT_INTERFACES', 'config': 'CONFIG', 'target': 'TARGET_MOUNT_POINT', 'network_state': 'OUTPUT_NETWORK_STATE', 'network_config': 'OUTPUT_NETWORK_CONFIG', 'report_stack_prefix': 'CURTIN_REPORTSTACK'} if strict: missing = [k for k in mapping if k not in env] if len(missing): raise KeyError("missing environment vars: %s" % missing) return {k: env.get(v) for k, v in mapping.items()} class BadUsage(Exception): pass class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' 'Command: %(cmd)s\n' 'Exit code: %(exit_code)s\n' 'Reason: %(reason)s\n' 'Stdout: %(stdout)r\n' 'Stderr: %(stderr)r') def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None, reason=None): if not cmd: self.cmd = '-' else: self.cmd = cmd if not description: self.description = 'Unexpected error while running command.' 
else: self.description = description if not isinstance(exit_code, int): self.exit_code = '-' else: self.exit_code = exit_code if not stderr: self.stderr = '' else: self.stderr = stderr if not stdout: self.stdout = '' else: self.stdout = stdout if reason: self.reason = reason else: self.reason = '-' message = self.MESSAGE_TMPL % { 'description': self.description, 'cmd': self.cmd, 'exit_code': self.exit_code, 'stdout': self.stdout, 'stderr': self.stderr, 'reason': self.reason, } IOError.__init__(self, message) class LogTimer(object): def __init__(self, logfunc, msg): self.logfunc = logfunc self.msg = msg def __enter__(self): self.start = time.time() return self def __exit__(self, etype, value, trace): self.logfunc("%s took %0.3f seconds" % (self.msg, time.time() - self.start)) def is_mounted(target, src=None, opts=None): # return whether or not src is mounted on target mounts = "" with open("/proc/mounts", "r") as fp: mounts = fp.read() for line in mounts.splitlines(): if line.split()[1] == os.path.abspath(target): return True return False def do_mount(src, target, opts=None): # mount src at target with opts and return True # if already mounted, return False if opts is None: opts = [] if isinstance(opts, str): opts = [opts] if is_mounted(target, src, opts): return False ensure_dir(target) cmd = ['mount'] + opts + [src, target] subp(cmd) return True def do_umount(mountpoint): if not is_mounted(mountpoint): return False subp(['umount', mountpoint]) return True def ensure_dir(path, mode=None): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise if mode is not None: os.chmod(path, mode) def write_file(filename, content, mode=0o644, omode="w"): ensure_dir(os.path.dirname(filename)) with open(filename, omode) as fp: fp.write(content) os.chmod(filename, mode) def load_file(path, mode="r"): with open(path, mode) as fp: return fp.read() def del_file(path): try: os.unlink(path) LOG.debug("del_file: removed %s", path) except OSError as e: LOG.exception("del_file: %s did not exist.", path) if e.errno != errno.ENOENT: raise e def disable_daemons_in_root(target): contents = "\n".join( ['#!/bin/sh', '# see invoke-rc.d for exit codes. 
101 is "do not run"', 'while true; do', ' case "$1" in', ' -*) shift;;', ' makedev|x11-common) exit 0;;', ' *) exit 101;;', ' esac', 'done', '']) fpath = os.path.join(target, "usr/sbin/policy-rc.d") if os.path.isfile(fpath): return False write_file(fpath, mode=0o755, content=contents) return True def undisable_daemons_in_root(target): try: os.unlink(os.path.join(target, "usr/sbin/policy-rc.d")) except OSError as e: if e.errno != errno.ENOENT: raise return False return True class ChrootableTarget(object): def __init__(self, target, allow_daemons=False, sys_resolvconf=True): if target is None: target = "/" self.target = os.path.abspath(target) self.mounts = ["/dev", "/proc", "/sys"] self.umounts = [] self.disabled_daemons = False self.allow_daemons = allow_daemons self.sys_resolvconf = sys_resolvconf self.rconf_d = None def __enter__(self): for p in self.mounts: tpath = os.path.join(self.target, p[1:]) if do_mount(p, tpath, opts='--bind'): self.umounts.append(tpath) if not self.allow_daemons: self.disabled_daemons = disable_daemons_in_root(self.target) target_etc = os.path.join(self.target, "etc") if self.target != "/" and os.path.isdir(target_etc): # never muck with resolv.conf on / rconf = os.path.join(target_etc, "resolv.conf") rtd = None try: rtd = tempfile.mkdtemp(dir=os.path.dirname(rconf)) tmp = os.path.join(rtd, "resolv.conf") os.rename(rconf, tmp) self.rconf_d = rtd shutil.copy("/etc/resolv.conf", rconf) except: if rtd: shutil.rmtree(rtd) self.rconf_d = None raise return self def __exit__(self, etype, value, trace): if self.disabled_daemons: undisable_daemons_in_root(self.target) # if /dev is to be unmounted, udevadm settle (LP: #1462139) if os.path.join(self.target, "dev") in self.umounts: subp(['udevadm', 'settle']) for p in reversed(self.umounts): do_umount(p) rconf = os.path.join(self.target, "etc", "resolv.conf") if self.sys_resolvconf and self.rconf_d: os.rename(os.path.join(self.rconf_d, "resolv.conf"), rconf) shutil.rmtree(self.rconf_d) class RunInChroot(ChrootableTarget): def __call__(self, args, **kwargs): if self.target != "/": chroot = ["chroot", self.target] else: chroot = [] return subp(chroot + args, **kwargs) def is_exe(fpath): # Return path of program for execution if found in path return os.path.isfile(fpath) and os.access(fpath, os.X_OK) def which(program, search=None, target=None): if target is None or os.path.realpath(target) == "/": target = "/" if os.path.sep in program: # if program had a '/' in it, then do not search PATH # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls # so effectively we set cwd to / (or target) if is_exe(os.path.sep.join((target, program,))): return program if search is None: paths = [p.strip('"') for p in os.environ.get("PATH", "").split(os.pathsep)] if target == "/": search = paths else: search = [p for p in paths if p.startswith("/")] # normalize path input search = [os.path.abspath(p) for p in search] for path in search: if is_exe(os.path.sep.join((target, path, program,))): return os.path.sep.join((path, program,)) return None def get_paths(curtin_exe=None, lib=None, helpers=None): # return a dictionary with paths for 'curtin_exe', 'helpers' and 'lib' # that represent where 'curtin' executable lives, where the 'curtin' module # directory is (containing __init__.py) and where the 'helpers' directory. 
mydir = os.path.realpath(os.path.dirname(__file__)) tld = os.path.realpath(mydir + os.path.sep + "..") if curtin_exe is None: if os.path.isfile(os.path.join(tld, "bin", "curtin")): curtin_exe = os.path.join(tld, "bin", "curtin") if (curtin_exe is None and (os.path.basename(sys.argv[0]).startswith("curtin") and os.path.isfile(sys.argv[0]))): curtin_exe = os.path.realpath(sys.argv[0]) if curtin_exe is None: found = which('curtin') if found: curtin_exe = found if (curtin_exe is None and os.path.exists(_INSTALLED_MAIN)): curtin_exe = _INSTALLED_MAIN cfile = "common" # a file in 'helpers' if (helpers is None and os.path.isfile(os.path.join(tld, "helpers", cfile))): helpers = os.path.join(tld, "helpers") if (helpers is None and os.path.isfile(os.path.join(_INSTALLED_HELPERS_PATH, cfile))): helpers = _INSTALLED_HELPERS_PATH return({'curtin_exe': curtin_exe, 'lib': mydir, 'helpers': helpers}) def get_architecture(target=None): chroot = [] if target is not None: chroot = ['chroot', target] out, _ = subp(chroot + ['dpkg', '--print-architecture'], capture=True) return out.strip() def has_pkg_available(pkg, target=None): chroot = [] if target is not None: chroot = ['chroot', target] out, _ = subp(chroot + ['apt-cache', 'pkgnames'], capture=True) for item in out.splitlines(): if pkg == item.strip(): return True return False def has_pkg_installed(pkg, target=None): chroot = [] if target is not None: chroot = ['chroot', target] try: out, _ = subp(chroot + ['dpkg-query', '--show', '--showformat', '${db:Status-Abbrev}', pkg], capture=True) return out.rstrip() == "ii" except ProcessExecutionError: return False def parse_dpkg_version(raw, name=None, semx=None): """Parse a dpkg version string into various parts and calcualate a numerical value of the version for use in comparing package versions returns a dictionary with the results """ if semx is None: semx = (10000, 100, 1) upstream = raw.split('-')[0] toks = upstream.split(".", 2) if len(toks) == 3: major, minor, micro = toks elif len(toks) == 2: major, minor, micro = (toks[0], toks[1], 0) elif len(toks) == 1: major, minor, micro = (toks[0], 0, 0) version = { 'major': major, 'minor': minor, 'micro': micro, 'raw': raw, 'upstream': upstream, } if name: version['name'] = name if semx: try: version['semantic_version'] = int( int(major) * semx[0] + int(minor) * semx[1] + int(micro) * semx[2]) except (ValueError, IndexError): version['semantic_version'] = None return version def get_package_version(pkg, target=None, semx=None): """Use dpkg-query to extract package pkg's version string and parse the version string into a dictionary """ chroot = [] if target is not None: chroot = ['chroot', target] try: out, _ = subp(chroot + ['dpkg-query', '--show', '--showformat', '${Version}', pkg], capture=True) raw = out.rstrip() return parse_dpkg_version(raw, name=pkg, semx=semx) except ProcessExecutionError: return None def find_newer(src, files): mtime = os.stat(src).st_mtime return [f for f in files if os.path.exists(f) and os.stat(f).st_mtime > mtime] def set_unexecutable(fname, strict=False): """set fname so it is not executable. if strict, raise an exception if the file does not exist. return the current mode, or None if no change is needed. 
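
    For example (an illustrative sketch, not from the original docs):
    calling set_unexecutable('/tmp/some-script') on a file with mode
    0o755 chmods it to 0o644 and returns the prior mode, 0o755.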
""" if not os.path.exists(fname): if strict: raise ValueError('%s: file does not exist' % fname) return None cur = stat.S_IMODE(os.lstat(fname).st_mode) target = cur & (~stat.S_IEXEC & ~stat.S_IXGRP & ~stat.S_IXOTH) if cur == target: return None os.chmod(fname, target) return cur def apt_update(target=None, env=None, force=False, comment=None, retries=None): marker = "tmp/curtin.aptupdate" if target is None: target = "/" if env is None: env = os.environ.copy() if retries is None: # by default run apt-update up to 3 times to allow # for transient failures retries = (1, 2, 3) if comment is None: comment = "no comment provided" if comment.endswith("\n"): comment = comment[:-1] marker = os.path.join(target, marker) # if marker exists, check if there are files that would make it obsolete listfiles = [os.path.join(target, "etc/apt/sources.list")] listfiles += glob.glob( os.path.join(target, "etc/apt/sources.list.d/*.list")) if os.path.exists(marker) and not force: if len(find_newer(marker, listfiles)) == 0: return restore_perms = [] abs_tmpdir = tempfile.mkdtemp(dir=os.path.join(target, 'tmp')) try: abs_slist = abs_tmpdir + "/sources.list" abs_slistd = abs_tmpdir + "/sources.list.d" ch_tmpdir = "/tmp/" + os.path.basename(abs_tmpdir) ch_slist = ch_tmpdir + "/sources.list" ch_slistd = ch_tmpdir + "/sources.list.d" # this file gets executed on apt-get update sometimes. (LP: #1527710) motd_update = os.path.join( target, "usr/lib/update-notifier/update-motd-updates-available") pmode = set_unexecutable(motd_update) if pmode is not None: restore_perms.append((motd_update, pmode),) # create tmpdir/sources.list with all lines other than deb-src # avoid apt complaining by using existing and empty dir for sourceparts os.mkdir(abs_slistd) with open(abs_slist, "w") as sfp: for sfile in listfiles: with open(sfile, "r") as fp: contents = fp.read() for line in contents.splitlines(): line = line.lstrip() if not line.startswith("deb-src"): sfp.write(line + "\n") update_cmd = [ 'apt-get', '--quiet', '--option=Acquire::Languages=none', '--option=Dir::Etc::sourcelist=%s' % ch_slist, '--option=Dir::Etc::sourceparts=%s' % ch_slistd, 'update'] # do not using 'run_apt_command' so we can use 'retries' to subp with RunInChroot(target, allow_daemons=True) as inchroot: inchroot(update_cmd, env=env, retries=retries) finally: for fname, perms in restore_perms: os.chmod(fname, perms) if abs_tmpdir: shutil.rmtree(abs_tmpdir) with open(marker, "w") as fp: fp.write(comment + "\n") def run_apt_command(mode, args=None, aptopts=None, env=None, target=None, execute=True, allow_daemons=False): opts = ['--quiet', '--assume-yes', '--option=Dpkg::options::=--force-unsafe-io', '--option=Dpkg::Options::=--force-confold'] if args is None: args = [] if aptopts is None: aptopts = [] if env is None: env = os.environ.copy() env['DEBIAN_FRONTEND'] = 'noninteractive' if which('eatmydata', target=target): emd = ['eatmydata'] else: emd = [] cmd = emd + ['apt-get'] + opts + aptopts + [mode] + args if not execute: return env, cmd apt_update(target, env=env, comment=' '.join(cmd)) ric = RunInChroot(target, allow_daemons=allow_daemons) with ric as inchroot: return inchroot(cmd, env=env) def system_upgrade(aptopts=None, target=None, env=None, allow_daemons=False): LOG.debug("Upgrading system in %s", target) for mode in ('dist-upgrade', 'autoremove'): ret = run_apt_command( mode, aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) return ret def install_packages(pkglist, aptopts=None, target=None, env=None, allow_daemons=False): if 
isinstance(pkglist, str): pkglist = [pkglist] return run_apt_command( 'install', args=pkglist, aptopts=aptopts, target=target, env=env, allow_daemons=allow_daemons) def is_uefi_bootable(): return os.path.exists('/sys/firmware/efi') is True def run_hook_if_exists(target, hook): """ Look for "hook" in "target" and run it """ target_hook = os.path.join(target, 'curtin', hook) if os.path.isfile(target_hook): LOG.debug("running %s" % target_hook) subp([target_hook]) return True return False def sanitize_source(source): """ Check the install source for type information If no type information is present or it is an invalid type, we default to the standard tgz format """ if type(source) is dict: # already sanitized? return source supported = ['tgz', 'dd-tgz'] deftype = 'tgz' for i in supported: prefix = i + ":" if source.startswith(prefix): return {'type': i, 'uri': source[len(prefix):]} LOG.debug("unknown type for url '%s', assuming type '%s'", source, deftype) # default to tgz for unknown types return {'type': deftype, 'uri': source} def get_dd_images(sources): """ return all disk images in sources list """ src = [] if type(sources) is not dict: return src for i in sources: if type(sources[i]) is not dict: continue if sources[i]['type'].startswith('dd-'): src.append(sources[i]['uri']) return src def get_meminfo(meminfo="/proc/meminfo", raw=False): mpliers = {'kB': 2**10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30} kmap = {'MemTotal:': 'total', 'MemFree:': 'free', 'MemAvailable:': 'available'} ret = {} with open(meminfo, "r") as fp: for line in fp: try: key, value, unit = line.split() except ValueError: key, value = line.split() unit = 'B' if raw: ret[key] = int(value) * mpliers[unit] elif key in kmap: ret[kmap[key]] = int(value) * mpliers[unit] return ret def get_fs_use_info(path): # return some filesystem usage info as tuple of (size_in_bytes, free_bytes) statvfs = os.statvfs(path) return (statvfs.f_frsize * statvfs.f_blocks, statvfs.f_frsize * statvfs.f_bfree) def human2bytes(size): # convert human 'size' to integer size_in = size if isinstance(size, int): return size elif isinstance(size, float): if int(size) != size: raise ValueError("'%s': resulted in non-integer (%s)" % (size_in, int(size))) return size elif not isinstance(size, str): raise TypeError("cannot convert type %s ('%s')." % (type(size), size)) if size.endswith("B"): size = size[:-1] mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40} num = size mplier = 'B' for m in mpliers: if size.endswith(m): mplier = m num = size[0:-len(m)] try: num = float(num) except ValueError: raise ValueError("'%s' is not valid input." 
% size_in) if num < 0: raise ValueError("'%s': cannot be negative" % size_in) val = num * mpliers[mplier] if int(val) != val: raise ValueError("'%s': resulted in non-integer (%s)" % (size_in, val)) return val def import_module(import_str): """Import a module.""" __import__(import_str) return sys.modules[import_str] def try_import_module(import_str, default=None): """Try to import a module.""" try: return import_module(import_str) except ImportError: return default def is_file_not_found_exc(exc): return (isinstance(exc, IOError) and exc.errno == errno.ENOENT) def lsb_release(): fmap = {'Codename': 'codename', 'Description': 'description', 'Distributor ID': 'id', 'Release': 'release'} global _LSB_RELEASE if not _LSB_RELEASE: data = {} try: out, err = subp(['lsb_release', '--all'], capture=True) for line in out.splitlines(): fname, tok, val = line.partition(":") if fname in fmap: data[fmap[fname]] = val.strip() missing = [k for k in fmap.values() if k not in data] if len(missing): LOG.warn("Missing fields in lsb_release --all output: %s", ','.join(missing)) except ProcessExecutionError as e: LOG.warn("Unable to get lsb_release --all: %s", e) data = {v: "UNAVAILABLE" for v in fmap.values()} _LSB_RELEASE.update(data) return _LSB_RELEASE class MergedCmdAppend(argparse.Action): """This appends to a list in order of appearence both the option string and the value""" def __call__(self, parser, namespace, values, option_string=None): if getattr(namespace, self.dest, None) is None: setattr(namespace, self.dest, []) getattr(namespace, self.dest).append((option_string, values,)) def json_dumps(data): return json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')).encode('utf-8') def get_platform_arch(): platform2arch = { 'i586': 'i386', 'i686': 'i386', 'x86_64': 'amd64', 'ppc64le': 'ppc64el', 'aarch64': 'arm64', } return platform2arch.get(platform.machine(), platform.machine()) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/block/__init__.py0000644000000000000000000005701312741214565017047 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
import errno import os import stat import shlex import tempfile import itertools from curtin import util from curtin.udev import udevadm_settle from curtin.log import LOG def get_dev_name_entry(devname): bname = devname.split('/dev/')[-1] return (bname, "/dev/" + bname) def is_valid_device(devname): devent = get_dev_name_entry(devname)[1] return is_block_device(devent) def is_block_device(path): try: return stat.S_ISBLK(os.stat(path).st_mode) except OSError as e: if not util.is_file_not_found_exc(e): raise return False def dev_short(devname): if os.path.sep in devname: return os.path.basename(devname) return devname def dev_path(devname): if devname.startswith('/dev/'): return devname else: return '/dev/' + devname def sys_block_path(devname, add=None, strict=True): toks = ['/sys/class/block'] # insert parent dev if devname is partition (parent, partnum) = get_blockdev_for_partition(devname) if partnum: toks.append(dev_short(parent)) toks.append(dev_short(devname)) if add is not None: toks.append(add) path = os.sep.join(toks) if strict and not os.path.exists(path): err = OSError( "devname '{}' did not have existing syspath '{}'".format( devname, path)) err.errno = errno.ENOENT raise err return os.path.normpath(path) def _lsblock_pairs_to_dict(lines): ret = {} for line in lines.splitlines(): toks = shlex.split(line) cur = {} for tok in toks: k, v = tok.split("=", 1) cur[k] = v # use KNAME, as NAME may include spaces and other info, # for example, lvm decices may show 'dm0 lvm1' cur['device_path'] = get_dev_name_entry(cur['KNAME'])[1] ret[cur['KNAME']] = cur return ret def _lsblock(args=None): # lsblk --help | sed -n '/Available/,/^$/p' | # sed -e 1d -e '$d' -e 's,^[ ]\+,,' -e 's, .*,,' | sort keys = ['ALIGNMENT', 'DISC-ALN', 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'FSTYPE', 'GROUP', 'KNAME', 'LABEL', 'LOG-SEC', 'MAJ:MIN', 'MIN-IO', 'MODE', 'MODEL', 'MOUNTPOINT', 'NAME', 'OPT-IO', 'OWNER', 'PHY-SEC', 'RM', 'RO', 'ROTA', 'RQ-SIZE', 'SCHED', 'SIZE', 'STATE', 'TYPE', 'UUID'] if args is None: args = [] args = [x.replace('!', '/') for x in args] # in order to avoid a very odd error with '-o' and all output fields above # we just drop one. doesn't really matter which one. keys.remove('SCHED') basecmd = ['lsblk', '--noheadings', '--bytes', '--pairs', '--output=' + ','.join(keys)] (out, _err) = util.subp(basecmd + list(args), capture=True) out = out.replace('!', '/') return _lsblock_pairs_to_dict(out) def get_unused_blockdev_info(): # return a list of unused block devices. These are devices that # do not have anything mounted on them. # get a list of top level block devices, then iterate over it to get # devices dependent on those. If the lsblk call for that specific # call has nothing 'MOUNTED", then this is an unused block device bdinfo = _lsblock(['--nodeps']) unused = {} for devname, data in bdinfo.items(): cur = _lsblock([data['device_path']]) mountpoints = [x for x in cur if cur[x].get('MOUNTPOINT')] if len(mountpoints) == 0: unused[devname] = data return unused def get_devices_for_mp(mountpoint): # return a list of devices (full paths) used by the provided mountpoint bdinfo = _lsblock() found = set() for devname, data in bdinfo.items(): if data['MOUNTPOINT'] == mountpoint: found.add(data['device_path']) if found: return list(found) # for some reason, on some systems, lsblk does not list mountpoint # for devices that are mounted. This happens on /dev/vdc1 during a run # using tools/launch. 
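    # fall back to reading /proc/mounts directly; os.path.realpath() is
    # used so symlinked device names compare equal to the kernel names
    # that lsblk reports.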
mountpoint = [os.path.realpath(dev) for (dev, mp, vfs, opts, freq, passno) in get_proc_mounts() if mp == mountpoint] return mountpoint def get_installable_blockdevs(include_removable=False, min_size=1024**3): good = [] unused = get_unused_blockdev_info() for devname, data in unused.items(): if not include_removable and data.get('RM') == "1": continue if data.get('RO') != "0" or data.get('TYPE') != "disk": continue if min_size is not None and int(data.get('SIZE', '0')) < min_size: continue good.append(devname) return good def get_blockdev_for_partition(devpath): # convert an entry in /dev/ to parent disk and partition number # if devpath is a block device and not a partition, return (devpath, None) # input of /dev/vdb or /dev/disk/by-label/foo # rpath is hopefully a real-ish path in /dev (vda, sdb..) rpath = os.path.realpath(devpath) bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname if not os.path.exists(syspath): syspath2 = "/sys/class/block/cciss!%s" % bname if not os.path.exists(syspath2): raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) syspath = syspath2 ptpath = os.path.join(syspath, "partition") if not os.path.exists(ptpath): return (rpath, None) ptnum = util.load_file(ptpath).rstrip() # for a partition, real syspath is something like: # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 rsyspath = os.path.realpath(syspath) disksyspath = os.path.dirname(rsyspath) diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip() diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) # diskdevpath has something like 253:0 # and udev has put links in /dev/block/253:0 to the device name in /dev/ return (diskdevpath, ptnum) def get_pardevs_on_blockdevs(devs): # return a dict of partitions with their info that are on provided devs if devs is None: devs = [] devs = [get_dev_name_entry(d)[1] for d in devs] found = _lsblock(devs) ret = {} for short in found: if found[short]['device_path'] not in devs: ret[short] = found[short] return ret def stop_all_unused_multipath_devices(): """ Stop all unused multipath devices. """ multipath = util.which('multipath') # Command multipath is not available only when multipath-tools package # is not installed. Nothing needs to be done in this case because system # doesn't create multipath devices without this package installed and we # have nothing to stop. if not multipath: return # Command multipath -F flushes all unused multipath device maps cmd = [multipath, '-F'] try: # unless multipath cleared *everything* it will exit with 1 util.subp(cmd, rcs=[0, 1]) except util.ProcessExecutionError as e: LOG.warn("Failed to stop multipath devices: %s", e) def rescan_block_devices(): # run 'blockdev --rereadpt' for all block devices not currently mounted unused = get_unused_blockdev_info() devices = [] for devname, data in unused.items(): if data.get('RM') == "1": continue if data.get('RO') != "0" or data.get('TYPE') != "disk": continue devices.append(data['device_path']) if not devices: LOG.debug("no devices found to rescan") return cmd = ['blockdev', '--rereadpt'] + devices try: util.subp(cmd, capture=True) except util.ProcessExecutionError as e: # FIXME: its less than ideal to swallow this error, but until # we fix LP: #1489521 we kind of need to. 
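        # log and continue: callers treat a failed rescan as non-fatal,
        # and the udevadm_settle() below still runs so outstanding udev
        # events can complete.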
LOG.warn("rescanning devices failed: %s", e) udevadm_settle() return def blkid(devs=None, cache=True): if devs is None: devs = [] # 14.04 blkid reads undocumented /dev/.blkid.tab # man pages mention /run/blkid.tab and /etc/blkid.tab if not cache: cfiles = ("/run/blkid/blkid.tab", "/dev/.blkid.tab", "/etc/blkid.tab") for cachefile in cfiles: if os.path.exists(cachefile): os.unlink(cachefile) cmd = ['blkid', '-o', 'full'] # blkid output is : KEY=VALUE # where KEY is TYPE, UUID, PARTUUID, LABEL out, err = util.subp(cmd, capture=True) data = {} for line in out.splitlines(): curdev, curdata = line.split(":", 1) data[curdev] = dict(tok.split('=', 1) for tok in shlex.split(curdata)) return data def detect_multipath(target_mountpoint): """ Detect if the operating system has been installed to a multipath device. """ # The obvious way to detect multipath is to use multipath utility which is # provided by the multipath-tools package. Unfortunately, multipath-tools # package is not available in all ephemeral images hence we can't use it. # Another reasonable way to detect multipath is to look for two (or more) # devices with the same World Wide Name (WWN) which can be fetched using # scsi_id utility. This way doesn't work as well because WWNs are not # unique in some cases which leads to false positives which may prevent # system from booting (see LP: #1463046 for details). # Taking into account all the issues mentioned above, curent implementation # detects multipath by looking for a filesystem with the same UUID # as the target device. It relies on the fact that all alternative routes # to the same disk observe identical partition information including UUID. # There are some issues with this approach as well though. We won't detect # multipath disk if it doesn't any filesystems. Good news is that # target disk will always have a filesystem because curtin creates them # while installing the system. rescan_block_devices() binfo = blkid(cache=False) LOG.debug("detect_multipath found blkid info: %s", binfo) # get_devices_for_mp may return multiple devices by design. It is not yet # implemented but it should return multiple devices when installer creates # separate disk partitions for / and /boot. We need to do UUID-based # multipath detection against each of target devices. target_devs = get_devices_for_mp(target_mountpoint) LOG.debug("target_devs: %s" % target_devs) for devpath, data in binfo.items(): # We need to figure out UUID of the target device first if devpath not in target_devs: continue # This entry contains information about one of target devices target_uuid = data.get('UUID') # UUID-based multipath detection won't work if target partition # doesn't have UUID assigned if not target_uuid: LOG.warn("Target partition %s doesn't have UUID assigned", devpath) continue LOG.debug("%s: %s" % (devpath, data.get('UUID', ""))) # Iterating over available devices to see if any other device # has the same UUID as the target device. If such device exists # we probably installed the system to the multipath device. for other_devpath, other_data in binfo.items(): if ((other_data.get('UUID') == target_uuid) and (other_devpath != devpath)): return True # No other devices have the same UUID as the target devices. # We probably installed the system to the non-multipath device. return False def get_scsi_wwid(device, replace_whitespace=False): """ Issue a call to scsi_id utility to get WWID of the device. 
""" cmd = ['/lib/udev/scsi_id', '--whitelisted', '--device=%s' % device] if replace_whitespace: cmd.append('--replace-whitespace') try: (out, err) = util.subp(cmd, capture=True) LOG.debug("scsi_id output raw:\n%s\nerror:\n%s", out, err) scsi_wwid = out.rstrip('\n') return scsi_wwid except util.ProcessExecutionError as e: LOG.warn("Failed to get WWID: %s", e) return None def get_multipath_wwids(): """ Get WWIDs of all multipath devices available in the system. """ multipath_devices = set() multipath_wwids = set() devuuids = [(d, i['UUID']) for d, i in blkid().items() if 'UUID' in i] # Looking for two disks which contain filesystems with the same UUID. for (dev1, uuid1), (dev2, uuid2) in itertools.combinations(devuuids, 2): if uuid1 == uuid2: multipath_devices.add(get_blockdev_for_partition(dev1)[0]) for device in multipath_devices: wwid = get_scsi_wwid(device) # Function get_scsi_wwid() may return None in case of errors or # WWID field may be empty for some buggy disk. We don't want to # propagate both of these value further to avoid generation of # incorrect /etc/multipath/bindings file. if wwid: multipath_wwids.add(wwid) return multipath_wwids def get_root_device(dev, fpath="curtin"): """ Get root partition for specified device, based on presence of /curtin. """ partitions = get_pardevs_on_blockdevs(dev) target = None tmp_mount = tempfile.mkdtemp() for i in partitions: dev_path = partitions[i]['device_path'] mp = None try: util.do_mount(dev_path, tmp_mount) mp = tmp_mount curtin_dir = os.path.join(tmp_mount, fpath) if os.path.isdir(curtin_dir): target = dev_path break except: pass finally: if mp: util.do_umount(mp) os.rmdir(tmp_mount) if target is None: raise ValueError("Could not find root device") return target def get_blockdev_sector_size(devpath): """ Get the logical and physical sector size of device at devpath Returns a tuple of integer values (logical, physical). """ info = _lsblock([devpath]) LOG.debug('get_blockdev_sector_size: info:\n%s' % util.json_dumps(info)) [parent] = info return (int(info[parent]['LOG-SEC']), int(info[parent]['PHY-SEC'])) def get_volume_uuid(path): """ Get uuid of disk with given path. This address uniquely identifies the device and remains consistant across reboots """ (out, _err) = util.subp(["blkid", "-o", "export", path], capture=True) for line in out.splitlines(): if "UUID" in line: return line.split('=')[-1] return '' def get_mountpoints(): """ Returns a list of all mountpoints where filesystems are currently mounted. """ info = _lsblock() proc_mounts = [mp for (dev, mp, vfs, opts, freq, passno) in get_proc_mounts()] lsblock_mounts = list(i.get("MOUNTPOINT") for name, i in info.items() if i.get("MOUNTPOINT") is not None and i.get("MOUNTPOINT") != "") return list(set(proc_mounts + lsblock_mounts)) def get_proc_mounts(): """ Returns a list of tuples for each entry in /proc/mounts """ mounts = [] with open("/proc/mounts", "r") as fp: for line in fp: try: (dev, mp, vfs, opts, freq, passno) = \ line.strip().split(None, 5) mounts.append((dev, mp, vfs, opts, freq, passno)) except ValueError: continue return mounts def lookup_disk(serial): """ Search for a disk by its serial number using /dev/disk/by-id/ """ # Get all volumes in /dev/disk/by-id/ containing the serial string. 
# The string specified can be either in the short or long serial format
    # hack, some serials have spaces, udev usually converts ' ' -> '_'
    serial_udev = serial.replace(' ', '_')
    LOG.info('Processing serial %s via udev to %s', serial, serial_udev)

    disks = list(filter(lambda x: serial_udev in x,
                        os.listdir("/dev/disk/by-id/")))
    if not disks or len(disks) < 1:
        raise ValueError("no disk with serial '%s' found" % serial_udev)

    # Sort by length and take the shortest path name, as the longer path
    # names will be the partitions on the disk. Then use os.path.realpath
    # to determine the path to the block device in /dev/
    disks.sort(key=lambda x: len(x))
    path = os.path.realpath("/dev/disk/by-id/%s" % disks[0])
    if not os.path.exists(path):
        raise ValueError("path '%s' to block device for disk with serial "
                         "'%s' does not exist" % (path, serial_udev))
    return path


def sysfs_partition_data(blockdev=None, sysfs_path=None):
    # given block device or sysfs_path, return a list of tuples
    # of (kernel_name, number, offset, size)
    if blockdev is None and sysfs_path is None:
        raise ValueError("Blockdev and sysfs_path cannot both be None")

    if blockdev:
        sysfs_path = sys_block_path(blockdev)

    ptdata = []
    # /sys/class/block/dev has entries of 'kname' for each partition
    # queue property is only on parent devices, ie, we can't read
    # /sys/class/block/vda/vda1/queue/* as queue is only on the
    # parent device
    (parent, partnum) = get_blockdev_for_partition(blockdev)
    sysfs_prefix = sysfs_path
    if partnum:
        sysfs_prefix = sys_block_path(parent)

    block_size = int(util.load_file(
        os.path.join(sysfs_prefix, 'queue/logical_block_size')))
    unit = block_size
    for d in os.listdir(sysfs_path):
        partd = os.path.join(sysfs_path, d)
        data = {}
        for sfile in ('partition', 'start', 'size'):
            dfile = os.path.join(partd, sfile)
            if not os.path.isfile(dfile):
                continue
            data[sfile] = int(util.load_file(dfile))
        if 'partition' not in data:
            continue
        ptdata.append((d, data['partition'], data['start'] * unit,
                       data['size'] * unit,))

    return ptdata


def wipe_file(path, reader=None, buflen=4 * 1024 * 1024):
    # wipe the existing file at path.
    # if reader is provided, it will be called as a 'reader(buflen)'
    # to provide data for each write. Otherwise, zeros are used.
    # writes will be done in size of buflen.
    if reader:
        readfunc = reader
    else:
        buf = buflen * b'\0'

        def readfunc(size):
            return buf

    with open(path, "rb+") as fp:
        # get the size by seeking to end.
        fp.seek(0, 2)
        size = fp.tell()
        LOG.debug("%s is %s bytes. wiping with buflen=%s",
                  path, size, buflen)
        fp.seek(0)
        while True:
            pbuf = readfunc(buflen)
            pos = fp.tell()
            if len(pbuf) != buflen and len(pbuf) + pos < size:
                raise ValueError(
                    "short read on reader got %d expected %d after %d" %
                    (len(pbuf), buflen, pos))
            if pos + buflen >= size:
                fp.write(pbuf[0:size-pos])
                break
            else:
                fp.write(pbuf)


def quick_zero(path, partitions=True):
    # zero 1M at front and 1M at end of the file or block device at path.
    # if this is a block device and partitions is true, then
    # zero 1M at front and end of each partition.
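    # worked example (hypothetical 10MiB block device with one partition
    # of size 8MiB starting at offset 1MiB): with zero_size = 1MiB the
    # offsets list below becomes [0, -1048576, 1048576, 8388608], i.e.
    # 1MiB at the start and end of the disk plus 1MiB at the start and
    # end of the partition.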
    buflen = 1024
    count = 1024
    zero_size = buflen * count
    offsets = [0, -zero_size]
    is_block = is_block_device(path)
    if not (is_block or os.path.isfile(path)):
        raise ValueError("%s: not an existing file or block device" % path)

    if partitions and is_block:
        ptdata = sysfs_partition_data(path)
        for kname, ptnum, start, size in ptdata:
            offsets.append(start)
            offsets.append(start + size - zero_size)

    LOG.debug("wiping 1M on %s at offsets %s", path, offsets)
    return zero_file_at_offsets(path, offsets, buflen=buflen, count=count)


def zero_file_at_offsets(path, offsets, buflen=1024, count=1024,
                         strict=False):
    bmsg = "{path} (size={size}): "
    m_short = bmsg + "{tot} bytes from {offset} > size."
    m_badoff = bmsg + "invalid offset {offset}."
    if not strict:
        m_short += " Shortened to {wsize} bytes."
        m_badoff += " Skipping."

    buf = b'\0' * buflen
    tot = buflen * count
    msg_vals = {'path': path, 'tot': buflen * count}

    with open(path, "rb+") as fp:
        # get the size by seeking to end.
        fp.seek(0, 2)
        size = fp.tell()
        msg_vals['size'] = size

        for offset in offsets:
            if offset < 0:
                pos = size + offset
            else:
                pos = offset
            msg_vals['offset'] = offset
            msg_vals['pos'] = pos
            if pos > size or pos < 0:
                if strict:
                    raise ValueError(m_badoff.format(**msg_vals))
                else:
                    LOG.debug(m_badoff.format(**msg_vals))
                    continue

            msg_vals['wsize'] = size - pos
            if pos + tot > size:
                if strict:
                    raise ValueError(m_short.format(**msg_vals))
                else:
                    LOG.debug(m_short.format(**msg_vals))

            fp.seek(pos)
            for i in range(count):
                pos = fp.tell()
                if pos + buflen > size:
                    fp.write(buf[0:size-pos])
                else:
                    fp.write(buf)


def wipe_volume(path, mode="superblock"):
    """wipe a volume/block device

    :param path: a path to a block device
    :param mode: how to wipe it.
       pvremove: wipe a lvm physical volume
       zero: write zeros to the entire volume
       random: write random data (/dev/urandom) to the entire volume
       superblock: zero the beginning and the end of the volume
       superblock-recursive: zero the beginning of the volume, the end of
          the volume and beginning and end of any partitions that are known
          to be on this device.
    """
    if mode == "pvremove":
        # We need to use --force --force in case it's already in a volgroup
        # and pvremove doesn't want to remove it
        cmds = []
        cmds.append(["pvremove", "--force", "--force", "--yes", path])
        cmds.append(["pvscan", "--cache"])
        cmds.append(["vgscan", "--mknodes", "--cache"])
        # If pvremove is run and there is no label on the system,
        # then it exits with 5. That is also okay, because we might be
        # wiping something that is already blank
        for cmd in cmds:
            util.subp(cmd, rcs=[0, 5], capture=True)
    elif mode == "zero":
        wipe_file(path)
    elif mode == "random":
        with open("/dev/urandom", "rb") as reader:
            wipe_file(path, reader=reader.read)
    elif mode == "superblock":
        quick_zero(path, partitions=False)
    elif mode == "superblock-recursive":
        quick_zero(path, partitions=True)
    else:
        raise ValueError("wipe mode %s not supported" % mode)

# vi: ts=4 expandtab syntax=python
curtin-0.1.0~bzr399/curtin/block/mdadm.py0000644000000000000000000005120012741214565016362 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd.
#
# Author: Ryan Harper
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . # This module wraps calls to the mdadm utility for examing Linux SoftRAID # virtual devices. Functions prefixed with 'mdadm_' involve executing # the 'mdadm' command in a subprocess. The remaining functions handle # manipulation of the mdadm output. import os import re import shlex from subprocess import CalledProcessError from curtin.block import (dev_short, dev_path, is_valid_device, sys_block_path) from curtin import util from curtin.log import LOG NOSPARE_RAID_LEVELS = [ 'linear', 'raid0', '0', 0, ] SPARE_RAID_LEVELS = [ 'raid1', 'stripe', 'mirror', '1', 1, 'raid4', '4', 4, 'raid5', '5', 5, 'raid6', '6', 6, 'raid10', '10', 10, ] VALID_RAID_LEVELS = NOSPARE_RAID_LEVELS + SPARE_RAID_LEVELS # https://www.kernel.org/doc/Documentation/md.txt ''' clear No devices, no size, no level Writing is equivalent to STOP_ARRAY ioctl inactive May have some settings, but array is not active all IO results in error When written, doesn't tear down array, but just stops it suspended (not supported yet) All IO requests will block. The array can be reconfigured. Writing this, if accepted, will block until array is quiessent readonly no resync can happen. no superblocks get written. write requests fail read-auto like readonly, but behaves like 'clean' on a write request. clean - no pending writes, but otherwise active. When written to inactive array, starts without resync If a write request arrives then if metadata is known, mark 'dirty' and switch to 'active'. if not known, block and switch to write-pending If written to an active array that has pending writes, then fails. active fully active: IO and resync can be happening. When written to inactive array, starts with resync write-pending clean, but writes are blocked waiting for 'active' to be written. active-idle like active, but no writes have been seen for a while (safe_mode_delay). ''' ERROR_RAID_STATES = [ 'clear', 'inactive', 'suspended', ] READONLY_RAID_STATES = [ 'readonly', ] READWRITE_RAID_STATES = [ 'read-auto', 'clean', 'active', 'active-idle', 'write-pending', ] VALID_RAID_ARRAY_STATES = ( ERROR_RAID_STATES + READONLY_RAID_STATES + READWRITE_RAID_STATES ) # need a on-import check of version and set the value for later reference ''' mdadm version < 3.3 doesn't include enough info when using --export and we must use --detail and parse out information. 
This method checks the mdadm version and will return True if we can use --export for key=value list with enough info, false if version is less than ''' MDADM_USE_EXPORT = util.lsb_release()['codename'] not in ['precise', 'trusty'] # # mdadm executors # def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False): # md_devname is a /dev/XXXX # devices is non-empty list of /dev/xxx # if spares is non-empt list append of /dev/xxx cmd = ["mdadm", "--assemble"] if scan: cmd += ['--scan'] else: valid_mdname(md_devname) cmd += [md_devname, "--run"] + devices if spares: cmd += spares util.subp(cmd, capture=True, rcs=[0, 1, 2]) util.subp(["udevadm", "settle"]) def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""): LOG.debug('mdadm_create: ' + 'md_name=%s raidlevel=%s ' % (md_devname, raidlevel) + ' devices=%s spares=%s name=%s' % (devices, spares, md_name)) assert_valid_devpath(md_devname) if raidlevel not in VALID_RAID_LEVELS: raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel)) min_devices = md_minimum_devices(raidlevel) if len(devices) < min_devices: err = 'Not enough devices for raidlevel: ' + str(raidlevel) err += ' minimum devices needed: ' + str(min_devices) raise ValueError(err) if spares and raidlevel not in SPARE_RAID_LEVELS: err = ('Raidlevel does not support spare devices: ' + str(raidlevel)) raise ValueError(err) (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True) cmd = ["mdadm", "--create", md_devname, "--run", "--homehost=%s" % hostname.strip(), "--level=%s" % raidlevel, "--raid-devices=%s" % len(devices)] if md_name: cmd.append("--name=%s" % md_name) for device in devices: # Zero out device superblock just in case device has been used for raid # before, as this will cause many issues util.subp(["mdadm", "--zero-superblock", device], capture=True) cmd.append(device) if spares: cmd.append("--spare-devices=%s" % len(spares)) for device in spares: util.subp(["mdadm", "--zero-superblock", device], capture=True) cmd.append(device) # Create the raid device util.subp(["udevadm", "settle"]) util.subp(["udevadm", "control", "--stop-exec-queue"]) try: util.subp(cmd, capture=True) except util.ProcessExecutionError: # frequent issues by modules being missing (LP: #1519470) - add debug LOG.debug('mdadm_create failed - extra debug regarding md modules') (out, _err) = util.subp(["lsmod"], capture=True) if not _err: LOG.debug('modules loaded: \n%s' % out) raidmodpath = '/lib/modules/%s/kernel/drivers/md' % os.uname()[2] (out, _err) = util.subp(["find", raidmodpath], rcs=[0, 1], capture=True) if out: LOG.debug('available md modules: \n%s' % out) else: LOG.debug('no available md modules found') raise util.subp(["udevadm", "control", "--start-exec-queue"]) util.subp(["udevadm", "settle", "--exit-if-exists=%s" % md_devname]) def mdadm_examine(devpath, export=MDADM_USE_EXPORT): ''' exectute mdadm --examine, and optionally append --export. 
Parse and return dict of key=val from output''' assert_valid_devpath(devpath) cmd = ["mdadm", "--examine"] if export: cmd.extend(["--export"]) cmd.extend([devpath]) try: (out, _err) = util.subp(cmd, capture=True) except CalledProcessError: LOG.exception('Error: not a valid md device: ' + devpath) return {} if export: data = __mdadm_export_to_dict(out) else: data = __upgrade_detail_dict(__mdadm_detail_to_dict(out)) return data def mdadm_stop(devpath): assert_valid_devpath(devpath) LOG.info("mdadm stopping: %s" % devpath) util.subp(["mdadm", "--stop", devpath], rcs=[0, 1], capture=True) def mdadm_remove(devpath): assert_valid_devpath(devpath) LOG.info("mdadm removing: %s" % devpath) util.subp(["mdadm", "--remove", devpath], rcs=[0, 1], capture=True) def mdadm_query_detail(md_devname, export=MDADM_USE_EXPORT): valid_mdname(md_devname) cmd = ["mdadm", "--query", "--detail"] if export: cmd.extend(["--export"]) cmd.extend([md_devname]) (out, _err) = util.subp(cmd, capture=True) if export: data = __mdadm_export_to_dict(out) else: data = __upgrade_detail_dict(__mdadm_detail_to_dict(out)) return data def mdadm_detail_scan(): (out, _err) = util.subp(["mdadm", "--detail", "--scan"], capture=True) if not _err: return out # ------------------------------ # def valid_mdname(md_devname): assert_valid_devpath(md_devname) if not is_valid_device(md_devname): raise ValueError('Specified md device does not exist: ' + md_devname) return False return True def valid_devpath(devpath): if devpath: return devpath.startswith('/dev') return False def assert_valid_devpath(devpath): if not valid_devpath(devpath): raise ValueError("Invalid devpath: '%s'" % devpath) def md_sysfs_attr(md_devname, attrname): if not valid_mdname(md_devname): raise ValueError('Invalid md devicename: [{}]'.format(md_devname)) attrdata = '' # /sys/class/block//md sysmd = sys_block_path(md_devname, "md") # /sys/class/block//md/attrname sysfs_attr_path = os.path.join(sysmd, attrname) if os.path.isfile(sysfs_attr_path): attrdata = util.load_file(sysfs_attr_path).strip() return attrdata def md_raidlevel_short(raidlevel): if isinstance(raidlevel, int) or raidlevel in ['linear', 'stripe']: return raidlevel return int(raidlevel.replace('raid', '')) def md_minimum_devices(raidlevel): ''' return the minimum number of devices for a given raid level ''' rl = md_raidlevel_short(raidlevel) if rl in [0, 1, 'linear', 'stripe']: return 2 if rl in [5]: return 3 if rl in [6, 10]: return 4 return -1 def __md_check_array_state(md_devname, mode='READWRITE'): modes = { 'READWRITE': READWRITE_RAID_STATES, 'READONLY': READONLY_RAID_STATES, 'ERROR': ERROR_RAID_STATES, } if mode not in modes: raise ValueError('Invalid Array State mode: ' + mode) array_state = md_sysfs_attr(md_devname, 'array_state') if array_state in modes[mode]: return True return False def md_check_array_state_rw(md_devname): return __md_check_array_state(md_devname, mode='READWRITE') def md_check_array_state_ro(md_devname): return __md_check_array_state(md_devname, mode='READONLY') def md_check_array_state_error(md_devname): return __md_check_array_state(md_devname, mode='ERROR') def __mdadm_export_to_dict(output): ''' convert Key=Value text output into dictionary ''' return dict(tok.split('=', 1) for tok in shlex.split(output)) def __mdadm_detail_to_dict(input): ''' Convert mdadm --detail output to dictionary /dev/vde: Magic : a92b4efc Version : 1.2 Feature Map : 0x0 Array UUID : 93a73e10:427f280b:b7076c02:204b8f7a Name : wily-foobar:0 (local to host wily-foobar) Creation Time : Sat Dec 12 16:06:05 
2015 Raid Level : raid1 Raid Devices : 2 Avail Dev Size : 20955136 (9.99 GiB 10.73 GB) Used Dev Size : 20955136 (9.99 GiB 10.73 GB) Array Size : 10477568 (9.99 GiB 10.73 GB) Data Offset : 16384 sectors Super Offset : 8 sectors Unused Space : before=16296 sectors, after=0 sectors State : clean Device UUID : 8fcd62e6:991acc6e:6cb71ee3:7c956919 Update Time : Sat Dec 12 16:09:09 2015 Bad Block Log : 512 entries available at offset 72 sectors Checksum : 65b57c2e - correct Events : 17 Device Role : spare Array State : AA ('A' == active, '.' == missing, 'R' == replacing) ''' data = {} device = re.findall('^(\/dev\/[a-zA-Z0-9-\._]+)', input) if len(device) == 1: data.update({'device': device[0]}) else: raise ValueError('Failed to determine device in input') # FIXME: probably could do a better regex to match the LHS which # has one, two or three words for f in re.findall('(\w+|\w+\ \w+|\w+\ \w+\ \w+)' + '\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)', input, re.MULTILINE): key = f[0].replace(' ', '_').lower() val = f[1] if key in data: raise ValueError('Duplicate key in mdadm regex parsing: ' + key) data.update({key: val}) return data def md_device_key_role(devname): if not devname: raise ValueError('Missing parameter devname') return 'MD_DEVICE_' + dev_short(devname) + '_ROLE' def md_device_key_dev(devname): if not devname: raise ValueError('Missing parameter devname') return 'MD_DEVICE_' + dev_short(devname) + '_DEV' def __upgrade_detail_dict(detail): ''' This method attempts to convert mdadm --detail output into a KEY=VALUE output the same as mdadm --detail --export from mdadm v3.3 ''' # if the input already has MD_UUID, it's already been converted if 'MD_UUID' in detail: return detail md_detail = { 'MD_LEVEL': detail['raid_level'], 'MD_DEVICES': detail['raid_devices'], 'MD_METADATA': detail['version'], 'MD_NAME': detail['name'].split()[0], } # exmaine has ARRAY UUID if 'array_uuid' in detail: md_detail.update({'MD_UUID': detail['array_uuid']}) # query,detail has UUID elif 'uuid' in detail: md_detail.update({'MD_UUID': detail['uuid']}) device = detail['device'] # MD_DEVICE_vdc1_DEV=/dev/vdc1 md_detail.update({md_device_key_dev(device): device}) if 'device_role' in detail: role = detail['device_role'] if role != 'spare': # device_role = Active device 1 role = role.split()[-1] # MD_DEVICE_vdc1_ROLE=spare md_detail.update({md_device_key_role(device): role}) return md_detail def md_read_run_mdadm_map(): ''' md1 1.2 59beb40f:4c202f67:088e702b:efdf577a /dev/md1 md0 0.90 077e6a9e:edf92012:e2a6e712:b193f786 /dev/md0 return # md_shortname = (metaversion, md_uuid, md_devpath) data = { 'md1': (1.2, 59beb40f:4c202f67:088e702b:efdf577a, /dev/md1) 'md0': (0.90, 077e6a9e:edf92012:e2a6e712:b193f786, /dev/md0) ''' mdadm_map = {} run_mdadm_map = '/run/mdadm/map' if os.path.exists(run_mdadm_map): with open(run_mdadm_map, 'r') as fp: data = fp.read().strip() for entry in data.split('\n'): (key, meta, md_uuid, dev) = entry.split() mdadm_map.update({key: (meta, md_uuid, dev)}) return mdadm_map def md_get_spares_list(devpath): sysfs_md = sys_block_path(devpath, "md") spares = [dev_path(dev[4:]) for dev in os.listdir(sysfs_md) if (dev.startswith('dev-') and util.load_file(os.path.join(sysfs_md, dev, 'state')).strip() == 'spare')] return spares def md_get_devices_list(devpath): sysfs_md = sys_block_path(devpath, "md") devices = [dev_path(dev[4:]) for dev in os.listdir(sysfs_md) if (dev.startswith('dev-') and util.load_file(os.path.join(sysfs_md, dev, 'state')).strip() != 'spare')] return devices def md_check_array_uuid(md_devname, 
md_uuid): valid_mdname(md_devname) # confirm we have /dev/{mdname} by following the udev symlink mduuid_path = ('/dev/disk/by-id/md-uuid-' + md_uuid) mdlink_devname = dev_path(os.path.realpath(mduuid_path)) if md_devname != mdlink_devname: err = ('Mismatch between devname and md-uuid symlink: ' + '%s -> %s != %s' % (mduuid_path, mdlink_devname, md_devname)) raise ValueError(err) return True def md_get_uuid(md_devname): valid_mdname(md_devname) md_query = mdadm_query_detail(md_devname) return md_query.get('MD_UUID', None) def _compare_devlist(expected, found): LOG.debug('comparing device lists: ' 'expected: {} found: {}'.format(expected, found)) expected = set(expected) found = set(found) if expected != found: missing = expected.difference(found) extra = found.difference(expected) raise ValueError("RAID array device list does not match." " Missing: {} Extra: {}".format(missing, extra)) def md_check_raidlevel(raidlevel): # Validate raidlevel against what curtin supports configuring if raidlevel not in VALID_RAID_LEVELS: err = ('Invalid raidlevel: ' + raidlevel + ' Must be one of: ' + str(VALID_RAID_LEVELS)) raise ValueError(err) return True def md_block_until_in_sync(md_devname): ''' sync_completed This shows the number of sectors that have been completed of whatever the current sync_action is, followed by the number of sectors in total that could need to be processed. The two numbers are separated by a '/' thus effectively showing one value, a fraction of the process that is complete. A 'select' on this attribute will return when resync completes, when it reaches the current sync_max (below) and possibly at other times. ''' # FIXME: use selectors to block on: /sys/class/block/mdX/md/sync_completed pass def md_check_array_state(md_devname): # check array state writable = md_check_array_state_rw(md_devname) degraded = md_sysfs_attr(md_devname, 'degraded') sync_action = md_sysfs_attr(md_devname, 'sync_action') if not writable: raise ValueError('Array not in writable state: ' + md_devname) if degraded != "0": raise ValueError('Array in degraded state: ' + md_devname) if sync_action != "idle": raise ValueError('Array syncing, not idle state: ' + md_devname) return True def md_check_uuid(md_devname): md_uuid = md_get_uuid(md_devname) if not md_uuid: raise ValueError('Failed to get md UUID from device: ' + md_devname) return md_check_array_uuid(md_devname, md_uuid) def md_check_devices(md_devname, devices): if not devices or len(devices) == 0: raise ValueError('Cannot verify raid array with empty device list') # collect and compare raid devices based on md name versus # expected device list. # # NB: In some cases, a device might report as a spare until # md has finished syncing it into the array. Currently # we fail the check since the specified raid device is not # yet in its proper role. Callers can check mdadm_sync_action # state to see if the array is currently recovering, which would # explain the failure. Also mdadm_degraded will indicate if the # raid is currently degraded or not, which would also explain the # failure. md_raid_devices = md_get_devices_list(md_devname) LOG.debug('md_check_devices: md_raid_devs: ' + str(md_raid_devices)) _compare_devlist(devices, md_raid_devices) def md_check_spares(md_devname, spares): # collect and compare spare devices based on md name versus # expected device list. 
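# A minimal sketch (not curtin code) of how md_block_until_in_sync above
# could be implemented by polling the sysfs attribute it documents; the
# 0.5 second interval is an arbitrary assumption:
#
#   import time
#
#   def wait_for_md_sync(md_devname):
#       while True:
#           completed = md_sysfs_attr(md_devname, 'sync_completed')
#           # the kernel reports 'none' when no sync action is running
#           if completed in ('', 'none'):
#               return
#           done, total = (int(val) for val in completed.split('/'))
#           if done >= total:
#               return
#           time.sleep(0.5)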
md_raid_spares = md_get_spares_list(md_devname) _compare_devlist(spares, md_raid_spares) def md_check_array_membership(md_devname, devices): # validate that all devices are members of the correct array md_uuid = md_get_uuid(md_devname) for device in devices: dev_examine = mdadm_examine(device, export=False) if 'MD_UUID' not in dev_examine: raise ValueError('Device is not part of an array: ' + device) dev_uuid = dev_examine['MD_UUID'] if dev_uuid != md_uuid: err = "Device {} is not part of {} array. ".format(device, md_devname) err += "MD_UUID mismatch: device:{} != array:{}".format(dev_uuid, md_uuid) raise ValueError(err) def md_check(md_devname, raidlevel, devices=[], spares=[]): ''' Check passed in variables from storage configuration against the system we're running upon. ''' LOG.debug('RAID validation: ' + 'name={} raidlevel={} devices={} spares={}'.format(md_devname, raidlevel, devices, spares)) assert_valid_devpath(md_devname) md_check_array_state(md_devname) md_check_raidlevel(raidlevel) md_check_uuid(md_devname) md_check_devices(md_devname, devices) md_check_spares(md_devname, spares) md_check_array_membership(md_devname, devices + spares) LOG.debug('RAID array OK: ' + md_devname) return True # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/block/mkfs.py0000644000000000000000000001650312741214565016247 0ustar 00000000000000# Copyright (C) 2016 Canonical Ltd. # # Author: Wesley Wiedenmeier # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . # This module wraps calls to mkfs. 
and determines the appropriate flags # for each filesystem type from curtin import util from curtin import block import string import os from uuid import uuid1 mkfs_commands = { "btrfs": "mkfs.btrfs", "ext2": "mkfs.ext2", "ext3": "mkfs.ext3", "ext4": "mkfs.ext4", "fat": "mkfs.vfat", "fat12": "mkfs.vfat", "fat16": "mkfs.vfat", "fat32": "mkfs.vfat", "vfat": "mkfs.vfat", "jfs": "jfs_mkfs", "ntfs": "mkntfs", "reiserfs": "mkfs.reiserfs", "swap": "mkswap", "xfs": "mkfs.xfs" } specific_to_family = { "ext2": "ext", "ext3": "ext", "ext4": "ext", "fat12": "fat", "fat16": "fat", "fat32": "fat", "vfat": "fat", } label_length_limits = { "btrfs": 256, "ext": 16, "fat": 11, "jfs": 16, # see jfs_tune manpage "ntfs": 32, "reiserfs": 16, "swap": 15, # not in manpages, found experimentally "xfs": 12 } family_flag_mappings = { "label": {"btrfs": "--label", "ext": "-L", "fat": "-n", "jfs": "-L", "ntfs": "--label", "reiserfs": "--label", "swap": "--label", "xfs": "-L"}, "uuid": {"btrfs": "--uuid", "ext": "-U", "reiserfs": "--uuid", "swap": "--uuid"}, "force": {"btrfs": "--force", "ext": "-F", "ntfs": "--force", "reiserfs": "-f", "swap": "--force", "xfs": "-f"}, "fatsize": {"fat": "-F"}, "quiet": {"ext": "-q", "ntfs": "-q", "reiserfs": "-q", "xfs": "--quiet"}, "sectorsize": { "btrfs": "--sectorsize", "ext": "-b", "fat": "-S", "ntfs": "--sector-size", "reiserfs": "--block-size"} } release_flag_mapping_overrides = { "precise": { "force": {"btrfs": None}, "uuid": {"btrfs": None}}, "trusty": { "uuid": {"btrfs": None}}, } def valid_fstypes(): return list(mkfs_commands.keys()) def get_flag_mapping(flag_name, fs_family, param=None, strict=False): ret = [] release = util.lsb_release()['codename'] overrides = release_flag_mapping_overrides.get(release, {}) if flag_name in overrides and fs_family in overrides[flag_name]: flag_sym = overrides[flag_name][fs_family] else: flag_sym_families = family_flag_mappings.get(flag_name) if flag_sym_families is None: raise ValueError("unsupported flag '%s'" % flag_name) flag_sym = flag_sym_families.get(fs_family) if flag_sym is None: if strict: raise ValueError("flag '%s' not supported by fs family '%s'" % (flag_name, fs_family)) else: ret = [flag_sym] if param is not None: ret.append(param) return ret def mkfs(path, fstype, strict=False, label=None, uuid=None, force=False): """Make filesystem on block device with given path using given fstype and appropriate flags for filesystem family. Filesystem uuid and label can be passed in as kwargs. By default no label or uuid will be used. If a filesystem label is too long curtin will raise a ValueError if the strict flag is true or will truncate it to the maximum possible length. If a flag is not supported by a filesystem family mkfs will raise a ValueError if the strict flag is true or silently ignore it otherwise. Force can be specified to force the mkfs command to continue even if it finds old data or filesystems on the partition.
""" if path is None: raise ValueError("invalid block dev path '%s'" % path) if not os.path.exists(path): raise ValueError("'%s': no such file or directory" % path) fs_family = specific_to_family.get(fstype, fstype) mkfs_cmd = mkfs_commands.get(fstype) if not mkfs_cmd: raise ValueError("unsupported fs type '%s'" % fstype) if util.which(mkfs_cmd) is None: raise ValueError("need '%s' but it could not be found" % mkfs_cmd) cmd = [mkfs_cmd] # use device logical block size to ensure properly formated filesystems (logical_bsize, physical_bsize) = block.get_blockdev_sector_size(path) if logical_bsize > 512: cmd.extend(get_flag_mapping("sectorsize", fs_family, param=str(logical_bsize), strict=strict)) # mkfs.vfat doesn't calculate this right for non-512b sector size # lp:1569576 , d-i uses the same setting. cmd.extend(["-s", "1"]) if force: cmd.extend(get_flag_mapping("force", fs_family, strict=strict)) if label is not None: limit = label_length_limits.get(fs_family) if len(label) > limit: if strict: raise ValueError("length of fs label for '%s' exceeds max \ allowed for fstype '%s'. max is '%s'" % (path, fstype, limit)) else: label = label[:limit] cmd.extend(get_flag_mapping("label", fs_family, param=label, strict=strict)) # If uuid is not specified, generate one and try to use it if uuid is None: uuid = str(uuid1()) cmd.extend(get_flag_mapping("uuid", fs_family, param=uuid, strict=strict)) if fs_family == "fat": fat_size = fstype.strip(string.ascii_letters) if fat_size in ["12", "16", "32"]: cmd.extend(get_flag_mapping("fatsize", fs_family, param=fat_size, strict=strict)) cmd.append(path) util.subp(cmd, capture=True) # if fs_family does not support specifying uuid then use blkid to find it # if blkid is unable to then just return None for uuid if fs_family not in family_flag_mappings['uuid']: try: uuid = block.blkid()[path]['UUID'] except: pass # return uuid, may be none if it could not be specified and blkid could not # find it return uuid def mkfs_from_config(path, info, strict=False): """Make filesystem on block device with given path according to storage config given""" fstype = info.get('fstype') if fstype is None: raise ValueError("fstype must be specified") # NOTE: Since old metadata on partitions that have not been wiped can cause # some mkfs commands to refuse to work, it's best to use force=True mkfs(path, fstype, strict=strict, force=True, uuid=info.get('uuid'), label=info.get('label')) curtin-0.1.0~bzr399/curtin/commands/__init__.py0000644000000000000000000000203612741214565017551 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
def populate_one_subcmd(parser, options_dict, handler): for ent in options_dict: args = ent[0] if not isinstance(args, (list, tuple)): args = (args,) parser.add_argument(*args, **ent[1]) parser.set_defaults(func=handler) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/apply_net.py0000644000000000000000000000702712741214565020012 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Ryan Harper # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import sys from .. import log import curtin.net as net import curtin.util as util from . import populate_one_subcmd LOG = log.LOG def apply_net(target, network_state=None, network_config=None): if network_state is None and network_config is None: msg = "Must provide at least config or state" sys.stderr.write(msg + "\n") raise Exception(msg) if target is None: msg = "Must provide target" sys.stderr.write(msg + "\n") raise Exception(msg) if network_state: ns = net.network_state.from_state_file(network_state) elif network_config: ns = net.parse_net_config(network_config) net.render_network_state(target=target, network_state=ns) def apply_net_main(args): # curtin apply_net [--net-state=/config/netstate.yml] [--target=/] # [--net-config=/config/maas_net.yml] state = util.load_command_environment() log.basicConfig(stream=args.log_file, verbosity=1) if args.target is not None: state['target'] = args.target if args.net_state is not None: state['network_state'] = args.net_state if args.net_config is not None: state['network_config'] = args.net_config if state['target'] is None: sys.stderr.write("Unable to find target. " "Use --target or set TARGET_MOUNT_POINT\n") sys.exit(2) if not state['network_config'] and not state['network_state']: sys.stderr.write("Must provide at least config or state\n") sys.exit(2) LOG.info('Applying network configuration') try: apply_net(target=state['target'], network_state=state['network_state'], network_config=state['network_config']) except Exception: LOG.exception('failed to apply network config') raise LOG.info('Applied network configuration successfully') sys.exit(0) CMD_ARGUMENTS = ( ((('-s', '--net-state'), {'help': ('file to read containing network state. ' 'defaults to env["OUTPUT_NETWORK_STATE"]'), 'metavar': 'NETSTATE', 'action': 'store', 'default': os.environ.get('OUTPUT_NETWORK_STATE')}), (('-t', '--target'), {'help': ('target filesystem root to apply network config to. ' 'default is env["TARGET_MOUNT_POINT"]'), 'metavar': 'TARGET', 'action': 'store', 'default': os.environ.get('TARGET_MOUNT_POINT')}), (('-c', '--net-config'), {'help': ('file to read containing curtin network config.'
'defaults to env["OUTPUT_NETWORK_CONFIG"]'), 'metavar': 'NETCONFIG', 'action': 'store', 'default': os.environ.get('OUTPUT_NETWORK_CONFIG')}))) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, apply_net_main) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/block_meta.py0000644000000000000000000015263112741214565020121 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . from collections import OrderedDict from curtin import (block, config, util) from curtin.block import mdadm from curtin.log import LOG from curtin.block import mkfs from curtin.reporter import events from . import populate_one_subcmd from curtin.udev import compose_udev_equality, udevadm_settle import glob import os import platform import re import sys import tempfile import time SIMPLE = 'simple' SIMPLE_BOOT = 'simple-boot' CUSTOM = 'custom' CMD_ARGUMENTS = ( ((('-D', '--devices'), {'help': 'which devices to operate on', 'action': 'append', 'metavar': 'DEVICE', 'default': None, }), ('--fstype', {'help': 'root partition filesystem type', 'choices': ['ext4', 'ext3'], 'default': 'ext4'}), (('-t', '--target'), {'help': 'chroot to target. default is env[TARGET_MOUNT_POINT]', 'action': 'store', 'metavar': 'TARGET', 'default': os.environ.get('TARGET_MOUNT_POINT')}), ('--boot-fstype', {'help': 'boot partition filesystem type', 'choices': ['ext4', 'ext3'], 'default': None}), ('mode', {'help': 'meta-mode to use', 'choices': [CUSTOM, SIMPLE, SIMPLE_BOOT]}), ) ) def block_meta(args): # main entry point for the block-meta command. 
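# For reference, a minimal hypothetical 'custom' mode storage config that
# the handlers below consume (YAML, abbreviated; ids and sizes invented):
#
#   storage:
#     config:
#       - {id: sda, type: disk, ptable: msdos, path: /dev/sda, wipe: superblock}
#       - {id: sda1, type: partition, device: sda, number: 1, size: 8GB}
#       - {id: sda1_root, type: format, volume: sda1, fstype: ext4}
#       - {id: mount_root, type: mount, device: sda1_root, path: /}
#
# Each entry's 'type' field selects one of the *_handler functions in this
# module via meta_custom's dispatch table.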
state = util.load_command_environment() cfg = config.load_command_config(args, state) if args.mode == CUSTOM or cfg.get("storage") is not None: meta_custom(args) elif args.mode in (SIMPLE, SIMPLE_BOOT): meta_simple(args) else: raise NotImplementedError("mode=%s is not implemented" % args.mode) def logtime(msg, func, *args, **kwargs): with util.LogTimer(LOG.debug, msg): return func(*args, **kwargs) def write_image_to_disk(source, dev): """ Write disk image to block device """ (devname, devnode) = block.get_dev_name_entry(dev) util.subp(args=['sh', '-c', ('wget "$1" --progress=dot:mega -O - |' 'tar -SxOzf - | dd of="$2"'), '--', source, devnode]) util.subp(['partprobe', devnode]) udevadm_settle() return block.get_root_device([devname, ]) def get_bootpt_cfg(cfg, enabled=False, fstype=None, root_fstype=None): # 'cfg' looks like: # enabled: boolean # fstype: filesystem type (default to 'fstype') # label: filesystem label (default to 'boot') # parm enable can enable, but not disable # parm fstype overrides cfg['fstype'] def_boot = (platform.machine() in ('aarch64') and not util.is_uefi_bootable()) ret = {'enabled': def_boot, 'fstype': None, 'label': 'boot'} ret.update(cfg) if enabled: ret['enabled'] = True if ret['enabled'] and not ret['fstype']: if root_fstype: ret['fstype'] = root_fstype if fstype: ret['fstype'] = fstype return ret def get_partition_format_type(cfg, machine=None, uefi_bootable=None): if machine is None: machine = platform.machine() if uefi_bootable is None: uefi_bootable = util.is_uefi_bootable() cfgval = cfg.get('format', None) if cfgval: return cfgval if uefi_bootable: return 'uefi' if machine in ['aarch64']: return 'gpt' elif machine.startswith('ppc64'): return 'prep' return "mbr" def block_find_sysfs_path(devname): # return the path in sys for device named devname # support either short name ('sda') or full path /dev/sda # sda -> /sys/class/block/sda # sda1 -> /sys/class/block/sda/sda1 if not devname: raise ValueError("empty devname provided to find_sysfs_path") sys_class_block = '/sys/class/block/' basename = os.path.basename(devname) # try without parent blockdevice, then prepend parent paths = [ os.path.join(sys_class_block, basename), os.path.join(sys_class_block, re.split('[\d+]', basename)[0], basename), ] # find path to devname directory in sysfs devname_sysfs = None for path in paths: if os.path.exists(path): devname_sysfs = path if devname_sysfs is None: err = ('No sysfs path to device:' ' {}'.format(devname_sysfs)) LOG.error(err) raise ValueError(err) return devname_sysfs def get_holders(devname): # Look up any block device holders. 
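# Worked examples for block_find_sysfs_path above (hypothetical devices):
#
#   block_find_sysfs_path('sda')       # -> '/sys/class/block/sda'
#   block_find_sysfs_path('/dev/sda1') # -> '/sys/class/block/sda1' if the
#       # kernel exposes the partition directly, otherwise the fallback
#       # '/sys/class/block/sda/sda1'
#
# get_holders below then lists e.g. /sys/class/block/vdb/holders, which
# would contain 'md0' if /dev/vdb were a member of that array.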
# Handle devices and partitions as devnames (vdb, md0, vdb7) devname_sysfs = block_find_sysfs_path(devname) if devname_sysfs: holders = os.listdir(os.path.join(devname_sysfs, 'holders')) LOG.debug("devname '%s' had holders: %s", devname, ','.join(holders)) return holders LOG.debug('get_holders: did not find sysfs path for %s', devname) return [] def clear_holders(sys_block_path): holders = os.listdir(os.path.join(sys_block_path, "holders")) LOG.info("clear_holders running on '%s', with holders '%s'" % (sys_block_path, holders)) for holder in holders: # get path to holder in /sys/block, then clear it try: holder_realpath = os.path.realpath( os.path.join(sys_block_path, "holders", holder)) clear_holders(holder_realpath) except IOError as e: # something might have already caused the holder to go away if util.is_file_not_found_exc(e): pass pass # detect what type of holder is using this volume and shut it down, need to # find more robust name of doing detection if "bcache" in sys_block_path: # bcache device part_devs = [] for part_dev in glob.glob(os.path.join(sys_block_path, "slaves", "*", "dev")): with open(part_dev, "r") as fp: part_dev_id = fp.read().rstrip() part_devs.append( os.path.split(os.path.realpath(os.path.join("/dev/block", part_dev_id)))[-1]) for cache_dev in glob.glob("/sys/fs/bcache/*/bdev*"): for part_dev in part_devs: if part_dev in os.path.realpath(cache_dev): # This is our bcache device, stop it, wait for udev to # settle with open(os.path.join(os.path.split(cache_dev)[0], "stop"), "w") as fp: LOG.info("stopping: %s" % fp) fp.write("1") udevadm_settle() break for part_dev in part_devs: block.wipe_volume(os.path.join("/dev", part_dev), mode="superblock") if os.path.exists(os.path.join(sys_block_path, "bcache")): # bcache device that isn't running, if it were, we would have found it # when we looked for holders try: with open(os.path.join(sys_block_path, "bcache", "set", "stop"), "w") as fp: LOG.info("stopping: %s" % fp) fp.write("1") except IOError as e: if not util.is_file_not_found_exc(e): raise e with open(os.path.join(sys_block_path, "bcache", "stop"), "w") as fp: LOG.info("stopping: %s" % fp) fp.write("1") udevadm_settle() if os.path.exists(os.path.join(sys_block_path, "md")): # md device block_dev = os.path.join("/dev/", os.path.split(sys_block_path)[-1]) # if these fail its okay, the array might not be assembled and thats # fine mdadm.mdadm_stop(block_dev) mdadm.mdadm_remove(block_dev) elif os.path.exists(os.path.join(sys_block_path, "dm")): # Shut down any volgroups with open(os.path.join(sys_block_path, "dm", "name"), "r") as fp: name = fp.read().split('-') util.subp(["lvremove", "--force", name[0].rstrip(), name[1].rstrip()], rcs=[0, 5]) util.subp(["vgremove", name[0].rstrip()], rcs=[0, 5, 6]) def devsync(devpath): LOG.debug('devsync for %s', devpath) util.subp(['partprobe', devpath], rcs=[0, 1]) udevadm_settle() for x in range(0, 10): if os.path.exists(devpath): LOG.debug('devsync happy - path %s now exists', devpath) return else: LOG.debug('Waiting on device path: %s', devpath) time.sleep(1) raise OSError('Failed to find device at path: %s', devpath) def determine_partition_kname(disk_kname, partition_number): for dev_type in ["nvme", "mmcblk"]: if disk_kname.startswith(dev_type): partition_number = "p%s" % partition_number break return "%s%s" % (disk_kname, partition_number) def determine_partition_number(partition_id, storage_config): vol = storage_config.get(partition_id) partnumber = vol.get('number') if vol.get('flag') == "logical": if not partnumber: 
LOG.warn('partition \'number\' key not set in config:\n%s', util.json_dumps(vol)) partnumber = 5 for key, item in storage_config.items(): if item.get('type') == "partition" and \ item.get('device') == vol.get('device') and\ item.get('flag') == "logical": if item.get('id') == vol.get('id'): break else: partnumber += 1 else: if not partnumber: LOG.warn('partition \'number\' key not set in config:\n%s', util.json_dumps(vol)) partnumber = 1 for key, item in storage_config.items(): if item.get('type') == "partition" and \ item.get('device') == vol.get('device'): if item.get('id') == vol.get('id'): break else: partnumber += 1 return partnumber def make_dname(volume, storage_config): state = util.load_command_environment() rules_dir = os.path.join(state['scratch'], "rules.d") vol = storage_config.get(volume) path = get_path_to_storage_volume(volume, storage_config) ptuuid = None dname = vol.get('name') if vol.get('type') in ["partition", "disk"]: (out, _err) = util.subp(["blkid", "-o", "export", path], capture=True, rcs=[0, 2], retries=[1, 1, 1]) for line in out.splitlines(): if "PTUUID" in line or "PARTUUID" in line: ptuuid = line.split('=')[-1] break # we may not always be able to find a uniq identifier on devices with names if not ptuuid and vol.get('type') in ["disk", "partition"]: LOG.warning("Can't find a uuid for volume: {}. Skipping dname.".format( dname)) return rule = [ compose_udev_equality("SUBSYSTEM", "block"), compose_udev_equality("ACTION", "add|change"), ] if vol.get('type') == "disk": rule.append(compose_udev_equality('ENV{DEVTYPE}', "disk")) rule.append(compose_udev_equality('ENV{ID_PART_TABLE_UUID}', ptuuid)) elif vol.get('type') == "partition": rule.append(compose_udev_equality('ENV{DEVTYPE}', "partition")) dname = storage_config.get(vol.get('device')).get('name') + \ "-part%s" % determine_partition_number(volume, storage_config) rule.append(compose_udev_equality('ENV{ID_PART_ENTRY_UUID}', ptuuid)) elif vol.get('type') == "raid": md_data = mdadm.mdadm_query_detail(path) md_uuid = md_data.get('MD_UUID') rule.append(compose_udev_equality("ENV{MD_UUID}", md_uuid)) elif vol.get('type') == "bcache": rule.append(compose_udev_equality("ENV{DEVNAME}", path)) elif vol.get('type') == "lvm_partition": volgroup_name = storage_config.get(vol.get('volgroup')).get('name') dname = "%s-%s" % (volgroup_name, dname) rule.append(compose_udev_equality("ENV{DM_NAME}", dname)) rule.append("SYMLINK+=\"disk/by-dname/%s\"" % dname) LOG.debug("Writing dname udev rule '{}'".format(str(rule))) util.ensure_dir(rules_dir) with open(os.path.join(rules_dir, volume), "w") as fp: fp.write(', '.join(rule)) def get_path_to_storage_volume(volume, storage_config): # Get path to block device for volume. Volume param should refer to id of # volume in storage config LOG.debug('get_path_to_storage_volume for volume {}'.format(volume)) devsync_vol = None vol = storage_config.get(volume) if not vol: raise ValueError("volume with id '%s' not found" % volume) # Find path to block device if vol.get('type') == "partition": partnumber = determine_partition_number(vol.get('id'), storage_config) disk_block_path = get_path_to_storage_volume(vol.get('device'), storage_config) (base_path, disk_kname) = os.path.split(disk_block_path) partition_kname = determine_partition_kname(disk_kname, partnumber) volume_path = os.path.join(base_path, partition_kname) devsync_vol = os.path.join(disk_block_path) elif vol.get('type') == "disk": # Get path to block device for disk. 
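# For illustration (hypothetical name and uuid): for a disk entry named
# 'maindisk', make_dname above writes a single-line udev rule like
#
#   SUBSYSTEM=="block", ACTION=="add|change", ENV{DEVTYPE}=="disk",
#   ENV{ID_PART_TABLE_UUID}=="1b4e28ba", SYMLINK+="disk/by-dname/maindisk"
#
# so /dev/disk/by-dname/maindisk resolves to the same disk regardless of
# device enumeration order.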
Device_id param should refer # to id of device in storage config if vol.get('serial'): volume_path = block.lookup_disk(vol.get('serial')) elif vol.get('path'): # resolve any symlinks to the dev_kname so sys/class/block access # is valid. ie, there are no udev generated values in sysfs volume_path = os.path.realpath(vol.get('path')) elif vol.get('wwn'): by_wwn = '/dev/disk/by-id/wwn-%s' % vol.get('wwn') volume_path = os.path.realpath(by_wwn) else: raise ValueError("serial, wwn or path to block dev must be \ specified to identify disk") elif vol.get('type') == "lvm_partition": # For lvm partitions, a directory in /dev/ should be present with the # name of the volgroup the partition belongs to. We can simply append # the id of the lvm partition to the path of that directory volgroup = storage_config.get(vol.get('volgroup')) if not volgroup: raise ValueError("lvm volume group '%s' could not be found" % vol.get('volgroup')) volume_path = os.path.join("/dev/", volgroup.get('name'), vol.get('name')) elif vol.get('type') == "dm_crypt": # For dm_crypted partitions, unencrypted block device is at # /dev/mapper/ dm_name = vol.get('dm_name') if not dm_name: dm_name = vol.get('id') volume_path = os.path.join("/dev", "mapper", dm_name) elif vol.get('type') == "raid": # For raid partitions, block device is at /dev/mdX name = vol.get('name') volume_path = os.path.join("/dev", name) elif vol.get('type') == "bcache": # For bcache setups, the only reliable way to determine the name of the # block device is to look in all /sys/block/bcacheX/ dirs and see what # block devs are in the slaves dir there. Then, those blockdevs can be # checked against the kname of the devs in the config for the desired # bcache device. This is not very elegant though backing_device_kname = os.path.split(get_path_to_storage_volume( vol.get('backing_device'), storage_config))[-1] sys_path = list(filter(lambda x: backing_device_kname in x, glob.glob("/sys/block/bcache*/slaves/*")))[0] while "bcache" not in os.path.split(sys_path)[-1]: sys_path = os.path.split(sys_path)[0] volume_path = os.path.join("/dev", os.path.split(sys_path)[-1]) LOG.debug('got bcache volume path {}'.format(volume_path)) else: raise NotImplementedError("cannot determine the path to storage \ volume '%s' with type '%s'" % (volume, vol.get('type'))) # sync devices if not devsync_vol: devsync_vol = volume_path devsync(devsync_vol) LOG.debug('return volume path {}'.format(volume_path)) return volume_path def disk_handler(info, storage_config): ptable = info.get('ptable') disk = get_path_to_storage_volume(info.get('id'), storage_config) # Handle preserve flag if info.get('preserve'): if not ptable: # Don't need to check state, return return # Check state of current ptable try: (out, _err) = util.subp(["blkid", "-o", "export", disk], capture=True) except util.ProcessExecutionError: raise ValueError("disk '%s' has no readable partition table or \ cannot be accessed, but preserve is set to true, so cannot \ continue" % info.get('id')) current_ptable = list(filter(lambda x: "PTTYPE" in x, out.splitlines()))[0].split("=")[-1] if current_ptable == "dos" and ptable != "msdos" or \ current_ptable == "gpt" and ptable != "gpt": raise ValueError("disk '%s' does not have correct \ partition table, but preserve is set to true, so not \ creating table."
% info.get('id')) LOG.info("disk '%s' marked to be preserved, so keeping partition \ table" % info.get('id')) return # Wipe the disk if info.get('wipe') and info.get('wipe') != "none": # The disk has a label, clear all partitions mdadm.mdadm_assemble(scan=True) disk_kname = os.path.split(disk)[-1] syspath_partitions = list( os.path.split(prt)[0] for prt in glob.glob("/sys/block/%s/*/partition" % disk_kname)) for partition in syspath_partitions: clear_holders(partition) with open(os.path.join(partition, "dev"), "r") as fp: block_no = fp.read().rstrip() partition_path = os.path.realpath( os.path.join("/dev/block", block_no)) block.wipe_volume(partition_path, mode=info.get('wipe')) clear_holders("/sys/block/%s" % disk_kname) block.wipe_volume(disk, mode=info.get('wipe')) # Create partition table on disk if info.get('ptable'): LOG.info("labeling device: '%s' with '%s' partition table", disk, ptable) if ptable == "gpt": util.subp(["sgdisk", "--clear", disk]) elif ptable == "msdos": util.subp(["parted", disk, "--script", "mklabel", "msdos"]) # Make the name if needed if info.get('name'): make_dname(info.get('id'), storage_config) def getnumberoflogicaldisks(device, storage_config): logicaldisks = 0 for key, item in storage_config.items(): if item.get('device') == device and item.get('flag') == "logical": logicaldisks = logicaldisks + 1 return logicaldisks def find_previous_partition(disk_id, part_id, storage_config): last_partnum = None for item_id, command in storage_config.items(): if item_id == part_id: break # skip anything not on this disk, not a 'partition' or 'extended' if command['type'] != 'partition' or command['device'] != disk_id: continue if command.get('flag') == "extended": continue last_partnum = determine_partition_number(item_id, storage_config) return last_partnum def partition_handler(info, storage_config): device = info.get('device') size = info.get('size') flag = info.get('flag') disk_ptable = storage_config.get(device).get('ptable') partition_type = None if not device: raise ValueError("device must be set for partition to be created") if not size: raise ValueError("size must be specified for partition to be created") disk = get_path_to_storage_volume(device, storage_config) partnumber = determine_partition_number(info.get('id'), storage_config) disk_kname = os.path.split( get_path_to_storage_volume(device, storage_config))[-1] # consider the disks logical sector size when calculating sectors try: prefix = "/sys/block/%s/queue/" % disk_kname with open(prefix + "logical_block_size", "r") as f: l = f.readline() logical_block_size_bytes = int(l) except: logical_block_size_bytes = 512 LOG.debug( "{} logical_block_size_bytes: {}".format(disk_kname, logical_block_size_bytes)) if partnumber > 1: if partnumber == 5 and disk_ptable == "msdos": for key, item in storage_config.items(): if item.get('type') == "partition" and \ item.get('device') == device and \ item.get('flag') == "extended": extended_part_no = determine_partition_number( key, storage_config) break partition_kname = determine_partition_kname( disk_kname, extended_part_no) previous_partition = "/sys/block/%s/%s/" % \ (disk_kname, partition_kname) else: pnum = find_previous_partition(device, info['id'], storage_config) LOG.debug("previous partition number for '%s' found to be '%s'", info.get('id'), pnum) partition_kname = determine_partition_kname(disk_kname, pnum) previous_partition = "/sys/block/%s/%s/" % \ (disk_kname, partition_kname) LOG.debug("previous partition: {}".format(previous_partition)) # XXX: sys/block/X/{size,start} is
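# Worked example of the sector arithmetic below, assuming 512-byte logical
# blocks (values hypothetical): a previous partition with sysfs start=2048
# and size=16384 gives previous_start_sectors=2048 and
# previous_size_sectors=16384. alignment_offset = (1 << 20) / 512 = 2048
# sectors (1MiB), so the next primary/GPT partition begins at sector
# 2048 + 16384 = 18432. A size of '8M' (8 * 2**20 bytes, assuming
# human2bytes uses 1024-based multipliers) yields
# length_sectors = int(8388608 / 512) - 1 = 16383.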
*ALWAYS* in 512b value previous_size = util.load_file(os.path.join(previous_partition, "size")) previous_size_sectors = (int(previous_size) * 512 / logical_block_size_bytes) previous_start = util.load_file(os.path.join(previous_partition, "start")) previous_start_sectors = (int(previous_start) * 512 / logical_block_size_bytes) LOG.debug("previous partition.size_sectors: {}".format( previous_size_sectors)) LOG.debug("previous partition.start_sectors: {}".format( previous_start_sectors)) # Align to 1M at the beginning of the disk and at logical partitions alignment_offset = int((1 << 20) / logical_block_size_bytes) if partnumber == 1: # start of disk offset_sectors = alignment_offset else: # further partitions if disk_ptable == "gpt" or flag != "logical": # msdos primary and any gpt part start after former partition end offset_sectors = previous_start_sectors + previous_size_sectors else: # msdos extended/logical partitions if flag == "logical": if partnumber == 5: # First logical partition # start at extended partition start + alignment_offset offset_sectors = (previous_start_sectors + alignment_offset) else: # Further logical partitions # start at former logical partition end + alignment_offset offset_sectors = (previous_start_sectors + previous_size_sectors + alignment_offset) length_bytes = util.human2bytes(size) # start sector is part of the sectors that define the partitions size # so length has to be "size in sectors - 1" length_sectors = int(length_bytes / logical_block_size_bytes) - 1 # logical partitions can't share their start sector with the extended # partition and logical partitions can't go head-to-head, so we have to # realign and for that increase size as required if info.get('flag') == "extended": logdisks = getnumberoflogicaldisks(device, storage_config) length_sectors = length_sectors + (logdisks * alignment_offset) # Handle preserve flag if info.get('preserve'): return elif storage_config.get(device).get('preserve'): raise NotImplementedError("Partition '%s' is not marked to be \ preserved, but device '%s' is. At this time, preserving devices \ but not also the partitions on the devices is not supported, \ because of the possibility of damaging partitions intended to be \ preserved." 
% (info.get('id'), device)) # Set flag # 'sgdisk --list-types' sgdisk_flags = {"boot": 'ef00', "lvm": '8e00', "raid": 'fd00', "bios_grub": 'ef02', "prep": '4100', "swap": '8200', "home": '8302', "linux": '8300'} LOG.info("adding partition '%s' to disk '%s' (ptable: '%s')", info.get('id'), device, disk_ptable) LOG.debug("partnum: %s offset_sectors: %s length_sectors: %s", partnumber, offset_sectors, length_sectors) if disk_ptable == "msdos": if flag in ["extended", "logical", "primary"]: partition_type = flag else: partition_type = "primary" cmd = ["parted", disk, "--script", "mkpart", partition_type, "%ss" % offset_sectors, "%ss" % str(offset_sectors + length_sectors)] util.subp(cmd, capture=True) elif disk_ptable == "gpt": if flag and flag in sgdisk_flags: typecode = sgdisk_flags[flag] else: typecode = sgdisk_flags['linux'] cmd = ["sgdisk", "--new", "%s:%s:%s" % (partnumber, offset_sectors, length_sectors + offset_sectors), "--typecode=%s:%s" % (partnumber, typecode), disk] util.subp(cmd, capture=True) else: raise ValueError("parent partition has invalid partition table") # Wipe the partition if told to do so if info.get('wipe') and info.get('wipe') != "none": block.wipe_volume( get_path_to_storage_volume(info.get('id'), storage_config), mode=info.get('wipe')) # Make the name if needed if storage_config.get(device).get('name') and partition_type != 'extended': make_dname(info.get('id'), storage_config) def format_handler(info, storage_config): volume = info.get('volume') if not volume: raise ValueError("volume must be specified for partition '%s'" % info.get('id')) # Get path to volume volume_path = get_path_to_storage_volume(volume, storage_config) # Handle preserve flag if info.get('preserve'): # Volume marked to be preserved, not formatting return # Make filesystem using block library LOG.debug("mkfs {} info: {}".format(volume_path, info)) mkfs.mkfs_from_config(volume_path, info) def mount_handler(info, storage_config): state = util.load_command_environment() path = info.get('path') filesystem = storage_config.get(info.get('device')) if not path and filesystem.get('fstype') != "swap": raise ValueError("path to mountpoint must be specified") volume = storage_config.get(filesystem.get('volume')) # Get path to volume volume_path = get_path_to_storage_volume(filesystem.get('volume'), storage_config) if filesystem.get('fstype') != "swap": # Figure out what point should be while len(path) > 0 and path[0] == "/": path = path[1:] mount_point = os.path.join(state['target'], path) # Create mount point if does not exist util.ensure_dir(mount_point) # Mount volume util.subp(['mount', volume_path, mount_point]) # Add volume to fstab if state['fstab']: with open(state['fstab'], "a") as fp: if volume.get('type') in ["raid", "bcache", "disk", "lvm_partition"]: location = get_path_to_storage_volume(volume.get('id'), storage_config) elif volume.get('type') in ["partition", "dm_crypt"]: location = "UUID=%s" % block.get_volume_uuid(volume_path) else: raise ValueError("cannot write fstab for volume type '%s'" % volume.get("type")) if filesystem.get('fstype') == "swap": path = "none" options = "sw" else: path = "/%s" % path options = "defaults" if filesystem.get('fstype') in ["fat", "fat12", "fat16", "fat32", "fat64"]: fstype = "vfat" else: fstype = filesystem.get('fstype') fp.write("%s %s %s %s 0 0\n" % (location, path, fstype, options)) else: LOG.info("fstab not in environment, so not writing") def lvm_volgroup_handler(info, storage_config): devices = info.get('devices') device_paths = [] name = 
info.get('name') if not devices: raise ValueError("devices for volgroup '%s' must be specified" % info.get('id')) if not name: raise ValueError("name for volgroups needs to be specified") for device_id in devices: device = storage_config.get(device_id) if not device: raise ValueError("device '%s' could not be found in storage config" % device_id) device_paths.append(get_path_to_storage_volume(device_id, storage_config)) # Handle preserve flag if info.get('preserve'): # LVM will probably be offline, so start it util.subp(["vgchange", "-a", "y"]) # Verify that volgroup exists and contains all specified devices current_paths = [] (out, _err) = util.subp(["pvdisplay", "-C", "--separator", "=", "-o", "vg_name,pv_name", "--noheadings"], capture=True) for line in out.splitlines(): if name in line: current_paths.append(line.split("=")[-1]) if set(current_paths) != set(device_paths): raise ValueError("volgroup '%s' marked to be preserved, but does \ not exist or does not contain the right physical \ volumes" % info.get('id')) else: # Create vgrcreate command and run cmd = ["vgcreate", name] cmd.extend(device_paths) util.subp(cmd) def lvm_partition_handler(info, storage_config): volgroup = storage_config.get(info.get('volgroup')).get('name') name = info.get('name') if not volgroup: raise ValueError("lvm volgroup for lvm partition must be specified") if not name: raise ValueError("lvm partition name must be specified") # Handle preserve flag if info.get('preserve'): (out, _err) = util.subp(["lvdisplay", "-C", "--separator", "=", "-o", "lv_name,vg_name", "--noheadings"], capture=True) found = False for line in out.splitlines(): if name in line: if volgroup == line.split("=")[-1]: found = True break if not found: raise ValueError("lvm partition '%s' marked to be preserved, but \ does not exist or does not mach storage \ configuration" % info.get('id')) elif storage_config.get(info.get('volgroup')).get('preserve'): raise NotImplementedError("Lvm Partition '%s' is not marked to be \ preserved, but volgroup '%s' is. At this time, preserving \ volgroups but not also the lvm partitions on the volgroup is \ not supported, because of the possibility of damaging lvm \ partitions intended to be preserved." 
% (info.get('id'), volgroup)) else: cmd = ["lvcreate", volgroup, "-n", name] if info.get('size'): cmd.extend(["-L", info.get('size')]) else: cmd.extend(["-l", "100%FREE"]) util.subp(cmd) if info.get('ptable'): raise ValueError("Partition tables on top of lvm logical volumes is \ not supported") make_dname(info.get('id'), storage_config) def dm_crypt_handler(info, storage_config): state = util.load_command_environment() volume = info.get('volume') key = info.get('key') keysize = info.get('keysize') cipher = info.get('cipher') dm_name = info.get('dm_name') if not volume: raise ValueError("volume for cryptsetup to operate on must be \ specified") if not key: raise ValueError("encryption key must be specified") if not dm_name: dm_name = info.get('id') volume_path = get_path_to_storage_volume(volume, storage_config) # TODO: this is insecure, find better way to do this tmp_keyfile = tempfile.mkstemp()[1] fp = open(tmp_keyfile, "w") fp.write(key) fp.close() cmd = ["cryptsetup"] if cipher: cmd.extend(["--cipher", cipher]) if keysize: cmd.extend(["--key-size", keysize]) cmd.extend(["luksFormat", volume_path, tmp_keyfile]) util.subp(cmd) cmd = ["cryptsetup", "open", "--type", "luks", volume_path, dm_name, "--key-file", tmp_keyfile] util.subp(cmd) os.remove(tmp_keyfile) # A crypttab will be created in the same directory as the fstab in the # configuration. This will then be copied onto the system later if state['fstab']: crypt_tab_location = os.path.join(os.path.split(state['fstab'])[0], "crypttab") uuid = block.get_volume_uuid(volume_path) with open(crypt_tab_location, "a") as fp: fp.write("%s UUID=%s none luks\n" % (dm_name, uuid)) else: LOG.info("fstab configuration is not present in environment, so \ cannot locate an appropriate directory to write crypttab in \ so not writing crypttab") def raid_handler(info, storage_config): state = util.load_command_environment() devices = info.get('devices') raidlevel = info.get('raidlevel') spare_devices = info.get('spare_devices') md_devname = block.dev_path(info.get('name')) if not devices: raise ValueError("devices for raid must be specified") if raidlevel not in ['linear', 'raid0', 0, 'stripe', 'raid1', 1, 'mirror', 'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10]: raise ValueError("invalid raidlevel '%s'" % raidlevel) if raidlevel in ['linear', 'raid0', 0, 'stripe']: if spare_devices: raise ValueError("spareunsupported in raidlevel '%s'" % raidlevel) LOG.debug('raid: cfg: {}'.format(util.json_dumps(info))) device_paths = list(get_path_to_storage_volume(dev, storage_config) for dev in devices) LOG.debug('raid: device path mapping: {}'.format( zip(devices, device_paths))) spare_device_paths = [] if spare_devices: spare_device_paths = list(get_path_to_storage_volume(dev, storage_config) for dev in spare_devices) LOG.debug('raid: spare device path mapping: {}'.format( zip(spare_devices, spare_device_paths))) # Handle preserve flag if info.get('preserve'): # check if the array is already up, if not try to assemble if not mdadm.md_check(md_devname, raidlevel, device_paths, spare_device_paths): LOG.info("assembling preserved raid for " "{}".format(md_devname)) mdadm.mdadm_assemble(md_devname, device_paths, spare_device_paths) # try again after attempting to assemble if not mdadm.md_check(md_devname, raidlevel, devices, spare_device_paths): raise ValueError("Unable to confirm preserved raid array: " " {}".format(md_devname)) # raid is all OK return mdadm.mdadm_create(md_devname, raidlevel, device_paths, spare_device_paths, info.get('mdname', '')) # Make dname 
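# For illustration (hypothetical name and UUID): dm_crypt_handler above
# appends a crypttab line of the form
#
#   cryptroot UUID=3f6b1f76 none luks
#
# i.e. <dm_name> <source device UUID> <keyfile> <options>, which the
# installed system consumes at boot to unlock the volume.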
rule for this dev make_dname(info.get('id'), storage_config) # A mdadm.conf will be created in the same directory as the fstab in the # configuration. This will then be copied onto the installed system later. # The file must also be written onto the running system to enable it to run # mdadm --assemble and continue installation if state['fstab']: mdadm_location = os.path.join(os.path.split(state['fstab'])[0], "mdadm.conf") mdadm_scan_data = mdadm.mdadm_detail_scan() with open(mdadm_location, "w") as fp: fp.write(mdadm_scan_data) else: LOG.info("fstab configuration is not present in the environment, so \ cannot locate an appropriate directory to write mdadm.conf in, \ so not writing mdadm.conf") # If ptable is specified, call disk_handler on this mdadm device to create # the table if info.get('ptable'): disk_handler(info, storage_config) def bcache_handler(info, storage_config): backing_device = get_path_to_storage_volume(info.get('backing_device'), storage_config) cache_device = get_path_to_storage_volume(info.get('cache_device'), storage_config) cache_mode = info.get('cache_mode', None) if not backing_device or not cache_device: raise ValueError("backing device and cache device for bcache" " must be specified") # The bcache module is not loaded when bcache is installed by apt-get, so # we will load it now util.subp(["modprobe", "bcache"]) bcache_sysfs = "/sys/fs/bcache" udevadm_settle(exists=bcache_sysfs) def register_bcache(bcache_device): with open("/sys/fs/bcache/register", "w") as fp: fp.write(bcache_device) def ensure_bcache_is_registered(bcache_device, expected, retry=0): # find the actual bcache device name via sysfs using the # backing device's holders directory. LOG.debug('check just created bcache %s if it is registered', bcache_device) try: udevadm_settle(exists=expected) if os.path.exists(expected): LOG.debug('Found bcache dev %s at expected path %s', bcache_device, expected) return LOG.debug('bcache device path not found: %s', expected) local_holders = get_holders(bcache_device) LOG.debug('got initial holders being "%s"', local_holders) if len(local_holders) == 0: raise ValueError("holders == 0 , expected non-zero") except (OSError, IndexError, ValueError): # Some versions of bcache-tools will register the bcache device as # soon as we run make-bcache using udev rules, so wait for udev to # settle, then try to locate the dev, on older versions we need to # register it manually though LOG.debug('bcache device was not registered, registering %s at ' '/sys/fs/bcache/register', bcache_device) try: register_bcache(bcache_device) udevadm_settle(exists=expected) except (IOError): # device creation is notoriously racy and this can trigger # "Invalid argument" IOErrors if it got created in "the # meantime" - just restart the function a few times to # check it all again if retry < 5: ensure_bcache_is_registered(bcache_device, expected, (retry+1)) else: LOG.debug('Repetive error registering the bcache dev %s', bcache_device) raise ValueError("bcache device %s can't be registered", bcache_device) if cache_device: # /sys/class/block/XXX/YYY/ cache_device_sysfs = block_find_sysfs_path(cache_device) if os.path.exists(os.path.join(cache_device_sysfs, "bcache")): LOG.debug('caching device already exists at {}/bcache. 
Read ' 'cset.uuid'.format(cache_device_sysfs)) (out, err) = util.subp(["bcache-super-show", cache_device], capture=True) LOG.debug('bcache-super-show=[{}]'.format(out)) [cset_uuid] = [line.split()[-1] for line in out.split("\n") if line.startswith('cset.uuid')] else: LOG.debug('caching device does not yet exist at {}/bcache. Make ' 'cache and get uuid'.format(cache_device_sysfs)) # make the cache device, extracting cacheset uuid (out, err) = util.subp(["make-bcache", "-C", cache_device], capture=True) LOG.debug('out=[{}]'.format(out)) [cset_uuid] = [line.split()[-1] for line in out.split("\n") if line.startswith('Set UUID:')] target_sysfs_path = '/sys/fs/bcache/%s' % cset_uuid ensure_bcache_is_registered(cache_device, target_sysfs_path) if backing_device: backing_device_sysfs = block_find_sysfs_path(backing_device) target_sysfs_path = os.path.join(backing_device_sysfs, "bcache") if not os.path.exists(os.path.join(backing_device_sysfs, "bcache")): util.subp(["make-bcache", "-B", backing_device]) ensure_bcache_is_registered(backing_device, target_sysfs_path) # via the holders we can identify which bcache device we just created # for a given backing device holders = get_holders(backing_device) if len(holders) != 1: err = ('Invalid number {} of holding devices:' ' "{}"'.format(len(holders), holders)) LOG.error(err) raise ValueError(err) [bcache_dev] = holders LOG.debug('The just created bcache device is {}'.format(holders)) if cache_device: # if we specify both then we need to attach backing to cache if cset_uuid: LOG.info("Attaching backing device to cacheset: " "{} -> {} cset.uuid: {}".format(backing_device, cache_device, cset_uuid)) attach = os.path.join(backing_device_sysfs, "bcache", "attach") with open(attach, "w") as fp: fp.write(cset_uuid) else: msg = "Invalid cset_uuid: {}".format(cset_uuid) LOG.error(msg) raise ValueError(msg) if cache_mode: LOG.info("Setting cache_mode on {} to {}".format(bcache_dev, cache_mode)) cache_mode_file = \ '/sys/block/{}/bcache/cache_mode'.format(bcache_dev) with open(cache_mode_file, "w") as fp: fp.write(cache_mode) else: # no backing device if cache_mode: raise ValueError("cache mode specified which can only be set per \ backing devices, but none was specified") if info.get('name'): # Make dname rule for this dev make_dname(info.get('id'), storage_config) if info.get('ptable'): raise ValueError("Partition tables on top of lvm logical volumes is \ not supported") LOG.debug('Finished bcache creation for backing {} or caching {}' .format(backing_device, cache_device)) def extract_storage_ordered_dict(config): storage_config = config.get('storage', {}) if not storage_config: raise ValueError("no 'storage' entry in config") scfg = storage_config.get('config') if not scfg: raise ValueError("invalid storage config data") # Since storage config will often have to be searched for a value by its # id, and this can become very inefficient as storage_config grows, a dict # will be generated with the id of each component of the storage_config as # its index and the component of storage_config as its value return OrderedDict((d["id"], d) for (i, d) in enumerate(scfg)) def meta_custom(args): """Does custom partitioning based on the layout provided in the config file. Section with the name storage contains information on which partitions on which disks to create. It also contains information about overlays (raid, lvm, bcache) which need to be setup. 
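For example (hypothetical ids), extract_storage_ordered_dict turns

    storage:
      config:
        - {id: sda, type: disk, ...}
        - {id: sda1, type: partition, device: sda, ...}

into OrderedDict([('sda', {...}), ('sda1', {...})]), so each handler can
look up any referenced item by id in constant time while preserving the
declaration order that meta_custom iterates in.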
""" command_handlers = { 'disk': disk_handler, 'partition': partition_handler, 'format': format_handler, 'mount': mount_handler, 'lvm_volgroup': lvm_volgroup_handler, 'lvm_partition': lvm_partition_handler, 'dm_crypt': dm_crypt_handler, 'raid': raid_handler, 'bcache': bcache_handler } state = util.load_command_environment() cfg = config.load_command_config(args, state) storage_config_dict = extract_storage_ordered_dict(cfg) # set up reportstack stack_prefix = state.get('report_stack_prefix', '') for item_id, command in storage_config_dict.items(): handler = command_handlers.get(command['type']) if not handler: raise ValueError("unknown command type '%s'" % command['type']) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="configuring %s: %s" % (command['type'], command['id'])): try: handler(command, storage_config_dict) except Exception as error: LOG.error("An error occured handling '%s': %s - %s" % (item_id, type(error).__name__, error)) raise return 0 def meta_simple(args): """Creates a root partition. If args.mode == SIMPLE_BOOT, it will also create a separate /boot partition. """ state = util.load_command_environment() cfg = config.load_command_config(args, state) if args.target is not None: state['target'] = args.target if state['target'] is None: sys.stderr.write("Unable to find target. " "Use --target or set TARGET_MOUNT_POINT\n") sys.exit(2) devices = args.devices if devices is None: devices = cfg.get('block-meta', {}).get('devices', []) bootpt = get_bootpt_cfg( cfg.get('block-meta', {}).get('boot-partition', {}), enabled=args.mode == SIMPLE_BOOT, fstype=args.boot_fstype, root_fstype=args.fstype) ptfmt = get_partition_format_type(cfg.get('block-meta', {})) # Remove duplicates but maintain ordering. devices = list(OrderedDict.fromkeys(devices)) # Multipath devices might be automatically assembled if multipath-tools # package is available in the installation environment. We need to stop # all multipath devices to exclusively use one of paths as a target disk. block.stop_all_unused_multipath_devices() if len(devices) == 0: devices = block.get_installable_blockdevs() LOG.warn("'%s' mode, no devices given. unused list: %s", args.mode, devices) # Check if the list of installable block devices is still empty after # checking for block devices and filtering out the removable ones. In # this case we may have a system which has its harddrives reported by # lsblk incorrectly. In this case we search for installable # blockdevices that are removable as a last resort before raising an # exception. if len(devices) == 0: devices = block.get_installable_blockdevs(include_removable=True) if len(devices) == 0: # Fail gracefully if no devices are found, still. raise Exception("No valid target devices found that curtin " "can install on.") else: LOG.warn("No non-removable, installable devices found. List " "populated with removable devices allowed: %s", devices) if len(devices) > 1: if args.devices is not None: LOG.warn("'%s' mode but multiple devices given. " "using first found", args.mode) available = [f for f in devices if block.is_valid_device(f)] target = sorted(available)[0] LOG.warn("mode is '%s'. multiple devices given. 
using '%s' " "(first available)", args.mode, target) else: target = devices[0] if not block.is_valid_device(target): raise Exception("target device '%s' is not a valid device" % target) (devname, devnode) = block.get_dev_name_entry(target) LOG.info("installing in '%s' mode to '%s'", args.mode, devname) sources = cfg.get('sources', {}) dd_images = util.get_dd_images(sources) if len(dd_images): # we have at least one dd-able image # we will only take the first one rootdev = write_image_to_disk(dd_images[0], devname) util.subp(['mount', rootdev, state['target']]) return 0 # helper partition will forcibly set up partition there ptcmd = ['partition', '--format=' + ptfmt] if bootpt['enabled']: ptcmd.append('--boot') ptcmd.append(devnode) if bootpt['enabled'] and ptfmt in ("uefi", "prep"): raise ValueError("format=%s with boot partition not supported" % ptfmt) bootdev_ptnum = None rootdev_ptnum = None bootdev = None if bootpt['enabled']: bootdev_ptnum = 1 rootdev_ptnum = 2 else: if ptfmt == "prep": rootdev_ptnum = 2 else: rootdev_ptnum = 1 logtime("creating partition with: %s" % ' '.join(ptcmd), util.subp, ptcmd) ptpre = "" if not os.path.exists("%s%s" % (devnode, rootdev_ptnum)): # perhaps the device is /dev/p if os.path.exists("%sp%s" % (devnode, rootdev_ptnum)): ptpre = "p" else: LOG.warn("root device %s%s did not exist, expecting failure", devnode, rootdev_ptnum) if bootdev_ptnum: bootdev = "%s%s%s" % (devnode, ptpre, bootdev_ptnum) if ptfmt == "uefi": # assumed / required from the partitioner pt_uefi uefi_ptnum = "15" uefi_label = "uefi-boot" uefi_dev = "%s%s%s" % (devnode, ptpre, uefi_ptnum) rootdev = "%s%s%s" % (devnode, ptpre, rootdev_ptnum) LOG.debug("rootdev=%s bootdev=%s fmt=%s bootpt=%s", rootdev, bootdev, ptfmt, bootpt) # mkfs for root partition first and mount cmd = ['mkfs.%s' % args.fstype, '-q', '-L', 'cloudimg-rootfs', rootdev] logtime(' '.join(cmd), util.subp, cmd) util.subp(['mount', rootdev, state['target']]) if bootpt['enabled']: # create 'boot' directory in state['target'] boot_dir = os.path.join(state['target'], 'boot') util.subp(['mkdir', boot_dir]) # mkfs for boot partition and mount cmd = ['mkfs.%s' % bootpt['fstype'], '-q', '-L', bootpt['label'], bootdev] logtime(' '.join(cmd), util.subp, cmd) util.subp(['mount', bootdev, boot_dir]) if ptfmt == "uefi": uefi_dir = os.path.join(state['target'], 'boot', 'efi') util.ensure_dir(uefi_dir) util.subp(['mount', uefi_dev, uefi_dir]) if state['fstab']: with open(state['fstab'], "w") as fp: if bootpt['enabled']: fp.write("LABEL=%s /boot %s defaults 0 0\n" % (bootpt['label'], bootpt['fstype'])) if ptfmt == "uefi": # label created in helpers/partition for uefi fp.write("LABEL=%s /boot/efi vfat defaults 0 0\n" % uefi_label) fp.write("LABEL=%s / %s defaults 0 0\n" % ('cloudimg-rootfs', args.fstype)) else: LOG.info("fstab not in environment, so not writing") return 0 def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, block_meta) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/block_wipe.py0000644000000000000000000000314112741214565020126 0ustar 00000000000000# Copyright (C) 2016 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. 
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.

import sys

import curtin.block as block

from . import populate_one_subcmd


def wipe_main(args):
    # curtin block-wipe device [device2 [device3]]
    for blockdev in args.devices:
        try:
            block.wipe_volume(blockdev, mode=args.mode)
        except Exception as e:
            sys.stderr.write(
                "Failed to wipe volume %s in mode %s: %s" %
                (blockdev, args.mode, e))
            sys.exit(1)
    sys.exit(0)


CMD_ARGUMENTS = (
    ((('-m', '--mode'),
      {'help': 'mode for wipe.', 'action': 'store',
       'default': 'superblock',
       'choices': ['zero', 'superblock', 'superblock-recursive', 'random']}),
     ('devices',
      {'help': 'devices to wipe', 'default': [], 'nargs': '+'}),
     )
)


def POPULATE_SUBCMD(parser):
    populate_one_subcmd(parser, CMD_ARGUMENTS, wipe_main)

# vi: ts=4 expandtab syntax=python
curtin-0.1.0~bzr399/curtin/commands/curthooks.py0000644000000000000000000007541012741214565020041 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.

import copy
import glob
import os
import platform
import re
import sys
import shutil
import textwrap

from curtin import config
from curtin import block
from curtin import futil
from curtin.log import LOG
from curtin import swap
from curtin import util
from curtin import net
from curtin.reporter import events

from . import populate_one_subcmd

CMD_ARGUMENTS = (
    ((('-t', '--target'),
      {'help': 'operate on target. default is env[TARGET_MOUNT_POINT]',
       'action': 'store', 'metavar': 'TARGET', 'default': None}),
     (('-c', '--config'),
      {'help': 'operate on config. 
default is env[CONFIG]', 'action': 'store', 'metavar': 'CONFIG', 'default': None}), ) ) KERNEL_MAPPING = { 'precise': { '3.2.0': '', '3.5.0': '-lts-quantal', '3.8.0': '-lts-raring', '3.11.0': '-lts-saucy', '3.13.0': '-lts-trusty', }, 'trusty': { '3.13.0': '', '3.16.0': '-lts-utopic', '3.19.0': '-lts-vivid', '4.2.0': '-lts-wily', '4.4.0': '-lts-xenial', }, 'xenial': { '4.3.0': '', # development release has 4.3, release will have 4.4 '4.4.0': '', } } def write_files(cfg, target): # this takes 'write_files' entry in config and writes files in the target # config entry example: # f1: # path: /file1 # content: !!binary | # f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAA # f2: {path: /file2, content: "foobar", permissions: '0666'} if 'write_files' not in cfg: return for (key, info) in cfg.get('write_files').items(): if not info.get('path'): LOG.warn("Warning, write_files[%s] had no 'path' entry", key) continue futil.write_finfo(path=target + os.path.sep + info['path'], content=info.get('content', ''), owner=info.get('owner', "-1:-1"), perms=info.get('permissions', info.get('perms', "0644"))) def apt_config(cfg, target): # cfg['apt_proxy'] proxy_cfg_path = os.path.sep.join( [target, '/etc/apt/apt.conf.d/90curtin-aptproxy']) if cfg.get('apt_proxy'): util.write_file( proxy_cfg_path, content='Acquire::HTTP::Proxy "%s";\n' % cfg['apt_proxy']) else: if os.path.isfile(proxy_cfg_path): os.unlink(proxy_cfg_path) # cfg['apt_mirrors'] # apt_mirrors: # ubuntu_archive: http://local.archive/ubuntu # ubuntu_security: http://local.archive/ubuntu sources_list = os.path.sep.join([target, '/etc/apt/sources.list']) if (isinstance(cfg.get('apt_mirrors'), dict) and os.path.isfile(sources_list)): repls = [ ('ubuntu_archive', r'http://\S*[.]*archive.ubuntu.com/\S*'), ('ubuntu_security', r'http://security.ubuntu.com/\S*'), ] content = None for name, regex in repls: mirror = cfg['apt_mirrors'].get(name) if not mirror: continue if content is None: with open(sources_list) as fp: content = fp.read() util.write_file(sources_list + ".dist", content) content = re.sub(regex, mirror + " ", content) if content is not None: util.write_file(sources_list, content) def disable_overlayroot(cfg, target): # cloud images come with overlayroot, but installed systems need disabled disable = cfg.get('disable_overlayroot', True) local_conf = os.path.sep.join([target, 'etc/overlayroot.local.conf']) if disable and os.path.exists(local_conf): LOG.debug("renaming %s to %s", local_conf, local_conf + ".old") shutil.move(local_conf, local_conf + ".old") def clean_cloud_init(target): flist = glob.glob( os.path.sep.join([target, "/etc/cloud/cloud.cfg.d/*dpkg*"])) LOG.debug("cleaning cloud-init config from: %s" % flist) for dpkg_cfg in flist: os.unlink(dpkg_cfg) def _maybe_remove_legacy_eth0(target, path="/etc/network/interfaces.d/eth0.cfg"): """Ubuntu cloud images previously included a 'eth0.cfg' that had hard coded content. That file would interfere with the rendered configuration if it was present. if the file does not exist do nothing. If the file exists: - with known content, remove it and warn - with unknown content, leave it and warn """ cfg = os.path.sep.join([target, path]) if not os.path.exists(cfg): LOG.warn('Failed to find legacy conf file %s', cfg) return bmsg = "Dynamic networking config may not apply." 
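    # For reference, the known legacy file content matched below is exactly
    # these two lines (comment lines in the file are ignored by the check):
    #
    #   auto eth0
    #   iface eth0 inet dhcp
    #
    # Anything else is treated as user-configured content and left in place.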
try: contents = util.load_file(cfg) known_contents = ["auto eth0", "iface eth0 inet dhcp"] lines = [f.strip() for f in contents.splitlines() if not f.startswith("#")] if lines == known_contents: util.del_file(cfg) msg = "removed %s with known contents" % cfg else: msg = (bmsg + " '%s' exists with user configured content." % cfg) except: msg = bmsg + " %s exists, but could not be read." % cfg LOG.exception(msg) return LOG.warn(msg) def setup_zipl(cfg, target): if platform.machine() != 's390x': return # assuming that below gives the "/" rootfs target_dev = block.get_devices_for_mp(target)[0] root_arg = None # not mapped rootfs, use UUID if 'mapper' in target_dev: root_arg = target_dev else: uuid = block.get_volume_uuid(target_dev) if uuid: root_arg = "UUID=%s" % uuid if not root_arg: msg = "Failed to identify root= for %s at %s." % (target, target_dev) LOG.warn(msg) raise ValueError(msg) zipl_conf = """ # This has been modified by the MAAS curtin installer [defaultboot] default=ubuntu [ubuntu] target = /boot image = /boot/vmlinuz ramdisk = /boot/initrd.img parameters = root=%s """ % root_arg zipl_cfg = { "write_files": { "zipl_cfg": { "path": "/etc/zipl.conf", "content": zipl_conf, } } } write_files(zipl_cfg, target) def run_zipl(cfg, target): if platform.machine() != 's390x': return with util.RunInChroot(target) as in_chroot: in_chroot(['zipl']) def install_kernel(cfg, target): kernel_cfg = cfg.get('kernel', {'package': None, 'fallback-package': "linux-generic", 'mapping': {}}) if kernel_cfg is not None: kernel_package = kernel_cfg.get('package') kernel_fallback = kernel_cfg.get('fallback-package') else: kernel_package = None kernel_fallback = None mapping = copy.deepcopy(KERNEL_MAPPING) config.merge_config(mapping, kernel_cfg.get('mapping', {})) with util.RunInChroot(target) as in_chroot: if kernel_package: util.install_packages([kernel_package], target=target) return # uname[2] is kernel name (ie: 3.16.0-7-generic) # version gets X.Y.Z, flavor gets anything after second '-'. kernel = os.uname()[2] codename, err = in_chroot(['lsb_release', '--codename', '--short'], capture=True) codename = codename.strip() version, abi, flavor = kernel.split('-', 2) try: map_suffix = mapping[codename][version] except KeyError: LOG.warn("Couldn't detect kernel package to install for %s." % kernel) if kernel_fallback is not None: util.install_packages([kernel_fallback], target=target) return package = "linux-{flavor}{map_suffix}".format( flavor=flavor, map_suffix=map_suffix) if util.has_pkg_available(package, target): if util.has_pkg_installed(package, target): LOG.debug("Kernel package '%s' already installed", package) else: LOG.debug("installing kernel package '%s'", package) util.install_packages([package], target=target) else: if kernel_fallback is not None: LOG.info("Kernel package '%s' not available. " "Installing fallback package '%s'.", package, kernel_fallback) util.install_packages([kernel_fallback], target=target) else: LOG.warn("Kernel package '%s' not available and no fallback." " System may not boot.", package) def apply_debconf_selections(cfg, target): # debconf_selections: # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar selsets = cfg.get('debconf_selections') if not selsets: LOG.debug("debconf_selections was not set in config") return # for each entry in selections, chroot and apply them. # keep a running total of packages we've seen. 
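    # A worked example (hypothetical selection lines) of the package-name
    # extraction done below; the regex drops everything from the first ':'
    # or whitespace onward:
    #
    #   re.sub(r"[:\s].*", "",
    #          "cloud-init cloud-init/datasources multiselect MAAS")
    #   # -> 'cloud-init'
    #   re.sub(r"[:\s].*", "", "pkg:arch pkg/value string bar")
    #   # -> 'pkg'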
pkgs_cfgd = set() for key, content in selsets.items(): LOG.debug("setting for %s, %s" % (key, content)) util.subp(['chroot', target, 'debconf-set-selections'], data=content.encode()) for line in content.splitlines(): if line.startswith("#"): continue pkg = re.sub(r"[:\s].*", "", line) pkgs_cfgd.add(pkg) pkgs_installed = get_installed_packages(target) LOG.debug("pkgs_cfgd: %s" % pkgs_cfgd) LOG.debug("pkgs_installed: %s" % pkgs_installed) need_reconfig = pkgs_cfgd.intersection(pkgs_installed) if len(need_reconfig) == 0: LOG.debug("no need for reconfig") return # For any packages that are already installed, but have preseed data # we populate the debconf database, but the filesystem configuration # would be preferred on a subsequent dpkg-reconfigure. # so, what we have to do is "know" information about certain packages # to unconfigure them. unhandled = [] to_config = [] for pkg in need_reconfig: if pkg in CONFIG_CLEANERS: LOG.debug("unconfiguring %s" % pkg) CONFIG_CLEANERS[pkg](target) to_config.append(pkg) else: unhandled.append(pkg) if len(unhandled): LOG.warn("The following packages were installed and preseeded, " "but cannot be unconfigured: %s", unhandled) util.subp(['chroot', target, 'dpkg-reconfigure', '--frontend=noninteractive'] + list(to_config), data=None) def get_installed_packages(target=None): cmd = [] if target is not None: cmd = ['chroot', target] cmd.extend(['dpkg-query', '--list']) (out, _err) = util.subp(cmd, capture=True) if isinstance(out, bytes): out = out.decode() pkgs_inst = set() for line in out.splitlines(): try: (state, pkg, other) = line.split(None, 2) except ValueError: continue if state.startswith("hi") or state.startswith("ii"): pkgs_inst.add(re.sub(":.*", "", pkg)) return pkgs_inst def setup_grub(cfg, target): # target is the path to the mounted filesystem # FIXME: these methods need moving to curtin.block # and using them from there rather than commands.block_meta from curtin.commands.block_meta import (extract_storage_ordered_dict, get_path_to_storage_volume) grubcfg = cfg.get('grub', {}) # copy legacy top level name if 'grub_install_devices' in cfg and 'install_devices' not in grubcfg: grubcfg['install_devices'] = cfg['grub_install_devices'] LOG.debug("setup grub on target %s", target) # if there is storage config, look for devices tagged with 'grub_device' storage_cfg_odict = None try: storage_cfg_odict = extract_storage_ordered_dict(cfg) except ValueError as e: pass if storage_cfg_odict: storage_grub_devices = [] for item_id, item in storage_cfg_odict.items(): if not item.get('grub_device'): continue LOG.debug("checking: %s", item) storage_grub_devices.append( get_path_to_storage_volume(item_id, storage_cfg_odict)) if len(storage_grub_devices) > 0: grubcfg['install_devices'] = storage_grub_devices LOG.debug("install_devices: %s", grubcfg.get('install_devices')) if 'install_devices' in grubcfg: instdevs = grubcfg.get('install_devices') if isinstance(instdevs, str): instdevs = [instdevs] if instdevs is None: LOG.debug("grub installation disabled by config") else: # If there were no install_devices found then we try to do the right # thing. That right thing is basically installing on all block # devices that are mounted. On powerpc, though it means finding PrEP # partitions. 
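        # For reference, the two config shapes consumed above look roughly
        # like this (device names are illustrative only):
        #
        #   grub:
        #     install_devices: [/dev/sda]
        #
        # or, equivalently, a storage config item flagged as a grub device:
        #
        #   storage:
        #     config:
        #       - {id: sda, type: disk, grub_device: true, ...}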
devs = block.get_devices_for_mp(target) blockdevs = set() for maybepart in devs: try: (blockdev, part) = block.get_blockdev_for_partition(maybepart) blockdevs.add(blockdev) except ValueError as e: # if there is no syspath for this device such as a lvm # or raid device, then a ValueError is raised here. LOG.debug("failed to find block device for %s", maybepart) if platform.machine().startswith("ppc64"): # assume we want partitions that are 4100 (PReP). The snippet here # just prints the partition number partitions of that type. shnip = textwrap.dedent(""" export LANG=C; for d in "$@"; do sgdisk "$d" --print | awk "\$6 == prep { print d \$1 }" "d=$d" prep=4100 done """) try: out, err = util.subp( ['sh', '-c', shnip, '--'] + list(blockdevs), capture=True) instdevs = str(out).splitlines() if not instdevs: LOG.warn("No power grub target partitions found!") instdevs = None except util.ProcessExecutionError as e: LOG.warn("Failed to find power grub partitions: %s", e) instdevs = None else: instdevs = list(blockdevs) # UEFI requires grub-efi-{arch}. If a signed version of that package # exists then it will be installed. if util.is_uefi_bootable(): arch = util.get_architecture() pkgs = ['grub-efi-%s' % arch] # Architecture might support a signed UEFI loader uefi_pkg_signed = 'grub-efi-%s-signed' % arch if util.has_pkg_available(uefi_pkg_signed): pkgs.append(uefi_pkg_signed) # AMD64 has shim-signed for SecureBoot support if arch == "amd64": pkgs.append("shim-signed") # Install the UEFI packages needed for the architecture util.install_packages(pkgs, target=target) env = os.environ.copy() replace_default = grubcfg.get('replace_linux_default', True) if str(replace_default).lower() in ("0", "false"): env['REPLACE_GRUB_LINUX_DEFAULT'] = "0" else: env['REPLACE_GRUB_LINUX_DEFAULT'] = "1" if instdevs: instdevs = [block.get_dev_name_entry(i)[1] for i in instdevs] else: instdevs = ["none"] LOG.debug("installing grub to %s [replace_default=%s]", instdevs, replace_default) with util.ChrootableTarget(target): args = ['install-grub'] if util.is_uefi_bootable(): args.append("--uefi") if grubcfg.get('update_nvram', False): LOG.debug("GRUB UEFI enabling NVRAM updates") args.append("--update-nvram") else: LOG.debug("NOT enabling UEFI nvram updates") LOG.debug("Target system may not boot") args.append(target) util.subp(args + instdevs, env=env) def update_initramfs(target, all_kernels=False): cmd = ['update-initramfs', '-u'] if all_kernels: cmd.extend(['-k', 'all']) with util.RunInChroot(target) as in_chroot: in_chroot(cmd) def copy_fstab(fstab, target): if not fstab: LOG.warn("fstab variable not in state, not copying fstab") return shutil.copy(fstab, os.path.sep.join([target, 'etc/fstab'])) def copy_crypttab(crypttab, target): if not crypttab: LOG.warn("crypttab config must be specified, not copying") return shutil.copy(crypttab, os.path.sep.join([target, 'etc/crypttab'])) def copy_mdadm_conf(mdadm_conf, target): if not mdadm_conf: LOG.warn("mdadm config must be specified, not copying") return LOG.info("copying mdadm.conf into target") shutil.copy(mdadm_conf, os.path.sep.join([target, 'etc/mdadm/mdadm.conf'])) def apply_networking(target, state): netstate = state.get('network_state') netconf = state.get('network_config') interfaces = state.get('interfaces') def is_valid_src(infile): with open(infile, 'r') as fp: content = fp.read() if len(content.split('\n')) > 1: return True return False ns = None if is_valid_src(netstate): LOG.debug("applying network_state") ns = net.network_state.from_state_file(netstate) elif 
is_valid_src(netconf): LOG.debug("applying network_config") ns = net.parse_net_config(netconf) if ns is not None: net.render_network_state(target=target, network_state=ns) else: LOG.debug("copying interfaces") copy_interfaces(interfaces, target) _maybe_remove_legacy_eth0(target) def copy_interfaces(interfaces, target): if not interfaces: LOG.warn("no interfaces file to copy!") return eni = os.path.sep.join([target, 'etc/network/interfaces']) shutil.copy(interfaces, eni) def copy_dname_rules(rules_d, target): if not rules_d: LOG.warn("no udev rules directory to copy") return for rule in os.listdir(rules_d): target_file = os.path.join( target, "etc/udev/rules.d", "%s.rules" % rule) shutil.copy(os.path.join(rules_d, rule), target_file) def restore_dist_interfaces(cfg, target): # cloud images have a link of /etc/network/interfaces into /run eni = os.path.sep.join([target, 'etc/network/interfaces']) if not cfg.get('restore_dist_interfaces', True): return rp = os.path.realpath(eni) if (os.path.exists(eni + ".dist") and (rp.startswith("/run") or rp.startswith(target + "/run"))): LOG.debug("restoring dist interfaces, existing link pointed to /run") shutil.move(eni, eni + ".old") shutil.move(eni + ".dist", eni) def add_swap(cfg, target, fstab): # add swap file per cfg to filesystem root at target. update fstab. # # swap: # filename: 'swap.img', # size: None # (or 1G) # maxsize: 2G if 'swap' in cfg and not cfg.get('swap'): LOG.debug("disabling 'add_swap' due to config") return swapcfg = cfg.get('swap', {}) fname = swapcfg.get('filename', None) size = swapcfg.get('size', None) maxsize = swapcfg.get('maxsize', None) if size: size = util.human2bytes(str(size)) if maxsize: maxsize = util.human2bytes(str(maxsize)) swap.setup_swapfile(target=target, fstab=fstab, swapfile=fname, size=size, maxsize=maxsize) def detect_and_handle_multipath(cfg, target): DEFAULT_MULTIPATH_PACKAGES = ['multipath-tools-boot'] mpcfg = cfg.get('multipath', {}) mpmode = mpcfg.get('mode', 'auto') mppkgs = mpcfg.get('packages', DEFAULT_MULTIPATH_PACKAGES) mpbindings = mpcfg.get('overwrite_bindings', True) if isinstance(mppkgs, str): mppkgs = [mppkgs] if mpmode == 'disabled': return if mpmode == 'auto' and not block.detect_multipath(target): return LOG.info("Detected multipath devices. Installing support via %s", mppkgs) util.install_packages(mppkgs, target=target) replace_spaces = True try: # check in-target version pkg_ver = util.get_package_version('multipath-tools', target=target) LOG.debug("get_package_version:\n%s", pkg_ver) LOG.debug("multipath version is %s (major=%s minor=%s micro=%s)", pkg_ver['semantic_version'], pkg_ver['major'], pkg_ver['minor'], pkg_ver['micro']) # multipath-tools versions < 0.5.0 do _NOT_ want whitespace replaced # i.e. 0.4.X in Trusty. if pkg_ver['semantic_version'] < 500: replace_spaces = False except Exception as e: LOG.warn("failed reading multipath-tools version, " "assuming it wants no spaces in wwids: %s", e) multipath_cfg_path = os.path.sep.join([target, '/etc/multipath.conf']) multipath_bind_path = os.path.sep.join([target, '/etc/multipath/bindings']) # We don't want to overwrite multipath.conf file provided by the image. if not os.path.isfile(multipath_cfg_path): # Without user_friendly_names option enabled system fails to boot # if any of the disks has spaces in its name. Package multipath-tools # has bug opened for this issue (LP: 1432062) but it was not fixed yet. 
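        # The curtin config section steering this function looks roughly
        # like the following (the values shown are the defaults applied
        # above):
        #
        #   multipath:
        #     mode: auto                # or 'disabled'
        #     overwrite_bindings: true
        #     packages: [multipath-tools-boot]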
multipath_cfg_content = '\n'.join( ['# This file was created by curtin while installing the system.', 'defaults {', ' user_friendly_names yes', '}', '']) util.write_file(multipath_cfg_path, content=multipath_cfg_content) if mpbindings or not os.path.isfile(multipath_bind_path): # we do assume that get_devices_for_mp()[0] is / target_dev = block.get_devices_for_mp(target)[0] wwid = block.get_scsi_wwid(target_dev, replace_whitespace=replace_spaces) blockdev, partno = block.get_blockdev_for_partition(target_dev) mpname = "mpath0" grub_dev = "/dev/mapper/" + mpname if partno is not None: grub_dev += "-part%s" % partno LOG.debug("configuring multipath install for root=%s wwid=%s", grub_dev, wwid) multipath_bind_content = '\n'.join( ['# This file was created by curtin while installing the system.', "%s %s" % (mpname, wwid), '# End of content generated by curtin.', '# Everything below is maintained by multipath subsystem.', '']) util.write_file(multipath_bind_path, content=multipath_bind_content) grub_cfg = os.path.sep.join( [target, '/etc/default/grub.d/50-curtin-multipath.cfg']) msg = '\n'.join([ '# Written by curtin for multipath device wwid "%s"' % wwid, 'GRUB_DEVICE=%s' % grub_dev, 'GRUB_DISABLE_LINUX_UUID=true', '']) util.write_file(grub_cfg, content=msg) # FIXME: this assumes grub. need more generic way to update root= util.ensure_dir(os.path.sep.join([target, os.path.dirname(grub_dev)])) with util.RunInChroot(target) as in_chroot: in_chroot(['update-grub']) else: LOG.warn("Not sure how this will boot") # Initrams needs to be updated to include /etc/multipath.cfg # and /etc/multipath/bindings files. update_initramfs(target, all_kernels=True) def install_missing_packages(cfg, target): ''' describe which operation types will require specific packages 'custom_config_key': { 'pkg1': ['op_name_1', 'op_name_2', ...] } ''' custom_configs = { 'storage': { 'lvm2': ['lvm_volgroup', 'lvm_partition'], 'mdadm': ['raid'], 'bcache-tools': ['bcache']}, 'network': { 'vlan': ['vlan'], 'ifenslave': ['bond'], 'bridge-utils': ['bridge']}, } format_configs = { 'xfsprogs': ['xfs'], 'e2fsprogs': ['ext2', 'ext3', 'ext4'], 'btrfs-tools': ['btrfs'], } needed_packages = [] installed_packages = get_installed_packages(target) for cust_cfg, pkg_reqs in custom_configs.items(): if cust_cfg not in cfg: continue all_types = set( operation['type'] for operation in cfg[cust_cfg]['config'] ) for pkg, types in pkg_reqs.items(): if set(types).intersection(all_types) and \ pkg not in installed_packages: needed_packages.append(pkg) format_types = set( [operation['fstype'] for operation in cfg[cust_cfg]['config'] if operation['type'] == 'format']) for pkg, fstypes in format_configs.items(): if set(fstypes).intersection(format_types) and \ pkg not in installed_packages: needed_packages.append(pkg) if needed_packages: state = util.load_command_environment() with events.ReportEventStack( name=state.get('report_stack_prefix'), reporting_enabled=True, level="INFO", description="Installing packages on target system: " + str(needed_packages)): util.install_packages(needed_packages, target=target) def system_upgrade(cfg, target): """run system-upgrade (apt-get dist-upgrade) or other in target. config: system_upgrade: enabled: False """ mycfg = {'system_upgrade': {'enabled': False}} config.merge_config(mycfg, cfg) mycfg = mycfg.get('system_upgrade') if not isinstance(mycfg, dict): LOG.debug("system_upgrade disabled by config. 
entry not a dict.") return if not config.value_as_boolean(mycfg.get('enabled', True)): LOG.debug("system_upgrade disabled by config.") return util.system_upgrade(target=target) def curthooks(args): state = util.load_command_environment() if args.target is not None: target = args.target else: target = state['target'] if target is None: sys.stderr.write("Unable to find target. " "Use --target or set TARGET_MOUNT_POINT\n") sys.exit(2) # if network-config hook exists in target, # we do not run the builtin if util.run_hook_if_exists(target, 'curtin-hooks'): sys.exit(0) cfg = config.load_command_config(args, state) stack_prefix = state.get('report_stack_prefix', '') with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="writing config files and configuring apt"): write_files(cfg, target) apt_config(cfg, target) disable_overlayroot(cfg, target) # packages may be needed prior to installing kernel install_missing_packages(cfg, target) # If a mdadm.conf file was created by block_meta than it needs to be copied # onto the target system mdadm_location = os.path.join(os.path.split(state['fstab'])[0], "mdadm.conf") if os.path.exists(mdadm_location): copy_mdadm_conf(mdadm_location, target) # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052 # reconfigure mdadm util.subp(['chroot', target, 'dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'], data=None) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="installing kernel"): setup_zipl(cfg, target) install_kernel(cfg, target) run_zipl(cfg, target) apply_debconf_selections(cfg, target) restore_dist_interfaces(cfg, target) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="setting up swap"): add_swap(cfg, target, state.get('fstab')) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="apply networking"): apply_networking(target, state) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="writing etc/fstab"): copy_fstab(state.get('fstab'), target) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="configuring multipath"): detect_and_handle_multipath(cfg, target) with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="updating packages on target system"): system_upgrade(cfg, target) # If a crypttab file was created by block_meta than it needs to be copied # onto the target system, and update_initramfs() needs to be run, so that # the cryptsetup hooks are properly configured on the installed system and # it will be able to open encrypted volumes at boot. crypttab_location = os.path.join(os.path.split(state['fstab'])[0], "crypttab") if os.path.exists(crypttab_location): copy_crypttab(crypttab_location, target) update_initramfs(target) # If udev dname rules were created, copy them to target udev_rules_d = os.path.join(state['scratch'], "rules.d") if os.path.isdir(udev_rules_d): copy_dname_rules(udev_rules_d, target) # As a rule, ARMv7 systems don't use grub. This may change some # day, but for now, assume no. They do require the initramfs # to be updated, and this also triggers boot loader setup via # flash-kernel. 
machine = platform.machine() if (machine.startswith('armv7') or machine.startswith('s390x') or machine.startswith('aarch64') and not util.is_uefi_bootable()): update_initramfs(target) else: setup_grub(cfg, target) sys.exit(0) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, curthooks) CONFIG_CLEANERS = { 'cloud-init': clean_cloud_init, } # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/extract.py0000644000000000000000000001053512741214565017467 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import sys import curtin.config from curtin.log import LOG import curtin.util from curtin.reporter import events from . import populate_one_subcmd CMD_ARGUMENTS = ( ((('-t', '--target'), {'help': ('target directory to extract to (root) ' '[default TARGET_MOUNT_POINT]'), 'action': 'store', 'default': os.environ.get('TARGET_MOUNT_POINT')}), (('sources',), {'help': 'the sources to install [default read from CONFIG]', 'nargs': '*'}), ) ) def tar_xattr_opts(cmd=None): # if tar cmd supports xattrs, return the required flags to extract them. if cmd is None: cmd = ['tar'] if isinstance(cmd, str): cmd = [cmd] (out, _err) = curtin.util.subp(cmd + ['--help'], capture=True) if "xattr" in out: return ['--xattrs', '--xattrs-include=*'] return [] def extract_root_tgz_url(source, target): # extract a -root.tar.gz url in the 'target' directory # # Uses smtar to avoid specifying the compression type curtin.util.subp(args=['sh', '-cf', ('wget "$1" --progress=dot:mega -O - |' 'smtar -C "$2" ' + ' '.join(tar_xattr_opts()) + ' ' + '-Sxpf - --numeric-owner'), '--', source, target]) def extract_root_tgz_file(source, target): curtin.util.subp(args=['tar', '-C', target] + tar_xattr_opts() + ['-Sxpzf', source, '--numeric-owner']) def copy_to_target(source, target): if source.startswith("cp://"): source = source[5:] source = os.path.abspath(source) curtin.util.subp(args=['sh', '-c', ('mkdir -p "$2" && cd "$2" && ' 'rsync -aXHAS --one-file-system "$1/" .'), '--', source, target]) def extract(args): if not args.target: raise ValueError("Target must be defined or set in environment") state = curtin.util.load_command_environment() cfg = curtin.config.load_command_config(args, state) sources = args.sources target = args.target if not sources: if not cfg.get('sources'): raise ValueError("'sources' must be on cmdline or in config") sources = cfg.get('sources') if isinstance(sources, dict): sources = [sources[k] for k in sorted(sources.keys())] LOG.debug("Installing sources: %s to target at %s" % (sources, target)) stack_prefix = state.get('report_stack_prefix', '') for source in sources: with events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="INFO", description="acquiring and extracting image from %s" % source['uri']): if source['type'].startswith('dd-'): continue if source['uri'].startswith("cp://"): copy_to_target(source['uri'], 
target) elif os.path.isfile(source['uri']): extract_root_tgz_file(source['uri'], target) elif source['uri'].startswith("file://"): extract_root_tgz_file( source['uri'][len("file://"):], target) elif (source['uri'].startswith("http://") or source['uri'].startswith("https://")): extract_root_tgz_url(source['uri'], target) else: raise TypeError( "do not know how to extract '%s'" % source['uri']) sys.exit(0) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, extract) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/hook.py0000644000000000000000000000266312741214565016760 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import sys import curtin.config from curtin.log import LOG import curtin.util from . import populate_one_subcmd CMD_ARGUMENTS = ( ((('target',), {'help': 'finalize the provided directory [default TARGET_MOUNT_POINT]', 'action': 'store', 'default': os.environ.get('TARGET_MOUNT_POINT'), 'nargs': '?'}), ) ) def hook(args): if not args.target: raise ValueError("Target must be provided or set in environment") LOG.debug("Finalizing %s" % args.target) curtin.util.run_hook_if_exists(args.target, "finalize") sys.exit(0) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, hook) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/in_target.py0000644000000000000000000000525312741214565017772 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import pty import sys from curtin import util from . import populate_one_subcmd CMD_ARGUMENTS = ( ((('-a', '--allow-daemons'), {'help': 'do not disable daemons via invoke-rc.d', 'action': 'store_true', 'default': False, }), (('-i', '--interactive'), {'help': 'use command invoked interactively', 'action': 'store_true', 'default': False}), (('--capture',), {'help': 'capture/swallow output of command', 'action': 'store_true', 'default': False}), (('-t', '--target'), {'help': 'chroot to target. 
default is env[TARGET_MOUNT_POINT]',
       'action': 'store', 'metavar': 'TARGET',
       'default': os.environ.get('TARGET_MOUNT_POINT')}),
     ('command_args',
      {'help': 'run a command chrooted in the target', 'nargs': '*'}),
     )
)


def run_command(cmd, interactive, capture=False):
    exit = 0
    if interactive:
        pty.spawn(cmd)
    else:
        try:
            util.subp(cmd, capture=capture)
        except util.ProcessExecutionError as e:
            exit = e.exit_code
    return exit


def in_target_main(args):
    if args.target is not None:
        target = args.target
    else:
        state = util.load_command_environment()
        target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target. "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    if os.path.abspath(target) == "/":
        cmd = args.command_args
    else:
        cmd = ['chroot', target] + args.command_args

    if target == "/" and args.allow_daemons:
        ret = run_command(cmd, args.interactive, capture=args.capture)
    else:
        with util.ChrootableTarget(target, allow_daemons=args.allow_daemons):
            ret = run_command(cmd, args.interactive)

    sys.exit(ret)


def POPULATE_SUBCMD(parser):
    populate_one_subcmd(parser, CMD_ARGUMENTS, in_target_main)

# vi: ts=4 expandtab syntax=python
curtin-0.1.0~bzr399/curtin/commands/install.py0000644000000000000000000003561112741214565017465 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.

import argparse
import json
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile

from curtin import block
from curtin import config
from curtin import util
from curtin.log import LOG
from curtin.reporter.legacy import load_reporter
from curtin.reporter import events

from . import populate_one_subcmd

INSTALL_LOG = "/var/log/curtin/install.log"

INSTALL_PASS_MSG = "curtin: Installation finished."
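# Each stage listed in cfg['stages'] draws its commands from a matching
# '<stage>_commands' dict (see CONFIG_BUILTIN below) and runs them in sorted
# key order via Stage.run(). A hypothetical user config extending the
# builtin late stage:
#
#   late_commands:
#     90_example: ["curtin", "in-target", "--", "touch", "/root/installed"]
#
# List-valued commands are exec'd directly; string values run via a shell.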
STAGE_DESCRIPTIONS = { 'early': 'preparing for installation', 'partitioning': 'configuring storage', 'network': 'configuring network', 'extract': 'writing install sources to disk', 'curthooks': 'configuring installed system', 'hook': 'finalizing installation', 'late': 'executing late commands', } CONFIG_BUILTIN = { 'sources': {}, 'stages': ['early', 'partitioning', 'network', 'extract', 'curthooks', 'hook', 'late'], 'extract_commands': {'builtin': ['curtin', 'extract']}, 'hook_commands': {'builtin': ['curtin', 'hook']}, 'partitioning_commands': { 'builtin': ['curtin', 'block-meta', 'simple']}, 'curthooks_commands': {'builtin': ['curtin', 'curthooks']}, 'late_commands': {'builtin': []}, 'network_commands': {'builtin': ['curtin', 'net-meta', 'auto']}, 'apply_net_commands': {'builtin': []}, 'install': {'log_file': INSTALL_LOG}, } def clear_install_log(logfile): """Clear the installation log, so no previous installation is present.""" util.ensure_dir(os.path.dirname(logfile)) try: open(logfile, 'w').close() except: pass def writeline(fname, output): """Write a line to a file.""" if not output.endswith('\n'): output += '\n' try: with open(fname, 'a') as fp: fp.write(output) except IOError: pass class WorkingDir(object): def __init__(self, config): top_d = tempfile.mkdtemp() state_d = os.path.join(top_d, 'state') target_d = os.path.join(top_d, 'target') scratch_d = os.path.join(top_d, 'scratch') for p in (state_d, target_d, scratch_d): os.mkdir(p) netconf_f = os.path.join(state_d, 'network_config') netstate_f = os.path.join(state_d, 'network_state') interfaces_f = os.path.join(state_d, 'interfaces') config_f = os.path.join(state_d, 'config') fstab_f = os.path.join(state_d, 'fstab') with open(config_f, "w") as fp: json.dump(config, fp) # just touch these files to make sure they exist for f in (interfaces_f, config_f, fstab_f, netconf_f, netstate_f): with open(f, "ab") as fp: pass self.scratch = scratch_d self.target = target_d self.top = top_d self.interfaces = interfaces_f self.netconf = netconf_f self.netstate = netstate_f self.fstab = fstab_f self.config = config self.config_file = config_f def env(self): return ({'WORKING_DIR': self.scratch, 'OUTPUT_FSTAB': self.fstab, 'OUTPUT_INTERFACES': self.interfaces, 'OUTPUT_NETWORK_CONFIG': self.netconf, 'OUTPUT_NETWORK_STATE': self.netstate, 'TARGET_MOUNT_POINT': self.target, 'CONFIG': self.config_file}) class Stage(object): def __init__(self, name, commands, env, reportstack=None, logfile=None): self.name = name self.commands = commands self.env = env if logfile is None: logfile = INSTALL_LOG self.install_log = self._open_install_log(logfile) if hasattr(sys.stdout, 'buffer'): self.write_stdout = self._write_stdout3 else: self.write_stdout = self._write_stdout2 if reportstack is None: reportstack = events.ReportEventStack( name="stage-%s" % name, description="basic stage %s" % name, reporting_enabled=False) self.reportstack = reportstack def _open_install_log(self, logfile): """Open the install log.""" if not logfile: return None try: return open(logfile, 'ab') except IOError: return None def _write_stdout3(self, data): sys.stdout.buffer.write(data) # pylint: disable=no-member sys.stdout.flush() def _write_stdout2(self, data): sys.stdout.write(data) sys.stdout.flush() def write(self, data): """Write data to stdout and to the install_log.""" self.write_stdout(data) if self.install_log is not None: self.install_log.write(data) self.install_log.flush() def run(self): for cmdname in sorted(self.commands.keys()): cmd = self.commands[cmdname] if not cmd: 
continue cur_res = events.ReportEventStack( name=cmdname, description="running '%s'" % ' '.join(cmd), parent=self.reportstack, level="DEBUG") env = self.env.copy() env['CURTIN_REPORTSTACK'] = cur_res.fullname shell = not isinstance(cmd, list) with util.LogTimer(LOG.debug, cmdname): with cur_res: try: sp = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, shell=shell) except OSError as e: LOG.warn("%s command failed", cmdname) raise util.ProcessExecutionError(cmd=cmd, reason=e) output = b"" while True: data = sp.stdout.read(1) if not data and sp.poll() is not None: break self.write(data) output += data rc = sp.returncode if rc != 0: LOG.warn("%s command failed", cmdname) raise util.ProcessExecutionError( stdout=output, stderr="", exit_code=rc, cmd=cmd) def apply_power_state(pstate): """ power_state: delay: 5 mode: poweroff message: Bye Bye """ cmd = load_power_state(pstate) if not cmd: return LOG.info("powering off with %s", cmd) fid = os.fork() if fid == 0: try: util.subp(cmd) os._exit(0) except: LOG.warn("%s returned non-zero" % cmd) os._exit(1) return def load_power_state(pstate): """Returns a command to reboot the system if power_state should.""" if pstate is None: return None if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'} mode = pstate.get("mode") if mode not in opt_map: raise TypeError("power_state[mode] required, must be one of: %s." % ','.join(opt_map.keys())) delay = pstate.get("delay", "5") if delay == "now": delay = "0" elif re.match(r"\+[0-9]+", str(delay)): delay = "%sm" % delay[1:] else: delay = str(delay) args = ["shutdown", opt_map[mode], "now"] if pstate.get("message"): args.append(pstate.get("message")) shcmd = ('sleep "$1" && shift; ' '[ -f /run/block-curtin-poweroff ] && exit 0; ' 'exec "$@"') return (['sh', '-c', shcmd, 'curtin-poweroff', delay] + args) def apply_kexec(kexec, target): """ load kexec kernel from target dir, similar to /etc/init.d/kexec-load kexec: mode: on """ grubcfg = "boot/grub/grub.cfg" target_grubcfg = os.path.join(target, grubcfg) if kexec is None or kexec.get("mode") != "on": return False if not isinstance(kexec, dict): raise TypeError("kexec is not a dict.") if not util.which('kexec'): util.install_packages('kexec-tools') if not os.path.isfile(target_grubcfg): raise ValueError("%s does not exist in target" % grubcfg) with open(target_grubcfg, "r") as fp: default = 0 menu_lines = [] # get the default grub boot entry number and menu entry line numbers for line_num, line in enumerate(fp, 1): if re.search(r"\bset default=\"[0-9]+\"\b", " %s " % line): default = int(re.sub(r"[^0-9]", '', line)) if re.search(r"\bmenuentry\b", " %s " % line): menu_lines.append(line_num) if not menu_lines: LOG.error("grub config file does not have a menuentry\n") return False # get the begin and end line numbers for default menuentry section, # using end of file if it's the last menuentry section begin = menu_lines[default] if begin != menu_lines[-1]: end = menu_lines[default + 1] - 1 else: end = line_num fp.seek(0) lines = fp.readlines() kernel = append = initrd = "" for i in range(begin, end): if 'linux' in lines[i].split(): split_line = shlex.split(lines[i]) kernel = os.path.join(target, split_line[1]) append = "--append=" + ' '.join(split_line[2:]) if 'initrd' in lines[i].split(): split_line = shlex.split(lines[i]) initrd = "--initrd=" + os.path.join(target, split_line[1]) if not kernel: LOG.error("grub config file does not have a kernel\n") 
        return False

    LOG.debug("kexec -l %s %s %s" % (kernel, append, initrd))
    util.subp(args=['kexec', '-l', kernel, append, initrd])
    return True


def cmd_install(args):
    cfg = CONFIG_BUILTIN.copy()
    config.merge_config(cfg, args.config)

    for source in args.source:
        src = util.sanitize_source(source)
        cfg['sources']["%02d_cmdline" % len(cfg['sources'])] = src

    LOG.debug("merged config: %s" % cfg)
    if not len(cfg.get('sources', [])):
        raise util.BadUsage("no sources provided to install")

    for i in cfg['sources']:
        # we default to tgz for old style sources config
        cfg['sources'][i] = util.sanitize_source(cfg['sources'][i])

    if cfg.get('http_proxy'):
        os.environ['http_proxy'] = cfg['http_proxy']

    instcfg = cfg.get('install', {})
    logfile = instcfg.get('log_file')
    post_files = instcfg.get('post_files', [logfile])

    # Generate curtin configuration dump and add to write_files unless
    # installation config disables dump
    yaml_dump_file = instcfg.get('save_install_config',
                                 '/root/curtin-install-cfg.yaml')
    if yaml_dump_file:
        write_files = cfg.get('write_files', {})
        write_files['curtin_install_cfg'] = {
            'path': yaml_dump_file,
            'permissions': '0400',
            'owner': 'root:root',
            'content': config.dump_config(cfg)
        }
        cfg['write_files'] = write_files

    # Load reporter
    clear_install_log(logfile)
    post_files = cfg.get('post_files', [logfile])
    legacy_reporter = load_reporter(cfg)
    legacy_reporter.files = post_files
    args.reportstack.post_files = post_files

    try:
        dd_images = util.get_dd_images(cfg.get('sources', {}))
        if len(dd_images) > 1:
            raise ValueError("You may not use more than one disk image")

        workingd = WorkingDir(cfg)
        LOG.debug(workingd.env())

        env = os.environ.copy()
        env.update(workingd.env())

        for name in cfg.get('stages'):
            desc = STAGE_DESCRIPTIONS.get(name, "stage %s" % name)
            reportstack = events.ReportEventStack(
                "stage-%s" % name, description=desc,
                parent=args.reportstack)
            env['CURTIN_REPORTSTACK'] = reportstack.fullname

            with reportstack:
                commands_name = '%s_commands' % name
                with util.LogTimer(LOG.debug, 'stage_%s' % name):
                    stage = Stage(name, cfg.get(commands_name, {}), env,
                                  reportstack=reportstack, logfile=logfile)
                    stage.run()

        if apply_kexec(cfg.get('kexec'), workingd.target):
            cfg['power_state'] = {'mode': 'reboot', 'delay': 'now',
                                  'message': "'rebooting with kexec'"}

        writeline(logfile, INSTALL_PASS_MSG)
        out = sys.stdout
        msg = "%s\n" % INSTALL_PASS_MSG
        if hasattr(out, 'buffer'):
            out = out.buffer
            msg = msg.encode()
        out.write(msg)
        out.flush()
        legacy_reporter.report_success()
    except Exception as e:
        exp_msg = "Installation failed with exception: %s" % e
        writeline(logfile, exp_msg)
        LOG.error(exp_msg)
        legacy_reporter.report_failure(exp_msg)
        raise e
    finally:
        for d in ('sys', 'dev', 'proc'):
            util.do_umount(os.path.join(workingd.target, d))
        mounted = block.get_mountpoints()
        mounted.sort(key=lambda x: -1 * x.count("/"))
        for d in filter(lambda x: workingd.target in x, mounted):
            util.do_umount(d)
        util.do_umount(workingd.target)
        shutil.rmtree(workingd.top)

    apply_power_state(cfg.get('power_state'))

    sys.exit(0)


# we explicitly accept config on install for backwards compatibility
CMD_ARGUMENTS = (
    ((('-c', '--config'),
      {'help': 'read configuration from cfg', 'action': util.MergedCmdAppend,
       'metavar': 'FILE', 'type': argparse.FileType("rb"),
       'dest': 'cfgopts', 'default': []}),
     ('--set', {'action': util.MergedCmdAppend,
                'help': ('define a config variable. key can be a "/" '
                         'delimited path ("early_commands/cmd1=a"). 
if ' 'key starts with "json:" then val is loaded as ' 'json (json:stages="[\'early\']")'), 'metavar': 'key=val', 'dest': 'cfgopts'}), ('source', {'help': 'what to install', 'nargs': '*'}), ) ) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, cmd_install) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/main.py0000644000000000000000000001655712741214565016753 0ustar 00000000000000#!/usr/bin/python # Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import argparse import os import sys import traceback from .. import log from .. import util from ..deps import install_deps SUB_COMMAND_MODULES = [ 'apply_net', 'block-meta', 'block-wipe', 'curthooks', 'extract', 'hook', 'in-target', 'install', 'mkfs', 'net-meta', 'pack', 'swap', 'system-install', 'system-upgrade'] def add_subcmd(subparser, subcmd): modname = subcmd.replace("-", "_") subcmd_full = "curtin.commands.%s" % modname __import__(subcmd_full) try: popfunc = getattr(sys.modules[subcmd_full], 'POPULATE_SUBCMD') except AttributeError: raise AttributeError("No 'POPULATE_SUBCMD' in %s" % subcmd_full) popfunc(subparser.add_parser(subcmd)) class NoHelpParser(argparse.ArgumentParser): # ArgumentParser with forced 'add_help=False' def __init__(self, *args, **kwargs): kwargs.update({'add_help': False}) super(NoHelpParser, self).__init__(*args, **kwargs) def error(self, message): # without overriding this, argparse exits with bad usage raise ValueError("failed parsing arguments: %s" % message) def get_main_parser(stacktrace=False, verbosity=0, parser_class=argparse.ArgumentParser): parser = parser_class(prog='curtin') parser.add_argument('--showtrace', action='store_true', default=stacktrace) parser.add_argument('-v', '--verbose', action='count', default=verbosity, dest='verbosity') parser.add_argument('--log-file', default=sys.stderr, type=argparse.FileType('w')) parser.add_argument('-c', '--config', action=util.MergedCmdAppend, help='read configuration from cfg', metavar='FILE', type=argparse.FileType("rb"), dest='main_cfgopts', default=[]) parser.add_argument('--install-deps', action='store_true', help='install dependencies as necessary', default=False) parser.add_argument('--set', action=util.MergedCmdAppend, help=('define a config variable. key can be a "/" ' 'delimited path ("early_commands/cmd1=a"). 
if ' 'key starts with "json:" then val is loaded as ' 'json (json:stages="[\'early\']")'), metavar='key=val', dest='main_cfgopts') parser.set_defaults(config={}) parser.set_defaults(reportstack=None) return parser def maybe_install_deps(args, stacktrace=True, verbosity=0): parser = get_main_parser(stacktrace=stacktrace, verbosity=verbosity, parser_class=NoHelpParser) subps = parser.add_subparsers(dest="subcmd", parser_class=NoHelpParser) for subcmd in SUB_COMMAND_MODULES: subps.add_parser(subcmd) install_only_args = [ ['-v', '--install-deps'], ['-vv', '--install-deps'], ['--install-deps', '-v'], ['--install-deps', '-vv'], ['--install-deps'], ] install_only = args in install_only_args if install_only: verbosity = 1 else: try: ns, unknown = parser.parse_known_args(args) verbosity = ns.verbosity if not ns.install_deps: return except ValueError: # bad usage will be reported by the real reporter return ret = install_deps(verbosity=verbosity) if ret != 0 or install_only: sys.exit(ret) return def main(argv=None): if argv is None: argv = sys.argv[1:] stacktrace = (os.environ.get('CURTIN_STACKTRACE', "0").lower() not in ("0", "false", "")) try: verbosity = int(os.environ.get('CURTIN_VERBOSITY', "0")) except ValueError: verbosity = 1 maybe_install_deps(argv, stacktrace=stacktrace, verbosity=verbosity) # Above here, only standard library modules can be assumed. from .. import config from ..reporter import (events, update_configuration) parser = get_main_parser(stacktrace=stacktrace, verbosity=verbosity) subps = parser.add_subparsers(dest="subcmd") for subcmd in SUB_COMMAND_MODULES: add_subcmd(subps, subcmd) args = parser.parse_args(argv) # merge config flags into a single config dictionary cfg_opts = args.main_cfgopts if hasattr(args, 'cfgopts'): cfg_opts += getattr(args, 'cfgopts') cfg = {} if cfg_opts: for (flag, val) in cfg_opts: if flag in ('-c', '--config'): config.merge_config_fp(cfg, val) val.close() elif flag in ('--set'): config.merge_cmdarg(cfg, val) else: cfg = config.load_command_config(args, util.load_command_environment()) args.config = cfg # if user gave cmdline arguments, then set environ so subsequent # curtin calls get those as default showtrace = args.showtrace if 'showtrace' in cfg: showtrace = str(cfg['showtrace']).lower() not in ("0", "false") os.environ['CURTIN_STACKTRACE'] = str(int(showtrace)) verbosity = args.verbosity if 'verbosity' in cfg: verbosity = int(cfg['verbosity']) os.environ['CURTIN_VERBOSITY'] = str(verbosity) if not getattr(args, 'func', None): # http://bugs.python.org/issue16308 parser.print_help() sys.exit(1) log.basicConfig(stream=args.log_file, verbosity=verbosity) paths = util.get_paths() if paths['helpers'] is None or paths['curtin_exe'] is None: raise OSError("Unable to find helpers or 'curtin' exe to add to path") path = os.environ['PATH'].split(':') for cand in (paths['helpers'], os.path.dirname(paths['curtin_exe'])): if cand not in [os.path.abspath(d) for d in path]: path.insert(0, cand) os.environ['PATH'] = ':'.join(path) # set up the reportstack update_configuration(cfg.get('reporting', {})) stack_prefix = (os.environ.get("CURTIN_REPORTSTACK", "") + "/cmd-%s" % args.subcmd) if stack_prefix.startswith("/"): stack_prefix = stack_prefix[1:] os.environ["CURTIN_REPORTSTACK"] = stack_prefix args.reportstack = events.ReportEventStack( name=stack_prefix, reporting_enabled=True, level="DEBUG", description="curtin command %s" % args.subcmd) try: with args.reportstack: ret = args.func(args) sys.exit(ret) except Exception as e: if showtrace: 
traceback.print_exc() sys.stderr.write("%s\n" % e) sys.exit(3) if __name__ == '__main__': sys.exit(main()) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/mkfs.py0000644000000000000000000000430312741214565016751 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Wesley Wiedenmeier # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . from . import populate_one_subcmd from curtin.block.mkfs import mkfs as run_mkfs from curtin.block.mkfs import valid_fstypes import sys CMD_ARGUMENTS = ( (('devices', {'help': 'create filesystem on the target volume(s) or storage config \ item(s)', 'metavar': 'DEVICE', 'action': 'store', 'nargs': '+'}), (('-f', '--fstype'), {'help': 'filesystem type to use. default is ext4', 'choices': sorted(valid_fstypes()), 'default': 'ext4', 'action': 'store'}), (('-l', '--label'), {'help': 'label to use for filesystem', 'action': 'store'}), (('-u', '--uuid'), {'help': 'uuid to use for filesystem', 'action': 'store'}), (('-s', '--strict'), {'help': 'exit if mkfs cannot do exactly what is specified', 'action': 'store_true', 'default': False}), (('-F', '--force'), {'help': 'continue if some data already exists on device', 'action': 'store_true', 'default': False}) ) ) def mkfs(args): for device in args.devices: uuid = run_mkfs(device, args.fstype, strict=args.strict, uuid=args.uuid, label=args.label, force=args.force) print("Created '%s' filesystem in '%s' with uuid '%s' and label '%s'" % (args.fstype, device, uuid, args.label)) sys.exit(0) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, mkfs) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/net_meta.py0000644000000000000000000001367212741214565017616 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import argparse import os import sys from curtin import net from curtin.log import LOG import curtin.util as util import curtin.config as config from . 
import populate_one_subcmd DEVNAME_ALIASES = ['connected', 'configured', 'netboot'] def network_device(value): if value in DEVNAME_ALIASES: return value if (value.startswith('eth') or (value.startswith('en') and len(value) == 3)): return value raise argparse.ArgumentTypeError("%s does not look like a netdev name") def resolve_alias(alias): if alias == "connected": alldevs = net.get_devicelist() return [d for d in alldevs if net.is_physical(d) and net.is_up(d)] elif alias == "configured": alldevs = net.get_devicelist() return [d for d in alldevs if net.is_physical(d) and net.is_up(d) and net.is_connected(d)] elif alias == "netboot": # should read /proc/cmdline here for BOOTIF raise NotImplementedError("netboot alias not implemented") else: raise ValueError("'%s' is not an alias: %s", alias, DEVNAME_ALIASES) def interfaces_basic_dhcp(devices, macs=None): # return network configuration that says to dhcp on provided devices if macs is None: macs = {} for dev in devices: macs[dev] = net.get_interface_mac(dev) config = [] for dev in devices: config.append({ 'type': 'physical', 'name': dev, 'mac_address': macs.get(dev), 'subnets': [{'type': 'dhcp4'}]}) return {'network': {'version': 1, 'config': config}} def interfaces_custom(args): state = util.load_command_environment() cfg = config.load_command_config(args, state) network_config = cfg.get('network', []) if not network_config: raise Exception("network configuration is required by mode '%s' " "but not provided in the config file" % 'custom') return {'network': network_config} def net_meta(args): # curtin net-meta --devices connected dhcp # curtin net-meta --devices configured dhcp # curtin net-meta --devices netboot dhcp # curtin net-meta --devices connected custom # if network-config hook exists in target, # we do not run the builtin if util.run_hook_if_exists(args.target, 'network-config'): sys.exit(0) state = util.load_command_environment() cfg = config.load_command_config(args, state) if cfg.get("network") is not None: args.mode = "custom" eni = "etc/network/interfaces" if args.mode == "auto": if not args.devices: args.devices = ["connected"] t_eni = None if args.target: t_eni = os.path.sep.join((args.target, eni,)) if not os.path.isfile(t_eni): t_eni = None if t_eni: args.mode = "copy" else: args.mode = "dhcp" devices = [] if args.devices: for dev in args.devices: if dev in DEVNAME_ALIASES: devices += resolve_alias(dev) else: devices.append(dev) LOG.debug("net-meta mode is '%s'. devices=%s", args.mode, devices) output_network_config = os.environ.get("OUTPUT_NETWORK_CONFIG", "") if args.mode == "copy": if not args.target: raise argparse.ArgumentTypeError("mode 'copy' requires --target") t_eni = os.path.sep.join((args.target, "etc/network/interfaces",)) with open(t_eni, "r") as fp: content = fp.read() LOG.warn("net-meta mode is 'copy', static network interfaces files" "can be brittle. Copied interfaces: %s", content) target = args.output elif args.mode == "dhcp": target = output_network_config content = config.dump_config(interfaces_basic_dhcp(devices)) elif args.mode == 'custom': target = output_network_config content = config.dump_config(interfaces_custom(args)) else: raise Exception("Unexpected network config mode '%s'." % args.mode) if not target: raise Exception( "No target given for mode = '%s'. 
No where to write content: %s" % (args.mode, content)) LOG.debug("writing to file %s with network config: %s", target, content) if target == "-": sys.stdout.write(content) else: with open(target, "w") as fp: fp.write(content) sys.exit(0) CMD_ARGUMENTS = ( ((('-D', '--devices'), {'help': 'which devices to operate on', 'action': 'append', 'metavar': 'DEVICE', 'type': network_device}), (('-o', '--output'), {'help': 'file to write to. defaults to env["OUTPUT_INTERFACES"] or "-"', 'metavar': 'IFILE', 'action': 'store', 'default': os.environ.get('OUTPUT_INTERFACES', "-")}), (('-t', '--target'), {'help': 'operate on target. default is env[TARGET_MOUNT_POINT]', 'action': 'store', 'metavar': 'TARGET', 'default': os.environ.get('TARGET_MOUNT_POINT')}), ('mode', {'help': 'meta-mode to use', 'choices': ['dhcp', 'copy', 'auto', 'custom']}) ) ) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, net_meta) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/pack.py0000644000000000000000000000361212741214565016731 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import sys from curtin import pack from . import populate_one_subcmd CMD_ARGUMENTS = ( ((('-o', '--output'), {'help': 'where to write the archive to', 'action': 'store', 'metavar': 'FILE', 'default': "-", }), (('-a', '--add'), {'help': 'include FILE_PATH in archive at ARCHIVE_PATH', 'action': 'append', 'metavar': 'ARCHIVE_PATH:FILE_PATH', 'default': []}), ('command_args', {'help': 'command to run after extracting', 'nargs': '*'}), ) ) def pack_main(args): if args.output == "-": fdout = sys.stdout else: fdout = open(args.output, "w") delim = ":" addl = [] for tok in args.add: if delim not in tok: raise ValueError("'--add' argument '%s' did not have a '%s'", (tok, delim)) (archpath, filepath) = tok.split(":", 1) addl.append((archpath, filepath),) pack.pack(fdout, command=args.command_args, copy_files=addl) if args.output != "-": fdout.close() sys.exit(0) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, pack_main) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/swap.py0000644000000000000000000000547712741214565017000 0ustar 00000000000000# Copyright (C) 2013 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
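# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): swap_main() below
# accepts human-readable sizes such as "4G" or "1500M" and converts them
# with util.human2bytes(). A minimal stand-alone converter along those
# lines might look like the following; the suffix table is an assumption
# for illustration, not curtin's actual implementation:
#
#     def human2bytes_sketch(size):
#         """Convert '4G' / '1500M' / '1024K' / '100000' to a byte count."""
#         suffixes = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3,
#                     'T': 1024 ** 4}
#         size = str(size).strip()
#         if size[-1:].upper() in suffixes:
#             return int(float(size[:-1]) * suffixes[size[-1].upper()])
#         return int(size)
#
#     # e.g. human2bytes_sketch("4G") == 4294967296
# ---------------------------------------------------------------------------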
import os import sys import curtin.swap as swap import curtin.util as util from . import populate_one_subcmd def swap_main(args): # curtin swap [--size=4G] [--target=/] [--fstab=/etc/fstab] [swap] state = util.load_command_environment() if args.target is not None: state['target'] = args.target if args.fstab is not None: state['fstab'] = args.fstab if state['target'] is None: sys.stderr.write("Unable to find target. " "Use --target or set TARGET_MOUNT_POINT\n") sys.exit(2) size = args.size if size is not None and size.lower() == "auto": size = None if size is not None: try: size = util.human2bytes(size) except ValueError as e: sys.stderr.write("%s\n" % e) sys.exit(2) if args.maxsize is not None: args.maxsize = util.human2bytes(args.maxsize) swap.setup_swapfile(target=state['target'], fstab=state['fstab'], swapfile=args.swapfile, size=size, maxsize=args.maxsize) sys.exit(2) CMD_ARGUMENTS = ( ((('-f', '--fstab'), {'help': 'file to write to. defaults to env["OUTPUT_FSTAB"]', 'metavar': 'FSTAB', 'action': 'store', 'default': os.environ.get('OUTPUT_FSTAB')}), (('-t', '--target'), {'help': ('target filesystem root to add swap file to. ' 'default is env[TARGET_MOUNT_POINT]'), 'action': 'store', 'metavar': 'TARGET', 'default': os.environ.get('TARGET_MOUNT_POINT')}), (('-s', '--size'), {'help': 'size of swap file (eg: 1G, 1500M, 1024K, 100000. def: "auto")', 'default': None, 'action': 'store'}), (('-M', '--maxsize'), {'help': 'maximum size of swap file (assuming "auto")', 'default': None, 'action': 'store'}), ('swapfile', {'help': 'path to swap file under target', 'default': 'swap.img', 'nargs': '?'}), ) ) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, swap_main) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/system_install.py0000644000000000000000000000375412741214565021074 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import sys import curtin.util as util from . import populate_one_subcmd from curtin.log import LOG def system_install_pkgs_main(args): # curtin system-install [--target=/] [pkg, [pkg...]] if args.target is None: args.target = "/" exit_code = 0 try: util.install_packages( pkglist=args.packages, target=args.target, allow_daemons=args.allow_daemons) except util.ProcessExecutionError as e: LOG.warn("system install failed for %s: %s" % (args.packages, e)) exit_code = e.exit_code sys.exit(exit_code) CMD_ARGUMENTS = ( ((('--allow-daemons',), {'help': ('do not disable running of daemons during upgrade.'), 'action': 'store_true', 'default': False}), (('-t', '--target'), {'help': ('target root to upgrade. 
' 'default is env[TARGET_MOUNT_POINT]'), 'action': 'store', 'metavar': 'TARGET', 'default': os.environ.get('TARGET_MOUNT_POINT')}), ('packages', {'help': 'the list of packages to install', 'metavar': 'PACKAGES', 'action': 'store', 'nargs': '+'}), ) ) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, system_install_pkgs_main) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/commands/system_upgrade.py0000644000000000000000000000343712741214565021053 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import os import sys import curtin.util as util from . import populate_one_subcmd from curtin.log import LOG def system_upgrade_main(args): # curtin system-upgrade [--target=/] if args.target is None: args.target = "/" exit_code = 0 try: util.system_upgrade(target=args.target, allow_daemons=args.allow_daemons) except util.ProcessExecutionError as e: LOG.warn("system upgrade failed: %s" % e) exit_code = e.exit_code sys.exit(exit_code) CMD_ARGUMENTS = ( ((('--allow-daemons',), {'help': ('do not disable running of daemons during upgrade.'), 'action': 'store_true', 'default': False}), (('-t', '--target'), {'help': ('target root to upgrade. ' 'default is env[TARGET_MOUNT_POINT]'), 'action': 'store', 'metavar': 'TARGET', 'default': os.environ.get('TARGET_MOUNT_POINT')}), ) ) def POPULATE_SUBCMD(parser): populate_one_subcmd(parser, CMD_ARGUMENTS, system_upgrade_main) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/deps/__init__.py0000644000000000000000000001117612741214565016710 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
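# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the checks in this
# module reduce to two questions -- "does this import statement run?" and
# "is this executable on PATH?". check_import() below answers the first by
# exec()ing the import string; a minimal stand-alone version of that idea:
#
#     def can_import(stmt):
#         """Return True if an import statement such as 'import yaml' works."""
#         try:
#             exec(stmt)
#             return True
#         except ImportError:
#             return False
#
#     # can_import('import yaml') -> False would mean python-yaml /
#     # python3-yaml (see REQUIRED_IMPORTS below) still needs installing.
# ---------------------------------------------------------------------------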
import os import sys from curtin.util import (which, install_packages, lsb_release, ProcessExecutionError) REQUIRED_IMPORTS = [ # import string to execute, python2 package, python3 package ('import yaml', 'python-yaml', 'python3-yaml'), ] REQUIRED_EXECUTABLES = [ # executable in PATH, package ('file', 'file'), ('lvcreate', 'lvm2'), ('mdadm', 'mdadm'), ('mkfs.vfat', 'dosfstools'), ('mkfs.btrfs', 'btrfs-tools'), ('mkfs.ext4', 'e2fsprogs'), ('mkfs.xfs', 'xfsprogs'), ('partprobe', 'parted'), ('sgdisk', 'gdisk'), ('udevadm', 'udev'), ('make-bcache', 'bcache-tools'), ] if lsb_release()['codename'] == "precise": REQUIRED_IMPORTS.append( ('import oauth.oauth', 'python-oauth', None),) else: REQUIRED_IMPORTS.append( ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),) class MissingDeps(Exception): def __init__(self, message, deps): self.message = message if isinstance(deps, str) or deps is None: deps = [deps] self.deps = [d for d in deps if d is not None] self.fatal = None in deps def __str__(self): if self.fatal: if not len(self.deps): return self.message + " Unresolvable." return (self.message + " Unresolvable. Partially resolvable with packages: %s" % ' '.join(self.deps)) else: return self.message + " Install packages: %s" % ' '.join(self.deps) def check_import(imports, py2pkgs, py3pkgs, message=None): import_group = imports if isinstance(import_group, str): import_group = [import_group] for istr in import_group: try: exec(istr) return except ImportError: pass if not message: if isinstance(imports, str): message = "Failed '%s'." % imports else: message = "Unable to do any of %s." % import_group if sys.version_info[0] == 2: pkgs = py2pkgs else: pkgs = py3pkgs raise MissingDeps(message, pkgs) def check_executable(cmdname, pkg): if not which(cmdname): raise MissingDeps("Missing program '%s'." % cmdname, pkg) def check_executables(executables=None): if executables is None: executables = REQUIRED_EXECUTABLES mdeps = [] for exe, pkg in executables: try: check_executable(exe, pkg) except MissingDeps as e: mdeps.append(e) return mdeps def check_imports(imports=None): if imports is None: imports = REQUIRED_IMPORTS mdeps = [] for import_str, py2pkg, py3pkg in imports: try: check_import(import_str, py2pkg, py3pkg) except MissingDeps as e: mdeps.append(e) return mdeps def find_missing_deps(): return check_executables() + check_imports() def install_deps(verbosity=False, dry_run=False, allow_daemons=True): errors = find_missing_deps() if len(errors) == 0: if verbosity: sys.stderr.write("No missing dependencies\n") return 0 missing_pkgs = [] for e in errors: missing_pkgs += e.deps deps_string = ' '.join(sorted(missing_pkgs)) if dry_run: sys.stderr.write("Missing dependencies: %s\n" % deps_string) return 0 if os.geteuid() != 0: sys.stderr.write("Missing dependencies: %s\n" % deps_string) sys.stderr.write("Package installation is not possible as non-root.\n") return 2 if verbosity: sys.stderr.write("Installing %s\n" % deps_string) ret = 0 try: install_packages(missing_pkgs, allow_daemons=allow_daemons, aptopts=["--no-install-recommends"]) except ProcessExecutionError as e: sys.stderr.write("%s\n" % e) ret = e.exit_code return ret # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/deps/check.py0000644000000000000000000000427312741214565016226 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. 
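# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): main() below maps
# the result of find_missing_deps() to an exit code -- 0 when nothing is
# missing, 1 for unresolvable deps, otherwise 100 minus the number of
# missing packages (so 99 means exactly one package short). A hypothetical
# caller could use that convention like this:
#
#     import subprocess
#     import sys
#
#     rc = subprocess.call([sys.executable, '-m', 'curtin.deps.check', '-v'])
#     if rc == 0:
#         print('all curtin dependencies present')
#     else:
#         print('missing deps; curtin.deps.check exited %d' % rc)
# ---------------------------------------------------------------------------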
# # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . """ The intent point of this module is that it can be called and exit success or fail, indicating that deps should be there. python -m curtin.deps.check [-v] """ import argparse import sys from . import find_missing_deps def debug(level, msg_level, msg): if level >= msg_level: if msg[-1] != "\n": msg += "\n" sys.stderr.write(msg) def main(): parser = argparse.ArgumentParser( prog='curtin-check-deps', description='check dependencies for curtin.') parser.add_argument('-v', '--verbose', action='count', default=0, dest='verbosity') args, extra = parser.parse_known_args(sys.argv[1:]) errors = find_missing_deps() if len(errors) == 0: # exit 0 means all dependencies are available. debug(args.verbosity, 1, "No missing dependencies") sys.exit(0) missing_pkgs = [] fatal = [] for e in errors: if e.fatal: fatal.append(e) debug(args.verbosity, 2, str(e)) missing_pkgs += e.deps if len(fatal): for e in fatal: debug(args.verbosity, 1, str(e)) sys.exit(1) debug(args.verbosity, 1, "Fix with:\n apt-get -qy install %s\n" % ' '.join(sorted(missing_pkgs))) # we exit higher with less deps needed. # exiting 99 means just 1 dep needed. sys.exit(100-len(missing_pkgs)) if __name__ == '__main__': main() # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/deps/install.py0000644000000000000000000000310712741214565016612 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . """ The intent of this module is that it can be called to install deps python -m curtin.deps.install [-v] """ import argparse import sys from . import install_deps def main(): parser = argparse.ArgumentParser( prog='curtin-install-deps', description='install dependencies for curtin.') parser.add_argument('-v', '--verbose', action='count', default=0, dest='verbosity') parser.add_argument('--dry-run', action='store_true', default=False) parser.add_argument('--no-allow-daemons', action='store_false', default=True) args = parser.parse_args(sys.argv[1:]) ret = install_deps(verbosity=args.verbosity, dry_run=args.dry_run, allow_daemons=True) sys.exit(ret) if __name__ == '__main__': main() # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/net/__init__.py0000644000000000000000000004176312741214565016550 0ustar 00000000000000# Copyright (C) 2013-2014 Canonical Ltd. 
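# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the device
# predicates in this module (is_up, is_connected, is_physical, ...) are
# thin wrappers over files under /sys/class/net/<dev>/, read via
# read_sys_net() below. The core pattern, reduced to a stand-alone form:
#
#     def read_operstate_sketch(devname):
#         """Read /sys/class/net/<devname>/operstate ('up', 'down', ...)."""
#         path = "/sys/class/net/%s/operstate" % devname
#         try:
#             with open(path, "r") as fp:
#                 return fp.read().strip()
#         except IOError:
#             return None
#
#     # e.g. read_operstate_sketch('eth0') -> 'up' on a connected system
# ---------------------------------------------------------------------------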
# # Author: Scott Moser # Author: Blake Rouse # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . import errno import glob import os import re from curtin.log import LOG from curtin.udev import generate_udev_rule import curtin.util as util import curtin.config as config from . import network_state SYS_CLASS_NET = "/sys/class/net/" NET_CONFIG_OPTIONS = [ "address", "netmask", "broadcast", "network", "metric", "gateway", "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime", "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame", "netnum", "endpoint", "local", "ttl", ] NET_CONFIG_COMMANDS = [ "pre-up", "up", "post-up", "down", "pre-down", "post-down", ] NET_CONFIG_BRIDGE_OPTIONS = [ "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit", "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp", ] def sys_dev_path(devname, path=""): return SYS_CLASS_NET + devname + "/" + path def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): try: contents = "" with open(sys_dev_path(devname, path), "r") as fp: contents = fp.read().strip() if translate is None: return contents try: return translate.get(contents) except KeyError: LOG.debug("found unexpected value '%s' in '%s/%s'", contents, devname, path) if keyerror is not None: return keyerror raise except OSError as e: if e.errno == errno.ENOENT and enoent is not None: return enoent raise def is_up(devname): # The linux kernel says to consider devices in 'unknown' # operstate as up for the purposes of network configuration. See # Documentation/networking/operstates.txt in the kernel source. translate = {'up': True, 'unknown': True, 'down': False} return read_sys_net(devname, "operstate", enoent=False, keyerror=False, translate=translate) def is_wireless(devname): return os.path.exists(sys_dev_path(devname, "wireless")) def is_connected(devname): # is_connected isn't really as simple as that. 2 is # 'physically connected'. 3 is 'not connected'. but a wlan interface will # always show 3. try: iflink = read_sys_net(devname, "iflink", enoent=False) if iflink == "2": return True if not is_wireless(devname): return False LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname) return read_sys_net(devname, "carrier", enoent=False, keyerror=False, translate={'0': False, '1': True}) except IOError as e: if e.errno == errno.EINVAL: return False raise def is_physical(devname): return os.path.exists(sys_dev_path(devname, "device")) def is_present(devname): return os.path.exists(sys_dev_path(devname)) def get_devicelist(): return os.listdir(SYS_CLASS_NET) class ParserError(Exception): """Raised when parser has issue parsing the interfaces file.""" def parse_deb_config_data(ifaces, contents, src_dir, src_path): """Parses the file contents, placing result into ifaces. '_source_path' is added to every dictionary entry to define which file the configration information came from. 
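    For example (illustrative, abridged): given an interfaces file
    containing:

        auto eth0
        iface eth0 inet dhcp

    ifaces would end up holding roughly:

        {'eth0': {'auto': True, 'control': 'auto',
                  'family': 'inet', 'method': 'dhcp',
                  '_source_path': '/etc/network/interfaces'}}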
:param ifaces: interface dictionary :param contents: contents of interfaces file :param src_dir: directory interfaces file was located :param src_path: file path the `contents` was read """ currif = None for line in contents.splitlines(): line = line.strip() if line.startswith('#'): continue split = line.split(' ') option = split[0] if option == "source-directory": parsed_src_dir = split[1] if not parsed_src_dir.startswith("/"): parsed_src_dir = os.path.join(src_dir, parsed_src_dir) for expanded_path in glob.glob(parsed_src_dir): dir_contents = os.listdir(expanded_path) dir_contents = [ os.path.join(expanded_path, path) for path in dir_contents if (os.path.isfile(os.path.join(expanded_path, path)) and re.match("^[a-zA-Z0-9_-]+$", path) is not None) ] for entry in dir_contents: with open(entry, "r") as fp: src_data = fp.read().strip() abs_entry = os.path.abspath(entry) parse_deb_config_data( ifaces, src_data, os.path.dirname(abs_entry), abs_entry) elif option == "source": new_src_path = split[1] if not new_src_path.startswith("/"): new_src_path = os.path.join(src_dir, new_src_path) for expanded_path in glob.glob(new_src_path): with open(expanded_path, "r") as fp: src_data = fp.read().strip() abs_path = os.path.abspath(expanded_path) parse_deb_config_data( ifaces, src_data, os.path.dirname(abs_path), abs_path) elif option == "auto": for iface in split[1:]: if iface not in ifaces: ifaces[iface] = { # Include the source path this interface was found in. "_source_path": src_path } ifaces[iface]['auto'] = True ifaces[iface]['control'] = 'auto' elif option.startswith('allow-'): for iface in split[1:]: if iface not in ifaces: ifaces[iface] = { # Include the source path this interface was found in. "_source_path": src_path } ifaces[iface]['auto'] = False ifaces[iface]['control'] = option.split('allow-')[-1] elif option == "iface": iface, family, method = split[1:4] if iface not in ifaces: ifaces[iface] = { # Include the source path this interface was found in. 
"_source_path": src_path } # man (5) interfaces says we can have multiple iface stanzas # all options are combined ifaces[iface]['family'] = family ifaces[iface]['method'] = method currif = iface elif option == "hwaddress": ifaces[currif]['hwaddress'] = split[1] elif option in NET_CONFIG_OPTIONS: ifaces[currif][option] = split[1] elif option in NET_CONFIG_COMMANDS: if option not in ifaces[currif]: ifaces[currif][option] = [] ifaces[currif][option].append(' '.join(split[1:])) elif option.startswith('dns-'): if 'dns' not in ifaces[currif]: ifaces[currif]['dns'] = {} if option == 'dns-search': ifaces[currif]['dns']['search'] = [] for domain in split[1:]: ifaces[currif]['dns']['search'].append(domain) elif option == 'dns-nameservers': ifaces[currif]['dns']['nameservers'] = [] for server in split[1:]: ifaces[currif]['dns']['nameservers'].append(server) elif option.startswith('bridge_'): if 'bridge' not in ifaces[currif]: ifaces[currif]['bridge'] = {} if option in NET_CONFIG_BRIDGE_OPTIONS: bridge_option = option.replace('bridge_', '', 1) ifaces[currif]['bridge'][bridge_option] = split[1] elif option == "bridge_ports": ifaces[currif]['bridge']['ports'] = [] for iface in split[1:]: ifaces[currif]['bridge']['ports'].append(iface) elif option == "bridge_hw" and split[1].lower() == "mac": ifaces[currif]['bridge']['mac'] = split[2] elif option == "bridge_pathcost": if 'pathcost' not in ifaces[currif]['bridge']: ifaces[currif]['bridge']['pathcost'] = {} ifaces[currif]['bridge']['pathcost'][split[1]] = split[2] elif option == "bridge_portprio": if 'portprio' not in ifaces[currif]['bridge']: ifaces[currif]['bridge']['portprio'] = {} ifaces[currif]['bridge']['portprio'][split[1]] = split[2] elif option.startswith('bond-'): if 'bond' not in ifaces[currif]: ifaces[currif]['bond'] = {} bond_option = option.replace('bond-', '', 1) ifaces[currif]['bond'][bond_option] = split[1] for iface in ifaces.keys(): if 'auto' not in ifaces[iface]: ifaces[iface]['auto'] = False def parse_deb_config(path): """Parses a debian network configuration file.""" ifaces = {} with open(path, "r") as fp: contents = fp.read().strip() abs_path = os.path.abspath(path) parse_deb_config_data( ifaces, contents, os.path.dirname(abs_path), abs_path) return ifaces def parse_net_config_data(net_config): """Parses the config, returns NetworkState dictionary :param net_config: curtin network config dict """ state = None if 'version' in net_config and 'config' in net_config: ns = network_state.NetworkState(version=net_config.get('version'), config=net_config.get('config')) ns.parse_config() state = ns.network_state return state def parse_net_config(path): """Parses a curtin network configuration file and return network state""" ns = None net_config = config.load_config(path) if 'network' in net_config: ns = parse_net_config_data(net_config.get('network')) return ns def render_persistent_net(network_state): ''' Given state, emit udev rules to map mac to ifname ''' content = "# Autogenerated by curtin\n" interfaces = network_state.get('interfaces') for iface in interfaces.values(): if iface['type'] == 'physical': ifname = iface.get('name', None) mac = iface.get('mac_address', '') # len(macaddr) == 2 * 6 + 5 == 17 if ifname and mac and len(mac) == 17: content += generate_udev_rule(ifname, mac) return content # TODO: switch valid_map based on mode inet/inet6 def iface_add_subnet(iface, subnet): content = "" valid_map = [ 'address', 'netmask', 'broadcast', 'metric', 'gateway', 'pointopoint', 'mtu', 'scope', 'dns_search', 'dns_nameservers', ] for 
key, value in subnet.items(): if value and key in valid_map: if type(value) == list: value = " ".join(value) if '_' in key: key = key.replace('_', '-') content += " {} {}\n".format(key, value) return content # TODO: switch to valid_map for attrs def iface_add_attrs(iface, index): # If the index is non-zero, this is an alias interface. Alias interfaces # represent additional interface addresses, and should not have additional # attributes. (extra attributes here are almost always either incorrect, # or are applied to the parent interface.) So if this is an alias, stop # right here. if index != 0: return "" content = "" ignore_map = [ 'control', 'index', 'inet', 'mode', 'name', 'subnets', 'type', ] if iface['type'] not in ['bond', 'bridge']: ignore_map.append('mac_address') for key, value in iface.items(): if value and key not in ignore_map: if type(value) == list: value = " ".join(value) content += " {} {}\n".format(key, value) return content def render_route(route): content = "up route add" mapping = { 'network': '-net', 'netmask': 'netmask', 'gateway': 'gw', 'metric': 'metric', } for k in ['network', 'netmask', 'gateway', 'metric']: if k in route: content += " %s %s" % (mapping[k], route[k]) content += '\n' return content def iface_start_entry(iface, index): fullname = iface['name'] if index != 0: fullname += ":%s" % index control = iface['control'] if control == "auto": cverb = "auto" elif control in ("hotplug",): cverb = "allow-" + control else: cverb = "# control-" + control subst = iface.copy() subst.update({'fullname': fullname, 'cverb': cverb}) return ("{cverb} {fullname}\n" "iface {fullname} {inet} {mode}\n").format(**subst) def render_interfaces(network_state): ''' Given state, emit etc/network/interfaces content ''' content = "" interfaces = network_state.get('interfaces') ''' Apply a sort order to ensure that we write out the physical interfaces first; this is critical for bonding ''' order = { 'physical': 0, 'bond': 1, 'bridge': 2, 'vlan': 3, } content += "auto lo\niface lo inet loopback\n" for dnskey, value in network_state.get('dns', {}).items(): if len(value): content += " dns-{} {}\n".format(dnskey, " ".join(value)) for iface in sorted(interfaces.values(), key=lambda k: (order[k['type']], k['name'])): if content[-2:] != "\n\n": content += "\n" subnets = iface.get('subnets', {}) if subnets: for index, subnet in zip(range(0, len(subnets)), subnets): if content[-2:] != "\n\n": content += "\n" iface['index'] = index iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' if iface['mode'].endswith('6'): # This is a request for DHCPv6. subnet_inet += '6' elif iface['mode'] == 'static' and ":" in subnet['address']: # This is a static IPv6 address. 
subnet_inet += '6' iface['inet'] = subnet_inet if iface['mode'].startswith('dhcp'): iface['mode'] = 'dhcp' content += iface_start_entry(iface, index) content += iface_add_subnet(iface, subnet) content += iface_add_attrs(iface, index) if len(subnets) > 1 and index == 0: for i in range(1, len(subnets)): content += " post-up ifup %s:%s\n" % (iface['name'], i) else: # ifenslave docs say to auto the slave devices if 'bond-master' in iface: content += "auto {name}\n".format(**iface) content += "iface {name} {inet} {mode}\n".format(**iface) content += iface_add_attrs(iface, index) for route in network_state.get('routes'): content += render_route(route) # global replacements until v2 format content = content.replace('mac_address', 'hwaddress') # Play nice with others and source eni config files content += "\nsource /etc/network/interfaces.d/*.cfg\n" return content def render_network_state(target, network_state): eni = 'etc/network/interfaces' netrules = 'etc/udev/rules.d/70-persistent-net.rules' cc = 'etc/cloud/cloud.cfg.d/curtin-disable-cloudinit-networking.cfg' eni = os.path.sep.join((target, eni,)) LOG.info('Writing ' + eni) util.write_file(eni, content=render_interfaces(network_state)) netrules = os.path.sep.join((target, netrules,)) LOG.info('Writing ' + netrules) util.write_file(netrules, content=render_persistent_net(network_state)) cc_disable = os.path.sep.join((target, cc,)) LOG.info('Writing ' + cc_disable) util.write_file(cc_disable, content='network: {config: disabled}\n') def get_interface_mac(ifname): """Returns the string value of an interface's MAC Address""" return read_sys_net(ifname, "address", enoent=False) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/net/network_state.py0000644000000000000000000003153512741214565017676 0ustar 00000000000000# Copyright (C) 2013-2014 Canonical Ltd. # # Author: Ryan Harper # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
from curtin.log import LOG import curtin.config as curtin_config NETWORK_STATE_VERSION = 1 NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } def from_state_file(state_file): network_state = None state = curtin_config.load_config(state_file) network_state = NetworkState() network_state.load(state) return network_state class NetworkState: def __init__(self, version=NETWORK_STATE_VERSION, config=None): self.version = version self.config = config self.network_state = { 'interfaces': {}, 'routes': [], 'dns': { 'nameservers': [], 'search': [], } } self.command_handlers = self.get_command_handlers() def get_command_handlers(self): METHOD_PREFIX = 'handle_' methods = filter(lambda x: callable(getattr(self, x)) and x.startswith(METHOD_PREFIX), dir(self)) handlers = {} for m in methods: key = m.replace(METHOD_PREFIX, '') handlers[key] = getattr(self, m) return handlers def dump(self): state = { 'version': self.version, 'config': self.config, 'network_state': self.network_state, } return curtin_config.dump_config(state) def load(self, state): if 'version' not in state: LOG.error('Invalid state, missing version field') raise Exception('Invalid state, missing version field') required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']] if not self.valid_command(state, required_keys): msg = 'Invalid state, missing keys: {}'.format(required_keys) LOG.error(msg) raise Exception(msg) # v1 - direct attr mapping, except version for key in [k for k in required_keys if k not in ['version']]: setattr(self, key, state[key]) self.command_handlers = self.get_command_handlers() def dump_network_state(self): return curtin_config.dump_config(self.network_state) def parse_config(self): # rebuild network state for command in self.config: handler = self.command_handlers.get(command['type']) handler(command) def valid_command(self, command, required_keys): if not required_keys: return False found_keys = [key for key in command.keys() if key in required_keys] return len(found_keys) == len(required_keys) def handle_physical(self, command): ''' command = { 'type': 'physical', 'mac_address': 'c0:d6:9f:2c:e8:80', 'name': 'eth0', 'subnets': [ {'type': 'dhcp4'} ] } ''' required_keys = [ 'name', ] if not self.valid_command(command, required_keys): LOG.warn('Skipping Invalid command: {}'.format(command)) LOG.debug(self.dump_network_state()) return interfaces = self.network_state.get('interfaces') iface = interfaces.get(command['name'], {}) for param, val in command.get('params', {}).items(): iface.update({param: val}) iface.update({ 'name': command.get('name'), 'type': command.get('type'), 'mac_address': command.get('mac_address'), 'inet': 'inet', 'mode': 'manual', 'mtu': command.get('mtu'), 'address': None, 'gateway': None, 'subnets': command.get('subnets'), }) self.network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() def handle_vlan(self, command): ''' auto eth0.222 iface eth0.222 inet static address 10.10.10.1 netmask 255.255.255.0 vlan-raw-device eth0 ''' required_keys = [ 'name', 'vlan_link', 'vlan_id', ] if not self.valid_command(command, required_keys): print('Skipping Invalid command: {}'.format(command)) print(self.dump_network_state()) return interfaces = self.network_state.get('interfaces') self.handle_physical(command) iface = interfaces.get(command.get('name'), {}) iface['vlan-raw-device'] = command.get('vlan_link') iface['vlan_id'] = command.get('vlan_id') interfaces.update({iface['name']: iface}) def handle_bond(self, command): ''' 
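        Render a bond and its slave interfaces into network_state.

        An illustrative input command (assumed shape, for example only):

            command = {
                'type': 'bond',
                'name': 'bond0',
                'bond_interfaces': ['eth0', 'eth1'],
                'params': {'bond-mode': '802.3ad'},
            }

        which corresponds to /etc/network/interfaces output such as: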
#/etc/network/interfaces auto eth0 iface eth0 inet manual bond-master bond0 bond-mode 802.3ad auto eth1 iface eth1 inet manual bond-master bond0 bond-mode 802.3ad auto bond0 iface bond0 inet static address 192.168.0.10 gateway 192.168.0.1 netmask 255.255.255.0 bond-slaves none bond-mode 802.3ad bond-miimon 100 bond-downdelay 200 bond-updelay 200 bond-lacp-rate 4 ''' required_keys = [ 'name', 'bond_interfaces', 'params', ] if not self.valid_command(command, required_keys): print('Skipping Invalid command: {}'.format(command)) print(self.dump_network_state()) return self.handle_physical(command) interfaces = self.network_state.get('interfaces') iface = interfaces.get(command.get('name'), {}) for param, val in command.get('params').items(): iface.update({param: val}) iface.update({'bond-slaves': 'none'}) self.network_state['interfaces'].update({iface['name']: iface}) # handle bond slaves for ifname in command.get('bond_interfaces'): if ifname not in interfaces: cmd = { 'name': ifname, 'type': 'bond', } # inject placeholder self.handle_physical(cmd) interfaces = self.network_state.get('interfaces') bond_if = interfaces.get(ifname) bond_if['bond-master'] = command.get('name') # copy in bond config into slave for param, val in command.get('params').items(): bond_if.update({param: val}) self.network_state['interfaces'].update({ifname: bond_if}) def handle_bridge(self, command): ''' auto br0 iface br0 inet static address 10.10.10.1 netmask 255.255.255.0 bridge_ports eth0 eth1 bridge_stp off bridge_fd 0 bridge_maxwait 0 bridge_params = [ "bridge_ports", "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcint", "bridge_hello", "bridge_hw", "bridge_maxage", "bridge_maxwait", "bridge_pathcost", "bridge_portprio", "bridge_stp", "bridge_waitport", ] ''' required_keys = [ 'name', 'bridge_interfaces', 'params', ] if not self.valid_command(command, required_keys): print('Skipping Invalid command: {}'.format(command)) print(self.dump_network_state()) return # find one of the bridge port ifaces to get mac_addr # handle bridge_slaves interfaces = self.network_state.get('interfaces') for ifname in command.get('bridge_interfaces'): if ifname in interfaces: continue cmd = { 'name': ifname, } # inject placeholder self.handle_physical(cmd) interfaces = self.network_state.get('interfaces') self.handle_physical(command) iface = interfaces.get(command.get('name'), {}) iface['bridge_ports'] = command['bridge_interfaces'] for param, val in command.get('params').items(): iface.update({param: val}) interfaces.update({iface['name']: iface}) def handle_nameserver(self, command): required_keys = [ 'address', ] if not self.valid_command(command, required_keys): print('Skipping Invalid command: {}'.format(command)) print(self.dump_network_state()) return dns = self.network_state.get('dns') if 'address' in command: addrs = command['address'] if not type(addrs) == list: addrs = [addrs] for addr in addrs: dns['nameservers'].append(addr) if 'search' in command: paths = command['search'] if not isinstance(paths, list): paths = [paths] for path in paths: dns['search'].append(path) def handle_route(self, command): required_keys = [ 'destination', ] if not self.valid_command(command, required_keys): print('Skipping Invalid command: {}'.format(command)) print(self.dump_network_state()) return routes = self.network_state.get('routes') network, cidr = command['destination'].split("/") netmask = cidr2mask(int(cidr)) route = { 'network': network, 'netmask': netmask, 'gateway': command.get('gateway'), 'metric': 
command.get('metric'), } routes.append(route) def cidr2mask(cidr): mask = [0, 0, 0, 0] for i in list(range(0, cidr)): idx = int(i / 8) mask[idx] = mask[idx] + (1 << (7 - i % 8)) return ".".join([str(x) for x in mask]) if __name__ == '__main__': import sys import random from curtin import net def load_config(nc): version = nc.get('version') config = nc.get('config') return (version, config) def test_parse(network_config): (version, config) = load_config(network_config) ns1 = NetworkState(version=version, config=config) ns1.parse_config() random.shuffle(config) ns2 = NetworkState(version=version, config=config) ns2.parse_config() print("----NS1-----") print(ns1.dump_network_state()) print() print("----NS2-----") print(ns2.dump_network_state()) print("NS1 == NS2 ?=> {}".format( ns1.network_state == ns2.network_state)) eni = net.render_interfaces(ns2.network_state) print(eni) udev_rules = net.render_persistent_net(ns2.network_state) print(udev_rules) def test_dump_and_load(network_config): print("Loading network_config into NetworkState") (version, config) = load_config(network_config) ns1 = NetworkState(version=version, config=config) ns1.parse_config() print("Dumping state to file") ns1_dump = ns1.dump() ns1_state = "/tmp/ns1.state" with open(ns1_state, "w+") as f: f.write(ns1_dump) print("Loading state from file") ns2 = from_state_file(ns1_state) print("NS1 == NS2 ?=> {}".format( ns1.network_state == ns2.network_state)) def test_output(network_config): (version, config) = load_config(network_config) ns1 = NetworkState(version=version, config=config) ns1.parse_config() random.shuffle(config) ns2 = NetworkState(version=version, config=config) ns2.parse_config() print("NS1 == NS2 ?=> {}".format( ns1.network_state == ns2.network_state)) eni_1 = net.render_interfaces(ns1.network_state) eni_2 = net.render_interfaces(ns2.network_state) print(eni_1) print(eni_2) print("eni_1 == eni_2 ?=> {}".format( eni_1 == eni_2)) y = curtin_config.load_config(sys.argv[1]) network_config = y.get('network') test_parse(network_config) test_dump_and_load(network_config) test_output(network_config) curtin-0.1.0~bzr399/curtin/reporter/__init__.py0000644000000000000000000000343012741214565017611 0ustar 00000000000000# Copyright (C) 2014 Canonical Ltd. # # Author: Newell Jensen # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . """Reporter Abstract Base Class.""" from .registry import DictRegistry from .handlers import available_handlers DEFAULT_CONFIG = { 'logging': {'type': 'log'}, } def update_configuration(config): """Update the instanciated_handler_registry. :param config: The dictionary containing changes to apply. If a key is given with a False-ish value, the registered handler matching that name will be unregistered. 
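    An illustrative config (assumed values, for example only) that
    registers a webhook handler and drops the default log handler:

        update_configuration({
            'logging': None,
            'mywebhook': {'type': 'webhook',
                          'endpoint': 'http://example.com/events'},
        })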
""" for handler_name, handler_config in config.items(): if not handler_config: instantiated_handler_registry.unregister_item( handler_name, force=True) continue handler_config = handler_config.copy() cls = available_handlers.registered_items[handler_config.pop('type')] instantiated_handler_registry.unregister_item(handler_name) instance = cls(**handler_config) instantiated_handler_registry.register_item(handler_name, instance) instantiated_handler_registry = DictRegistry() update_configuration(DEFAULT_CONFIG) curtin-0.1.0~bzr399/curtin/reporter/events.py0000644000000000000000000002230712741214565017362 0ustar 00000000000000# Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . """ cloud-init reporting framework The reporting framework is intended to allow all parts of cloud-init to report events in a structured manner. """ import base64 import os.path import time from . import instantiated_handler_registry FINISH_EVENT_TYPE = 'finish' START_EVENT_TYPE = 'start' RESULT_EVENT_TYPE = 'result' DEFAULT_EVENT_ORIGIN = 'curtin' class _nameset(set): def __getattr__(self, name): if name in self: return name raise AttributeError("%s not a valid value" % name) status = _nameset(("SUCCESS", "WARN", "FAIL")) class ReportingEvent(object): """Encapsulation of event formatting.""" def __init__(self, event_type, name, description, origin=DEFAULT_EVENT_ORIGIN, timestamp=None, level=None): self.event_type = event_type self.name = name self.description = description self.origin = origin if timestamp is None: timestamp = time.time() self.timestamp = timestamp if level is None: level = "INFO" self.level = level def as_string(self): """The event represented as a string.""" return '{0}: {1}: {2}'.format( self.event_type, self.name, self.description) def as_dict(self): """The event represented as a dictionary.""" return {'name': self.name, 'description': self.description, 'event_type': self.event_type, 'origin': self.origin, 'timestamp': self.timestamp, 'level': self.level} class FinishReportingEvent(ReportingEvent): def __init__(self, name, description, result=status.SUCCESS, post_files=None, level=None): super(FinishReportingEvent, self).__init__( FINISH_EVENT_TYPE, name, description, level=level) self.result = result if post_files is None: post_files = [] self.post_files = post_files if result not in status: raise ValueError("Invalid result: %s" % result) def as_string(self): return '{0}: {1}: {2}: {3}'.format( self.event_type, self.name, self.result, self.description) def as_dict(self): """The event represented as json friendly.""" data = super(FinishReportingEvent, self).as_dict() data['result'] = self.result if self.post_files: data['files'] = _collect_file_info(self.post_files) if self.result == status.WARN: data['level'] = "WARN" elif self.result == status.FAIL: data['level'] = "ERROR" return data def report_event(event): """Report an event to all registered event handlers. 
This should generally be called via one of the other functions in the reporting module. :param event_type: The type of the event; this should be a constant from the reporting module. """ for _, handler in instantiated_handler_registry.registered_items.items(): handler.publish_event(event) def report_finish_event(event_name, event_description, result=status.SUCCESS, post_files=None, level=None): """Report a "finish" event. See :py:func:`.report_event` for parameter details. """ if result == status.SUCCESS: event_description = "finished: " + event_description else: event_description = "failed: " + event_description event = FinishReportingEvent(event_name, event_description, result, post_files=post_files, level=level) return report_event(event) def report_start_event(event_name, event_description, level=None): """Report a "start" event. :param event_name: The name of the event; this should be a topic which events would share (e.g. it will be the same for start and finish events). :param event_description: A human-readable description of the event that has occurred. """ event_description = "started: " + event_description event = ReportingEvent(START_EVENT_TYPE, event_name, event_description, level=level) return report_event(event) class ReportEventStack(object): """Context Manager for using :py:func:`report_event` This enables calling :py:func:`report_start_event` and :py:func:`report_finish_event` through a context manager. :param name: the name of the event :param description: the event's description, passed on to :py:func:`report_start_event` :param message: the description to use for the finish event. defaults to :param:description. :param parent: :type parent: :py:class:ReportEventStack or None The parent of this event. The parent is populated with results of all its children. The name used in reporting is / :param reporting_enabled: Indicates if reporting events should be generated. If not provided, defaults to the parent's value, or True if no parent is provided. :param result_on_exception: The result value to set if an exception is caught. default value is FAIL. :param level: The priority level of the enter and exit messages sent. Default value is INFO. 
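    An illustrative use (do_install is a hypothetical work function,
    for example only):

        with ReportEventStack(name='cmd-install',
                              description='curtin command install',
                              reporting_enabled=True):
            do_install()  # start/finish events are emitted around this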
""" def __init__(self, name, description, message=None, parent=None, reporting_enabled=None, result_on_exception=status.FAIL, post_files=None, level="INFO"): self.parent = parent self.name = name self.description = description self.message = message self.result_on_exception = result_on_exception self.result = status.SUCCESS self.level = level if post_files is None: post_files = [] self.post_files = post_files # use parents reporting value if not provided if reporting_enabled is None: if parent: reporting_enabled = parent.reporting_enabled else: reporting_enabled = True self.reporting_enabled = reporting_enabled if parent: self.fullname = '/'.join((parent.fullname, name,)) else: self.fullname = self.name self.children = {} def __repr__(self): return ("ReportEventStack(%s, %s, reporting_enabled=%s)" % (self.name, self.description, self.reporting_enabled)) def __enter__(self): self.result = status.SUCCESS if self.reporting_enabled: report_start_event(self.fullname, self.description, level=self.level) if self.parent: self.parent.children[self.name] = (None, None) return self def _childrens_finish_info(self): for cand_result in (status.FAIL, status.WARN): for name, (value, msg) in self.children.items(): if value == cand_result: return (value, self.message) return (self.result, self.message) @property def result(self): return self._result @result.setter def result(self, value): if value not in status: raise ValueError("'%s' not a valid result" % value) self._result = value @property def message(self): if self._message is not None: return self._message return self.description @message.setter def message(self, value): self._message = value def _finish_info(self, exc): # return tuple of description, and value # explicitly handle sys.exit(0) as not an error if exc and not(isinstance(exc, SystemExit) and exc.code == 0): return (self.result_on_exception, self.message) return self._childrens_finish_info() def __exit__(self, exc_type, exc_value, traceback): (result, msg) = self._finish_info(exc_value) if self.parent: self.parent.children[self.name] = (result, msg) if self.reporting_enabled: report_finish_event(self.fullname, msg, result, post_files=self.post_files, level=self.level) def _collect_file_info(files): if not files: return None ret = [] for fname in files: if not os.path.isfile(fname): content = None else: with open(fname, "rb") as fp: content = base64.b64encode(fp.read()).decode() ret.append({'path': fname, 'content': content, 'encoding': 'base64'}) return ret # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/curtin/reporter/handlers.py0000644000000000000000000000523612741214565017660 0ustar 00000000000000# vi: ts=4 expandtab import abc from .registry import DictRegistry from .. import url_helper from .. import log as logging LOG = logging.getLogger(__name__) class ReportingHandler(object): """Base class for report handlers. Implement :meth:`~publish_event` for controlling what the handler does with an event. 
""" @abc.abstractmethod def publish_event(self, event): """Publish an event to the ``INFO`` log level.""" class LogHandler(ReportingHandler): """Publishes events to the curtin log at the ``DEBUG`` log level.""" def __init__(self, level="DEBUG"): super(LogHandler, self).__init__() if isinstance(level, int): pass else: input_level = level try: level = getattr(logging, level.upper()) except: LOG.warn("invalid level '%s', using WARN", input_level) level = logging.WARN self.level = level def publish_event(self, event): """Publish an event to the ``DEBUG`` log level.""" logger = logging.getLogger( '.'.join(['curtin', 'reporting', event.event_type, event.name])) logger.log(self.level, event.as_string()) class PrintHandler(ReportingHandler): """Print the event as a string.""" def publish_event(self, event): print(event.as_string()) class WebHookHandler(ReportingHandler): def __init__(self, endpoint, consumer_key=None, token_key=None, token_secret=None, consumer_secret=None, timeout=None, retries=None, level="DEBUG"): super(WebHookHandler, self).__init__() self.oauth_helper = url_helper.OauthUrlHelper( consumer_key=consumer_key, token_key=token_key, token_secret=token_secret, consumer_secret=consumer_secret) self.endpoint = endpoint self.timeout = timeout self.retries = retries try: self.level = getattr(logging, level.upper()) except: LOG.warn("invalid level '%s', using WARN", level) self.level = logging.WARN self.headers = {'Content-Type': 'application/json'} def publish_event(self, event): try: return self.oauth_helper.geturl( url=self.endpoint, data=event.as_dict(), headers=self.headers, retries=self.retries) except Exception as e: LOG.warn("failed posting event: %s [%s]" % (event.as_string(), e)) available_handlers = DictRegistry() available_handlers.register_item('log', LogHandler) available_handlers.register_item('print', PrintHandler) available_handlers.register_item('webhook', WebHookHandler) curtin-0.1.0~bzr399/curtin/reporter/legacy/0000755000000000000000000000000012741214565016744 5ustar 00000000000000curtin-0.1.0~bzr399/curtin/reporter/registry.py0000644000000000000000000000201212741214565017715 0ustar 00000000000000# Copyright 2015 Canonical Ltd. # This file is part of cloud-init. See LICENCE file for license information. # # vi: ts=4 expandtab import copy class DictRegistry(object): """A simple registry for a mapping of objects.""" def __init__(self): self.reset() def reset(self): self._items = {} def register_item(self, key, item): """Add item to the registry.""" if key in self._items: raise ValueError( 'Item already registered with key {0}'.format(key)) self._items[key] = item def unregister_item(self, key, force=True): """Remove item from the registry.""" if key in self._items: del self._items[key] elif not force: raise KeyError("%s: key not present to unregister" % key) @property def registered_items(self): """All the items that have been registered. This cannot be used to modify the contents of the registry. 
""" return copy.copy(self._items) curtin-0.1.0~bzr399/curtin/reporter/legacy/__init__.py0000644000000000000000000000253712741214565021064 0ustar 00000000000000from curtin.util import ( try_import_module, ) from abc import ( ABCMeta, abstractmethod, ) from curtin.log import LOG class BaseReporter: """Skeleton for a report.""" __metaclass__ = ABCMeta @abstractmethod def report_success(self): """Report installation success.""" @abstractmethod def report_failure(self, failure): """Report installation failure.""" class EmptyReporter(BaseReporter): def report_success(self): """Empty.""" def report_failure(self, failure): """Empty.""" class LoadReporterException(Exception): """Raise exception if desired reporter not loaded.""" pass def load_reporter(config): """Loads and returns reporter instance stored in config file.""" reporter = config.get('reporter') if reporter is None: LOG.info("'reporter' not found in config file.") return EmptyReporter() name, options = reporter.popitem() module = try_import_module('curtin.reporter.legacy.%s' % name) if module is None: LOG.error( "Module for %s reporter could not load." % name) return EmptyReporter() try: return module.load_factory(options) except LoadReporterException: LOG.error( "Failed loading %s reporter with %s" % (name, options)) return EmptyReporter() curtin-0.1.0~bzr399/curtin/reporter/legacy/maas.py0000644000000000000000000000776112741214565020252 0ustar 00000000000000from curtin import url_helper from . import (BaseReporter, LoadReporterException) import mimetypes import os.path import random import string import sys class MAASReporter(BaseReporter): def __init__(self, config): """Load config dictionary and initialize object.""" self.url = config['url'] self.urlhelper = url_helper.OauthUrlHelper( consumer_key=config.get('consumer_key'), token_key=config.get('token_key'), token_secret=config.get('token_secret'), consumer_secret='', skew_data_file="/run/oauth_skew.json") self.files = [] self.retries = config.get('retries', [1, 1, 2, 4, 8, 16, 32]) def report_success(self): """Report installation success.""" status = "OK" message = "Installation succeeded." self.report(status, message, files=self.files) def report_failure(self, message): """Report installation failure.""" status = "FAILED" self.report(status, message, files=self.files) def encode_multipart_data(self, data, files): """Create a MIME multipart payload from L{data} and L{files}. @param data: A mapping of names (ASCII strings) to data (byte string). @param files: A mapping of names (ASCII strings) to file objects ready to be read. @return: A 2-tuple of C{(body, headers)}, where C{body} is a a byte string and C{headers} is a dict of headers to add to the enclosing request in which this payload will travel. 
""" boundary = self._random_string(30) lines = [] for name in data: lines.extend(self._encode_field(name, data[name], boundary)) for name in files: lines.extend(self._encode_file(name, files[name], boundary)) lines.extend(('--%s--' % boundary, '')) body = '\r\n'.join(lines) headers = { 'content-type': 'multipart/form-data; boundary=' + boundary, 'content-length': "%d" % len(body), } return body, headers def report(self, status, message=None, files=None): """Send the report.""" params = {} params['status'] = status if message is not None: params['error'] = message if files is None: files = [] install_files = {} for fpath in files: install_files[os.path.basename(fpath)] = open(fpath, "r") data, headers = self.encode_multipart_data(params, install_files) msg = "" if not isinstance(data, bytes): data = data.encode() try: payload = self.urlhelper.geturl( self.url, data=data, headers=headers, retries=self.retries) if payload != b'OK': raise TypeError("Unexpected result from call: %s" % payload) else: msg = "Success" except url_helper.UrlError as exc: msg = str(exc) except Exception as exc: raise exc sys.stderr.write("%s\n" % msg) def _encode_field(self, field_name, data, boundary): return ( '--' + boundary, 'Content-Disposition: form-data; name="%s"' % field_name, '', str(data), ) def _encode_file(self, name, fileObj, boundary): return ( '--' + boundary, 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, name), 'Content-Type: %s' % self._get_content_type(name), '', fileObj.read(), ) def _random_string(self, length): return ''.join(random.choice(string.ascii_letters) for ii in range(length + 1)) def _get_content_type(self, filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream' def load_factory(options): try: return MAASReporter(options) except Exception: raise LoadReporterException curtin-0.1.0~bzr399/debian/changelog.trunk0000644000000000000000000000022412741214565016566 0ustar 00000000000000curtin (0.1.0~bzrREVNO-0ubuntu1) UNRELEASED; urgency=low * Initial release -- Scott Moser Mon, 29 Jul 2013 16:12:09 -0400 curtin-0.1.0~bzr399/debian/compat0000644000000000000000000000000212741214565014752 0ustar 000000000000007 curtin-0.1.0~bzr399/debian/control0000644000000000000000000000431712741214565015164 0ustar 00000000000000Source: curtin Section: admin Priority: extra Standards-Version: 3.9.6 Maintainer: Ubuntu Developers Build-Depends: debhelper (>= 7), dh-python, pep8, pyflakes, python-all, python-coverage, python-mock, python-nose, python-oauthlib, python-setuptools, python-yaml, python3, python3-coverage, python3-mock, python3-nose, python3-oauthlib, python3-pyflakes | pyflakes (<< 1.1.0-2), python3-setuptools, python3-yaml Homepage: http://launchpad.net/curtin X-Python3-Version: >= 3.2 Package: curtin Architecture: all Priority: extra Depends: bcache-tools, btrfs-tools, dosfstools, file, gdisk, lvm2, mdadm, parted, python3-curtin (= ${binary:Version}), udev, xfsprogs, ${misc:Depends} Description: Library and tools for the curtin installer This package provides the curtin installer. . Curtin is an installer that is blunt, brief, snappish, snippety and unceremonious. Package: curtin-common Architecture: all Priority: extra Depends: ${misc:Depends} Conflicts: curtin (<= 0.1.0~bzr54-0ubuntu1) Description: Library and tools for curtin installer This package contains utilities for the curtin installer. 
Package: python-curtin Section: python Architecture: all Priority: extra Depends: curtin-common (= ${binary:Version}), python-oauthlib, python-yaml, wget, ${misc:Depends}, ${python:Depends} Description: Library and tools for curtin installer This package provides python library for use by curtin. Package: python3-curtin Section: python Architecture: all Priority: extra Conflicts: curtin (<= 0.1.0~bzr54-0ubuntu1) Depends: curtin-common (= ${binary:Version}), python3-oauthlib, python3-yaml, wget, ${misc:Depends}, ${python3:Depends} Description: Library and tools for curtin installer This package provides python3 library for use by curtin. curtin-0.1.0~bzr399/debian/copyright0000644000000000000000000000114012741214565015503 0ustar 00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: curtin Upstream-Contact: Scott Moser Source: https://launchpad.net/curtin Files: * Copyright: 2013, Canonical Ltd. License: AGPL-3 GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 . Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. . The complete text of the AGPL version 3 can be seen in http://www.gnu.org/licenses/agpl-3.0.html curtin-0.1.0~bzr399/debian/curtin-common.install0000644000000000000000000000003112741214565017730 0ustar 00000000000000usr/lib/curtin/helpers/* curtin-0.1.0~bzr399/debian/curtin.install0000644000000000000000000000001212741214565016441 0ustar 00000000000000usr/bin/* curtin-0.1.0~bzr399/debian/python-curtin.install0000644000000000000000000000004512741214565017766 0ustar 00000000000000usr/lib/python2*/*-packages/curtin/* curtin-0.1.0~bzr399/debian/python3-curtin.install0000644000000000000000000000004512741214565020051 0ustar 00000000000000usr/lib/python3*/*-packages/curtin/* curtin-0.1.0~bzr399/debian/rules0000755000000000000000000000064412741214565014640 0ustar 00000000000000#!/usr/bin/make -f PYVERS := $(shell pyversions -r) PY3VERS := $(shell py3versions -r) %: dh $@ --with=python2,python3 override_dh_auto_install: dh_auto_install set -ex; for python in $(PY3VERS) $(PYVERS); do \ $$python setup.py build --executable=/usr/bin/python && \ $$python setup.py install --root=$(CURDIR)/debian/tmp --install-layout=deb; \ done chmod 755 $(CURDIR)/debian/tmp/usr/lib/curtin/helpers/* curtin-0.1.0~bzr399/debian/source/0000755000000000000000000000000012741214565015054 5ustar 00000000000000curtin-0.1.0~bzr399/debian/source/format0000644000000000000000000000001412741214565016262 0ustar 000000000000003.0 (quilt) curtin-0.1.0~bzr399/doc/Makefile0000644000000000000000000001267412741214565014551 0ustar 00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/curtin.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/curtin.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/curtin" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/curtin" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." 
man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." curtin-0.1.0~bzr399/doc/conf.py0000644000000000000000000001703212741214565014401 0ustar 00000000000000# -*- coding: utf-8 -*- # # curtin documentation build configuration file, created by # sphinx-quickstart on Thu May 30 16:03:34 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'curtin' copyright = u'2013, Scott Moser' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.3' # The full version, including alpha/beta/rc tags. release = '0.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'curtindoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'curtin.tex', u'curtin Documentation', u'Scott Moser', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'curtin', u'curtin Documentation', [u'Scott Moser'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'curtin', u'curtin Documentation', u'Scott Moser', 'curtin', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' curtin-0.1.0~bzr399/doc/devel/0000755000000000000000000000000012741214565014176 5ustar 00000000000000curtin-0.1.0~bzr399/doc/index.rst0000644000000000000000000000116212741214565014740 0ustar 00000000000000.. curtin documentation master file, created by sphinx-quickstart on Thu May 30 16:03:34 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to curtin's documentation! ================================== This is 'curtin', the curt installer. It is blunt, brief, snappish, snippety and unceremonious. Its goal is to install an operating system as quick as possible. Contents: .. toctree:: :maxdepth: 2 topics/overview topics/reporting Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` curtin-0.1.0~bzr399/doc/topics/0000755000000000000000000000000012741214565014400 5ustar 00000000000000curtin-0.1.0~bzr399/doc/devel/README-maas-image.txt0000644000000000000000000000274212741214565017700 0ustar 00000000000000A maas image is also convenient for curtin development. Using a maas image requires you to be able to be root to convert the root-image.gz file into a -root.tar.gz file that is installable by curtin. But other than that, the maas image is very convenient. follow doc/devel/README.txt, but instead of the 'get cloud image' section do the following. 
## this gets the root image url, which you could also find by traversing
## http://maas.ubuntu.com/images/ephemeral-v2/daily/xenial/amd64/
rel=xenial
streams_url="http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.json"
root_img_url=$(sstream-query \
   --output-format="%(item_url)s" --max=1 \
   ${streams_url} release=$rel ftype=root-image.gz arch=amd64 krel=$rel)
root_img_gz="${root_img_url##*/}"
root_img=${root_img_gz%.gz}

## download the root_img_url to a local .gz file
[ -f "$root_img" ] ||
   { wget "${root_img_url}" -O "${root_img_gz}.tmp" &&
     mv "${root_img_gz}.tmp" "$root_img_gz" ; }

## maas2roottar to
##  a.) create the root_img file [root-image]
##  b.) extract kernel and initramfs [root-image-kernel, root-image-initramfs]
##  c.) convert the maas image to an installable tarball [root-image.tar.gz]
##      and pull the kernel and initramfs out
[ -f "$root_img" ] || ./tools/maas2roottar -vv --krd "${root_img_gz}"

## launch it using --kernel, --initrd
./tools/launch \
   ./root-image --kernel=./root-image-kernel --initrd=./root-image-initrd \
   -v --publish ./root-image.tar.gz -- \
   curtin -vv install "PUBURL/root-image.tar.gz"
curtin-0.1.0~bzr399/doc/devel/README-vmtest.txt0000644000000000000000000001536312741214565017224 0ustar 00000000000000== Background ==

Curtin includes a mechanism called 'vmtest' that allows it to actually
do installs and validate a number of configurations.

The general flow of the vmtests is:
 1. each test has an associated yaml config file for curtin in examples/tests
 2. uses curtin-pack to create the user-data for cloud-init to trigger install
 3. create and install a system using 'tools/launch'.
    3.1 The install environment is booted from a maas ephemeral image.
    3.2 kernel & initrd used are from maas images (not part of the image)
    3.3 network by default is handled via user networking
    3.4 It creates all empty disks required
    3.5 cloud-init datasource is provided by launch
        a) like: ds=nocloud-net;seedfrom=http://10.7.0.41:41518/
           provided by the python webserver start_http
        b) via -drive file=/tmp/launch.8VOiOn/seed.img,if=virtio,media=cdrom
           as a seed disk (if booted without external kernel)
    3.6 dependencies and other preparations are installed at the beginning by
        curtin inside the ephemeral image prior to configuring the target
 4. power off the system.
 5. configure a 'NoCloud' datasource seed image that provides scripts that
    will run on first boot.
    5.1 this will contain all our code to gather health data on the install
    5.2 by cloud-init design this runs only once per instance; if you start
        the system again this won't be called again
 6. boot the installed system with 'tools/xkvm'.
    6.1 reuses the disks that were installed/configured in the former steps
    6.2 also adds an output disk
    6.3 additionally the seed image for the data gathering is added
    6.4 On this boot it will run the provided scripts, write their output to
        a "data" disk and then shut itself down.
 7. extract the data from the output disk
 8. vmtest python code now verifies if the output is as expected.

== Debugging ==
At 3.1
 - one can pull data out of the maas image with
     sudo mount-image-callback your.img -- sh -c 'COMMAND'
   e.g. sudo mount-image-callback your.img -- sh -c 'cp $MOUNTPOINT/boot/* .'

At step 3.6 -> 4.
 - tools/launch can be called in a way to give you console access.
   To do so, call tools/launch as usual but drop the -serial=x parameter.
 - One might want to change "'power_state': {'mode': 'poweroff'}" to avoid
   the auto reboot before getting control.
 - Replace the directory usually seen in the launch calls with a clean,
   fresh directory.
 - In /curtin, curtin and its config can be found.
 - If the system gets that far, cloud-init will create a user
   ubuntu/passw0rd.
 - Otherwise one can use a cloud-image from https://cloud-images.ubuntu.com/
   and add a backdoor user via:
     bzr branch lp:~maas-maintainers/maas/backdoor-image backdoor-image
     sudo ./backdoor-image -v --user=<user> --password-auth --password=<password> IMG

At step 6 -> 7
 - You might want to keep all the temporary images around.  To do so you
   can set CURTIN_VMTEST_KEEP_DATA_PASS=all:
     export CURTIN_VMTEST_KEEP_DATA_PASS=all CURTIN_VMTEST_KEEP_DATA_FAIL=all
   That will keep the /tmp/tmpXXXXX directories and all files in there for
   further execution.

At step 7
 - You might want to take a look at the output disk yourself.  It is a
   normal qcow image, so one can use mount-image-callback as described
   above.
 - To invoke xkvm on your own, take the command you see in the output,
   remove the "-serial ..." and add -nographic instead.
   For a graphical console one can add --vnc 127.0.0.1:1

== Setup ==
In order to run vmtest you'll need some dependencies.  To get them, you
can run:

  make vmtest-deps

That will install all necessary dependencies.

== Running ==
Running tests is done most simply by:

  make vmtest

If you wish to run all the tests in test_network.py, do so with:

  sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py

Or run a single test with:

  sudo PATH=$PWD/tools:$PATH nosetests3 tests/vmtests/test_network.py:WilyTestBasic

Note:
 * currently, the tests have to run as root.  The reason for this is that
   the kernel and initramfs to boot are extracted from the maas ephemeral
   image.  This should be fixed at some point, and then 'make vmtest' will
   no longer need root.
   The tests themselves don't actually have to run as root, but the
   test setup does.
 * the 'tools' directory must be in your path.
 * test will set apt_proxy in the guests to the value of the 'apt_proxy'
   environment variable.  If that is not set it will look at the host's
   apt config and read 'Acquire::HTTP::Proxy'

== Environment Variables ==
Some environment variables affect the running of vmtest

 * apt_proxy:
   test will set apt_proxy in the guests to the value of 'apt_proxy'.
   If that is not set it will look at the host's apt config and read
   'Acquire::HTTP::Proxy'

 * CURTIN_VMTEST_KEEP_DATA_PASS CURTIN_VMTEST_KEEP_DATA_FAIL:
   default:
     CURTIN_VMTEST_KEEP_DATA_PASS=none
     CURTIN_VMTEST_KEEP_DATA_FAIL=all
   These 2 variables determine what portions of the temporary
   test data are kept.

   The variables contain a comma ',' delimited list of directories
   that should be kept in the case of pass or fail.  Additionally,
   the values 'all' and 'none' are accepted.

   Each vmtest that runs has its own sub-directory under the top level
   CURTIN_VMTEST_TOPDIR.  In that directory are directories:
     boot: inputs to the system boot (after install)
     install: install phase related files
     disks: the disks used for installation and boot
     logs: install and boot logs
     collect: data collected by the boot phase

 * CURTIN_VMTEST_TOPDIR: default $TMPDIR/vmtest-<timestamp>
   vmtest puts all test data under this value.  By default, it creates
   a directory in TMPDIR (/tmp) named "vmtest-<timestamp>".

   If you set this value, you must ensure that the directory is either
   non-existent or clean.

 * CURTIN_VMTEST_LOG: default $TMPDIR/vmtest-<timestamp>.log
   vmtest writes extended log information to this file.
   The default puts the log alongside the TOPDIR.
 * CURTIN_VMTEST_IMAGE_SYNC: default false (boolean)
   if set to true, each run will attempt a sync of images.
   If you want to make sure images are always up to date, then set to true.

 * CURTIN_VMTEST_BRIDGE: default 'user'
   the network devices will be attached to this bridge.  The default is
   'user', which means to use qemu user mode networking.  Set it to
   'virbr0' or 'lxcbr0' to use those bridges and then be able to ssh
   in directly.

 * IMAGE_DIR: default /srv/images
   vmtest keeps a mirror of maas ephemeral images in this directory.

 * IMAGES_TO_KEEP: default 1
   keep this number of images of each release in the IMAGE_DIR.

Environment 'boolean' values:
 For boolean environment variables the value is considered True
 if it is any value other than case insensitive 'false', '' or "0".
curtin-0.1.0~bzr399/doc/devel/README.txt0000644000000000000000000000417412741214565015702 0ustar 00000000000000## curtin development ##

This document describes how to use kvm and ubuntu cloud images
to develop curtin or test install configurations inside kvm.

## get some dependencies ##
sudo apt-get -qy install kvm libvirt-bin cloud-utils bzr

## get cloud image to boot (-disk1.img) and one to install (-root.tar.gz)
mkdir -p ~/download
DLDIR=$( cd ~/download && pwd )
rel="trusty"
arch=amd64
burl="http://cloud-images.ubuntu.com/$rel/current/"
for f in $rel-server-cloudimg-${arch}-root.tar.gz $rel-server-cloudimg-${arch}-disk1.img; do
  wget "$burl/$f" -O $DLDIR/$f
done
( cd $DLDIR && qemu-img convert -O qcow $rel-server-cloudimg-${arch}-disk1.img $rel-server-cloudimg-${arch}-disk1.qcow2 )

BOOTIMG="$DLDIR/$rel-server-cloudimg-${arch}-disk1.qcow2"
ROOTTGZ="$DLDIR/$rel-server-cloudimg-${arch}-root.tar.gz"

## get curtin
mkdir -p ~/src
bzr init-repo ~/src/curtin
( cd ~/src/curtin && bzr branch lp:curtin trunk.dist )
( cd ~/src/curtin && bzr branch trunk.dist trunk )

## work with curtin
cd ~/src/curtin/trunk
# use 'launch' to launch a kvm instance with user data to pack
# up local curtin and run it inside the instance.
./tools/launch $BOOTIMG --publish $ROOTTGZ -- curtin install "PUBURL/${ROOTTGZ##*/}"

## notes about 'launch' ##
 * launch has --help so you can see that for some info.
 * '--publish' adds a web server at ${HTTP_PORT:-9923}
   and puts the files you want available there.  You can reference
   this url in config or cmdline with 'PUBURL'.  For example
   '--publish foo.img' will put 'foo.img' at PUBURL/foo.img.
 * launch sets the 'ubuntu' user password to 'passw0rd'
 * launch runs 'kvm -curses'
   kvm -curses keyboard info: 'alt-2' to go to the qemu console
 * launch puts the serial console in 'serial.log' (look there for stuff)
 * when logged in
   * you can look at /var/log/cloud-init-output.log
   * the archive should be extracted in /curtin
   * the shell archive should be in /var/lib/cloud/instance/scripts/part-002
 * when logged in, and archive available at

## other notes ##
 * need to add '--install-deps' or something for curtin
   cloud-image in 12.04 has no 'python3'
   ideally 'curtin --install-deps install' would get the things it needs
curtin-0.1.0~bzr399/doc/topics/overview.rst0000644000000000000000000001173312741214565017005 0ustar 00000000000000========
Overview
========

Curtin is intended to be a bare bones "installer".  Its goal is to take data
from a source, and get it onto disk as quickly as possible and then boot it.
The key difference from traditional package-based installers is that curtin
assumes the thing it's installing is intelligent and will do the right thing.
Stages
------
A curtin run goes through the following stages:

- Install Environment boot
- Early Commands
- Partitioning
- Network Discovery and Setup
- Extraction of sources
- Hook for installed OS to customize itself
- Final Commands

Install Environment boot
~~~~~~~~~~~~~~~~~~~~~~~~
At the moment, curtin doesn't address how the system that it is running on
is booted.  It could be booted from a live-cd or from a pxe boot
environment.  It could even be booted off a disk in the system (although
installation to that disk would probably break things).

Curtin's assumption is that a fairly rich linux (Ubuntu) environment is
booted.

Early Commands
~~~~~~~~~~~~~~
Early commands are executed on the system, and a non-zero exit status will
terminate the installation process.  These commands are intended to be used
for things like

- module loading
- hardware setup
- environment setup for subsequent stages of curtin.

**Config Example**::

 early_commands:
  05_load_loop: [modprobe, loop]
  99_update: apt-get update && apt-get dist-upgrade

Partitioning
~~~~~~~~~~~~
Partitioning covers setting up filesystems on the system.  A series of
commands are run serially, in order.  At the end, an fstab-formatted file
must be populated in ``OUTPUT_FSTAB`` that contains mount information, and
the filesystems are expected to be mounted at the ``TARGET_MOUNT_POINT``.

Any commands can be used to create these filesystems, but curtin contains
some tools to facilitate this process.

**Config Example**::

 partitioning_commands:
  10_wipe_filesystems: curtin wipe --quick --all-unused-disks
  50_setup_raid: curtin disk-setup --all-disks raid0 /

**Command environment**

Partitioning commands have the following environment variables available
to them:

- ``WORKING_DIR``: This is simply for some sort of inter-command state.  It
  will be the same directory for each command run and will only be deleted
  at the end of all partitioning_commands.
- ``OUTPUT_FSTAB``: This is the target path for a fstab file.  After all
  partitioning commands have been run, a file should exist, formatted per
  fstab(5), that describes how the filesystems should be mounted.
- ``TARGET_MOUNT_POINT``: The path at which the target filesystems are
  expected to be mounted once the partitioning commands have finished.

Network Discovery and Setup
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Networking is done in a similar fashion to partitioning.  A series of
commands, specified in the config, are run.  At the end of these commands,
an interfaces(5)-style file is expected to be written to
``OUTPUT_INTERFACES``.

Note that, as with fstab, this file is not copied verbatim to the target
filesystem, but rather made available to the OS customization stage.  That
stage may just copy the file verbatim, but may also parse it and use it
as input.

**Config Example**::

 network_commands:
  10_netconf: curtin network copy-existing

**Command environment**

Networking commands have the following environment variables available
to them:

- ``WORKING_DIR``: This is simply for some sort of inter-command state.  It
  will be the same directory for each command run and will only be deleted
  at the end of all network_commands.
- ``OUTPUT_INTERFACES``: This is the target path for an interfaces-style
  file.  After all commands have been run, a file should exist, formatted
  per interfaces(5), that describes the system's network setup.

Extraction of sources
~~~~~~~~~~~~~~~~~~~~~
Sources are the things to install.  Curtin prefers to install root
filesystem tar files.
**Config Example**::

 sources:
  05_primary: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-root.tar.gz

Given the source above, curtin will essentially do a::

  wget $URL -O - | tar -Sxvzf -

Hook for installed OS to customize itself
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After extraction of sources, the source that was extracted is then given a
chance to customize itself for the system.  This customization may include:

- ensuring that appropriate device drivers are loaded on first boot
- consuming the network interfaces file and applying its declarations
- ensuring that necessary packages are installed

**Config Example**::

 config_hook: {{TARGET_MP}}/opt/curtin/config-hook

**Command environment**

- ``INTERFACES``: This is a path to the file created during the networking
  stage
- ``FSTAB``: This is a path to the file created during the partitioning
  stage
- ``CONFIG``: This is a path to the curtin config file.  It is provided so
  that additional configuration could be provided through to the OS
  customization.

**Helpers**

Curtin provides some helpers to make the OS customization easier.

- `curtin in-target`: run the command while chrooted into the target.

Final Commands
~~~~~~~~~~~~~~

**Config Example**::

 final_commands:
  05_callhome_finished: wget http://example.com/i-am-done
curtin-0.1.0~bzr399/doc/topics/reporting.rst0000644000000000000000000001145712741214565017153 0ustar 00000000000000=========
Reporting
=========

Curtin is capable of reporting its progress via the reporting framework.
This enables the user to obtain status information from curtin.

Events
------
Reporting consists of notification of a series of 'events'.  Each event has:

- **event_type**: 'start' or 'finish'
- **description**: human readable text
- **name**: an id for this event
- **result**: only present when event_type is 'finish'; its value is one of
  "SUCCESS", "WARN", or "FAIL".  A result of WARN indicates something is
  likely wrong, but a non-fatal error.  A result of "FAIL" is fatal.
- **origin**: literal value 'curtin'
- **timestamp**: the unix timestamp at which this event occurred

Names are unique and hierarchical.  For example, a series of names might
look like:

- cmd-install (start)
- cmd-install/stage-early (start)
- cmd-install/stage-early (finish)
- cmd-install (finish)

You are guaranteed to always get a finish for each sub-item before finish
of the parent item, and guaranteed to get finish for all events.  A FAIL
result of a sub-item will bubble up to its parent item.

Configuration
-------------
Reporting configuration is done through the ``reporting`` item in config.
An example config::

  reporting:
    keyname1:
      type: webhook
      endpoint: "http://127.0.1.1:8000/"
    keyname2:
      type: print

  install:
    log_file: /tmp/install.log
    post_files: [/tmp/install.log, /var/log/syslog]

Each entry in the ``reporting`` dictionary must be a dictionary.  The key
is only used for reference and to aid in config merging.

Each entry must have a 'type'.  The currently supported values are:

- **log**: logs via python logger
- **print**: prints messages to stdout (for debugging)
- **webhook**: posts JSON-formatted data to a remote url.  Supports Oauth.

Additionally, the webhook reporter will post files on finish of curtin.
The user can declare which files should be posted in the ``install`` item
via ``post_files`` as shown above.  If post_files is not present, it will
default to the value of log_file.

Webhook Reporter
----------------
The webhook reporter posts the event in JSON format to an endpoint.
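
For a sense of what the receiving side sees, here is a minimal sketch of an
endpoint using only the Python standard library (illustrative only, not part
of curtin; port 8000 and the handler name are arbitrary -- for real
debugging use ``tools/report-webhook-logger``, described below)::

    import json
    from http.server import BaseHTTPRequestHandler, HTTPServer

    class ReportHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            # curtin posts each event as a JSON body
            length = int(self.headers.get('Content-Length', 0))
            event = json.loads(self.rfile.read(length).decode('utf-8'))
            print("%s: %s" % (event.get('event_type'), event.get('name')))
            self.send_response(200)
            self.end_headers()

    HTTPServer(('127.0.0.1', 8000), ReportHandler).serve_forever()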
To enable, provide curtin with config like::

  reporting:
    mylistener:
      type: webhook
      endpoint: http://example.com/endpoint/path
      consumer_key: "ck_foo"
      consumer_secret: "cs_foo"
      token_key: "tk_foo"
      token_secret: "tk_secret"
      level: INFO

The ``endpoint`` key is required.  Oauth information (consumer_key,
consumer_secret, token_key, token_secret) is not required, but if provided
then oauth will be used to authenticate to the endpoint on each post.  If
level is specified, then all messages with a lower priority than specified
will be ignored.  The default is INFO.

Example Events
~~~~~~~~~~~~~~
The following is an example event that would be posted::

  {
    "origin": "curtin",
    "timestamp": 1440688425.6038516,
    "event_type": "start",
    "name": "cmd-install",
    "description": "curtin command install",
    "level": "INFO"
  }

The post files will look like this::

  {
    "origin": "curtin",
    "files": [
      {
        "content": "fCBzZmRpc2s....gLS1uby1yZX",
        "path": "/var/log/curtin/install.log",
        "encoding": "base64"
      },
      {
        "content": "fCBzZmRpc2s....gLS1uby1yZX",
        "path": "/var/log/syslog",
        "encoding": "base64"
      }
    ],
    "description": "curtin command install",
    "timestamp": 1440688425.6038516,
    "name": "cmd-install",
    "result": "SUCCESS",
    "event_type": "finish"
  }

Example Http Request
~~~~~~~~~~~~~~~~~~~~
The following is an example http request from curtin::

  Accept-Encoding: identity
  Host: localhost:8000
  Content-Type: application/json
  Connection: close
  User-Agent: Curtin/0.1
  Content-Length: 156

  {
    "origin": "curtin",
    "timestamp": 1440688425.6038516,
    "event_type": "start",
    "name": "cmd-install/stage-early",
    "description": "preparing for installation"
  }

Development / Debug Reporting
-----------------------------
For debugging and development a simple web server is provided in
`tools/report-webhook-logger`.

Run the web service like::

  ./tools/report-webhook-logger 8000

And then run your install with appropriate config, like::

  sudo ./bin/curtin -vvv install \
     --set install/logfile=/tmp/foo \
     --set reporting/mypost/type=webhook \
     --set reporting/mypost/endpoint=http://localhost:8000/ \
     file://$root_tgz

Legacy Reporter
---------------
The legacy 'reporter' config entry is still supported.  This was utilized
by MAAS for start/end and posting of the install log at the end of
installation.

Its configuration looks like this:

**Legacy Reporter Config Example**::

  reporter:
    url: http://example.com/your/path/to/post
    consumer_key: "ck_foo"
    consumer_secret: "cs_foo"
    token_key: "tk_foo"
    token_secret: "tk_secret"
curtin-0.1.0~bzr399/examples/basic.yaml0000644000000000000000000000070612741214565016120 0ustar 00000000000000early_commands:
  98_update: apt-get update
  99_upgrade: DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade --assume-yes

partitioning_commands:
  10_partition: curtin block-meta --device=/dev/vdc simple

network_commands:
  10_network: curtin net-meta --device=eth0 dhcp

sources:
  05_primary:
    uri: "http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-root.tar.gz"
    type: "tgz"

# vi: ts=4 expandtab syntax=yaml
curtin-0.1.0~bzr399/examples/finalize.windows0000644000000000000000000000244112741214565017366 0ustar 00000000000000#!/usr/bin/env python

import os
import sys

from curtin import config
from curtin import util


def curthooks():
    state = util.load_command_environment()
    target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target.  "
" "Use --target or set TARGET_MOUNT_POINT\n") sys.exit(2) cfg = config.load_command_config({}, state) cloudbase_init = cfg.get('cloudbase_init', None) if not cloudbase_init: return False cloudbase_init_cfg = os.path.join( target, "Program Files (x86)", "Cloudbase Solutions", "Cloudbase-Init", "conf", "cloudbase-init.conf") cloudbase_init_unattended_cfg = os.path.join( target, "Program Files (x86)", "Cloudbase Solutions", "Cloudbase-Init", "conf", "cloudbase-init-unattend.conf") if os.path.isfile(cloudbase_init_cfg) is False: sys.stderr.write("Unable to find cloudbase-init.cfg.\n") sys.exit(2) fp = open(cloudbase_init_cfg, 'a') fp_u = open(cloudbase_init_unattended_cfg, 'a') for i in cloudbase_init['config'].splitlines(): fp.write("%s\r\n" % i) fp_u.write("%s\r\n" % i) fp.close() fp_u.close() curthooks() curtin-0.1.0~bzr399/examples/network-all.yaml0000644000000000000000000000630312741214565017275 0ustar 00000000000000network_commands: builtin: null 10_network: curtin net-meta custom # YAML example of a network config. network: version: 1 config: # Physical interfaces. - type: physical name: eth0 mac_address: "c0:d6:9f:2c:e8:80" - type: physical name: eth1 mac_address: "aa:d6:9f:2c:e8:80" - type: physical name: eth2 mac_address: "c0:bb:9f:2c:e8:80" - type: physical name: eth3 mac_address: "66:bb:9f:2c:e8:80" - type: physical name: eth4 mac_address: "98:bb:9f:2c:e8:80" # specify how ifupdown should treat iface # control is one of ['auto', 'hotplug', 'manual'] # with manual meaning ifup/ifdown should not affect the iface # useful for things like iscsi root + dhcp - type: physical name: eth5 mac_address: "98:bb:9f:2c:e8:8a" subnets: - type: dhcp control: manual # VLAN interface. - type: vlan name: eth0.101 vlan_link: eth0 vlan_id: 101 mtu: 1500 subnets: - type: static address: 192.168.0.2/24 gateway: 192.168.0.1 dns_nameservers: - 192.168.0.10 - 10.23.23.134 dns_search: - barley.maas - sacchromyces.maas - brettanomyces.maas - type: static address: 192.168.2.10/24 # Bond. - type: bond name: bond0 # if 'mac_address' is omitted, the MAC is taken from # the first slave. mac_address: "aa:bb:cc:dd:ee:ff" bond_interfaces: - eth1 - eth2 params: bond-mode: active-backup subnets: - type: dhcp6 # A Bond VLAN. - type: vlan name: bond0.200 vlan_link: bond0 vlan_id: 200 subnets: - type: dhcp4 # A bridge. - type: bridge name: br0 bridge_interfaces: - eth3 - eth4 ipv4_conf: rp_filter: 1 proxy_arp: 0 forwarding: 1 ipv6_conf: autoconf: 1 disable_ipv6: 1 use_tempaddr: 1 forwarding: 1 # basically anything in /proc/sys/net/ipv6/conf/.../ params: bridge_stp: 'off' bridge_fd: 0 bridge_maxwait: 0 subnets: - type: static address: 192.168.14.2/24 - type: static address: 2001:1::1/64 # default to /64 # A global nameserver. - type: nameserver address: 8.8.8.8 search: barley.maas # global nameservers and search in list form - type: nameserver address: - 4.4.4.4 - 8.8.4.4 search: - wark.maas - foobar.maas # A global route. - type: route destination: 10.0.0.0/8 gateway: 11.0.0.1 metric: 3 curtin-0.1.0~bzr399/examples/network-bond.yaml0000644000000000000000000000240012741214565017441 0ustar 00000000000000network_commands: builtin: null 10_network: curtin net-meta custom # YAML example of a network config. network: version: 1 config: # Physical interfaces. 
- type: physical name: eth0 mac_address: "c0:d6:9f:2c:e8:80" - type: physical name: eth1 mac_address: "aa:d6:9f:2c:e8:80" - type: physical name: eth2 mac_address: "c0:bb:9f:2c:e8:80" - type: physical name: eth3 mac_address: "66:bb:9f:2c:e8:80" - type: physical name: eth4 mac_address: "98:bb:9f:2c:e8:80" # Bond. - type: bond name: bond0 # if 'mac_address' is omitted, the MAC is taken from # the first slave. mac_address: "aa:bb:cc:dd:ee:ff" bond_interfaces: - eth1 - eth2 params: bond-mode: active-backup subnets: - type: dhcp6 # A Bond VLAN. - type: vlan name: bond0.200 vlan_link: bond0 vlan_id: 200 subnets: - type: static address: 192.168.0.2/24 gateway: 192.168.0.1 dns_nameservers: - 192.168.0.10 curtin-0.1.0~bzr399/examples/network-bridge.yaml0000644000000000000000000000140112741214565017753 0ustar 00000000000000network_commands: builtin: null 10_network: curtin net-meta custom # YAML example of a network config. network: version: 1 config: # Physical interfaces. - type: physical name: eth0 mac_address: "c0:d6:9f:2c:e8:80" - type: physical name: eth1 mac_address: "aa:d6:9f:2c:e8:80" # A bridge. - type: bridge name: br0 bridge_interfaces: - eth0 - eth1 params: bridge_stp: 'off' bridge_fd: 0 bridge_maxwait: 0 subnets: - type: static address: 192.168.14.2/24 - type: static address: 2001:1::1/64 # default to /64 curtin-0.1.0~bzr399/examples/network-iscsiroot.yaml0000644000000000000000000000211112741214565020534 0ustar 00000000000000network: version: 1 config: # iface used for iscsi root, set control to manual # to prevent networking restarts from up/down'ing the # interface - type: physical name: interface0 mac_address: "c0:d6:9f:2c:e8:80" subnets: - type: dhcp4 control: manual # control: auto this is the default if control isn't present - type: physical name: interface1 mtu: 1492 mac_address: "aa:d6:9f:2c:e8:80" subnets: - type: static address: 192.168.14.2/24 gateway: 192.168.14.1 - type: static address: 192.168.14.4/24 # this interface may be hotplugged # and we want to auto configure this # when present - type: physical name: interface2 mac_address: "cf:d6:af:48:e8:80" subnets: - type: static control: hotplug address: 10.11.12.13/22 gateway: 10.11.12.1 curtin-0.1.0~bzr399/examples/network-simple.yaml0000644000000000000000000000133012741214565020011 0ustar 00000000000000network_commands: builtin: null 10_network: - curtin - net-meta - custom # YAML example of a simple network config network: version: 1 config: # Physical interfaces. - type: physical name: eth0 mac_address: "c0:d6:9f:2c:e8:80" subnets: - type: dhcp4 - type: physical name: eth1 mtu: 1492 mac_address: "aa:d6:9f:2c:e8:80" subnets: - type: static address: 192.168.14.2/24 gateway: 192.168.14.1 - type: static address: 192.168.14.4/24 - type: physical name: eth2 mac_address: "cf:d6:af:48:e8:80" curtin-0.1.0~bzr399/examples/network-vlan.yaml0000644000000000000000000000111112741214565017455 0ustar 00000000000000network_commands: builtin: null 10_network: curtin net-meta custom # YAML example of a network config. network: version: 1 config: # Physical interfaces. - type: physical name: eth0 mac_address: "c0:d6:9f:2c:e8:80" # VLAN interface. 
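    # (eth0.101 below is a VLAN device with id 101 layered on the
    # physical eth0 defined above; the name follows the usual
    # <link>.<vlan_id> convention.)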
    - type: vlan
      name: eth0.101
      vlan_link: eth0
      vlan_id: 101
      mtu: 1500
      subnets:
        - type: static
          address: 192.168.0.2/24
          gateway: 192.168.0.1
          dns_nameservers:
            - 192.168.0.10
curtin-0.1.0~bzr399/examples/partitioning-demos/0000755000000000000000000000000012741214565017764 5ustar 00000000000000curtin-0.1.0~bzr399/examples/tests/0000755000000000000000000000000012741214565015312 5ustar 00000000000000curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo-bcache.yaml0000644000000000000000000000206112741214565027013 0ustar 00000000000000partitioning_commands:
  builtin: curtin block-meta custom

storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00002
    - id: sdb
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00003
    - id: sda1
      type: partition
      number: 1
      size: 7GB
      device: sda
      flag: boot
    - id: sda2
      type: partition
      number: 2
      size: 2GB
      device: sda
    - id: sdb1
      type: partition
      number: 1
      size: 1GB
      device: sdb
    - id: bcache0
      type: bcache
      backing_device: sda2
      cache_device: sdb1
    - id: sda1_root
      type: format
      fstype: ext4
      volume: sda1
    - id: bcache_home
      type: format
      fstype: ext4
      volume: bcache0
    - id: sda1_mount
      type: mount
      path: /
      device: sda1_root
    - id: home_mount
      type: mount
      path: /home
      device: bcache_home
curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo-dmcrypt.yaml0000644000000000000000000000162312741214565027273 0ustar 00000000000000partitioning_commands:
  builtin: curtin block-meta custom

storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00002
      grub_device: True
    - id: sda1
      type: partition
      number: 1
      size: 512MB
      device: sda
      flag: boot
    - id: sda2
      type: partition
      number: 2
      size: 9GB
      device: sda
    - id: sda2_crypto
      type: dm_crypt
      volume: sda2
      key: testkey
      dm_name: sda2_crypto
    - id: sda1_boot
      type: format
      fstype: ext4
      volume: sda1
    - id: sda2_root
      type: format
      fstype: ext4
      volume: sda2_crypto
    - id: sda2_mount
      type: mount
      path: /
      device: sda2_root
    - id: sda1_mount
      type: mount
      path: /boot
      device: sda1_boot
curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo-gpt.yaml0000644000000000000000000000137412741214565026406 0ustar 00000000000000storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: gpt
      model: QEMU HARDDISK
      serial: QM00002
    - id: bios_boot_partition
      type: partition
      size: 1MB
      device: sda
      flag: bios_grub
    - id: sda1
      type: partition
      size: 8GB
      device: sda
    - id: sda2
      type: partition
      size: 1GB
      device: sda
    - id: sda1_root
      type: format
      fstype: ext4
      volume: sda1
    - id: sda2_home
      type: format
      fstype: ext4
      volume: sda2
    - id: sda1_mount
      type: mount
      path: /
      device: sda1_root
    - id: sda2_mount
      type: mount
      path: /home
      device: sda2_home
curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo-lvm.yaml0000644000000000000000000000265412741214565026414 0ustar 00000000000000partitioning_commands:
  builtin: curtin block-meta custom

storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00002
    - id: sdb
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00003
    - id: sda1
      type: partition
      number: 1
      size: 8GB
      device: sda
      flag: boot
    - id: sda2
      type: partition
      number: 2
      size: 1GB
      device: sda
    - id: storage_volgroup
      type: lvm_volgroup
      name: volgroup1
      devices:
        - sda2
        - sdb
    - id: storage_1
      type: lvm_partition
      volgroup: storage_volgroup
      name: lv1
      size: 2G
    - id: storage_2
      type: lvm_partition
      name: lv2
      volgroup: storage_volgroup
    - id: sda1_root
      type: format
      fstype: ext4
      volume: sda1
    - id: storage_1_fs
      type: format
      fstype: ext4
      volume: storage_1
    - id: storage_2_fs
      type: format
      fstype: fat32
      volume: storage_2
    - id: sda1_mount
      type: mount
      path: /
      device: sda1_root
    - id: storage_1_mount
      type: mount
      path: /media/storage1
      device: storage_1_fs
    - id: storage_2_mount
      type: mount
      path: /media/storage2
      device: storage_2_fs
curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo-raid.yaml0000644000000000000000000000276212741214565026533 0ustar 00000000000000partitioning_commands:
  builtin: curtin block-meta custom

storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00002
    - id: sdb
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00003
    - id: sdc
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00004
    - id: sdd
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00005
    - id: sda1
      type: partition
      number: 1
      size: 8GB
      device: sda
      flag: boot
    - id: sdb1
      type: partition
      number: 1
      size: 1GB
      device: sdb
      flag: raid
    - id: sdc1
      type: partition
      number: 1
      size: 1GB
      device: sdc
      flag: raid
    - id: sdd1
      type: partition
      number: 1
      size: 1GB
      device: sdd
      flag: raid
    - id: md0
      type: raid
      name: md0
      raidlevel: 1
      devices:
        - sdb1
        - sdc1
      spare_devices:
        - sdd1
    - id: sda1_root
      type: format
      fstype: ext4
      volume: sda1
    - id: raid_storage
      type: format
      fstype: ext4
      volume: md0
    - id: sda1_mount
      type: mount
      path: /
      device: sda1_root
    - id: raid_mount
      type: mount
      path: /media/storage
      device: raid_storage
curtin-0.1.0~bzr399/examples/partitioning-demos/custom-partitioning-demo.yaml0000644000000000000000000000137212741214565025614 0ustar 00000000000000partitioning_commands:
  builtin: curtin block-meta custom

storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: msdos
      model: QEMU HARDDISK
      serial: QM00002
    - id: sda1
      type: partition
      number: 1
      size: 8GB
      device: sda
      flag: boot
    - id: sda2
      type: partition
      number: 2
      size: 1GB
      device: sda
    - id: sda1_root
      type: format
      fstype: ext4
      volume: sda1
    - id: sda2_home
      type: format
      fstype: ext4
      volume: sda2
    - id: sda1_mount
      type: mount
      path: /
      device: sda1_root
    - id: sda2_mount
      type: mount
      path: /home
      device: sda2_home
curtin-0.1.0~bzr399/examples/tests/allindata.yaml0000644000000000000000000001025212741214565020127 0ustar 00000000000000showtrace: true
storage:
  version: 1
  config:
    - id: sda
      type: disk
      ptable: gpt
      model: QEMU HARDDISK
      path: /dev/vdb
      name: main_disk
      grub_device: 1
    - id: bios_boot_partition
      type: partition
      size: 1MB
      device: sda
      flag: bios_grub
      number: 1
    - id: sda1
      type: partition
      size: 1GB
      device: sda
      number: 2  # XXX: we really need to stop using id with DiskPartnum
    - id: sda2
      type: partition
      size: 1GB
      device: sda
      number: 3  # XXX: we really need to stop using id with DiskPartnum
    - id: sda3
      type: partition
      size: 1GB
      device: sda
      number: 4  # XXX: we really need to stop using id with DiskPartnum
    - id: sda4
      type: partition
      size: 1GB
      device: sda
      number: 5  # XXX: we really need to stop using id with DiskPartnum
    - id: sda5
      type: partition
      size: 3GB
      device: sda
      number: 6  # XXX: we really need to stop using id with DiskPartnum
    - id: sdb
      type: disk
      ptable: gpt
      model: QEMU HARDDISK
      path: /dev/vdc
      name: second_disk
    - id: sdb1
      type: partition
      size: 1GB
      device: sdb
    - id: sdb2
      type: partition
      size: 1GB
      device: sdb
    - id: sdb3
      type: partition
      size: 1GB
      device: sdb
    - id: sdb4
      type: partition
      size: 1GB
      device: sdb
    - id: sdc
      type: disk
      ptable: gpt
      model: QEMU HARDDISK
      path: /dev/vdd
      name: third_disk
    - id: sdc1
      type: partition
      size: 1GB
      device: sdc
    - id: sdc2
      type: partition
      size: 1GB
      device: sdc
    - id: sdc3
      type: partition
      size: 1GB
      device: sdc
    - id: sdc4
      type: partition
      size: 1GB
      device:
sdc - id: sdd type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vde name: fourth_disk - id: sdd1 type: partition size: 1GB device: sdd - id: sdd2 type: partition size: 1GB device: sdd - id: sdd3 type: partition size: 1GB device: sdd - id: sdd4 type: partition size: 1GB device: sdd - id: mddevice0 name: md0 type: raid raidlevel: 5 devices: - sda1 - sdb1 - sdc1 spare_devices: - sdd1 - id: mddevice1 name: md1 type: raid raidlevel: raid6 devices: - sda2 - sdb2 - sdc2 - sdd2 spare_devices: - sda3 - id: mddevice2 name: md2 type: raid raidlevel: 1 devices: - sda4 - sdb3 spare_devices: - sdc3 - sdb4 - id: mddevice3 name: md3 type: raid raidlevel: raid0 devices: - sdc4 - sdd3 - id: volgroup1 name: vg1 type: lvm_volgroup devices: - mddevice0 - mddevice1 - mddevice2 - mddevice3 - id: lvmpart1 name: lv1 size: 1G type: lvm_partition volgroup: volgroup1 - id: lvmpart2 name: lv2 size: 1G type: lvm_partition volgroup: volgroup1 - id: lvmpart3 name: lv3 type: lvm_partition volgroup: volgroup1 - id: dmcrypt0 type: dm_crypt volume: lvmpart3 key: testkey dm_name: dmcrypt0 - id: lv1_fs name: storage type: format fstype: ext3 volume: lvmpart1 - id: lv2_fs name: storage type: format fstype: ext4 volume: lvmpart2 - id: dmcrypt_fs name: storage type: format fstype: xfs volume: dmcrypt0 - id: sda5_root type: format fstype: ext4 volume: sda5 - id: sda5_mount type: mount path: / device: sda5_root - id: lv1_mount type: mount path: /srv/data device: lv1_fs - id: lv2_mount type: mount path: /srv/backup device: lv2_fs curtin-0.1.0~bzr399/examples/tests/basic.yaml0000644000000000000000000000307112741214565017260 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: msdos model: QEMU HARDDISK path: /dev/vdb name: main_disk wipe: superblock grub_device: true - id: sda1 type: partition number: 1 size: 3GB device: sda flag: boot - id: sda2 type: partition number: 2 size: 1GB device: sda - id: sda1_root type: format fstype: ext4 volume: sda1 - id: sda2_home type: format fstype: ext4 volume: sda2 - id: sda1_mount type: mount path: / device: sda1_root - id: sda2_mount type: mount path: /home device: sda2_home - id: sparedisk_id type: disk path: /dev/vdc name: sparedisk wipe: superblock - id: btrfs_disk_id type: disk path: /dev/vdd name: btrfs_volume wipe: superblock - id: btrfs_disk_fmt_id type: format fstype: btrfs volume: btrfs_disk_id - id: btrfs_disk_mnt_id type: mount path: /btrfs device: btrfs_disk_fmt_id - id: pnum_disk type: disk path: /dev/vde name: pnum_disk wipe: superblock ptable: gpt - id: pnum_disk_p1 type: partition number: 1 size: 1GB device: pnum_disk - id: pnum_disk_p2 type: partition number: 10 size: 1GB device: pnum_disk curtin-0.1.0~bzr399/examples/tests/basic_network.yaml0000644000000000000000000000162312741214565021032 0ustar 00000000000000showtrace: true network: version: 1 config: # Physical interfaces. 
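# (Schema notes, inferred from the entries below: each 'physical' item is
#  matched to a NIC by mac_address and renamed to 'name'; 'subnets' lists
#  one dhcp4 or static entry per address, and static entries may carry
#  dns_nameservers/dns_search. A minimal illustrative entry, not part of
#  this test, would be:
#    - type: physical
#      name: eth0
#      mac_address: "52:54:00:12:34:ff"
#      subnets:
#        - type: dhcp4
# )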
- type: physical name: interface0 mac_address: "52:54:00:12:34:00" subnets: - type: dhcp4 - type: physical name: interface1 mtu: 1492 mac_address: "52:54:00:12:34:02" subnets: - type: static address: 10.0.2.100/24 - type: static address: 10.0.3.100/24 - type: static address: 10.0.4.100/24 - type: static address: 10.0.5.100/24 - type: static address: 10.0.2.200/24 dns_nameservers: - 8.8.8.8 dns_search: - barley.maas - type: physical name: interface2 mac_address: "52:54:00:12:34:04" curtin-0.1.0~bzr399/examples/tests/basic_network_static.yaml0000644000000000000000000000066312741214565022404 0ustar 00000000000000showtrace: true network: version: 1 config: # Physical interfaces. - type: physical name: interface0 mac_address: "52:54:00:12:34:00" subnets: - type: static address: 10.0.2.15/24 gateway: 10.0.2.2 - type: nameserver address: - 10.0.2.3 search: - wark.maas - foobar.maas curtin-0.1.0~bzr399/examples/tests/basic_scsi.yaml0000644000000000000000000000311012741214565020273 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: msdos wwn: '0x39cc071e72c64cc4' name: main_disk wipe: superblock grub_device: true - id: sda1 type: partition number: 1 size: 3GB device: sda flag: boot - id: sda2 type: partition number: 2 size: 1GB device: sda - id: sda1_root type: format fstype: ext4 volume: sda1 - id: sda2_home type: format fstype: ext4 volume: sda2 - id: sda1_mount type: mount path: / device: sda1_root - id: sda2_mount type: mount path: /home device: sda2_home - id: sparedisk_id type: disk wwn: '0x080258d13ea95ae5' name: sparedisk wipe: superblock - id: btrfs_disk_id type: disk wwn: '0x22dc58dc023c7008' name: btrfs_volume wipe: superblock - id: btrfs_disk_fmt_id type: format fstype: btrfs volume: btrfs_disk_id - id: btrfs_disk_mnt_id type: mount path: /btrfs device: btrfs_disk_fmt_id - id: pnum_disk type: disk wwn: '0x550a270c3a5811c5' name: pnum_disk wipe: superblock ptable: gpt - id: pnum_disk_p1 type: partition number: 1 size: 1GB device: pnum_disk - id: pnum_disk_p2 type: partition number: 10 size: 1GB device: pnum_disk curtin-0.1.0~bzr399/examples/tests/bcache_basic.yaml0000644000000000000000000000201212741214565020537 0ustar 00000000000000showtrace: true storage: config: - id: id_rotary0 type: disk name: rotary0 path: /dev/vdb ptable: msdos wipe: superblock grub_device: true - id: id_ssd0 type: disk name: ssd0 path: /dev/vdc wipe: superblock - id: id_rotary0_part1 type: partition name: rotary0-part1 device: id_rotary0 number: 1 offset: 1M size: 999M wipe: superblock - id: id_rotary0_part2 type: partition name: rotary0-part2 device: id_rotary0 number: 2 size: 9G wipe: superblock - id: id_bcache0 type: bcache name: bcache0 backing_device: id_rotary0_part2 cache_device: id_ssd0 cache_mode: writeback - id: bootfs type: format label: boot-fs volume: id_rotary0_part1 fstype: ext4 - id: rootfs type: format label: root-fs volume: id_bcache0 fstype: ext4 - id: rootfs_mount type: mount path: / device: rootfs - id: bootfs_mount type: mount path: /boot device: bootfs version: 1 curtin-0.1.0~bzr399/examples/tests/bonding_network.yaml0000644000000000000000000000214012741214565021364 0ustar 00000000000000showtrace: true network: version: 1 config: # Physical interfaces. - type: physical name: interface0 mac_address: "52:54:00:12:34:00" subnets: - type: dhcp4 - type: physical name: interface1 mac_address: "52:54:00:12:34:02" - type: physical name: interface2 mac_address: "52:54:00:12:34:04" # Bond. 
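# (The bond below enslaves interface1 and interface2 under bond0; keys
#  under 'params' such as bond-mode are rendered into the installed
#  network config as bonding driver options. active-backup keeps one
#  slave idle as a hot standby, so no switch-side support is required.)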
- type: bond name: bond0 mac_address: "52:54:00:12:34:06" bond_interfaces: - interface1 - interface2 params: bond-mode: active-backup subnets: - type: static address: 10.23.23.2/24 - type: static address: 10.23.24.2/24 curthooks_commands: # use curtin to disable open-iscsi ifupdown hooks for precise; they're # racy with the iface renaming iscsi_cleanup: ['curtin', 'in-target', '--', 'sh', '-c', 'if [ "`lsb_release -sc`" = "precise" ]; then rm -f /etc/network/if-down.d/open-iscsi /etc/network/if-up.d/open-iscsi; fi'] curtin-0.1.0~bzr399/examples/tests/lvm.yaml0000644000000000000000000000272012741214565016775 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: msdos model: QEMU HARDDISK path: /dev/vdb name: main_disk - id: sda1 type: partition size: 3GB device: sda flag: boot - id: sda_extended type: partition size: 5G flag: extended device: sda - id: sda2 type: partition size: 2G flag: logical device: sda - id: sda3 type: partition size: 3G flag: logical device: sda - id: volgroup1 name: vg1 type: lvm_volgroup devices: - sda2 - sda3 - id: lvmpart1 name: lv1 size: 1G type: lvm_partition volgroup: volgroup1 - id: lvmpart2 name: lv2 type: lvm_partition volgroup: volgroup1 - id: sda1_root type: format fstype: ext4 volume: sda1 - id: lv1_fs name: storage type: format fstype: fat32 volume: lvmpart1 - id: lv2_fs name: storage type: format fstype: ext3 volume: lvmpart2 - id: sda1_mount type: mount path: / device: sda1_root - id: lv1_mount type: mount path: /srv/data device: lv1_fs - id: lv2_mount type: mount path: /srv/backup device: lv2_fs curtin-0.1.0~bzr399/examples/tests/mdadm_bcache.yaml0000644000000000000000000000666312741214565020560 0ustar 00000000000000showtrace: true storage: version: 1 config: - grub_device: true id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub number: 1 - id: sda1 type: partition size: 3GB device: sda number: 2 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa1 - id: sda2 type: partition size: 1GB device: sda number: 3 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa2 - id: sda3 type: partition size: 1GB device: sda number: 4 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa3 - id: sda4 type: partition size: 1GB device: sda number: 5 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa4 - id: sda5 type: partition size: 1GB device: sda number: 6 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa5 - id: sda6 type: partition size: 1GB device: sda number: 7 # XXX: we really need to stop using id with DiskPartnum uuid: deadbeef-dead-beef-dead-deadbeefaaa6 - id: sdb type: disk model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdc type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdd name: third_disk - id: sdc1 type: partition size: 3GB device: sdc uuid: deadbeef-dead-beef-dead-deadbeefaac1 - id: mddevice name: md0 type: raid raidlevel: 1 devices: - sda2 - sda3 spare_devices: - sda4 - id: bcache1_raid type: bcache name: cached_array backing_device: mddevice cache_device: sda5 cache_mode: writeback - id: bcache_normal type: bcache name: cached_array_2 backing_device: sda6 cache_device: sda5 cache_mode: writethrough - id: bcachefoo type: bcache name: cached_array_3 
backing_device: sdc1 cache_device: sdb cache_mode: writearound - id: sda1_fs type: format fstype: ext4 volume: sda1 uuid: deadbeef-dead-beef-dead-deadbeeffff1 - id: bcache_raid_storage type: format fstype: ext4 volume: bcache1_raid uuid: deadbeef-dead-beef-dead-deadbeefcac1 - id: bcache_normal_storage type: format fstype: ext4 volume: bcache_normal uuid: deadbeef-dead-beef-dead-deadbeefcac2 - id: bcachefoo_fulldiskascache_storage type: format fstype: ext4 volume: bcachefoo - id: sda1_mount type: mount path: / device: sda1_fs - id: bcache1_raid_mount type: mount path: /media/data device: bcache_raid_storage - id: bcache0_mount type: mount path: /media/bcache_normal device: bcache_normal_storage - id: sda1_non_root_mount type: mount path: /media/bcachefoo_fulldiskascache_storage device: bcachefoo_fulldiskascache_storage curtin-0.1.0~bzr399/examples/tests/mdadm_bcache_complex.yaml0000644000000000000000000000531212741214565022275 0ustar 00000000000000storage: version: 1 config: - grub_device: true id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub - id: sda1 type: partition size: 2GB device: sda - id: sda2 type: partition size: 1GB device: sda - id: sda3 type: partition size: 1GB device: sda - id: sda4 type: partition size: 1GB device: sda - id: sda5 type: partition size: 1GB device: sda - id: sda6 type: partition size: 1GB device: sda - id: sda7 type: partition size: 1GB device: sda - id: sdb type: disk model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdc type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdd name: third_disk - id: sdc1 type: partition size: 3GB device: sdc - id: mddevice name: md0 type: raid raidlevel: 1 devices: - sda2 - sda3 spare_devices: - sda4 - id: bcache1_raid type: bcache name: cached_array backing_device: mddevice cache_device: sda5 cache_mode: writeback - id: bcache_normal type: bcache name: cached_array_2 backing_device: sda6 cache_device: sda5 cache_mode: writethrough - id: bcachefoo type: bcache name: cached_array_3 backing_device: sdc1 cache_device: sdb cache_mode: writearound - id: sda1_extradisk type: format fstype: ext4 volume: sda1 - id: sda7_boot type: format fstype: ext4 volume: sda7 - id: bcache_raid_storage type: format fstype: ext4 volume: bcache1_raid - id: bcache_normal_storage type: format fstype: ext4 volume: bcache_normal - id: bcachefoo_fulldiskascache_storage type: format fstype: ext4 volume: bcachefoo - id: bcache_root type: mount path: / device: bcachefoo_fulldiskascache_storage - id: bcache1_raid_mount type: mount path: /media/data device: bcache_raid_storage - id: bcache0_mount type: mount path: /media/bcache_normal device: bcache_normal_storage - id: sda1_non_root_mount type: mount path: /media/sda1 device: sda1_extradisk - id: sda7_boot_mount type: mount path: /boot device: sda7_boot curtin-0.1.0~bzr399/examples/tests/mirrorboot.yaml0000644000000000000000000000154412741214565020400 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk grub_device: 1 - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub - id: sda1 type: partition size: 3GB device: sda - id: sdb type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdb1 type: partition size: 3GB device: sdb - id: mddevice name: md0 type: raid raidlevel: 1 devices: - sda1 - sdb1 - id: md_root type: format fstype: ext4 
volume: mddevice - id: md_mount type: mount path: / device: md_root curtin-0.1.0~bzr399/examples/tests/multipath.yaml0000644000000000000000000000140612741214565020206 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: msdos serial: 'IPR-0 1234567890' name: mpath_a wipe: superblock grub_device: true - id: sda1 type: partition number: 1 size: 3GB device: sda flag: boot - id: sda2 type: partition number: 2 size: 1GB device: sda - id: sda1_root type: format fstype: ext4 volume: sda1 - id: sda2_home type: format fstype: ext4 volume: sda2 - id: sda1_mount type: mount path: / device: sda1_root - id: sda2_mount type: mount path: /home device: sda2_home curtin-0.1.0~bzr399/examples/tests/network_source.yaml0000644000000000000000000000213612741214565021251 0ustar 00000000000000network: version: 1 config: # Physical interfaces. - type: physical name: interface0 mac_address: "52:54:00:12:34:00" subnets: - type: dhcp4 - type: physical name: interface1 mtu: 1492 mac_address: "52:54:00:12:34:02" subnets: - type: static address: 10.0.2.100/24 - type: static address: 10.0.2.200/24 dns_nameservers: - 8.8.8.8 dns_search: - barley.maas - type: physical name: interface2 mac_address: "52:54:00:12:34:04" curthooks_commands: # use curtin to inject an eni config file outside of the network yaml # this allows us to test user installed configurations outside of # curtin's control aa_cleanup: ['curtin', 'in-target', '--', 'sh', '-c', "rm -f /etc/network/interfaces.d/eth0.cfg; /bin/echo -e 'auto interface2\niface interface2 inet static\n address 192.168.23.23/24\n' > /etc/network/interfaces.d/interface2.cfg"] curtin-0.1.0~bzr399/examples/tests/nvme.yaml0000644000000000000000000000316612741214565017151 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: main_disk type: disk ptable: gpt path: /dev/vdb name: main_disk wipe: superblock grub_device: true - id: main_disk_p1 type: partition number: 1 size: 3GB device: main_disk flag: boot - id: main_disk_p2 type: partition number: 2 size: 1GB device: main_disk - id: bios_boot type: partition size: 1MB number: 15 device: main_disk flag: bios_grub - id: main_disk_root type: format fstype: ext4 volume: main_disk_p1 - id: main_disk_home type: format fstype: ext4 volume: main_disk_p2 - id: root_mount type: mount path: / device: main_disk_root - id: home_mount type: mount path: /home device: main_disk_home - id: nvme_disk type: disk path: /dev/nvme0n1 name: nvme_disk wipe: superblock ptable: gpt - id: nvme_disk_p1 type: partition size: 1GB device: nvme_disk - id: nvme_disk_p2 type: partition number: 2 size: 1GB device: nvme_disk - id: nvme_disk_p3 type: partition size: 1GB device: nvme_disk - id: nvme_disk2 type: disk path: /dev/nvme1n1 wipe: superblock ptable: msdos name: second_nvme - id: nvme_disk2_p1 type: partition size: 1GB number: 1 device: nvme_disk2 curtin-0.1.0~bzr399/examples/tests/raid10boot.yaml0000644000000000000000000000243212741214565020143 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk grub_device: 1 - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub - id: sda1 type: partition size: 3GB device: sda - id: sdb type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdb1 type: partition size: 3GB device: sdb - id: sdc type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdd name: third_disk - id: sdc1 type: partition size: 3GB device: sdc - id: sdd 
type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vde name: fourth_disk - id: sdd1 type: partition size: 3GB device: sdd - id: mddevice0 name: md0 type: raid raidlevel: 10 devices: - sda1 - sdb1 - sdc1 - sdd1 - id: md_root type: format fstype: ext4 volume: mddevice0 - id: md_mount type: mount path: / device: md_root curtin-0.1.0~bzr399/examples/tests/raid5bcache.yaml0000644000000000000000000000355012741214565020333 0ustar 00000000000000showtrace: true storage: config: - grub_device: true id: sda model: QEMU HARDDISK name: sda ptable: msdos path: /dev/vdb type: disk wipe: superblock - id: sdb model: QEMU HARDDISK name: sdb path: /dev/vdc type: disk wipe: superblock - id: sdc model: QEMU HARDDISK name: sdc path: /dev/vdd type: disk wipe: superblock - id: sdd model: QEMU HARDDISK name: sdd path: /dev/vde type: disk wipe: superblock - id: sde model: QEMU HARDDISK name: sde path: /dev/vdf type: disk wipe: superblock - devices: - sdc - sdd - sde id: md0 name: md0 raidlevel: 5 spare_devices: [] type: raid - device: sda id: sda-part1 name: sda-part1 number: 1 offset: 2097152B size: 1000001536B type: partition uuid: 3a38820c-d675-4069-b060-509a3d9d13cc wipe: superblock - device: sda id: sda-part2 name: sda-part2 number: 2 size: 7586787328B type: partition uuid: 17747faa-4b9e-4411-97e5-12fd3d199fb8 wipe: superblock - backing_device: sda-part2 cache_device: sdb cache_mode: writeback id: bcache0 name: bcache0 type: bcache - fstype: ext4 id: sda-part1_format label: '' type: format uuid: 71b1ef6f-5cab-4a77-b4c8-5a209ec11d7c volume: sda-part1 - fstype: ext4 id: md0_format label: '' type: format uuid: b031f0a0-adb3-43be-bb43-ce0fc8a224a4 volume: md0 - fstype: ext4 id: bcache0_format label: '' type: format uuid: ce45bbaf-5a44-4487-b89e-035c2dd40657 volume: bcache0 - device: bcache0_format id: bcache0_mount path: / type: mount - device: sda-part1_format id: sda-part1_mount path: /boot type: mount - device: md0_format id: md0_mount path: /srv/data type: mount version: 1 curtin-0.1.0~bzr399/examples/tests/raid5boot.yaml0000644000000000000000000000207512741214565020072 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk grub_device: 1 - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub - id: sda1 type: partition size: 3GB device: sda - id: sdb type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdb1 type: partition size: 3GB device: sdb - id: sdc type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdd name: third_disk - id: sdc1 type: partition size: 3GB device: sdc - id: mddevice name: md0 type: raid raidlevel: 5 devices: - sda1 - sdb1 - sdc1 - id: md_root type: format fstype: ext4 volume: mddevice - id: md_mount type: mount path: / device: md_root curtin-0.1.0~bzr399/examples/tests/raid6boot.yaml0000644000000000000000000000245512741214565020075 0ustar 00000000000000showtrace: true storage: version: 1 config: - id: sda type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdb name: main_disk grub_device: 1 - id: bios_boot_partition type: partition size: 1MB device: sda flag: bios_grub - id: sda1 type: partition size: 3GB device: sda - id: sdb type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdc name: second_disk - id: sdb1 type: partition size: 3GB device: sdb - id: sdc type: disk ptable: gpt model: QEMU HARDDISK path: /dev/vdd name: third_disk - id: sdc1 type: partition size: 3GB device: sdc - id: sdd type: disk ptable: gpt model: QEMU 
HARDDISK path: /dev/vde name: fourth_disk - id: sdd1 type: partition size: 3GB device: sdd - id: mddevice name: md0 mdname: foobar type: raid raidlevel: 6 devices: - sda1 - sdb1 - sdc1 - sdd1 - id: md_root type: format fstype: ext4 volume: mddevice - id: md_mount type: mount path: / device: md_root curtin-0.1.0~bzr399/examples/tests/simple.yaml0000644000000000000000000000021012741214565017460 0ustar 00000000000000# This pushes curtin through an automatic installation # where no storage configuration is necessary. placeholder_simple_install: unused curtin-0.1.0~bzr399/examples/tests/uefi_basic.yaml0000644000000000000000000000143112741214565020266 0ustar 00000000000000showtrace: true storage: config: - id: id_disk0 type: disk name: main_disk path: /dev/vdb ptable: gpt wipe: superblock grub_device: true - device: id_disk0 flag: boot id: id_disk0_part1 number: 1 offset: 1M size: 512M type: partition wipe: superblock - device: id_disk0 id: id_disk0_part2 number: 2 size: 3G type: partition wipe: superblock - fstype: fat32 id: id_efi_format label: efi type: format volume: id_disk0_part1 - fstype: ext4 id: id_root_format label: root type: format volume: id_disk0_part2 - device: id_root_format id: id_root_mount path: / type: mount - device: id_efi_format id: id_efi_mount path: /boot/efi type: mount version: 1 curtin-0.1.0~bzr399/examples/tests/vlan_network.yaml0000644000000000000000000000321412741214565020707 0ustar 00000000000000network: config: - id: interface0 mac_address: d4:be:d9:a8:49:13 mtu: 1500 name: interface0 subnets: - address: 10.245.168.16/21 dns_nameservers: - 10.245.168.2 gateway: 10.245.168.1 type: static type: physical - id: interface1 mac_address: d4:be:d9:a8:49:15 mtu: 1500 name: interface1 subnets: - address: 10.245.188.2/24 dns_nameservers: [] type: static type: physical - id: interface2 mac_address: d4:be:d9:a8:49:17 mtu: 1500 name: interface2 subnets: - type: manual type: physical - id: interface3 mac_address: d4:be:d9:a8:49:19 mtu: 1500 name: interface3 subnets: - type: manual type: physical - id: interface1.2667 mtu: 1500 name: interface1.2667 subnets: - address: 10.245.184.2/24 dns_nameservers: [] type: static type: vlan vlan_id: 2667 vlan_link: interface1 - id: interface1.2668 mtu: 1500 name: interface1.2668 subnets: - address: 10.245.185.1/24 dns_nameservers: [] type: static type: vlan vlan_id: 2668 vlan_link: interface1 - id: interface1.2669 mtu: 1500 name: interface1.2669 subnets: - address: 10.245.186.1/24 dns_nameservers: [] type: static type: vlan vlan_id: 2669 vlan_link: interface1 - id: interface1.2670 mtu: 1500 name: interface1.2670 subnets: - address: 10.245.187.2/24 dns_nameservers: [] type: static type: vlan vlan_id: 2670 vlan_link: interface1 - address: 10.245.168.2 search: - dellstack type: nameserver version: 1 curtin-0.1.0~bzr399/helpers/common0000644000000000000000000005416712741214565015214 0ustar 00000000000000#!/bin/bash TEMP_D="" CR=" " VERBOSITY=${VERBOSITY:-${CURTIN_VERBOSITY:-0}} error() { echo "$@" 1>&2; } debug() { [ ${VERBOSITY:-0} -ge "$1" ] || return shift error "$@" } partition_main_usage() { cat <&1) || { error "wiping entire '$target' with ${info} failed." error "$out" return 1 } else local fbs=$bs count=$((size / bs)) if [ "$size" -ge "$mb" ]; then count=1 fbs=$mb fi info="size=$size count=$count bs=$fbs" debug 1 "wiping start of '$target' with ${info}." # wipe the first MB (up to 'size') out=$(dd if=/dev/zero conv=notrunc "of=$target" \ "bs=$fbs" "count=$count" 2>&1) || { error "wiping start of '$target' with ${info} failed."
error "$out" return 1 } if $wipe_end && [ "$size" -gt "$mb" ]; then # do the last 1MB count=$((mb / bs)) seek=$(((size / bs) - $count)) info="size=$size count=$count bs=$bs seek=$seek" debug 1 "wiping end of '$target' with ${info}." out=$(dd if=/dev/zero conv=notrunc "of=$target" "seek=$seek" \ "bs=$bs" "count=$count" 2>&1) if [ $? -ne 0 ]; then error "wiping end of '$target' with ${info} failed." error "$out"; return 1; fi fi fi if $rereadpt && [ -b "$target" ]; then blockdev --rereadpt "$target" udevadm settle fi } find_partno() { local devname="$1" partno="$2" local devbname cand msg="" slash="/" devbname="${devname#/dev/}" # /dev/cciss/c0d0 -> ccis!c0d0 devbname="${devbname//$slash/!}" if [ -d "/sys/class/block/${devbname}" ]; then local cand candptno name partdev debug 1 "using sys/class/block/$devbname" for cand in /sys/class/block/$devbname/*/partition; do [ -f "$cand" ] || continue read candptno < "$cand" [ "$candptno" = "$partno" ] || continue name=${cand#/sys/class/block/${devbname}/} name=${name%/partition} # ccis!c0d0p1 -> ccis/c0d0p1 name=${name//!/$slash} partdev="/dev/$name" [ -b "$partdev" ] && _RET="$partdev" && return 0 msg="expected $partdev to exist as partition $partno on $devname" error "WARN: $msg. it did not exist." done else for cand in "${devname}$partno" "${devname}p${partno}"; do [ -b "$cand" ] && _RET="$cand" && return 0 done fi return 1 } part2bd() { # part2bd given a partition, return the block device it is on # and the number the partition is. ie, 'sda2' -> '/dev/sda 2' local dev="$1" fp="" sp="" bd="" ptnum="" dev="/dev/${dev#/dev/}" fp=$(readlink -f "$dev") || return 1 sp="/sys/class/block/${fp##*/}" [ -f "$sp/partition" ] || { _RET="$fp 0"; return 0; } read ptnum < "$sp/partition" sp=$(readlink -f "$sp") || return 1 # sp now has some /sys/devices/pci..../0:2:0:0/block/sda/sda1 bd=${sp##*/block/} bd="${bd%/*}" _RET="/dev/$bd $ptnum" return 0 } pt_gpt() { local target="$1" end=${2:-""} boot="$3" size="" s512="" local start="2048" rootsize="" bootsize="1048576" maxend="" local isblk=false getsize "$target" || { error "failed to get size of $target"; return 1; } size="$_RET" if [ -z "$end" ]; then end=$(($size/512)) else end=$(($end/512)) fi if [ "$boot" = true ]; then maxend=$((($size/512)-$start-$bootsize)) if [ $maxend -lt 0 ]; then error "Disk is not big enough for /boot partition on $target"; return 1; fi else maxend=$((($size/512)-$start)) fi [ "$end" -gt "$maxend" ] && end="$maxend" debug 1 "maxend=$maxend end=$end size=$size" [ -b "$target" ] && isblk=true if [ "$boot" = true ]; then # Creating 'efi', '/boot' and '/' partitions sgdisk --new "15:$start:+1M" --typecode=15:ef02 \ --new "1::+512M" --typecode=1:8300 \ --new "2::$end" --typecode=2:8300 "$target" || { error "failed to gpt partition $target"; return 1; } else # Creating 'efi' and '/' partitions sgdisk --new "15:$start:+1M" --typecode=15:ef02 \ --new "1::$end" --typecode=1:8300 "$target" || { error "failed to gpt partition $target"; return 1; } fi if $isblk; then local expected="1 15" [ "$boot" = "true" ] && expected="$expected 2" blockdev --rereadpt "$target" udevadm settle assert_partitions "$target" $expected || { error "$target missing partitions: $_RET"; return 1; } wipe_partitions "$target" $expected || { error "$target: failed to wipe partitions"; return 1; } fi } assert_partitions() { local dev="$1" missing="" part="" shift for part in "$@"; do find_partno "$dev" $part || missing="${missing} ${part}" done _RET="${missing# }" [ -z "$missing" ] } pt_uefi() { local target="$1" 
end=${2:-""} size="" s512="" local start="2048" rootsize="" maxend="" local isblk=false getsize "$target" || { error "failed to get size of $target"; return 1; } size="$_RET" if [ -z "$end" ]; then end=$(($size/512)) else end=$(($end/512)) fi maxend=$((($size/512)-$start)) [ "$end" -gt "$maxend" ] && end="$maxend" debug 1 "maxend=$maxend end=$end size=$size" [ -b "$target" ] && isblk=true # Creating 'UEFI' and '/' partitions sgdisk --new "15:2048:+512M" --typecode=15:ef00 \ --new "1::$end" --typecode=1:8300 "$target" || { error "failed to sgdisk for uefi to $target"; return 1; } if $isblk; then blockdev --rereadpt "$target" udevadm settle assert_partitions "$target" 1 15 || { error "$target missing partitions: $_RET"; return 1; } wipe_partitions "$target" 1 15 || { error "$target: failed to wipe partitions"; return 1; } fi local pt15 find_partno "$target" 15 && pt15="$_RET" || { error "failed to find partition 15 for $target"; return 1; } mkfs -t vfat -F 32 -n uefi-boot "$pt15" || { error "failed to partition :$pt15' for UEFI vfat"; return 1; } } pt_mbr() { local target="$1" end=${2:-""} boot="$3" size="" s512="" ptype="L" local start="2048" rootsize="" maxsize="4294967296" local maxend="" isblk=false def_bootsize="1048576" bootsize=0 local isblk=false getsize "$target" || { error "failed to get size of $target"; return 1; } size="$_RET" if $boot; then bootsize=$def_bootsize fi s512=$(($size/512)) if [ $s512 -ge $maxsize ]; then debug 1 "disk is larger than max for mbr (2TB)" s512=$maxsize fi # allow 33 sectors for the secondary gpt header in the case that # the user wants to later 'sgdisk --mbrtogpt' local gpt2hsize="33" if [ -n "$end" ]; then rootsize=$(((end/512)-start-bootsize)) else rootsize=$((s512-start-bootsize-$gpt2hsize)) fi [ -b "$target" ] && isblk=true # interact with sfdisk in units of 512 bytes (--unit S) # we start all partitions at 2048 of those (1M) local sfdisk_out="" sfdisk_in="" sfdisk_cmd="" t="" expected="" if "$boot"; then t="$start,$bootsize,$ptype,-${CR}" t="$t$(($start+$bootsize)),$rootsize,$ptype,*" sfdisk_in="$t" expected="1 2" else sfdisk_in="$start,$rootsize,$ptype,*" expected=1 fi sfdisk_cmd=( sfdisk --no-reread --force --Linux --unit S "$target" ) debug 1 "sfdisking with: echo '$sfdisk_in' | ${sfdisk_cmd[*]}" sfdisk_out=$(echo "$sfdisk_in" | "${sfdisk_cmd[@]}" 2>&1) ret=$? 
[ $ret -eq 0 ] || { error "failed to partition $target [${sfdisk_out}]"; return 1; } if $isblk; then blockdev --rereadpt "$target" udevadm settle assert_partitions "$target" ${expected} || { error "$target missing partitions: $_RET"; return 1; } wipe_partitions "$target" ${expected} || { error "failed to wipe partition 1 on $target"; return 1; } fi } pt_prep() { local target="$1" end=${2:-""} local cmd="" isblk=false [ -b "$target" ] && isblk=true local pprep="1" proot="2" wipedev "$target" || { error "failed to clear $target"; return 1; } cmd=( sgdisk --new "${pprep}::+8M" "--typecode=${pprep}:4100" --new "${proot}::$end" "--typecode=${proot}:8300" "$target" ) debug 1 "partitioning '$target' with ${cmd[*]}" "${cmd[@]}" || fail "Failed to create GPT partitions (${cmd[*]})" udevadm trigger udevadm settle if $isblk; then blockdev --rereadpt "$target" udevadm settle assert_partitions "$target" "${proot}" "${pprep}" || { error "$target missing partitions: $_RET"; return 1; } # wipe the full prep partition wipe_partitions --full "$target" "${pprep}" || { error "$target: failed to wipe full PReP partition"; return 1;} wipe_partitions "$target" "${proot}" || { error "$target: failed to wipe partition ${proot}"; return 1;} fi return 0 } partition_main() { local short_opts="hE:f:bv" local long_opts="help,end:,format:,boot,verbose" local getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { partition_main_usage 1>&2; return 1; } local cur="" next="" local format="mbr" boot=false target="" end="" ret=0 while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in -h|--help) partition_main_usage ; exit 0;; -E|--end) end=$next; shift;; -f|--format) format=$next; shift;; -b|--boot) boot=true;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --) shift; break;; esac shift; done [ $# -gt 1 ] && { partition_main_usage "got $# args, expected 1" 1>&2; return 1; } [ $# -eq 0 ] && { partition_main_usage "must provide target-dev" 1>&2; return 1; } target="$1" if [ -n "$end" ]; then human2bytes "$end" || { error "failed to convert '$end' to bytes"; return 1; } end="$_RET" fi [ "$format" = "gpt" -o "$format" = "mbr" ] || [ "$format" = "uefi" -o "$format" = "prep" ] || { partition_main_usage "invalid format: $format" 1>&2; return 1; } TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || fail "failed to make tempdir" trap cleanup EXIT [ -e "$target" ] || { error "$target does not exist"; return 1; } [ -f "$target" -o -b "$target" ] || { error "$target not a block device"; return 1; } wipedev "$target" || { error "wiping $target failed"; return 1; } if [ "$format" = "mbr" ]; then pt_mbr "$target" "$end" "$boot" elif [ "$format" = "gpt" ]; then pt_gpt "$target" "$end" "$boot" elif [ "$format" = "uefi" ]; then pt_uefi "$target" "$end" elif [ "$format" = "prep" ]; then pt_prep "$target" "$end" fi ret=$? 
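# (illustrative invocations, assuming this file is sourced and
#  partition_main is the entry point:
#    partition_main -f gpt -b /dev/vdb   # 1MiB bios_grub p15, 512MiB /boot p1, root p2
#    partition_main -f uefi /dev/vdb     # 512MiB vfat ESP p15, root p1
# )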
return $ret } human2bytes() { # converts size suitable for input to resize2fs to bytes # s:512 byte sectors, K:kilobytes, M:megabytes, G:gigabytes # none: block size of the image local input=${1} defunit=${2:-1024} local unit count; case "$input" in *s) count=${input%s}; unit=512;; *K) count=${input%K}; unit=1024;; *M) count=${input%M}; unit=$((1024*1024));; *G) count=${input%G}; unit=$((1024*1024*1024));; *) count=${input} ; unit=${defunit};; esac _RET=$((${count}*${unit})) } getsize() { # return size of target in bytes local target="$1" if [ -b "$target" ]; then _RET=$(blockdev --getsize64 "$target") elif [ -f "$target" ]; then _RET=$(stat "--format=%s" "$target") else return 1; fi } is_md() { case "${1##*/}" in md[0-9]) return 0;; esac return 1 } get_carryover_params() { local cmdline=" $1 " extra="" lead="" carry_extra="" carry_lead="" # return a string to append to the installed system's boot parameters # it may include a '--' after a '---' # see LP: 1402042 for some history here. # this is similar to 'user-params' from d-i local preferred_sep="---" # KERNEL_CMDLINE_COPY_TO_INSTALL_SEP local legacy_sep="--" case "$cmdline" in *\ ${preferred_sep}\ *) extra=${cmdline#* ${preferred_sep} } lead=${cmdline%% ${preferred_sep} *} ;; *\ ${legacy_sep}\ *) extra="${cmdline#* ${legacy_sep} }" lead=${cmdline%% ${legacy_sep} *} ;; *) extra="" lead="$cmdline" ;; esac if [ -n "$extra" ]; then carry_extra=$(set -f; c=""; for p in $extra; do case "$p" in (BOOTIF=*|initrd=*|BOOT_IMAGE=*) continue;; esac c="$c $p"; done echo "${c# }" ) fi # these get copied even if they weren't after the separator local padded=" $carry_extra " carry_lead=$(set -f; padded=" ${carry_extra} " c="" for p in $lead; do # skip any that are already in carry_extra [ "${padded#* $p }" != "$padded" ] && continue case "$p" in (console=*) c="$c $p";; esac done echo "${c# }" ) _RET="${carry_lead:+${carry_lead} }${carry_extra}" } install_grub() { local long_opts="uefi,update-nvram" local getopt_out="" mp_efi="" getopt_out=$(getopt --name "${0##*/}" \ --options "" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" local uefi=0 local update_nvram=0 while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in --uefi) uefi=$((${uefi}+1));; --update-nvram) update_nvram=$((${update_nvram}+1));; --) shift; break;; esac shift; done [ $# -lt 2 ] && { grub_install_usage "must provide mount-point and target-dev" 1>&2; return 1; } local mp="$1" local cmdline tmp r="" shift local grubdevs grubdevs=( "$@" ) if [ "${#grubdevs[@]}" = "1" -a "${grubdevs[0]}" = "none" ]; then grubdevs=( ) fi # find the mp device mp_dev=$(awk -v "MP=$mp" '$2 == MP { print $1 }' /proc/mounts) || { error "unable to determine device for mount $mp"; return 1; } [ -z "$mp_dev" ] && { error "did not find '$mp' in /proc/mounts" cat /proc/mounts 1>&2 return 1 } [ -b "$mp_dev" ] || { error "$mp_dev is not a block device!"; return 1; } # get dpkg arch local dpkg_arch="" dpkg_arch=$(chroot "$mp" dpkg --print-architecture) r=$? 
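# (the architecture drives the grub flavor chosen below: ppc64* maps to
#  grub-ieee1275/powerpc-ieee1275, uefi amd64 to grub-efi-amd64/x86_64-efi,
#  uefi arm64 to grub-efi-arm64/arm64-efi, and anything else defaults to
#  grub-pc/i386-pc; on s390x grub is skipped entirely.)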
[ $r -eq 0 ] || { error "failed to get dpkg architecture [$r]" return 1; } # grub is not the bootloader you are looking for if [ "${dpkg_arch}" = "s390x" ]; then return 0; fi # set correct grub package local grub_name="grub-pc" local grub_target="i386-pc" if [ "${dpkg_arch#ppc64}" != "${dpkg_arch}" ]; then grub_name="grub-ieee1275" grub_target="powerpc-ieee1275" elif [ "$uefi" -ge 1 ]; then grub_name="grub-efi-$dpkg_arch" case "$dpkg_arch" in amd64) grub_target="x86_64-efi";; arm64) grub_target="arm64-efi";; esac fi # check that the grub package is installed tmp=$(chroot "$mp" dpkg-query --show \ --showformat='${Status}\n' $grub_name) r=$? if [ $r -ne 0 -a $r -ne 1 ]; then error "failed to check if $grub_name installed"; return 1; fi case "$tmp" in install\ ok\ installed) :;; *) debug 1 "$grub_name not installed, not doing anything"; return 0;; esac local grub_d="etc/default/grub.d" local mygrub_cfg="$grub_d/50-curtin-settings.cfg" [ -d "$mp/$grub_d" ] || mkdir -p "$mp/$grub_d" || { error "Failed to create $grub_d"; return 1; } # LP: #1179940 . The 50-cloudimg-settings.cfg file is written by the cloud # images build and defines/overrides some settings. Disable it. local cicfg="$grub_d/50-cloudimg-settings.cfg" if [ -f "$mp/$cicfg" ]; then debug 1 "moved $cicfg out of the way" mv "$mp/$cicfg" "$mp/$cicfg.disabled" fi # get the user provided / carry-over kernel arguments local newargs="" read cmdline < /proc/cmdline && get_carryover_params "$cmdline" && newargs="$_RET" || { error "Failed to get carryover parameters from cmdline"; return 1; } debug 1 "carryover command line params: $newargs" : > "$mp/$mygrub_cfg" || { error "Failed to write '$mygrub_cfg'"; return 1; } { [ "${REPLACE_GRUB_LINUX_DEFAULT:-1}" = "0" ] || echo "GRUB_CMDLINE_LINUX_DEFAULT=\"$newargs\"" echo "# disable grub os prober that might find other OS installs." echo "GRUB_DISABLE_OS_PROBER=true" echo "GRUB_TERMINAL=console" } >> "$mp/$mygrub_cfg" local short="" bd="" grubdev grubdevs_new="" grubdevs_new=() for grubdev in "${grubdevs[@]}"; do if is_md "$grubdev"; then short=${grubdev##*/} for bd in "/sys/block/$short/slaves/"/*; do [ -d "$bd" ] || continue bd=${bd##*/} bd="/dev/${bd%[0-9]}" # FIXME: part2bd grubdevs_new[${#grubdevs_new[@]}]="$bd" done else grubdevs_new[${#grubdevs_new[@]}]="$grubdev" fi done grubdevs=( "${grubdevs_new[@]}" ) if [ "$uefi" -ge 1 ]; then nvram="--no-nvram" if [ "$update_nvram" -ge 1 ]; then nvram="" fi debug 1 "installing ${grub_name} to: /boot/efi" chroot "$mp" env DEBIAN_FRONTEND=noninteractive sh -ec ' dpkg-reconfigure "$1" update-grub # grub-install in 12.04 does not contain --no-nvram, --target, # or --efi-directory target="--target=$2" no_nvram="$3" efi_dir="--efi-directory=/boot/efi" gi_out=$(grub-install --help 2>&1) echo "$gi_out" | grep -q -- "$no_nvram" || no_nvram="" echo "$gi_out" | grep -q -- "--target" || target="" echo "$gi_out" | grep -q -- "--efi-directory" || efi_dir="" grub-install $target $efi_dir \ --bootloader-id=ubuntu --recheck $no_nvram' -- \ "${grub_name}" "${grub_target}" "$nvram" &2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; } cleanup() { [ -z "${TEMP_D}" -o ! 
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" } debug() { local level=${1}; shift; [ "${level}" -gt "${VERBOSITY}" ] && return error "${@}" } print_vars() { local k karray t="" cur="" tic="'" repltic="'\''" for k in "$@"; do karray="$k[@]" karray=( "${!karray}" ) if [ ${#karray[@]} -gt 1 ]; then cur="(" for t in "${karray[@]}"; do cur="${cur} '${t//${tic}/${repltic}}'" done cur="${cur} )" else t=${!k} cur="'${t//${tic}/${repltic}}'" #printf "%s=\"%s\"" "$k" "${t//'/'\''}" fi printf "%s=%s\n" "$k" "$cur" done } write_extractor() { echo "#!/bin/bash" echo "# vi: ts=4 expandtab syntax=sh" print_vars "$@" echo "CREATE_TIME='$(date -R)'" echo "PAYLOAD_MARKER='$PAYLOAD_MARKER'" cat <<"END_EXTRACTOR" VERBOSITY=0 INFO_KEYS=("LABEL" "PREFIX" "COMMAND" "CREATE_TIME" "MD5SUM") error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } Usage() { cat <&2; [ $# -eq 0 ] || error "$@"; return 1; } debug() { local level=${1}; shift; [ "${level}" -gt "${VERBOSITY}" ] && return error "${@}" } print_vars() { local k karray t="" cur="" tic="'" repltic="'\''" for k in "$@"; do karray="$k[@]" karray=( "${!karray}" ) if [ ${#karray[@]} -gt 1 ]; then cur="(" for t in "${karray[@]}"; do cur="${cur} '${t//${tic}/${repltic}}'" done cur="${cur} )" else t=${!k} cur="'${t//${tic}/${repltic}}'" #printf "%s=\"%s\"" "$k" "${t//'/'\''}" fi printf "%s=%s\n" "$k" "$cur" done } dump_b64() { sed -n "1,/^${PAYLOAD_MARKER}$/!p" "$1" } dump_bin() { dump_b64 "$@" | base64 --decode } extract() { mkdir "$2" || { error "failed to make '$2'"; return 1; } dump_bin "$1" | tar -Sxzf - -C "$2" } main() { local short_opts="hp:v" local long_opts="help,no-execute,prefix:,verbose" local getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { bad_Usage; return; } local cur="" next="" local prefix="$PREFIX" execute=true while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in -h|--help) Usage ; exit 0;; --no-execute) execute=false;; -p|--prefix) prefix="$next"; shift;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --) shift; break;; esac shift; done [ $# -lt 2 ] || { bad_Usage "got $# args. expected only 1"; return; } mode="$1" case "${mode:-extract}" in info) print_vars "${INFO_KEYS[@]}"; return;; check) found=$(dump_bin "$0" | md5sum) || { error "failed to calculate checksum"; return 1; } found=${found% -} [ "$found" = "$MD5SUM" ] && error "found = expected = $found" && return 0 error "found = ${found}. expected = ${MD5SUM}" return 1;; dump) dump_bin "$0"; return;; extract) extract "$0" "$prefix" || { error "failed extraction"; return 1; } if [ "${#COMMAND[@]}" = "1" -a -z "${COMMAND}" ]; then error "extracted to '$PWD/$prefix'. no command to run." return 0; fi if ! $execute; then error "extracted to '$PWD/$prefix'. 
disabled command" return 0; fi cd "$prefix" || { error "failed to change to $prefix"; return 1; } local np="" if [ -n "$PYPATH" ]; then np="${PYPATH//_pwd_/$PWD}${PYTHONPATH:+:${PYTHONPATH}}" export PYTHONPATH="$np" fi if [ -n "$BINPATH" ]; then np="${BINPATH//_pwd_/$PWD}${PATH:+:${PATH}}" export PATH="$np" fi debug 1 "executing: ${COMMAND[*]}" exec "${COMMAND[@]}" ;; esac return 0 } set -o pipefail main "$@" exit END_EXTRACTOR echo "$PAYLOAD_MARKER" } Usage() { cat < "$output" || { error "failed to redirect output to $output"; return 1; } fi write_extractor MD5SUM PREFIX LABEL COMMAND PYPATH BINPATH || { error "failed to write extractro"; return 1; } base64 < "$payload" || { error "failed to base64 encode payload"; return 1; } return 0 } main "$@" # vi: ts=4 expandtab syntax=sh curtin-0.1.0~bzr399/helpers/smtar0000755000000000000000000000652712741214565015062 0ustar 00000000000000#!/bin/sh # smtar (smart tar) # GNU Tar can only determine the compression type if input is a local file. # If input is a pipe, it will not even attempt. # # This works around that limitation by using 'file' to determine # the compression format via a local temp file of BUFLEN (1024) bytes. # After determining format, it passes the correct flag to tar. # # Compression format determination is done with 'file' via use of a temp file # # The following are supported: # # # compression option provided explicitly: just exec tar # $ ./smtar -tvz < my.tar.gz # # # file argument provided, tar can determine: just exec tar # $ ./smtar -tvf my.tar.gz # $ ./smtar -tvf my.tar # # # input from stdin. determine the appropriate compress flag and execute # $ ./smtar -tv < my.tar.bz2 # $ ./smtar -tv < my.tar.bz2 # $ cat my.tar.xz | ./smtar -tv -f - # $ wget http://some.tar | ./smtar -tv -f - # # TEMPF="" BUFLEN="1024" cleanup() { [ -z "$TEMPF" ] || rm -f "$TEMPF"; } error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } find_tar_filearg() { # walk through list of args, return the 'file' argument in _RET local cur="" next="" while [ $# -ne 0 ]; do cur="$1" next="$2" case "$cur" in --file=*) _RET=${cur#*=}; return 0;; --file) _RET=$next; return 0;; --*=*) :;; *-f) _RET="$next"; return 0;; --) _RET=""; return 0;; esac shift done return 1 } tar_has_compress_opt() { # this isnt perfect, but catch common ways # without fully parsing the args, we risk interpreting # tar -xvf -J # as bzip2 compression, where in reality its a file name '-J' local cur="" next="" while [ $# -ne 0 ]; do cur="$1" next="$2" case "$cur" in -z|--gzip|--gunzip|--ungzip) return 0;; -j|--bzip2) return 0;; -J|--xz) return 0;; -Z|--compress|--uncompress) return 0;; --) return 1;; esac shift done return 1 } # see if we can get out without reading anything if [ -t 0 ] || tar_has_compress_opt; then # input is a terminal, or args contain a compress option exec tar "$@" fi # if there was a compression arg in input, then let it be find_tar_filearg "$@" if ! [ "$_RET" = "/dev/stdin" -o "$_RET" = "-" -o -z "$_RET" ]; then exec "tar" "$@" fi # now we have work to do zopt="" TEMPF=$(mktemp) || fail "mktemp failed" trap cleanup EXIT head -c "$BUFLEN" > "$TEMPF" || fail "FAILED: head -c '$BUFLEN'" size=$(stat --format="%s" "$TEMPF") file_out=$(LANG=C file --mime-type "$TEMPF") # my.tar: application/x-tar # my.tar.bz2: application/x-bzip2 # my.tar.gz: application/gzip # my.tar.xz: application/x-xz # my.tar.Z: application/x-compress if [ $? 
-eq 0 ]; then case "$file_out" in */x-bzip2|*/bzip2) zopt="--bzip2";; */x-gzip|*/gzip) zopt="--gzip";; */x-xz|*/xz) zopt="--xz";; */x-compress|*/compress) zopt="--compress";; *) zopt="";; esac else error "WARN: 'file' failed on input" fi if [ "$size" -lt "$BUFLEN" ]; then # input was less than BUFLEN chars, so we just exec tar with input from it exec < "$TEMPF" rm -f "$TEMPF" exec tar $zopt "$@" else ( cat "$TEMPF" && rm "$TEMPF" && exec cat ) | exec tar $zopt "$@" fi # vi: ts=4 expandtab syntax=sh curtin-0.1.0~bzr399/tests/unittests/0000755000000000000000000000000012741214565015536 5ustar 00000000000000curtin-0.1.0~bzr399/tests/vmtests/0000755000000000000000000000000012741214565015201 5ustar 00000000000000curtin-0.1.0~bzr399/tests/unittests/__init__.py0000644000000000000000000000004312741214565017644 0ustar 00000000000000# vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/unittests/test_basic.py0000644000000000000000000000032112741214565020224 0ustar 00000000000000from unittest import TestCase class TestImport(TestCase): def test_import(self): import curtin self.assertFalse(getattr(curtin, 'BOGUS_ENTRY', None)) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/unittests/test_block.py0000644000000000000000000001762212741214565020251 0ustar 00000000000000from unittest import TestCase import os import mock import tempfile import shutil from curtin import util from curtin import block class TestBlock(TestCase): @mock.patch("curtin.block.util") def test_get_volume_uuid(self, mock_util): path = "/dev/sda1" expected_call = ["blkid", "-o", "export", path] mock_util.subp.return_value = (""" UUID=182e8e23-5322-46c9-a1b8-cf2c6a88f9f7 """, "") uuid = block.get_volume_uuid(path) mock_util.subp.assert_called_with(expected_call, capture=True) self.assertEqual(uuid, "182e8e23-5322-46c9-a1b8-cf2c6a88f9f7") @mock.patch("curtin.block.get_proc_mounts") @mock.patch("curtin.block._lsblock") def test_get_mountpoints(self, mock_lsblk, mock_proc_mounts): mock_lsblk.return_value = {"sda1": {"MOUNTPOINT": None}, "sda2": {"MOUNTPOINT": ""}, "sda3": {"MOUNTPOINT": "/mnt"}} mock_proc_mounts.return_value = [ ('sysfs', '/sys', 'sysfs', 'sysfs_opts', '0', '0'), ] mountpoints = block.get_mountpoints() self.assertTrue(mock_lsblk.called) self.assertEqual(sorted(mountpoints), sorted(["/mnt", "/sys"])) @mock.patch("curtin.block.os.path.realpath") @mock.patch("curtin.block.os.path.exists") @mock.patch("curtin.block.os.listdir") def test_lookup_disk(self, mock_os_listdir, mock_os_path_exists, mock_os_path_realpath): serial = "SERIAL123" mock_os_listdir.return_value = ["sda_%s-part1" % serial, "sda_%s" % serial, "other"] mock_os_path_exists.return_value = True mock_os_path_realpath.return_value = "/dev/sda" path = block.lookup_disk(serial) mock_os_listdir.assert_called_with("/dev/disk/by-id/") mock_os_path_realpath.assert_called_with("/dev/disk/by-id/sda_%s" % serial) self.assertTrue(mock_os_path_exists.called) self.assertEqual(path, "/dev/sda") with self.assertRaises(ValueError): mock_os_path_exists.return_value = False block.lookup_disk(serial) with self.assertRaises(ValueError): mock_os_path_exists.return_value = True mock_os_listdir.return_value = ["other"] block.lookup_disk(serial) class TestSysBlockPath(TestCase): @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_existing_valid_devname(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = True m_get_blk.return_value = ('foodevice', None) self.assertEqual('/sys/class/block/foodevice', 
block.sys_block_path("foodevice")) @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_existing_devpath_allowed(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = True m_get_blk.return_value = ('foodev', None) self.assertEqual('/sys/class/block/foodev', block.sys_block_path("/dev/foodev")) @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_add_works(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = True m_get_blk.return_value = ('foodev', None) self.assertEqual('/sys/class/block/foodev/md/b', block.sys_block_path("/dev/foodev", "md/b")) @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_add_works_leading_slash(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = True m_get_blk.return_value = ('foodev', None) self.assertEqual('/sys/class/block/foodev/md/b', block.sys_block_path("/dev/foodev", "/md/b")) @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_invalid_devname_raises(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = False with self.assertRaises(ValueError): block.sys_block_path("foodevice") @mock.patch("curtin.block.get_blockdev_for_partition") def test_invalid_with_add(self, m_get_blk): # test the device exists, but 'add' does not # path_exists returns true unless 'md/device' is in it # so /sys/class/foodev/ exists, but not /sys/class/foodev/md/device add = "md/device" def path_exists(path): return add not in path m_get_blk.return_value = ("foodev", None) with mock.patch('os.path.exists', side_effect=path_exists): self.assertRaises(OSError, block.sys_block_path, "foodev", add) @mock.patch("curtin.block.get_blockdev_for_partition") @mock.patch("os.path.exists") def test_not_strict_does_not_care(self, m_os_path_exists, m_get_blk): m_os_path_exists.return_value = False m_get_blk.return_value = ('foodev', None) self.assertEqual('/sys/class/block/foodev/md/b', block.sys_block_path("foodev", "/md/b", strict=False)) class TestWipeFile(TestCase): def __init__(self, *args, **kwargs): super(TestWipeFile, self).__init__(*args, **kwargs) def tfile(self, *args): # return a temp file in a dir that will be cleaned up tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) return os.path.sep.join([tmpdir] + list(args)) def test_non_exist_raises_file_not_found(self): try: p = self.tfile("enofile") block.wipe_file(p) raise Exception("%s did not raise exception" % p) except Exception as e: if not util.is_file_not_found_exc(e): raise Exception("exc was not file_not_found: %s" % e) def test_non_exist_dir_raises_file_not_found(self): try: p = self.tfile("enodir", "file") block.wipe_file(p) raise Exception("%s did not raise exception" % p) except Exception as e: if not util.is_file_not_found_exc(e): raise Exception("exc was not file_not_found: %s" % e) def test_default_is_zero(self): flen = 1024 myfile = self.tfile("def_zero") util.write_file(myfile, flen * b'\1', omode="wb") block.wipe_file(myfile) found = util.load_file(myfile, mode="rb") self.assertEqual(found, flen * b'\0') def test_reader_used(self): flen = 17 def reader(size): return size * b'\1' myfile = self.tfile("reader_used") # populate with nulls util.write_file(myfile, flen * b'\0', omode="wb") block.wipe_file(myfile, reader=reader, buflen=flen) found = util.load_file(myfile, mode="rb") self.assertEqual(found, flen * b'\1') def test_reader_twice(self): flen = 37 data = {'x': 20 * b'a' + 20 * 
b'b'} expected = data['x'][0:flen] def reader(size): buf = data['x'][0:size] data['x'] = data['x'][size:] return buf myfile = self.tfile("reader_twice") util.write_file(myfile, flen * b'\xff', omode="wb") block.wipe_file(myfile, reader=reader, buflen=20) found = util.load_file(myfile, mode="rb") self.assertEqual(found, expected) def test_reader_fhandle(self): srcfile = self.tfile("fhandle_src") trgfile = self.tfile("fhandle_trg") data = '\n'.join(["this is source file." for f in range(0, 10)] + []) util.write_file(srcfile, data) util.write_file(trgfile, 'a' * len(data)) with open(srcfile, "rb") as fp: block.wipe_file(trgfile, reader=fp.read) found = util.load_file(trgfile) self.assertEqual(data, found) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/unittests/test_block_mdadm.py0000644000000000000000000010427512741214565021414 0ustar 00000000000000from unittest import TestCase from mock import call, patch from curtin.block import dev_short from curtin.block import mdadm import os import subprocess class MdadmTestBase(TestCase): def setUp(self): super(MdadmTestBase, self).setUp() def add_patch(self, target, attr): """Patches specified target object and sets it as attr on test instance also schedules cleanup""" m = patch(target, autospec=True) p = m.start() self.addCleanup(m.stop) setattr(self, attr, p) class TestBlockMdadmAssemble(MdadmTestBase): def setUp(self): super(TestBlockMdadmAssemble, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.mock_valid.return_value = True self.mock_util.lsb_release.return_value = {'codename': 'precise'} self.mock_util.subp.side_effect = [ ("", ""), # mdadm assemble ("", ""), # udevadm settle ] def test_mdadm_assemble_scan(self): mdadm.mdadm_assemble(scan=True) expected_calls = [ call(["mdadm", "--assemble", "--scan"], capture=True, rcs=[0, 1, 2]), call(["udevadm", "settle"]), ] self.mock_util.subp.assert_has_calls(expected_calls) def test_mdadm_assemble_md_devname(self): md_devname = "/dev/md0" mdadm.mdadm_assemble(md_devname=md_devname) expected_calls = [ call(["mdadm", "--assemble", md_devname, "--run"], capture=True, rcs=[0, 1, 2]), call(["udevadm", "settle"]), ] self.mock_util.subp.assert_has_calls(expected_calls) def test_mdadm_assemble_md_devname_short(self): with self.assertRaises(ValueError): md_devname = "md0" mdadm.mdadm_assemble(md_devname=md_devname) def test_mdadm_assemble_md_devname_none(self): with self.assertRaises(ValueError): md_devname = None mdadm.mdadm_assemble(md_devname=md_devname) def test_mdadm_assemble_md_devname_devices(self): md_devname = "/dev/md0" devices = ["/dev/vdc1", "/dev/vdd1"] mdadm.mdadm_assemble(md_devname=md_devname, devices=devices) expected_calls = [ call(["mdadm", "--assemble", md_devname, "--run"] + devices, capture=True, rcs=[0, 1, 2]), call(["udevadm", "settle"]), ] self.mock_util.subp.assert_has_calls(expected_calls) class TestBlockMdadmCreate(MdadmTestBase): def setUp(self): super(TestBlockMdadmCreate, self).setUp() self.add_patch('curtin.block.mdadm.util', 'mock_util') self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid') # Common mock settings self.mock_valid.return_value = True self.mock_util.lsb_release.return_value = {'codename': 'precise'} def prepare_mock(self, md_devname, raidlevel, devices, spares): side_effects = [] expected_calls = [] hostname = 'ubuntu' # don't mock anything if raidlevel and spares mismatch if spares and raidlevel not in 
mdadm.SPARE_RAID_LEVELS: return (side_effects, expected_calls) side_effects.append((hostname, "")) # hostname -s expected_calls.append(call(["hostname", "-s"], capture=True, rcs=[0])) # prepare side-effects for d in devices + spares: side_effects.append(("", "")) # mdadm --zero-superblock expected_calls.append( call(["mdadm", "--zero-superblock", d], capture=True)) side_effects.append(("", "")) # udevadm settle expected_calls.append(call(["udevadm", "settle"])) side_effects.append(("", "")) # udevadm control --stop-exec-queue expected_calls.append(call(["udevadm", "control", "--stop-exec-queue"])) side_effects.append(("", "")) # mdadm create # build command how mdadm_create does cmd = (["mdadm", "--create", md_devname, "--run", "--homehost=%s" % hostname, "--level=%s" % raidlevel, "--raid-devices=%s" % len(devices)] + devices) if spares: cmd += ["--spare-devices=%s" % len(spares)] + spares expected_calls.append(call(cmd, capture=True)) side_effects.append(("", "")) # udevadm control --start-exec-queue expected_calls.append(call(["udevadm", "control", "--start-exec-queue"])) side_effects.append(("", "")) # udevadm settle expected_calls.append(call(["udevadm", "settle", "--exit-if-exists=%s" % md_devname])) return (side_effects, expected_calls) def test_mdadm_create_raid0(self): md_devname = "/dev/md0" raidlevel = 0 devices = ["/dev/vdc1", "/dev/vdd1"] spares = [] (side_effects, expected_calls) = self.prepare_mock(md_devname, raidlevel, devices, spares) self.mock_util.subp.side_effect = side_effects mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) self.mock_util.subp.assert_has_calls(expected_calls) def test_mdadm_create_raid0_devshort(self): md_devname = "md0" raidlevel = 0 devices = ["/dev/vdc1", "/dev/vdd1"] spares = [] with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) def test_mdadm_create_raid0_with_spares(self): md_devname = "/dev/md0" raidlevel = 0 devices = ["/dev/vdc1", "/dev/vdd1"] spares = ["/dev/vde1"] (side_effects, expected_calls) = self.prepare_mock(md_devname, raidlevel, devices, spares) self.mock_util.subp.side_effect = side_effects with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) self.mock_util.subp.assert_has_calls(expected_calls) def test_mdadm_create_md_devname_none(self): md_devname = None raidlevel = 0 devices = ["/dev/vdc1", "/dev/vdd1"] spares = ["/dev/vde1"] with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) def test_mdadm_create_md_devname_missing(self): self.mock_valid.return_value = False md_devname = "/dev/wark" raidlevel = 0 devices = ["/dev/vdc1", "/dev/vdd1"] spares = ["/dev/vde1"] with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) def test_mdadm_create_invalid_raidlevel(self): md_devname = "/dev/md0" raidlevel = 27 devices = ["/dev/vdc1", "/dev/vdd1"] spares = ["/dev/vde1"] with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) def test_mdadm_create_check_min_devices(self): md_devname = "/dev/md0" raidlevel = 5 devices = ["/dev/vdc1", "/dev/vdd1"] spares = ["/dev/vde1"] with self.assertRaises(ValueError): mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel, devices=devices, spares=spares) def 
    def test_mdadm_create_raid5(self):
        md_devname = "/dev/md0"
        raidlevel = 5
        devices = ['/dev/vdc1', '/dev/vdd1', '/dev/vde1']
        spares = ['/dev/vdg1']
        (side_effects, expected_calls) = self.prepare_mock(md_devname,
                                                           raidlevel,
                                                           devices,
                                                           spares)

        self.mock_util.subp.side_effect = side_effects
        mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,
                           devices=devices, spares=spares)
        self.mock_util.subp.assert_has_calls(expected_calls)


class TestBlockMdadmExamine(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmExamine, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        # common mock settings
        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'precise'}

    def test_mdadm_examine_export(self):
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
        self.mock_util.subp.return_value = (
            """
            MD_LEVEL=raid0
            MD_DEVICES=2
            MD_METADATA=0.90
            MD_UUID=93a73e10:427f280b:b7076c02:204b8f7a
            """, "")

        device = "/dev/vde"
        data = mdadm.mdadm_examine(device, export=True)

        expected_calls = [
            call(["mdadm", "--examine", "--export", device], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(data['MD_UUID'],
                         '93a73e10:427f280b:b7076c02:204b8f7a')
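
    # A minimal sketch of how `mdadm --examine --export` output maps onto
    # the dict asserted above -- one KEY=value pair per line. The function
    # name here is an assumption for illustration, not curtin's actual
    # parser:
    #
    #   def parse_export(output):
    #       data = {}
    #       for line in output.splitlines():
    #           line = line.strip()
    #           if '=' in line:
    #               key, _, val = line.partition('=')
    #               data[key] = val
    #       return data
    #
    #   parse_export("MD_LEVEL=raid0\nMD_DEVICES=2")
    #   # -> {'MD_LEVEL': 'raid0', 'MD_DEVICES': '2'}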

    def test_mdadm_examine_no_export(self):
        self.mock_util.subp.return_value = ("""/dev/vde:
                 Magic : a92b4efc
               Version : 1.2
           Feature Map : 0x0
            Array UUID : 93a73e10:427f280b:b7076c02:204b8f7a
                  Name : wily-foobar:0  (local to host wily-foobar)
         Creation Time : Sat Dec 12 16:06:05 2015
            Raid Level : raid1
          Raid Devices : 2

        Avail Dev Size : 20955136 (9.99 GiB 10.73 GB)
         Used Dev Size : 20955136 (9.99 GiB 10.73 GB)
            Array Size : 10477568 (9.99 GiB 10.73 GB)
           Data Offset : 16384 sectors
          Super Offset : 8 sectors
          Unused Space : before=16296 sectors, after=0 sectors
                 State : clean
           Device UUID : 8fcd62e6:991acc6e:6cb71ee3:7c956919

           Update Time : Sat Dec 12 16:09:09 2015
         Bad Block Log : 512 entries available at offset 72 sectors
              Checksum : 65b57c2e - correct
                Events : 17

           Device Role : spare
           Array State : AA ('A' == active, '.' == missing, 'R' == replacing)
        """, "")  # mdadm --examine /dev/vde

        device = "/dev/vde"
        data = mdadm.mdadm_examine(device, export=False)

        expected_calls = [
            call(["mdadm", "--examine", device], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(data['MD_UUID'],
                         '93a73e10:427f280b:b7076c02:204b8f7a')

    def test_mdadm_examine_no_raid(self):
        self.mock_util.subp.side_effect = subprocess.CalledProcessError("",
                                                                        "")

        device = "/dev/sda"
        data = mdadm.mdadm_examine(device, export=False)

        expected_calls = [
            call(["mdadm", "--examine", device], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(data, {})


class TestBlockMdadmStop(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmStop, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        # common mock settings
        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
        self.mock_util.subp.side_effect = [
            ("", ""),  # mdadm stop device
        ]

    def test_mdadm_stop_no_devpath(self):
        with self.assertRaises(ValueError):
            mdadm.mdadm_stop(None)

    def test_mdadm_stop(self):
        device = "/dev/vdc"
        mdadm.mdadm_stop(device)
        expected_calls = [
            call(["mdadm", "--stop", device], rcs=[0, 1], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)


class TestBlockMdadmRemove(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmRemove, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        # common mock settings
        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
        self.mock_util.subp.side_effect = [
            ("", ""),  # mdadm remove device
        ]

    def test_mdadm_remove_no_devpath(self):
        with self.assertRaises(ValueError):
            mdadm.mdadm_remove(None)

    def test_mdadm_remove(self):
        device = "/dev/vdc"
        mdadm.mdadm_remove(device)
        expected_calls = [
            call(["mdadm", "--remove", device], rcs=[0, 1], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)


class TestBlockMdadmQueryDetail(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmQueryDetail, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        # common mock settings
        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'precise'}

    def test_mdadm_query_detail_export(self):
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
        self.mock_util.subp.return_value = (
            """
            MD_LEVEL=raid1
            MD_DEVICES=2
            MD_METADATA=1.2
            MD_UUID=93a73e10:427f280b:b7076c02:204b8f7a
            MD_NAME=wily-foobar:0
            MD_DEVICE_vdc_ROLE=0
            MD_DEVICE_vdc_DEV=/dev/vdc
            MD_DEVICE_vdd_ROLE=1
            MD_DEVICE_vdd_DEV=/dev/vdd
            MD_DEVICE_vde_ROLE=spare
            MD_DEVICE_vde_DEV=/dev/vde
            """, "")

        device = "/dev/md0"
        self.mock_valid.return_value = True
        data = mdadm.mdadm_query_detail(device, export=True)

        expected_calls = [
            call(["mdadm", "--query", "--detail", "--export", device],
                 capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(data['MD_UUID'],
                         '93a73e10:427f280b:b7076c02:204b8f7a')

    def test_mdadm_query_detail_no_export(self):
        self.mock_util.subp.return_value = ("""/dev/md0:
                Version : 1.2
          Creation Time : Sat Dec 12 16:06:05 2015
             Raid Level : raid1
             Array Size : 10477568 (9.99 GiB 10.73 GB)
          Used Dev Size : 10477568 (9.99 GiB 10.73 GB)
           Raid Devices : 2
          Total Devices : 3
            Persistence : Superblock is persistent

            Update Time : Sat Dec 12 16:09:09 2015
                  State : clean
         Active Devices : 2
        Working Devices : 3
         Failed Devices : 0
          Spare Devices : 1

                   Name : wily-foobar:0  (local to host wily-foobar)
                   UUID : 93a73e10:427f280b:b7076c02:204b8f7a
                 Events : 17

            Number   Major   Minor   RaidDevice State
               0     253       32        0      active sync   /dev/vdc
               1     253       48        1      active sync   /dev/vdd

               2     253       64        -      spare   /dev/vde
        """, "")  # mdadm --query --detail /dev/md0

        device = "/dev/md0"
        data = mdadm.mdadm_query_detail(device, export=False)
        expected_calls = [
            call(["mdadm", "--query", "--detail", device], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(data['MD_UUID'],
                         '93a73e10:427f280b:b7076c02:204b8f7a')


class TestBlockMdadmDetailScan(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmDetailScan, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        # common mock settings
        self.scan_output = ("ARRAY /dev/md0 metadata=1.2 spares=2 name=0 " +
                            "UUID=b1eae2ff:69b6b02e:1d63bb53:ddfa6e4a")
        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}
        self.mock_util.subp.side_effect = [
            (self.scan_output, ""),  # mdadm --detail --scan
        ]

    def test_mdadm_detail_scan(self):
        data = mdadm.mdadm_detail_scan()
        expected_calls = [
            call(["mdadm", "--detail", "--scan"], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(self.scan_output, data)

    def test_mdadm_detail_scan_error(self):
        self.mock_util.subp.side_effect = [
            ("wark", "error"),  # mdadm --detail --scan
        ]
        data = mdadm.mdadm_detail_scan()
        expected_calls = [
            call(["mdadm", "--detail", "--scan"], capture=True),
        ]
        self.mock_util.subp.assert_has_calls(expected_calls)
        self.assertEqual(None, data)
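
# Per the tests above, mdadm_detail_scan() returns the raw stdout of
# `mdadm --detail --scan` (one ARRAY line per device) and None whenever
# anything arrives on stderr, so callers are expected to treat None as
# failure. A hedged sketch:
#
#   scan = mdadm.mdadm_detail_scan()
#   if scan is None:
#       raise RuntimeError("mdadm --detail --scan reported an error")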

class TestBlockMdadmMdHelpers(MdadmTestBase):
    def setUp(self):
        super(TestBlockMdadmMdHelpers, self).setUp()
        self.add_patch('curtin.block.mdadm.util', 'mock_util')
        self.add_patch('curtin.block.mdadm.is_valid_device', 'mock_valid')

        self.mock_valid.return_value = True
        self.mock_util.lsb_release.return_value = {'codename': 'xenial'}

    def test_valid_mdname(self):
        mdname = "/dev/md0"
        result = mdadm.valid_mdname(mdname)
        expected_calls = [
            call(mdname)
        ]
        self.mock_valid.assert_has_calls(expected_calls)
        self.assertTrue(result)

    def test_valid_mdname_short(self):
        mdname = "md0"
        with self.assertRaises(ValueError):
            mdadm.valid_mdname(mdname)

    def test_valid_mdname_none(self):
        mdname = None
        with self.assertRaises(ValueError):
            mdadm.valid_mdname(mdname)

    def test_valid_mdname_not_valid_device(self):
        self.mock_valid.return_value = False
        mdname = "/dev/md0"
        with self.assertRaises(ValueError):
            mdadm.valid_mdname(mdname)

    @patch('curtin.block.mdadm.sys_block_path')
    @patch('curtin.block.mdadm.os.path.isfile')
    def test_md_sysfs_attr(self, mock_isfile, mock_sysblock):
        mdname = "/dev/md0"
        attr_name = 'array_state'
        sysfs_path = '/sys/class/block/{}/md/{}'.format(dev_short(mdname),
                                                        attr_name)
        mock_sysblock.side_effect = ['/sys/class/block/md0/md']
        mock_isfile.side_effect = [True]
        mdadm.md_sysfs_attr(mdname, attr_name)
        self.mock_util.load_file.assert_called_with(sysfs_path)
        mock_sysblock.assert_called_with(mdname, 'md')
        mock_isfile.assert_called_with(sysfs_path)

    def test_md_sysfs_attr_devname_none(self):
        mdname = None
        attr_name = 'array_state'
        with self.assertRaises(ValueError):
            mdadm.md_sysfs_attr(mdname, attr_name)

    def test_md_raidlevel_short(self):
        for rl in [0, 1, 5, 6, 10, 'linear', 'stripe']:
            self.assertEqual(rl, mdadm.md_raidlevel_short(rl))
            if isinstance(rl, int):
                long_rl = 'raid%d' % rl
                self.assertEqual(rl, mdadm.md_raidlevel_short(long_rl))

    def test_md_minimum_devices(self):
        min_to_rl = {
            2: [0, 1, 'linear', 'stripe'],
            3: [5],
            4: [6, 10],
        }

        for rl in [0, 1, 5, 6, 10, 'linear', 'stripe']:
            min_devs = mdadm.md_minimum_devices(rl)
            self.assertTrue(rl in min_to_rl[min_devs])

    def test_md_minimum_devices_invalid_rl(self):
        min_devs = mdadm.md_minimum_devices(27)
        self.assertEqual(min_devs, -1)

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_rw(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'clean'
        self.assertTrue(mdadm.md_check_array_state_rw(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_rw_false(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'inactive'
        self.assertFalse(mdadm.md_check_array_state_rw(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_ro(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'readonly'
        self.assertTrue(mdadm.md_check_array_state_ro(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_ro_false(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'inactive'
        self.assertFalse(mdadm.md_check_array_state_ro(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_error(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'inactive'
        self.assertTrue(mdadm.md_check_array_state_error(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_error_false(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.return_value = 'active'
        self.assertFalse(mdadm.md_check_array_state_error(mdname))

    def test_md_device_key_role(self):
        devname = '/dev/vda'
        rolekey = mdadm.md_device_key_role(devname)
        self.assertEqual('MD_DEVICE_vda_ROLE', rolekey)

    def test_md_device_key_role_no_dev(self):
        devname = None
        with self.assertRaises(ValueError):
            mdadm.md_device_key_role(devname)

    def test_md_device_key_dev(self):
        devname = '/dev/vda'
        devkey = mdadm.md_device_key_dev(devname)
        self.assertEqual('MD_DEVICE_vda_DEV', devkey)

    def test_md_device_key_dev_no_dev(self):
        devname = None
        with self.assertRaises(ValueError):
            mdadm.md_device_key_dev(devname)

    @patch('curtin.block.get_blockdev_for_partition')
    @patch('curtin.block.mdadm.os.path.exists')
    @patch('curtin.block.mdadm.os.listdir')
    def tests_md_get_spares_list(self, mock_listdir, mock_exists,
                                 mock_getbdev):
        mdname = '/dev/md0'
        devices = ['dev-vda', 'dev-vdb', 'dev-vdc']
        states = ['in-sync', 'in-sync', 'spare']

        mock_exists.return_value = True
        mock_listdir.return_value = devices
        self.mock_util.load_file.side_effect = states
        mock_getbdev.return_value = ('md0', None)

        sysfs_path = '/sys/class/block/md0/md/'
        expected_calls = []
        for d in devices:
            expected_calls.append(call(os.path.join(sysfs_path, d, 'state')))

        spares = mdadm.md_get_spares_list(mdname)
        self.mock_util.load_file.assert_has_calls(expected_calls)
        self.assertEqual(['/dev/vdc'], spares)

    @patch('curtin.block.get_blockdev_for_partition')
    @patch('curtin.block.mdadm.os.path.exists')
    def tests_md_get_spares_list_nomd(self, mock_exists, mock_getbdev):
        mdname = '/dev/md0'
        mock_exists.return_value = False
        mock_getbdev.return_value = ('md0', None)
        with self.assertRaises(OSError):
            mdadm.md_get_spares_list(mdname)
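    # The sysfs layout assumed by the spares/devices helpers above and
    # below: /sys/class/block/md0/md/ holds one dev-<name> directory per
    # member, and reading its 'state' file yields e.g. 'in-sync' for active
    # members or 'spare' for spares (the mocked values mirror exactly that).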
    @patch('curtin.block.get_blockdev_for_partition')
    @patch('curtin.block.mdadm.os.path.exists')
    @patch('curtin.block.mdadm.os.listdir')
    def tests_md_get_devices_list(self, mock_listdir, mock_exists,
                                  mock_getbdev):
        mdname = '/dev/md0'
        devices = ['dev-vda', 'dev-vdb', 'dev-vdc']
        states = ['in-sync', 'in-sync', 'spare']

        mock_exists.return_value = True
        mock_listdir.return_value = devices
        self.mock_util.load_file.side_effect = states
        mock_getbdev.return_value = ('md0', None)

        sysfs_path = '/sys/class/block/md0/md/'
        expected_calls = []
        for d in devices:
            expected_calls.append(call(os.path.join(sysfs_path, d, 'state')))

        devs = mdadm.md_get_devices_list(mdname)
        self.mock_util.load_file.assert_has_calls(expected_calls)
        self.assertEqual(sorted(['/dev/vda', '/dev/vdb']), sorted(devs))

    @patch('curtin.block.get_blockdev_for_partition')
    @patch('curtin.block.mdadm.os.path.exists')
    def tests_md_get_devices_list_nomd(self, mock_exists, mock_getbdev):
        mdname = '/dev/md0'
        mock_exists.return_value = False
        mock_getbdev.return_value = ('md0', None)
        with self.assertRaises(OSError):
            mdadm.md_get_devices_list(mdname)

    @patch('curtin.block.mdadm.os')
    def test_md_check_array_uuid(self, mock_os):
        devname = '/dev/md0'
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        mock_os.path.realpath.return_value = devname
        rv = mdadm.md_check_array_uuid(devname, md_uuid)
        self.assertTrue(rv)

    @patch('curtin.block.mdadm.os')
    def test_md_check_array_uuid_mismatch(self, mock_os):
        devname = '/dev/md0'
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        mock_os.path.realpath.return_value = '/dev/md1'
        with self.assertRaises(ValueError):
            mdadm.md_check_array_uuid(devname, md_uuid)

    @patch('curtin.block.mdadm.mdadm_query_detail')
    def test_md_get_uuid(self, mock_query):
        mdname = '/dev/md0'
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        mock_query.return_value = {'MD_UUID': md_uuid}
        uuid = mdadm.md_get_uuid(mdname)
        self.assertEqual(md_uuid, uuid)

    @patch('curtin.block.mdadm.mdadm_query_detail')
    def test_md_get_uuid_dev_none(self, mock_query):
        mdname = None
        with self.assertRaises(ValueError):
            mdadm.md_get_uuid(mdname)

    def test_md_check_raid_level(self):
        for rl in mdadm.VALID_RAID_LEVELS:
            self.assertTrue(mdadm.md_check_raidlevel(rl))

    def test_md_check_raid_level_bad(self):
        bogus = '27'
        self.assertTrue(bogus not in mdadm.VALID_RAID_LEVELS)
        with self.assertRaises(ValueError):
            mdadm.md_check_raidlevel(bogus)

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.side_effect = [
            'clean',  # array_state
            '0',  # degraded
            'idle',  # sync_action
        ]
        self.assertTrue(mdadm.md_check_array_state(mdname))

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_norw(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.side_effect = [
            'suspended',  # array_state
            '0',  # degraded
            'idle',  # sync_action
        ]
        with self.assertRaises(ValueError):
            mdadm.md_check_array_state(mdname)

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_degraded(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.side_effect = [
            'clean',  # array_state
            '1',  # degraded
            'idle',  # sync_action
        ]
        with self.assertRaises(ValueError):
            mdadm.md_check_array_state(mdname)

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_degraded_empty(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.side_effect = [
            'clean',  # array_state
            '',  # unknown
            'idle',  # sync_action
        ]
        with self.assertRaises(ValueError):
            mdadm.md_check_array_state(mdname)

    @patch('curtin.block.mdadm.md_sysfs_attr')
    def test_md_check_array_state_sync(self, mock_attr):
        mdname = '/dev/md0'
        mock_attr.side_effect = [
            'clean',  # array_state
            '0',  # degraded
            'recovery',  # sync_action
        ]
        with self.assertRaises(ValueError):
            mdadm.md_check_array_state(mdname)

    @patch('curtin.block.mdadm.md_check_array_uuid')
    @patch('curtin.block.mdadm.md_get_uuid')
    def test_md_check_uuid(self, mock_guuid, mock_ckuuid):
        mdname = '/dev/md0'
        mock_guuid.return_value = '93a73e10:427f280b:b7076c02:204b8f7a'
        mock_ckuuid.return_value = True

        rv = mdadm.md_check_uuid(mdname)
        self.assertTrue(rv)

    @patch('curtin.block.mdadm.md_check_array_uuid')
    @patch('curtin.block.mdadm.md_get_uuid')
    def test_md_check_uuid_nouuid(self, mock_guuid, mock_ckuuid):
        mdname = '/dev/md0'
        mock_guuid.return_value = None
        with self.assertRaises(ValueError):
            mdadm.md_check_uuid(mdname)

    @patch('curtin.block.mdadm.md_get_devices_list')
    def test_md_check_devices(self, mock_devlist):
        mdname = '/dev/md0'
        devices = ['/dev/vdc', '/dev/vdd']
        mock_devlist.return_value = devices
        rv = mdadm.md_check_devices(mdname, devices)
        self.assertEqual(rv, None)

    @patch('curtin.block.mdadm.md_get_devices_list')
    def test_md_check_devices_wrong_devs(self, mock_devlist):
        mdname = '/dev/md0'
        devices = ['/dev/vdc', '/dev/vdd']
        mock_devlist.return_value = ['/dev/sda']
        with self.assertRaises(ValueError):
            mdadm.md_check_devices(mdname, devices)

    def test_md_check_devices_no_devs(self):
        mdname = '/dev/md0'
        devices = []
        with self.assertRaises(ValueError):
            mdadm.md_check_devices(mdname, devices)

    @patch('curtin.block.mdadm.md_get_spares_list')
    def test_md_check_spares(self, mock_devlist):
        mdname = '/dev/md0'
        spares = ['/dev/vdc', '/dev/vdd']
        mock_devlist.return_value = spares
        rv = mdadm.md_check_spares(mdname, spares)
        self.assertEqual(rv, None)

    @patch('curtin.block.mdadm.md_get_spares_list')
    def test_md_check_spares_wrong_devs(self, mock_devlist):
        mdname = '/dev/md0'
        spares = ['/dev/vdc', '/dev/vdd']
        mock_devlist.return_value = ['/dev/sda']
        with self.assertRaises(ValueError):
            mdadm.md_check_spares(mdname, spares)

    @patch('curtin.block.mdadm.mdadm_examine')
    @patch('curtin.block.mdadm.mdadm_query_detail')
    @patch('curtin.block.mdadm.md_get_uuid')
    def test_md_check_array_membership(self, mock_uuid, mock_query,
                                       mock_examine):
        mdname = '/dev/md0'
        devices = ['/dev/vda', '/dev/vdb', '/dev/vdc', '/dev/vdd']
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        md_dict = {'MD_UUID': md_uuid}
        mock_query.return_value = md_dict
        mock_uuid.return_value = md_uuid
        mock_examine.side_effect = [md_dict] * len(devices)

        expected_calls = []
        for dev in devices:
            expected_calls.append(call(dev, export=False))

        rv = mdadm.md_check_array_membership(mdname, devices)
        self.assertEqual(rv, None)
        mock_uuid.assert_has_calls([call(mdname)])
        mock_examine.assert_has_calls(expected_calls)

    @patch('curtin.block.mdadm.mdadm_examine')
    @patch('curtin.block.mdadm.mdadm_query_detail')
    @patch('curtin.block.mdadm.md_get_uuid')
    def test_md_check_array_membership_bad_dev(self, mock_uuid, mock_query,
                                               mock_examine):
        mdname = '/dev/md0'
        devices = ['/dev/vda', '/dev/vdb', '/dev/vdc', '/dev/vdd']
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        md_dict = {'MD_UUID': md_uuid}
        mock_query.return_value = md_dict
        mock_uuid.return_value = md_uuid
        mock_examine.side_effect = [
            md_dict,
            {},
            md_dict,
            md_dict,
        ]  # one device isn't a member

        with self.assertRaises(ValueError):
            mdadm.md_check_array_membership(mdname, devices)
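    # Taken together, the membership tests pin down this contract: every
    # member device's `mdadm --examine` dict must carry the array's MD_UUID;
    # an empty dict (bad_dev, above) or a different UUID (wrong_array,
    # below) makes md_check_array_membership raise ValueError.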
    @patch('curtin.block.mdadm.mdadm_examine')
    @patch('curtin.block.mdadm.mdadm_query_detail')
    @patch('curtin.block.mdadm.md_get_uuid')
    def test_md_check_array_membership_wrong_array(self, mock_uuid,
                                                   mock_query, mock_examine):
        mdname = '/dev/md0'
        devices = ['/dev/vda', '/dev/vdb', '/dev/vdc', '/dev/vdd']
        md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
        md_dict = {'MD_UUID': '11111111:427f280b:b7076c02:204b8f7a'}
        mock_query.return_value = md_dict
        mock_uuid.return_value = md_uuid
        mock_examine.side_effect = [md_dict] * len(devices)

        with self.assertRaises(ValueError):
            mdadm.md_check_array_membership(mdname, devices)

    @patch('curtin.block.mdadm.md_check_array_membership')
    @patch('curtin.block.mdadm.md_check_spares')
    @patch('curtin.block.mdadm.md_check_devices')
    @patch('curtin.block.mdadm.md_check_uuid')
    @patch('curtin.block.mdadm.md_check_raidlevel')
    @patch('curtin.block.mdadm.md_check_array_state')
    def test_md_check_all_good(self, mock_array, mock_raid, mock_uuid,
                               mock_dev, mock_spare, mock_member):
        md_devname = '/dev/md0'
        raidlevel = 1
        devices = ['/dev/vda', '/dev/vdb']
        spares = ['/dev/vdc']

        mock_array.return_value = None
        mock_raid.return_value = None
        mock_uuid.return_value = None
        mock_dev.return_value = None
        mock_spare.return_value = None
        mock_member.return_value = None

        mdadm.md_check(md_devname, raidlevel, devices=devices, spares=spares)

        mock_array.assert_has_calls([call(md_devname)])
        mock_raid.assert_has_calls([call(raidlevel)])
        mock_uuid.assert_has_calls([call(md_devname)])
        mock_dev.assert_has_calls([call(md_devname, devices)])
        mock_spare.assert_has_calls([call(md_devname, spares)])
        mock_member.assert_has_calls([call(md_devname, devices + spares)])

    def test_md_check_all_good_devshort(self):
        md_devname = 'md0'
        raidlevel = 1
        devices = ['/dev/vda', '/dev/vdb']
        spares = ['/dev/vdc']

        with self.assertRaises(ValueError):
            mdadm.md_check(md_devname, raidlevel, devices=devices,
                           spares=spares)

# vi: ts=4 expandtab syntax=python

curtin-0.1.0~bzr399/tests/unittests/test_block_mkfs.py

from curtin.block import mkfs

from unittest import TestCase
import mock


class TestBlockMkfs(TestCase):
    test_uuid = "fb26cc6c-ae73-11e5-9e38-2fb63f0c3155"

    def _get_config(self, fstype):
        return {"fstype": fstype, "type": "format", "id": "testfmt",
                "volume": "null", "label": "format1",
                "uuid": self.test_uuid}

    def _assert_same_flags(self, call, expected):
        print("call:\n{}".format(call))
        print("expected:\n{}".format(expected))

        for flag in expected:
            if type(flag) == list:
                flag_name = flag[0]
                flag_val = flag[1]
                self.assertIn(flag_name, call)
                flag_index = call.index(flag_name)
                self.assertTrue(len(call) > flag_index)
                self.assertEquals(call[flag_index + 1], flag_val)
                call.remove(flag_name)
                call.remove(flag_val)
            else:
                self.assertIn(flag, call)
                call.remove(flag)
        # Only remaining vals in call should be mkfs.fstype and dev path
        self.assertEquals(len(call), 2)

    @mock.patch("curtin.block.mkfs.block")
    @mock.patch("curtin.block.mkfs.os")
    @mock.patch("curtin.block.mkfs.util")
    def _run_mkfs_with_config(self, config, expected_cmd, expected_flags,
                              mock_util, mock_os, mock_block,
                              release="wily", strict=False):
        # Pretend we are on wily as there are no known edge cases for it
        mock_util.lsb_release.return_value = {"codename": release}
        mock_os.path.exists.return_value = True
        mock_block.get_blockdev_sector_size.return_value = (512, 512)

        mkfs.mkfs_from_config("/dev/null", config, strict=strict)
        self.assertTrue(mock_util.subp.called)
        calls = mock_util.subp.call_args_list
        self.assertEquals(len(calls), 1)

        # Get first function call, tuple of first positional arg and its
        # (nonexistent) keyword arg, and unpack to get cmd
        call = calls[0][0][0]
        self.assertEquals(call[0], expected_cmd)
        self._assert_same_flags(call, expected_flags)
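    # Illustrative input/output pair for the helper above (derived from
    # _get_config and the flag assertions that follow, not from a real
    # run): a config like
    #   {"fstype": "ext4", "type": "format", "id": "testfmt",
    #    "volume": "null", "label": "format1", "uuid": <uuid>}
    # is expected to produce roughly
    #   mkfs.ext4 -F -L format1 -U <uuid> /dev/null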
    def test_mkfs_ext(self):
        conf = self._get_config("ext4")
        expected_flags = [["-L", "format1"], "-F",
                          ["-U", self.test_uuid]]
        self._run_mkfs_with_config(conf, "mkfs.ext4", expected_flags)

    def test_mkfs_btrfs(self):
        conf = self._get_config("btrfs")
        expected_flags = [["--label", "format1"], "--force",
                          ["--uuid", self.test_uuid]]
        self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags)

    def test_mkfs_btrfs_on_precise(self):
        # Test precise+btrfs where there is no force or uuid
        conf = self._get_config("btrfs")
        expected_flags = [["--label", "format1"]]
        self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags,
                                   release="precise")

    def test_mkfs_btrfs_on_trusty(self):
        # Test trusty btrfs where there is no uuid
        conf = self._get_config("btrfs")
        expected_flags = [["--label", "format1"], "--force"]
        self._run_mkfs_with_config(conf, "mkfs.btrfs", expected_flags,
                                   release="trusty")

    def test_mkfs_fat(self):
        conf = self._get_config("fat32")
        expected_flags = [["-n", "format1"], ["-F", "32"]]
        self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags)

    def test_mkfs_vfat(self):
        """Ensure we can use vfat without fatsize"""
        conf = self._get_config("vfat")
        expected_flags = [["-n", "format1"], ]
        self._run_mkfs_with_config(conf, "mkfs.vfat", expected_flags)

    def test_mkfs_invalid_fstype(self):
        """Do not proceed if fstype is None or invalid"""
        with self.assertRaises(ValueError):
            conf = self._get_config(None)
            self._run_mkfs_with_config(conf, "mkfs.ext4", [])
        with self.assertRaises(ValueError):
            conf = self._get_config("fakefilesystemtype")
            self._run_mkfs_with_config(conf, "mkfs.ext3", [])

    def test_mkfs_invalid_label(self):
        """Do not proceed if filesystem label is too long"""
        with self.assertRaises(ValueError):
            conf = self._get_config("ext4")
            conf['label'] = "thislabelislongerthan16chars"
            self._run_mkfs_with_config(conf, "mkfs.ext4", [], strict=True)

        conf = self._get_config("swap")
        expected_flags = ["--force", ["--label", "abcdefghijklmno"],
                          ["--uuid", conf['uuid']]]
        conf['label'] = "abcdefghijklmnop"  # 16 chars, 15 is max

        # Raise error, do not truncate with strict = True
        with self.assertRaises(ValueError):
            self._run_mkfs_with_config(conf, "mkswap", expected_flags,
                                       strict=True)

        # Do not raise with strict = False
        self._run_mkfs_with_config(conf, "mkswap", expected_flags)

    @mock.patch("curtin.block.mkfs.block")
    @mock.patch("curtin.block.mkfs.util")
    @mock.patch("curtin.block.mkfs.os")
    def test_mkfs_kwargs(self, mock_os, mock_util, mock_block):
        """Ensure that kwargs are being followed"""
        mock_block.get_blockdev_sector_size.return_value = (512, 512)
        mkfs.mkfs("/dev/null", "ext4", [], uuid=self.test_uuid,
                  label="testlabel", force=True)
        expected_flags = ["-F", ["-L", "testlabel"], ["-U", self.test_uuid]]
        calls = mock_util.subp.call_args_list
        self.assertEquals(len(calls), 1)
        call = calls[0][0][0]
        self.assertEquals(call[0], "mkfs.ext4")
        self._assert_same_flags(call, expected_flags)

    @mock.patch("curtin.block.mkfs.os")
    def test_mkfs_invalid_block_device(self, mock_os):
        """Do not proceed if block device is none or is not valid block dev"""
        with self.assertRaises(ValueError):
            mock_os.path.exists.return_value = False
            mkfs.mkfs("/dev/null", "ext4")
        with self.assertRaises(ValueError):
            mock_os.path.exists.return_value = True
            mkfs.mkfs(None, "ext4")

    @mock.patch("curtin.block.mkfs.block")
    @mock.patch("curtin.block.mkfs.util")
    @mock.patch("curtin.block.mkfs.os")
    def test_mkfs_generates_uuid(self, mock_os, mock_util, mock_block):
        """Ensure that block.mkfs generates and returns a uuid if None is
           provided"""
        mock_block.get_blockdev_sector_size.return_value = (512, 512)
        uuid = mkfs.mkfs("/dev/null", "ext4")
        self.assertIsNotNone(uuid)
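
# A hedged usage sketch of the kwargs interface exercised above; the flag
# spellings per filesystem come from the assertions in this file:
#
#   from curtin.block import mkfs
#   uuid = mkfs.mkfs("/dev/vdb1", "ext4", label="data", force=True)
#   # ext4  -> -F / -L / -U flags; btrfs -> --force / --label / --uuid;
#   # vfat  -> -n <label> (plus -F 32 only when the fstype is "fat32");
#   # mkfs() generates and returns a UUID when none is supplied.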

curtin-0.1.0~bzr399/tests/unittests/test_config.py

from unittest import TestCase
import copy
import json
import textwrap

from curtin import config


class TestMerge(TestCase):
    def test_merge_cfg_string(self):
        d1 = {'str1': 'str_one'}
        d2 = {'dict1': {'d1.e1': 'd1-e1'}}
        expected = {'str1': 'str_one', 'dict1': {'d1.e1': 'd1-e1'}}
        config.merge_config(d1, d2)
        self.assertEqual(d1, expected)


class TestCmdArg2Cfg(TestCase):
    def test_cmdarg_flat(self):
        self.assertEqual(config.cmdarg2cfg("foo=bar"), {'foo': 'bar'})

    def test_dict_dict(self):
        self.assertEqual(config.cmdarg2cfg("foo/v1/v2=bar"),
                         {'foo': {'v1': {'v2': 'bar'}}})

    def test_no_equal_raises_value_error(self):
        self.assertRaises(ValueError, config.cmdarg2cfg, "foo/v1/v2")

    def test_json(self):
        self.assertEqual(
            config.cmdarg2cfg('json:foo/bar=["a", "b", "c"]', delim="/"),
            {'foo': {'bar': ['a', 'b', 'c']}})

    def test_cmdarg_multiple_equal(self):
        self.assertEqual(
            config.cmdarg2cfg("key=mykey=value"),
            {"key": "mykey=value"})

    def test_with_merge_cmdarg(self):
        cfg1 = {'foo': {'key1': 'val1', 'mylist': [1, 2]}, 'f': 'fval'}
        cfg2 = {'foo': {'key2': 'val2', 'mylist2': ['a', 'b']}, 'g': 'gval'}

        via_merge = copy.deepcopy(cfg1)
        config.merge_config(via_merge, cfg2)

        via_merge_cmdarg = copy.deepcopy(cfg1)
        config.merge_cmdarg(via_merge_cmdarg, 'json:=' + json.dumps(cfg2))

        self.assertEqual(via_merge, via_merge_cmdarg)


class TestConfigArchive(TestCase):
    def test_archive_dict(self):
        myarchive = _replace_consts(textwrap.dedent("""
            _ARCH_HEAD_
             - type: _CONF_TYPE_
               content: |
                 key1: val1
                 key2: val2
             - content: |
                 _CONF_HEAD_
                 key1: override_val1
        """))
        ret = config.load_config_archive(myarchive)
        self.assertEqual(ret, {'key1': 'override_val1', 'key2': 'val2'})

    def test_archive_string(self):
        myarchive = _replace_consts(textwrap.dedent("""
            _ARCH_HEAD_
             - |
               _CONF_HEAD_
               key1: val1
               key2: val2
             - |
               _CONF_HEAD_
               key1: override_val1
        """))
        ret = config.load_config_archive(myarchive)
        self.assertEqual(ret, {'key1': 'override_val1', 'key2': 'val2'})

    def test_archive_mixed_dict_string(self):
        myarchive = _replace_consts(textwrap.dedent("""
            _ARCH_HEAD_
             - type: _CONF_TYPE_
               content: |
                 key1: val1
                 key2: val2
             - |
               _CONF_HEAD_
               key1: override_val1
        """))
        ret = config.load_config_archive(myarchive)
        self.assertEqual(ret, {'key1': 'override_val1', 'key2': 'val2'})

    def test_recursive_string(self):
        myarchive = _replace_consts(textwrap.dedent("""
            _ARCH_HEAD_
             - |
               _ARCH_HEAD_
                - |
                  _CONF_HEAD_
                  key1: val1
                  key2: val2
             - |
               _ARCH_HEAD_
                - |
                  _CONF_HEAD_
                  key1: override_val1
        """))
        ret = config.load_config_archive(myarchive)
        self.assertEqual(ret, {'key1': 'override_val1', 'key2': 'val2'})

    def test_recursive_dict(self):
        myarchive = _replace_consts(textwrap.dedent("""
            _ARCH_HEAD_
             - type: _CONF_TYPE_
               content: |
                 key1: val1
                 key2: val2
             - content: |
                 _ARCH_HEAD_
                  - |
                    _CONF_HEAD_
                    key1: override_val1
        """))
        ret = config.load_config_archive(myarchive)
        self.assertEqual(ret, {'key1': 'override_val1', 'key2': 'val2'})


def _replace_consts(cfgstr):
    repls = {'_ARCH_HEAD_': config.ARCHIVE_HEADER,
             '_ARCH_TYPE_': config.ARCHIVE_TYPE,
             '_CONF_HEAD_': config.CONFIG_HEADER,
             '_CONF_TYPE_': config.CONFIG_TYPE}
    for k, v in repls.items():
        cfgstr = cfgstr.replace(k, v)
    return cfgstr

# vi: ts=4 expandtab syntax=python
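
# A quick illustration of the cmdarg2cfg behaviour asserted above
# (illustrative only, mirroring the test expectations):
#
#   config.cmdarg2cfg("foo=bar")        # -> {'foo': 'bar'}
#   config.cmdarg2cfg("foo/v1/v2=bar")  # -> {'foo': {'v1': {'v2': 'bar'}}}
#   config.cmdarg2cfg('json:foo/bar=["a", "b", "c"]', delim="/")
#                                       # -> {'foo': {'bar': ['a','b','c']}}
#
# and merge_cmdarg(cfg, 'json:=' + json.dumps(other)) is equivalent to
# merge_config(cfg, other).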

curtin-0.1.0~bzr399/tests/unittests/test_feature.py

from unittest import TestCase

import curtin


class TestExportsFeatures(TestCase):
    def test_has_storage_v1(self):
        self.assertIn('STORAGE_CONFIG_V1', curtin.FEATURES)

    def test_has_network_v1(self):
        self.assertIn('NETWORK_CONFIG_V1', curtin.FEATURES)

    def test_has_reporting_events_webhook(self):
        self.assertIn('REPORTING_EVENTS_WEBHOOK', curtin.FEATURES)

curtin-0.1.0~bzr399/tests/unittests/test_net.py

from unittest import TestCase
import os
import shutil
import tempfile
import yaml

from curtin import net
import curtin.net.network_state as network_state
from textwrap import dedent


class TestNetParserData(TestCase):

    def test_parse_deb_config_data_ignores_comments(self):
        contents = dedent("""\
            # ignore
            # iface eth0 inet static
            #  address 192.168.1.1
            """)
        ifaces = {}
        net.parse_deb_config_data(ifaces, contents, '', '')
        self.assertEqual({}, ifaces)

    def test_parse_deb_config_data_basic(self):
        contents = dedent("""\
            iface eth0 inet static
            address 192.168.1.2
            netmask 255.255.255.0
            hwaddress aa:bb:cc:dd:ee:ff
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'static',
                'address': '192.168.1.2',
                'netmask': '255.255.255.0',
                'hwaddress': 'aa:bb:cc:dd:ee:ff',
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)

    def test_parse_deb_config_data_auto(self):
        contents = dedent("""\
            auto eth0 eth1
            iface eth0 inet manual
            iface eth1 inet manual
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': True,
                'control': 'auto',
                'family': 'inet',
                'method': 'manual',
                '_source_path': '/etc/network/interfaces',
            },
            'eth1': {
                'auto': True,
                'family': 'inet',
                'control': 'auto',
                'method': 'manual',
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)

    def test_parse_deb_config_data_commands(self):
        contents = dedent("""\
            iface eth0 inet manual
            pre-up preup1
            pre-up preup2
            up up1
            post-up postup1
            pre-down predown1
            down down1
            down down2
            post-down postdown1
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                'pre-up': ['preup1', 'preup2'],
                'up': ['up1'],
                'post-up': ['postup1'],
                'pre-down': ['predown1'],
                'down': ['down1', 'down2'],
                'post-down': ['postdown1'],
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)

    def test_parse_deb_config_data_dns(self):
        contents = dedent("""\
            iface eth0 inet static
            dns-nameservers 192.168.1.1 192.168.1.2
            dns-search curtin local
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'static',
                'dns': {
                    'nameservers': ['192.168.1.1', '192.168.1.2'],
                    'search': ['curtin', 'local'],
                },
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)
    def test_parse_deb_config_data_bridge(self):
        contents = dedent("""\
            iface eth0 inet manual
            iface eth1 inet manual
            iface br0 inet static
            address 192.168.1.1
            netmask 255.255.255.0
            bridge_maxwait 30
            bridge_ports eth0 eth1
            bridge_pathcost eth0 1
            bridge_pathcost eth1 2
            bridge_portprio eth0 0
            bridge_portprio eth1 1
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': '/etc/network/interfaces',
            },
            'eth1': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': '/etc/network/interfaces',
            },
            'br0': {
                'auto': False,
                'family': 'inet',
                'method': 'static',
                'address': '192.168.1.1',
                'netmask': '255.255.255.0',
                'bridge': {
                    'maxwait': '30',
                    'ports': ['eth0', 'eth1'],
                    'pathcost': {
                        'eth0': '1',
                        'eth1': '2',
                    },
                    'portprio': {
                        'eth0': '0',
                        'eth1': '1',
                    },
                },
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)

    def test_parse_deb_config_data_bond(self):
        contents = dedent("""\
            iface eth0 inet manual
            bond-master bond0
            bond-primary eth0
            bond-mode active-backup
            iface eth1 inet manual
            bond-master bond0
            bond-primary eth0
            bond-mode active-backup
            iface bond0 inet static
            address 192.168.1.1
            netmask 255.255.255.0
            bond-slaves none
            bond-primary eth0
            bond-mode active-backup
            bond-miimon 100
            """)
        ifaces = {}
        net.parse_deb_config_data(
            ifaces, contents, '', '/etc/network/interfaces')
        self.assertEqual({
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                'bond': {
                    'master': 'bond0',
                    'primary': 'eth0',
                    'mode': 'active-backup',
                },
                '_source_path': '/etc/network/interfaces',
            },
            'eth1': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                'bond': {
                    'master': 'bond0',
                    'primary': 'eth0',
                    'mode': 'active-backup',
                },
                '_source_path': '/etc/network/interfaces',
            },
            'bond0': {
                'auto': False,
                'family': 'inet',
                'method': 'static',
                'address': '192.168.1.1',
                'netmask': '255.255.255.0',
                'bond': {
                    'slaves': 'none',
                    'primary': 'eth0',
                    'mode': 'active-backup',
                    'miimon': '100',
                },
                '_source_path': '/etc/network/interfaces',
            },
        }, ifaces)


class TestNetParser(TestCase):

    def setUp(self):
        self.target = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.target)

    def make_config(self, path=None, name=None, contents=None, parse=True):
        if path is None:
            path = self.target
        if name is None:
            name = 'interfaces'
        path = os.path.join(path, name)
        if contents is None:
            contents = dedent("""\
                auto eth0
                iface eth0 inet static
                address 192.168.1.2
                netmask 255.255.255.0
                hwaddress aa:bb:cc:dd:ee:ff
                """)
        with open(path, 'w') as stream:
            stream.write(contents)
        ifaces = None
        if parse:
            ifaces = {}
            net.parse_deb_config_data(ifaces, contents, '', path)
        return path, ifaces

    def test_parse_deb_config(self):
        path, data = self.make_config()
        observed = net.parse_deb_config(path)
        self.assertEqual(data, observed)

    def test_parse_deb_config_source(self):
        path, data = self.make_config(name='interfaces2')
        contents = dedent("""\
            source interfaces2
            iface eth1 inet manual
            """)
        i_path, _ = self.make_config(contents=contents, parse=False)
        data['eth1'] = {
            'auto': False,
            'family': 'inet',
            'method': 'manual',
            '_source_path': i_path,
        }
        observed = net.parse_deb_config(i_path)
        self.assertEqual(data, observed)

    def test_parse_deb_config_source_with_glob(self):
        path, data = self.make_config(name='eth0')
        contents = dedent("""\
            source eth*
            iface eth1 inet manual
            """)
        i_path, _ = self.make_config(contents=contents, parse=False)
        data['eth1'] = {
            'auto': False,
            'family': 'inet',
            'method': 'manual',
            '_source_path': i_path,
        }
        observed = net.parse_deb_config(i_path)
        self.assertEqual(data, observed)

    def test_parse_deb_config_source_dir(self):
        subdir = os.path.join(self.target, 'interfaces.d')
        os.mkdir(subdir)
        path, data = self.make_config(path=subdir, name='interfaces2')
        contents = dedent("""\
            source-directory interfaces.d
            source interfaces2
            iface eth1 inet manual
            """)
        i_path, _ = self.make_config(contents=contents, parse=False)
        data['eth1'] = {
            'auto': False,
            'family': 'inet',
            'method': 'manual',
            '_source_path': i_path,
        }
        observed = net.parse_deb_config(i_path)
        self.assertEqual(data, observed)
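    # Note the dict shape all of these parser tests rely on:
    # parse_deb_config* returns a dict keyed by interface name, each value
    # holding at least 'auto', 'family', 'method' and the '_source_path' of
    # the stanza that defined it (see the expected dicts above).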
    def test_parse_deb_config_source_dir_glob(self):
        subdir = os.path.join(self.target, 'interfaces0.d')
        os.mkdir(subdir)
        self.make_config(path=subdir, name='eth0',
                         contents="iface eth0 inet manual")
        self.make_config(path=subdir, name='eth1',
                         contents="iface eth1 inet manual")
        subdir2 = os.path.join(self.target, 'interfaces1.d')
        os.mkdir(subdir2)
        self.make_config(path=subdir2, name='eth2',
                         contents="iface eth2 inet manual")
        self.make_config(path=subdir2, name='eth3',
                         contents="iface eth3 inet manual")
        contents = dedent("""\
            source-directory interfaces*.d
            """)
        i_path, _ = self.make_config(contents=contents, parse=False)
        data = {
            'eth0': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': os.path.join(subdir, "eth0"),
            },
            'eth1': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': os.path.join(subdir, "eth1"),
            },
            'eth2': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': os.path.join(subdir2, "eth2"),
            },
            'eth3': {
                'auto': False,
                'family': 'inet',
                'method': 'manual',
                '_source_path': os.path.join(subdir2, "eth3"),
            },
        }
        observed = net.parse_deb_config(i_path)
        self.assertEqual(data, observed)

    def test_parse_deb_config_source_dir_glob_ignores_none_matching(self):
        subdir = os.path.join(self.target, 'interfaces0.d')
        os.mkdir(subdir)
        self.make_config(path=subdir, name='.eth0',
                         contents="iface eth0 inet manual")
        contents = dedent("""\
            source-directory interfaces*.d
            """)
        i_path, _ = self.make_config(contents=contents, parse=False)
        observed = net.parse_deb_config(i_path)
        self.assertEqual({}, observed)


class TestNetConfig(TestCase):

    def setUp(self):
        self.target = tempfile.mkdtemp()
        self.config_f = os.path.join(self.target, 'config')
        self.config = '''
# YAML example of a simple network config
network:
    version: 1
    config:
        # Physical interfaces.
        - type: physical
          name: eth0
          mac_address: "c0:d6:9f:2c:e8:80"
          subnets:
            - type: dhcp4
            - type: static
              address: 192.168.21.3/24
              dns_nameservers:
                - 8.8.8.8
                - 8.8.4.4
              dns_search: barley.maas sach.maas
        - type: physical
          name: eth1
          mac_address: "cf:d6:af:48:e8:80"
        - type: nameserver
          address:
            - 1.2.3.4
            - 5.6.7.8
          search:
            - wark.maas
'''

        with open(self.config_f, 'w') as fp:
            fp.write(self.config)

    def get_net_config(self, config=None):
        if config is None:
            config = self.config
        cfg = yaml.safe_load(config)
        return cfg.get('network')

    def get_net_state(self, config=None):
        net_cfg = self.get_net_config(config)
        version = net_cfg.get('version')
        config = net_cfg.get('config')
        ns = network_state.NetworkState(version=version, config=config)
        ns.parse_config()
        return ns

    def tearDown(self):
        shutil.rmtree(self.target)

    def test_parse_net_config_data(self):
        ns = self.get_net_state()
        net_state_from_cls = ns.network_state

        net_state_from_fn = net.parse_net_config_data(self.get_net_config())

        self.assertEqual(net_state_from_cls, net_state_from_fn)

    def test_parse_net_config(self):
        ns = self.get_net_state()
        net_state_from_cls = ns.network_state

        net_state_from_fn = net.parse_net_config(self.config_f)

        self.assertEqual(net_state_from_cls, net_state_from_fn)

    def test_render_persistent_net(self):
        ns = self.get_net_state()
        udev_rules = ('# Autogenerated by curtin\n'
                      'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                      'ATTR{address}=="cf:d6:af:48:e8:80", NAME="eth1"\n'
                      'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
                      'ATTR{address}=="c0:d6:9f:2c:e8:80", NAME="eth0"\n')
        persist_net_rules = net.render_persistent_net(ns.network_state)
        self.assertEqual(sorted(udev_rules.split('\n')),
                         sorted(persist_net_rules.split('\n')))
    def test_render_interfaces(self):
        ns = self.get_net_state()
        ifaces = dedent("""\
            auto lo
            iface lo inet loopback
                dns-nameservers 1.2.3.4 5.6.7.8
                dns-search wark.maas

            auto eth0
            iface eth0 inet dhcp
                post-up ifup eth0:1

            auto eth0:1
            iface eth0:1 inet static
                address 192.168.21.3/24
                dns-nameservers 8.8.8.8 8.8.4.4
                dns-search barley.maas sach.maas

            iface eth1 inet manual

            source /etc/network/interfaces.d/*.cfg
            """)
        net_ifaces = net.render_interfaces(ns.network_state)
        print(ns.network_state.get('interfaces'))
        self.assertEqual(sorted(ifaces.split('\n')),
                         sorted(net_ifaces.split('\n')))

    def test_render_interfaces_bonds(self):
        bond_config = open('examples/tests/bonding_network.yaml', 'r').read()

        ns = self.get_net_state(bond_config)
        ifaces = dedent("""\
            auto lo
            iface lo inet loopback

            auto interface0
            iface interface0 inet dhcp

            auto interface1
            iface interface1 inet manual
                bond-mode active-backup
                bond-master bond0

            auto interface2
            iface interface2 inet manual
                bond-mode active-backup
                bond-master bond0

            auto bond0
            iface bond0 inet static
                address 10.23.23.2/24
                bond-mode active-backup
                hwaddress 52:54:00:12:34:06
                bond-slaves none
                post-up ifup bond0:1

            auto bond0:1
            iface bond0:1 inet static
                address 10.23.24.2/24

            source /etc/network/interfaces.d/*.cfg
            """)
        net_ifaces = net.render_interfaces(ns.network_state)
        print("\n".join(list(map(str,
                                 enumerate(sorted(ifaces.split('\n')))))))
        print("\n^^ LOCAL -- RENDER vv")
        print("\n".join(list(map(str,
                                 enumerate(sorted(net_ifaces.split('\n')))))))
        print(ns.network_state.get('interfaces'))
        self.assertEqual(sorted(ifaces.split('\n')),
                         sorted(net_ifaces.split('\n')))

    def test_render_interfaces_iscsiroot(self):
        iscsi_config = open('examples/network-iscsiroot.yaml', 'r').read()

        ns = self.get_net_state(iscsi_config)
        ifaces = dedent("""\
            auto lo
            iface lo inet loopback

            # control-manual interface0
            iface interface0 inet dhcp

            auto interface1
            iface interface1 inet static
                address 192.168.14.2/24
                gateway 192.168.14.1
                mtu 1492
                post-up ifup interface1:1

            auto interface1:1
            iface interface1:1 inet static
                address 192.168.14.4/24

            allow-hotplug interface2
            iface interface2 inet static
                gateway 10.11.12.1
                address 10.11.12.13/22

            source /etc/network/interfaces.d/*.cfg
            """)
        net_ifaces = net.render_interfaces(ns.network_state)
        print("\n".join(sorted(ifaces.split('\n'))))
        print("\n^^ LOCAL -- RENDER vv")
        print("\n".join(sorted(net_ifaces.split('\n'))))
        print(ns.network_state.get('interfaces'))
        self.assertEqual(sorted(ifaces.split('\n')),
                         sorted(net_ifaces.split('\n')))
    def test_render_interfaces_ipv6_aliases(self):
        ipv6_aliases_config = '''
# YAML example of a simple network config
network:
    version: 1
    config:
        # Physical interfaces.
        - type: physical
          name: eth0
          mac_address: "c0:d6:9f:2c:e8:80"
          subnets:
            - type: static
              address: fde9:8f83:4a81:1:0:1:0:6/64
            - type: static
              address: 192.168.0.1/24
'''

        ns = self.get_net_state(ipv6_aliases_config)
        ifaces = dedent("""\
            auto lo
            iface lo inet loopback

            auto eth0
            iface eth0 inet6 static
                address fde9:8f83:4a81:1:0:1:0:6/64
                post-up ifup eth0:1

            auto eth0:1
            iface eth0:1 inet static
                address 192.168.0.1/24

            source /etc/network/interfaces.d/*.cfg
            """)
        net_ifaces = net.render_interfaces(ns.network_state)
        print("\n".join(sorted(ifaces.split('\n'))))
        print("\n^^ LOCAL -- RENDER vv")
        print("\n".join(sorted(net_ifaces.split('\n'))))
        print(ns.network_state.get('interfaces'))
        self.assertEqual(sorted(ifaces.split('\n')),
                         sorted(net_ifaces.split('\n')))

# vi: ts=4 expandtab syntax=python

curtin-0.1.0~bzr399/tests/unittests/test_partitioning.py

import unittest
import mock
import curtin.commands.block_meta
from sys import version_info

if version_info.major == 2:
    import __builtin__ as builtins
else:
    import builtins

parted = None  # FIXME: remove these tests entirely. This is here for flake8


@unittest.skip
class TestBlock(unittest.TestCase):
    storage_config = {
        "sda": {"id": "sda", "type": "disk", "ptable": "msdos",
                "serial": "DISK_1", "grub_device": "True"},
        "sdb": {"id": "sdb", "type": "disk", "ptable": "msdos"},
        "sda1": {"id": "sda1", "type": "partition", "number": 1,
                 "size": "8GB", "device": "sda", "flag": "boot"},
        "sda2": {"id": "sda2", "type": "partition", "number": 2,
                 "size": "1GB", "device": "sda"},
        "sda3": {"id": "sda3", "type": "partition", "number": 3,
                 "size": "2GB", "device": "sda"},
        "volgroup1": {"id": "volgroup1", "type": "lvm_volgroup",
                      "devices": ["sda3"], "name": "lvm_vg1"},
        "lvm_part1": {"id": "lvm_part1", "type": "lvm_partition",
                      "volgroup": "volgroup1", "size": "1G",
                      "name": "lvm_p1"},
        "lvm_part2": {"id": "lvm_part2", "type": "lvm_partition",
                      "volgroup": "volgroup1", "name": "lvm_p2"},
        "bcache0": {"id": "bcache0", "type": "bcache",
                    "backing_device": "lvm_part1", "cache_device": "sdc1"},
        "crypt0": {"id": "crypt0", "type": "dm_crypt", "volume": "sdb1",
                   "key": "testkey"},
        "raiddev": {"id": "raiddev", "type": "raid", "raidlevel": 1,
                    "devices": ["sdx1", "sdy1"], "spare_devices": ["sdz1"],
                    "name": "md0"},
        "fake0": {"id": "fake0", "type": "faketype"},
        "sda1_root": {"id": "sda1_root", "type": "format", "fstype": "ext4",
                      "volume": "sda1", "label": "root_part"},
        "sda2_home": {"id": "sda2_home", "type": "format", "fstype": "fat32",
                      "volume": "sda2"},
        "raid_format": {"id": "raid_format", "type": "format",
                        "fstype": "ext4", "volume": "raiddev"},
        "sda1_mount": {"id": "sda1_mount", "type": "mount", "path": "/",
                       "device": "sda1_root"},
        "sda2_mount": {"id": "sda2_mount", "type": "mount", "path": "/home",
                       "device": "sda2_home"},
        "raid_mount": {"id": "raid_mount", "type": "mount",
                       "path": "/srv/data", "device": "raid_format"},
    }
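    # storage_config mirrors the shape of curtin's storage config: a dict
    # keyed by item id, where each item names its 'type' (disk, partition,
    # lvm_volgroup, dm_crypt, raid, bcache, format, mount, ...) and refers
    # to other items by id -- e.g. sda1_mount -> sda1_root -> sda1 -> sda.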

    @mock.patch("curtin.commands.block_meta.devsync")
    @mock.patch("curtin.commands.block_meta.glob")
    @mock.patch("curtin.commands.block_meta.block")
    @mock.patch("curtin.commands.block_meta.parted")
    def test_get_path_to_storage_volume(self, mock_parted, mock_block,
                                        mock_glob, mock_devsync):
        # Test disk
        mock_block.lookup_disk.side_effect = \
            lambda x: "/dev/fake/serial-%s" % x
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "sda", self.storage_config)
        self.assertEqual(path, "/dev/fake/serial-DISK_1")
        mock_devsync.assert_called_with("/dev/fake/serial-DISK_1")

        # Test partition
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "sda1", self.storage_config)
        mock_parted.getDevice.assert_called_with("/dev/fake/serial-DISK_1")
        self.assertTrue(mock_parted.newDisk.called)
        mock_devsync.assert_called_with("/dev/fake/serial-DISK_1")

        # Test lvm partition
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "lvm_part1", self.storage_config)
        self.assertEqual(path, "/dev/lvm_vg1/lvm_p1")
        mock_devsync.assert_called_with("/dev/lvm_vg1/lvm_p1")

        # Test dmcrypt
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "crypt0", self.storage_config)
        self.assertEqual(path, "/dev/mapper/crypt0")
        mock_devsync.assert_called_with("/dev/mapper/crypt0")

        # Test raid
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "raiddev", self.storage_config)
        self.assertEqual(path, "/dev/md0")
        mock_devsync.assert_called_with("/dev/md0")

        # Test bcache
        mock_glob.glob.return_value = ["/sys/block/bcache1/slaves/hd0",
                                       "/sys/block/bcache0/slaves/lvm_p1"]
        path = curtin.commands.block_meta.get_path_to_storage_volume(
            "bcache0", self.storage_config)
        self.assertEqual(path, "/dev/bcache0")

        with self.assertRaises(NotImplementedError):
            curtin.commands.block_meta.get_path_to_storage_volume(
                "fake0", self.storage_config)

    @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume")
    @mock.patch("curtin.commands.block_meta.parted")
    def test_disk_handler(self, mock_parted,
                          mock_get_path_to_storage_volume):
        disk_path = "/dev/sda"
        mock_get_path_to_storage_volume.return_value = disk_path
        curtin.commands.block_meta.disk_handler(
            self.storage_config.get("sda"), self.storage_config)
        self.assertTrue(mock_get_path_to_storage_volume.called)
        mock_parted.getDevice.assert_called_with(disk_path)
        mock_parted.freshDisk.assert_called_with(
            mock_parted.getDevice(), "msdos")

    @mock.patch("curtin.commands.block_meta.time")
    @mock.patch("curtin.commands.block_meta.os.path")
    @mock.patch("curtin.commands.block_meta.util")
    @mock.patch("curtin.commands.block_meta.parted")
    @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume")
    def test_partition_handler(self, mock_get_path_to_storage_volume,
                               mock_parted, mock_util, mock_path,
                               mock_time):
        mock_path.exists.return_value = True
        mock_get_path_to_storage_volume.return_value = "/dev/fake"
        mock_parted.sizeToSectors.return_value = parted.sizeToSectors(
            8, "GB", 512)
        curtin.commands.block_meta.partition_handler(
            self.storage_config.get("sda1"), self.storage_config)
        mock_get_path_to_storage_volume.assert_called_with(
            "sda", self.storage_config)
        mock_parted.getDevice.assert_called_with(
            mock_get_path_to_storage_volume.return_value)
        self.assertTrue(mock_parted.newDisk.called)
        mock_parted.Geometry.assert_called_with(
            device=mock_parted.newDisk().device, start=2048,
            length=mock_parted.sizeToSectors.return_value)
        mock_parted.Partition().setFlag.assert_called_with(
            mock_parted.PARTITION_BOOT)
        curtin.commands.block_meta.partition_handler(
            self.storage_config.get("sda2"), self.storage_config)
        self.assertEqual(mock_parted.Partition().setFlag.call_count, 1)
        with self.assertRaises(ValueError):
            curtin.commands.block_meta.partition_handler(
                {}, self.storage_config)
"root_part", mock_get_path_to_storage_volume.return_value]) curtin.commands.block_meta.format_handler( self.storage_config.get("sda2_home"), self.storage_config) mock_util.subp.assert_called_with( ["mkfs.fat", "-F", "32", mock_get_path_to_storage_volume.return_value]) curtin.commands.block_meta.format_handler( {"type": "format", "fstype": "invalid", "volume": "fake", "id": "fake1"}, self.storage_config) args = mock_util.subp.call_args_list self.assertTrue(mock.call(["which", "mkfs.invalid"]) in args) @mock.patch.object(builtins, "open") @mock.patch("curtin.commands.block_meta.block") @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume") @mock.patch("curtin.commands.block_meta.util") def test_mount_handler(self, mock_util, mock_get_path_to_storage_volume, mock_block, mock_open): mock_util.load_command_environment.return_value = {"fstab": "/tmp/dir/fstab", "target": "/tmp/mntdir"} mock_get_path_to_storage_volume.return_value = "/dev/fake0" mock_block.get_volume_uuid.return_value = "UUID123" curtin.commands.block_meta.mount_handler( self.storage_config.get("sda2_mount"), self.storage_config) mock_util.ensure_dir.assert_called_with("/tmp/mntdir/home") mock_open.assert_called_with("/tmp/dir/fstab", "a") mock_util.subp.assert_called_with(["mount", "/dev/fake0", "/tmp/mntdir/home"]) args = mock_get_path_to_storage_volume.call_args_list self.assertTrue(len(args) == 1) self.assertTrue(args[0] == mock.call("sda2", self.storage_config)) mock_block.get_volume_uuid.assert_called_with("/dev/fake0") curtin.commands.block_meta.mount_handler( self.storage_config.get("raid_mount"), self.storage_config) mock_util.ensure_dir.assert_called_with("/tmp/mntdir/srv/data") args = mock_get_path_to_storage_volume.call_args_list self.assertTrue(len(args) == 3) self.assertTrue(args[2] == mock.call("raiddev", self.storage_config)) @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume") @mock.patch("curtin.commands.block_meta.util") def test_lvm_volgroup_handler(self, mock_util, mock_get_path_to_storage_volume): mock_get_path_to_storage_volume.return_value = "/dev/fake0" curtin.commands.block_meta.lvm_volgroup_handler( self.storage_config.get("volgroup1"), self.storage_config) mock_util.subp.assert_called_with( ["vgcreate", "lvm_vg1", mock_get_path_to_storage_volume.return_value]) @mock.patch("curtin.commands.block_meta.util") def test_lvm_partition_handler(self, mock_util): base_cmd = ["lvcreate", "lvm_vg1", "-n"] curtin.commands.block_meta.lvm_partition_handler( self.storage_config.get("lvm_part1"), self.storage_config) mock_util.subp.assert_called_with(base_cmd + ["lvm_p1", "-L", "1G"]) curtin.commands.block_meta.lvm_partition_handler( self.storage_config.get("lvm_part2"), self.storage_config) mock_util.subp.assert_called_with(base_cmd + ["lvm_p2", "-l", "100%FREE"]) @mock.patch("curtin.commands.block_meta.block") @mock.patch("curtin.commands.block_meta.os.remove") @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume") @mock.patch("curtin.commands.block_meta.tempfile") @mock.patch.object(builtins, "open") @mock.patch("curtin.commands.block_meta.util") def test_dm_crypt_handler(self, mock_util, mock_open, mock_tempfile, mock_get_path_to_storage_volume, mock_remove, mock_block): tmp_path = "/tmp/tmpfile1" mock_util.load_command_environment.return_value = {"fstab": "/tmp/dir/fstab"} mock_get_path_to_storage_volume.return_value = "/dev/fake0" mock_tempfile.mkstemp.return_value = ["fp", tmp_path] mock_block.get_volume_uuid.return_value = "UUID123" 
    @mock.patch("curtin.commands.block_meta.block")
    @mock.patch("curtin.commands.block_meta.os.remove")
    @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume")
    @mock.patch("curtin.commands.block_meta.tempfile")
    @mock.patch.object(builtins, "open")
    @mock.patch("curtin.commands.block_meta.util")
    def test_dm_crypt_handler(self, mock_util, mock_open, mock_tempfile,
                              mock_get_path_to_storage_volume, mock_remove,
                              mock_block):
        tmp_path = "/tmp/tmpfile1"
        mock_util.load_command_environment.return_value = {
            "fstab": "/tmp/dir/fstab"}
        mock_get_path_to_storage_volume.return_value = "/dev/fake0"
        mock_tempfile.mkstemp.return_value = ["fp", tmp_path]
        mock_block.get_volume_uuid.return_value = "UUID123"

        curtin.commands.block_meta.dm_crypt_handler(
            self.storage_config.get("crypt0"), self.storage_config)
        mock_get_path_to_storage_volume.assert_called_with(
            "sdb1", self.storage_config)
        self.assertTrue(mock_tempfile.mkstemp.called)
        calls = mock_util.subp.call_args_list
        self.assertEqual(
            mock.call(["cryptsetup", "luksFormat",
                       mock_get_path_to_storage_volume.return_value,
                       tmp_path]),
            calls[0])
        self.assertEqual(
            mock.call(["cryptsetup", "open", "--type", "luks",
                       mock_get_path_to_storage_volume.return_value,
                       "crypt0", "--key-file", tmp_path]),
            calls[1])
        mock_remove.assert_called_with(tmp_path)
        mock_open.assert_called_with("/tmp/dir/crypttab", "a")
        mock_block.get_volume_uuid.assert_called_with(
            mock_get_path_to_storage_volume.return_value)

    @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume")
    @mock.patch.object(builtins, "open")
    @mock.patch("curtin.commands.block_meta.util")
    def test_raid_handler(self, mock_util, mock_open,
                          mock_get_path_to_storage_volume):
        main_cmd = ["yes", "|", "mdadm", "--create", "/dev/md0",
                    "--level=1", "--raid-devices=2", "/dev/fake/sdx1",
                    "/dev/fake/sdy1", "--spare-devices=1", "/dev/fake/sdz1"]
        mock_util.load_command_environment.return_value = {
            "fstab": "/tmp/dir/fstab"}
        mock_get_path_to_storage_volume.side_effect = \
            lambda x, y: "/dev/fake/%s" % x
        mock_util.subp.return_value = ("mdadm scan info", None)
        curtin.commands.block_meta.raid_handler(
            self.storage_config.get("raiddev"), self.storage_config)
        path_calls = list(args[0] for args, kwargs in
                          mock_get_path_to_storage_volume.call_args_list)
        subp_calls = mock_util.subp.call_args_list
        for path in self.storage_config.get("raiddev").get("devices") + \
                self.storage_config.get("raiddev").get("spare_devices"):
            self.assertTrue(path in path_calls)
            self.assertTrue(
                mock.call(["mdadm", "--zero-superblock",
                           mock_get_path_to_storage_volume.side_effect(
                               path, None)]) in subp_calls)
        self.assertTrue(mock.call(" ".join(main_cmd), shell=True) in
                        subp_calls)
        self.assertTrue(mock.call(["mdadm", "--detail", "--scan"],
                                  capture=True) in subp_calls)
        mock_open.assert_called_with("/tmp/dir/mdadm.conf", "w")

    @mock.patch.object(builtins, "open")
    @mock.patch("curtin.commands.block_meta.get_path_to_storage_volume")
    @mock.patch("curtin.commands.block_meta.util")
    def test_bcache_handler(self, mock_util,
                            mock_get_path_to_storage_volume, mock_open):
        mock_get_path_to_storage_volume.side_effect = ["/dev/fake0",
                                                       "/dev/fake1",
                                                       "/dev/fake0"]
        curtin.commands.block_meta.bcache_handler(
            self.storage_config.get("bcache0"), self.storage_config)
        calls = mock_util.subp.call_args_list
        self.assertTrue(mock.call(["modprobe", "bcache"]) == calls[0])
        self.assertTrue(mock.call(["make-bcache", "-B", "/dev/fake0",
                                   "-C", "/dev/fake1"]) == calls[1])

# vi: ts=4 expandtab syntax=python

curtin-0.1.0~bzr399/tests/unittests/test_reporter.py

# Copyright (C) 2014 Canonical Ltd.
#
# Author: Newell Jensen
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import ( absolute_import, print_function, unicode_literals, ) from unittest import TestCase from mock import patch from curtin.reporter.legacy import ( EmptyReporter, load_reporter, LoadReporterException, ) # #XXX: see `XXX` below for details from curtin.reporter.legacy.maas import ( load_factory, MAASReporter ) from curtin import reporter from curtin.reporter import handlers from curtin import url_helper from curtin.reporter import events import os import tempfile import base64 class TestLegacyReporter(TestCase): @patch('curtin.reporter.legacy.LOG') def test_load_reporter_logs_empty_cfg(self, mock_LOG): cfg = {} reporter = load_reporter(cfg) self.assertIsInstance(reporter, EmptyReporter) self.assertTrue(mock_LOG.info.called) @patch('curtin.reporter.legacy.LOG') def test_load_reporter_logs_cfg_with_no_module( self, mock_LOG): cfg = {'reporter': {'empty': {}}} reporter = load_reporter(cfg) self.assertIsInstance(reporter, EmptyReporter) self.assertTrue(mock_LOG.error.called) @patch('curtin.reporter.legacy.LOG') def test_load_reporter_logs_cfg_wrong_options(self, mock_LOG): # we are passing wrong config options for maas reporter # to test load_reporter in event reporter options are wrong cfg = {'reporter': {'maas': {'wrong': 'wrong'}}} reporter = load_reporter(cfg) self.assertIsInstance(reporter, EmptyReporter) self.assertTrue(mock_LOG.error.called) class TestMAASReporter(TestCase): def test_load_factory_raises_exception_wrong_options(self): options = {'wrong': 'wrong'} self.assertRaises( LoadReporterException, load_factory, options) def test_load_factory_returns_maas_reporter_good_options(self): options = { 'url': 'url', 'consumer_key': 'consumer_key', 'token_key': 'token_key', 'token_secret': 'token_secret'} reporter = load_factory(options) self.assertIsInstance(reporter, MAASReporter) class TestReporter(TestCase): config = {'element1': {'type': 'webhook', 'level': 'INFO', 'consumer_key': "ck_foo", 'consumer_secret': 'cs_foo', 'token_key': 'tk_foo', 'token_secret': 'ts_foo', 'endpoint': '127.0.0.1:8000'}} ev_name = 'event_name_1' ev_desc = 'test event description' def _get_reported_event(self, mock_report_event): self.assertTrue(mock_report_event.called) calls = mock_report_event.call_args_list self.assertTrue(len(calls) > 0) call = calls[-1][0] self.assertIsInstance(call[0], events.ReportingEvent) return call[0] def test_default_configuration(self): handler_registry = \ reporter.instantiated_handler_registry.registered_items self.assertTrue('logging' in handler_registry) self.assertIsInstance(handler_registry['logging'], handlers.LogHandler) @patch('curtin.reporter.instantiated_handler_registry') @patch('curtin.reporter.DictRegistry') def test_update_config(self, mock_registry, mock_handler_registry): reporter.update_configuration(self.config) mock_handler_registry.unregister_item.assert_called_with('element1') calls = mock_handler_registry.register_item.call_args_list self.assertEqual(len(calls), 1) webhook = calls[0][0][1] self.assertEqual(webhook.endpoint, self.config['element1']['endpoint']) self.assertEqual(webhook.level, 20) self.assertIsInstance(webhook.oauth_helper, url_helper.OauthUrlHelper) @patch('curtin.url_helper.OauthUrlHelper') def test_webhook_handler(self, mock_url_helper): event = events.ReportingEvent(events.START_EVENT_TYPE, 'test_event', 'test event', level='INFO') webhook_handler = handlers.WebHookHandler('127.0.0.1:8000', level='INFO') webhook_handler.publish_event(event) webhook_handler.oauth_helper.geturl.assert_called_with( url='127.0.0.1:8000', 
data=event.as_dict(), headers=webhook_handler.headers, retries=None) event.level = 'DEBUG' webhook_handler.oauth_helper.geturl.called = False webhook_handler.publish_event(event) webhook_handler = handlers.WebHookHandler('127.0.0.1:8000', level="INVALID") self.assertEquals(webhook_handler.level, 30) @patch('curtin.reporter.events.report_event') def test_report_start_event(self, mock_report_event): events.report_start_event(self.ev_name, self.ev_desc) event_dict = self._get_reported_event(mock_report_event).as_dict() self.assertEqual(event_dict.get('name'), self.ev_name) self.assertEqual(event_dict.get('level'), 'INFO') self.assertEqual(event_dict.get('description'), 'started: ' + self.ev_desc) self.assertEqual(event_dict.get('event_type'), events.START_EVENT_TYPE) @patch('curtin.reporter.events.report_event') def test_report_finish_event(self, mock_report_event): events.report_finish_event(self.ev_name, self.ev_desc) event = self._get_reported_event(mock_report_event) self.assertIsInstance(event, events.FinishReportingEvent) event_dict = event.as_dict() self.assertEqual(event_dict.get('description'), 'finished: ' + self.ev_desc) @patch('curtin.reporter.events.report_event') def test_report_finished_event_levelset(self, mock_report_event): events.report_finish_event(self.ev_name, self.ev_desc, result=events.status.FAIL) event_dict = self._get_reported_event(mock_report_event).as_dict() self.assertEqual(event_dict.get('level'), 'ERROR') self.assertEqual(event_dict.get('description'), 'failed: ' + self.ev_desc) events.report_finish_event(self.ev_name, self.ev_desc, result=events.status.WARN) event_dict = self._get_reported_event(mock_report_event).as_dict() self.assertEqual(event_dict.get('level'), 'WARN') self.assertEqual(event_dict.get('description'), 'failed: ' + self.ev_desc) @patch('curtin.reporter.events.report_event') def test_report_finished_post_files(self, mock_report_event): test_data = b'abcdefg' tmp = tempfile.mkstemp() try: with open(tmp[1], 'wb') as fp: fp.write(test_data) events.report_finish_event(self.ev_name, self.ev_desc, post_files=[tmp[1]]) event = self._get_reported_event(mock_report_event) files = event.as_dict().get('files') self.assertTrue(len(files) == 1) self.assertEqual(files[0].get('path'), tmp[1]) self.assertEqual(files[0].get('encoding'), 'base64') self.assertEqual(files[0].get('content'), base64.b64encode(test_data).decode()) finally: os.remove(tmp[1]) @patch('curtin.url_helper.OauthUrlHelper') def test_webhook_handler_post_files(self, mock_url_helper): test_data = b'abcdefg' tmp = tempfile.mkstemp() tmpfname = tmp[1] try: with open(tmpfname, 'wb') as fp: fp.write(test_data) event = events.FinishReportingEvent('test_event_name', 'test event description', post_files=[tmpfname], level='INFO') webhook_handler = handlers.WebHookHandler('127.0.0.1:8000', level='INFO') webhook_handler.publish_event(event) webhook_handler.oauth_helper.geturl.assert_called_with( url='127.0.0.1:8000', data=event.as_dict(), headers=webhook_handler.headers, retries=None) finally: os.remove(tmpfname) curtin-0.1.0~bzr399/tests/unittests/test_util.py0000644000000000000000000002232012741214565020123 0ustar 00000000000000from unittest import TestCase import mock import os import stat import shutil import tempfile from curtin import util class TestLogTimer(TestCase): def test_logger_called(self): data = {} def mylog(msg): data['msg'] = msg with util.LogTimer(mylog, "mymessage"): pass self.assertIn("msg", data) self.assertIn("mymessage", data['msg']) class TestDisableDaemons(TestCase): prcpath 
= "usr/sbin/policy-rc.d" def setUp(self): self.target = tempfile.mkdtemp() self.temp_prc = os.path.join(self.target, self.prcpath) def tearDown(self): shutil.rmtree(self.target) def test_disable_daemons_in_root_works(self): ret = util.disable_daemons_in_root(self.target) self.assertTrue(ret) self.assertTrue(os.path.exists(self.temp_prc)) ret = util.undisable_daemons_in_root(self.target) # return should have been true (it removed) and file should be gone self.assertTrue(ret) self.assertFalse(os.path.exists(self.temp_prc)) def test_disable_daemons_with_existing_is_false(self): util.write_file(os.path.join(self.target, self.prcpath), "foo") ret = util.disable_daemons_in_root(self.target) # the return should have been false (it did not create) # but the file should still exist self.assertFalse(ret) self.assertTrue(os.path.exists(self.temp_prc)) class TestWhich(TestCase): def setUp(self): self.orig_is_exe = util.is_exe util.is_exe = self.my_is_exe self.orig_path = os.environ.get("PATH") os.environ["PATH"] = "/usr/bin:/usr/sbin:/bin:/sbin" def tearDown(self): if self.orig_path is None: del os.environ["PATH"] else: os.environ["PATH"] = self.orig_path util.is_exe = self.orig_is_exe self.exe_list = [] def my_is_exe(self, fpath): return os.path.abspath(fpath) in self.exe_list def test_target_none(self): self.exe_list = ["/usr/bin/ls"] self.assertEqual(util.which("ls"), "/usr/bin/ls") def test_no_program_target_none(self): self.exe_list = [] self.assertEqual(util.which("fuzz"), None) def test_target_set(self): self.exe_list = ["/foo/bin/ls"] self.assertEqual(util.which("ls", target="/foo"), "/bin/ls") def test_no_program_target_set(self): self.exe_list = ["/usr/bin/ls"] self.assertEqual(util.which("fuzz"), None) def test_custom_path_target_unset(self): self.exe_list = ["/usr/bin2/fuzz"] self.assertEqual( util.which("fuzz", search=["/bin1", "/usr/bin2"]), "/usr/bin2/fuzz") def test_custom_path_target_set(self): self.exe_list = ["/target/usr/bin2/fuzz"] found = util.which("fuzz", search=["/bin1", "/usr/bin2"], target="/target") self.assertEqual(found, "/usr/bin2/fuzz") class TestLsbRelease(TestCase): def setUp(self): self._reset_cache() def _reset_cache(self): keys = [k for k in util._LSB_RELEASE.keys()] for d in keys: del util._LSB_RELEASE[d] @mock.patch("curtin.util.subp") def test_lsb_release_functional(self, mock_subp): output = '\n'.join([ "Distributor ID: Ubuntu", "Description: Ubuntu 14.04.2 LTS", "Release: 14.04", "Codename: trusty", ]) rdata = {'id': 'Ubuntu', 'description': 'Ubuntu 14.04.2 LTS', 'codename': 'trusty', 'release': '14.04'} def fake_subp(cmd, capture=False): return output, 'No LSB modules are available.' mock_subp.side_effect = fake_subp found = util.lsb_release() mock_subp.assert_called_with(['lsb_release', '--all'], capture=True) self.assertEqual(found, rdata) @mock.patch("curtin.util.subp") def test_lsb_release_unavailable(self, mock_subp): def doraise(*args, **kwargs): raise util.ProcessExecutionError("foo") mock_subp.side_effect = doraise expected = {k: "UNAVAILABLE" for k in ('id', 'description', 'codename', 'release')} self.assertEqual(util.lsb_release(), expected) class TestSubp(TestCase): stdin2err = ['bash', '-c', 'cat >&2'] stdin2out = ['cat'] utf8_invalid = b'ab\xaadef' utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf # but by using bash, we remove dependency on another program. 
        return ['bash', '-c', 'printf "$@"', 'printf'] + list(args)

    def test_subp_handles_utf8(self):
        # The given bytes contain utf-8 accented characters as seen in e.g.
        # the "deja dup" package in Ubuntu.
        cmd = self.printf_cmd(self.utf8_valid_2)
        (out, _err) = util.subp(cmd, capture=True)
        self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))

    def test_subp_respects_decode_false(self):
        (out, err) = util.subp(self.stdin2out, capture=True, decode=False,
                               data=self.utf8_valid)
        self.assertTrue(isinstance(out, bytes))
        self.assertTrue(isinstance(err, bytes))
        self.assertEqual(out, self.utf8_valid)

    def test_subp_decode_ignore(self):
        # this executes a string that writes invalid utf-8 to stdout
        (out, _err) = util.subp(self.printf_cmd('abc\\xaadef'),
                                capture=True, decode='ignore')
        self.assertEqual(out, 'abcdef')

    def test_subp_decode_strict_valid_utf8(self):
        (out, _err) = util.subp(self.stdin2out, capture=True,
                                decode='strict', data=self.utf8_valid)
        self.assertEqual(out, self.utf8_valid.decode('utf-8'))

    def test_subp_decode_invalid_utf8_replaces(self):
        (out, _err) = util.subp(self.stdin2out, capture=True,
                                data=self.utf8_invalid)
        expected = self.utf8_invalid.decode('utf-8', errors='replace')
        self.assertEqual(out, expected)

    def test_subp_decode_strict_raises(self):
        args = []
        kwargs = {'args': self.stdin2out, 'capture': True,
                  'decode': 'strict', 'data': self.utf8_invalid}
        self.assertRaises(UnicodeDecodeError, util.subp, *args, **kwargs)

    def test_subp_capture_stderr(self):
        data = b'hello world'
        (out, err) = util.subp(self.stdin2err, capture=True,
                               decode=False, data=data)
        self.assertEqual(err, data)
        self.assertEqual(out, b'')

    def test_returns_none_if_no_capture(self):
        (out, err) = util.subp(self.stdin2out, data=b'')
        self.assertEqual(err, None)
        self.assertEqual(out, None)


class TestHuman2Bytes(TestCase):
    GB = 1024 * 1024 * 1024
    MB = 1024 * 1024

    def test_float_equal_int_is_allowed(self):
        self.assertEqual(1000, util.human2bytes(1000.0))

    def test_float_in_string_nonequal_int_raises_type_error(self):
        self.assertRaises(ValueError, util.human2bytes, "1000.4B")

    def test_float_nonequal_int_raises_type_error(self):
        self.assertRaises(ValueError, util.human2bytes, 1000.4)

    def test_int_gets_int(self):
        self.assertEqual(100, util.human2bytes(100))

    def test_no_suffix_is_bytes(self):
        self.assertEqual(100, util.human2bytes("100"))

    def test_suffix_M(self):
        self.assertEqual(100 * self.MB, util.human2bytes("100M"))

    def test_suffix_B(self):
        self.assertEqual(100, util.human2bytes("100B"))

    def test_suffix_G(self):
        self.assertEqual(int(10 * self.GB), util.human2bytes("10G"))

    def test_float_in_string(self):
        self.assertEqual(int(3.5 * self.GB), util.human2bytes("3.5G"))

    def test_GB_equals_G(self):
        self.assertEqual(util.human2bytes("3GB"), util.human2bytes("3G"))


class TestSetUnExecutable(TestCase):
    tmpf = None
    tmpd = None

    def tearDown(self):
        if self.tmpf:
            if os.path.exists(self.tmpf):
                os.unlink(self.tmpf)
            self.tmpf = None
        if self.tmpd:
            shutil.rmtree(self.tmpd)
            self.tmpd = None

    def tempfile(self, data=None):
        # mkstemp returns (fd, path); write through the file descriptor.
        fd, self.tmpf = tempfile.mkstemp()
        if data:
            os.write(fd, data)
        os.close(fd)
        return self.tmpf

    def test_change_needed_returns_original_mode(self):
        tmpf = self.tempfile()
        os.chmod(tmpf, 0o755)
        ret = util.set_unexecutable(tmpf)
        self.assertEqual(ret, 0o0755)

    def test_no_change_needed_returns_none(self):
        tmpf = self.tempfile()
        os.chmod(tmpf, 0o600)
        ret = util.set_unexecutable(tmpf)
        self.assertEqual(ret, None)

    def test_change_does_as_expected(self):
        tmpf = self.tempfile()
        os.chmod(tmpf, 0o755)
        ret = util.set_unexecutable(tmpf)
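        # set_unexecutable() should report the prior mode (0o755) and strip
        # the execute bits, leaving 0o644; the assertions that follow check
        # both.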
self.assertEqual(ret, 0o0755) self.assertEqual(stat.S_IMODE(os.stat(tmpf).st_mode), 0o0644) def test_strict_no_exists_raises_exception(self): self.tmpd = tempfile.mkdtemp() bogus = os.path.join(self.tmpd, 'bogus') self.assertRaises(ValueError, util.set_unexecutable, bogus, True) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/vmtests/__init__.py0000644000000000000000000011671212741214565017322 0ustar 00000000000000import atexit import datetime import errno import logging import json import os import pathlib import random import re import shutil import subprocess import textwrap import time import yaml import curtin.net as curtin_net import curtin.util as util from curtin.commands.install import INSTALL_PASS_MSG from .image_sync import query as imagesync_query from .image_sync import mirror as imagesync_mirror from .helpers import check_call, TimeoutExpired from unittest import TestCase, SkipTest IMAGE_SRC_URL = os.environ.get( 'IMAGE_SRC_URL', "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") IMAGE_DIR = os.environ.get("IMAGE_DIR", "/srv/images") try: IMAGES_TO_KEEP = int(os.environ.get("IMAGES_TO_KEEP", 1)) except ValueError: raise ValueError("IMAGES_TO_KEEP in environment was not an integer") DEFAULT_SSTREAM_OPTS = [ '--keyring=/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg'] DEVNULL = open(os.devnull, 'w') KEEP_DATA = {"pass": "none", "fail": "all"} IMAGE_SYNCS = [] TARGET_IMAGE_FORMAT = "raw" OVMF_CODE = "/usr/share/OVMF/OVMF_CODE.fd" OVMF_VARS = "/usr/share/OVMF/OVMF_VARS.fd" # precise -> vivid don't have split UEFI firmware, fallback if not os.path.exists(OVMF_CODE): OVMF_CODE = "/usr/share/ovmf/OVMF.fd" OVMF_VARS = OVMF_CODE DEFAULT_BRIDGE = os.environ.get("CURTIN_VMTEST_BRIDGE", "user") OUTPUT_DISK_NAME = 'output_disk.img' _TOPDIR = None def remove_empty_dir(dirpath): if os.path.exists(dirpath): try: os.rmdir(dirpath) except OSError as e: if e.errno == errno.ENOTEMPTY: pass def remove_dir(dirpath): if os.path.exists(dirpath): shutil.rmtree(dirpath) def _topdir(): global _TOPDIR if _TOPDIR: return _TOPDIR envname = 'CURTIN_VMTEST_TOPDIR' envdir = os.environ.get(envname) if envdir: if not os.path.exists(envdir): os.mkdir(envdir) _TOPDIR = envdir elif not os.path.isdir(envdir): raise ValueError("%s=%s exists but is not a directory" % (envname, envdir)) else: _TOPDIR = envdir else: tdir = os.environ.get('TMPDIR', '/tmp') for i in range(0, 10): try: ts = datetime.datetime.now().isoformat() # : in path give grief at least to tools/launch ts = ts.replace(":", "") outd = os.path.join(tdir, 'vmtest-{}'.format(ts)) os.mkdir(outd) _TOPDIR = outd break except OSError as e: if e.errno != errno.EEXIST: raise time.sleep(random.random()/10) if not _TOPDIR: raise Exception("Unable to initialize topdir in TMPDIR [%s]" % tdir) atexit.register(remove_empty_dir, _TOPDIR) return _TOPDIR def _initialize_logging(): # Configure logging module to save output to disk and present it on # sys.stderr logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') envlog = os.environ.get('CURTIN_VMTEST_LOG') if envlog: logfile = envlog else: logfile = _topdir() + ".log" fh = logging.FileHandler(logfile, mode='w', encoding='utf-8') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) ch = logging.StreamHandler() ch.setLevel(logging.INFO) ch.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(ch) logger.info("Logfile: %s . 
Working dir: %s", logfile, _topdir()) return logger def get_env_var_bool(envname, default=False): """get a boolean environment variable. If environment variable is not set, use default. False values are case insensitive 'false', '0', ''.""" if not isinstance(default, bool): raise ValueError("default '%s' for '%s' is not a boolean" % (default, envname)) val = os.environ.get(envname) if val is None: return default return val.lower() not in ("false", "0", "") def sync_images(src_url, base_dir, filters, verbosity=0): # do a sync with provided filters # only sync once per set of filters. global IMAGE_SYNCS manages that. global IMAGE_SYNCS sfilters = ','.join(sorted(filters)) if sfilters in IMAGE_SYNCS: logger.debug("already synced for filters: %s", sfilters) return logger.info('Syncing images from %s with filters=%s', src_url, filters) imagesync_mirror(output_d=base_dir, source=src_url, mirror_filters=filters, max_items=IMAGES_TO_KEEP, verbosity=verbosity) IMAGE_SYNCS.append(sfilters) logger.debug("now done syncs: %s" % IMAGE_SYNCS) return def get_images(src_url, local_d, release, arch, krel=None, sync=True): # ensure that the image items (roottar, kernel, initrd) # we need for release and arch are available in base_dir. # returns updated ftypes dictionary {ftype: item_url} if krel is None: krel = release ftypes = { 'vmtest.root-image': '', 'vmtest.root-tgz': '', 'boot-kernel': '', 'boot-initrd': '' } common_filters = ['release=%s' % release, 'krel=%s' % krel, 'arch=%s' % arch] filters = ['ftype~(%s)' % ("|".join(ftypes.keys()))] + common_filters if sync: imagesync_mirror(output_d=local_d, source=src_url, mirror_filters=common_filters, max_items=IMAGES_TO_KEEP) query_str = 'query = %s' % (' '.join(filters)) logger.debug('Query %s for image. %s', local_d, query_str) fail_msg = None try: results = imagesync_query(local_d, max_items=IMAGES_TO_KEEP, filter_list=filters) logger.debug("Query '%s' returned: %s", query_str, results) fail_msg = "Empty result returned." except Exception as e: logger.debug("Query '%s' failed: %s", query_str, e) results = None fail_msg = str(e) if not results and not sync: # try to fix this with a sync logger.info(fail_msg + " Attempting to fix with an image sync. (%s)", query_str) return get_images(src_url, local_d, release, arch, krel, sync=True) elif not results: raise ValueError("Nothing found in query: %s" % query_str) missing = [] expected = sorted(ftypes.keys()) found = sorted(f.get('ftype') for f in results) if expected != found: raise ValueError("Query returned unexpected ftypes=%s. " "Expected=%s" % (found, expected)) for item in results: ftypes[item['ftype']] = item['item_url'] last_item = item missing = [(ftype, path) for ftype, path in ftypes.items() if not os.path.exists(path)] if len(missing): raise ValueError("missing files for ftypes: %s" % missing) # trusty amd64/hwe-p 20150101 version_info = ('%(release)s %(arch)s/%(subarch)s %(version_name)s' % last_item) return version_info, ftypes class ImageStore: """Local mirror of MAAS images simplestreams data.""" # By default sync on demand. sync = True # images are expected in dirs named //YYYYMMDD[.X] image_dir_re = re.compile(r"^[0-9]{4}[01][0-9][0123][0-9]([.][0-9])*$") def __init__(self, source_url, base_dir): """Initialize the ImageStore. source_url is the simplestreams source from where the images will be downloaded. base_dir is the target dir in the filesystem to keep the mirror. 
""" self.source_url = source_url self.base_dir = base_dir if not os.path.isdir(self.base_dir): os.makedirs(self.base_dir) self.url = pathlib.Path(self.base_dir).as_uri() def get_image(self, release, arch, krel=None): """Return tuple of version info, and paths for root image, kernel, initrd, tarball.""" if krel is None: krel = release ver_info, ftypes = get_images( self.source_url, self.base_dir, release, arch, krel, self.sync) root_image_path = ftypes['vmtest.root-image'] kernel_path = ftypes['boot-kernel'] initrd_path = ftypes['boot-initrd'] tarball = ftypes['vmtest.root-tgz'] return ver_info, (root_image_path, kernel_path, initrd_path, tarball) class TempDir(object): boot = None collect = None disks = None install = None logs = None output_disk = None def __init__(self, name, user_data): # Create tmpdir self.tmpdir = os.path.join(_topdir(), name) try: os.mkdir(self.tmpdir) except OSError as e: if e.errno == errno.EEXIST: raise ValueError("name '%s' already exists in %s" % (name, _topdir)) else: raise e # make subdirs self.collect = os.path.join(self.tmpdir, "collect") self.install = os.path.join(self.tmpdir, "install") self.boot = os.path.join(self.tmpdir, "boot") self.logs = os.path.join(self.tmpdir, "logs") self.disks = os.path.join(self.tmpdir, "disks") self.dirs = (self.collect, self.install, self.boot, self.logs, self.disks) for d in self.dirs: os.mkdir(d) self.success_file = os.path.join(self.logs, "success") self.errors_file = os.path.join(self.logs, "errors.json") # write cloud-init for installed system meta_data_file = os.path.join(self.install, "meta-data") with open(meta_data_file, "w") as fp: fp.write("instance-id: inst-123\n") user_data_file = os.path.join(self.install, "user-data") with open(user_data_file, "w") as fp: fp.write(user_data) # create target disk logger.debug('Creating target disk') self.target_disk = os.path.join(self.disks, "install_disk.img") subprocess.check_call(["qemu-img", "create", "-f", TARGET_IMAGE_FORMAT, self.target_disk, "10G"], stdout=DEVNULL, stderr=subprocess.STDOUT) # create seed.img for installed system's cloud init logger.debug('Creating seed disk') self.seed_disk = os.path.join(self.boot, "seed.img") subprocess.check_call(["cloud-localds", self.seed_disk, user_data_file, meta_data_file], stdout=DEVNULL, stderr=subprocess.STDOUT) # create output disk, mount ro logger.debug('Creating output disk') self.output_disk = os.path.join(self.boot, OUTPUT_DISK_NAME) subprocess.check_call(["qemu-img", "create", "-f", TARGET_IMAGE_FORMAT, self.output_disk, "10M"], stdout=DEVNULL, stderr=subprocess.STDOUT) subprocess.check_call(["mkfs.ext2", "-F", self.output_disk], stdout=DEVNULL, stderr=subprocess.STDOUT) def collect_output(self): logger.debug('extracting output disk') subprocess.check_call(['tar', '-C', self.collect, '-xf', self.output_disk], stdout=DEVNULL, stderr=subprocess.STDOUT) class VMBaseClass(TestCase): __test__ = False arch_skip = [] boot_timeout = 300 collect_scripts = [] conf_file = "examples/tests/basic.yaml" disk_block_size = 512 disk_driver = 'virtio-blk' disk_to_check = {} extra_disks = [] extra_kern_args = None fstab_expected = {} image_store_class = ImageStore install_timeout = 3000 interactive = False multipath = False multipath_num_paths = 2 nvme_disks = [] recorded_errors = 0 recorded_failures = 0 uefi = False # these get set from base_vm_classes release = None arch = None krel = None @classmethod def setUpClass(cls): # check if we should skip due to host arch if cls.arch in cls.arch_skip: reason = "{} is not supported on arch 
{}".format(cls.__name__, cls.arch) raise SkipTest(reason) setup_start = time.time() logger.info('Starting setup for testclass: {}'.format(cls.__name__)) # get boot img image_store = cls.image_store_class(IMAGE_SRC_URL, IMAGE_DIR) # Disable sync if env var is set. image_store.sync = get_env_var_bool('CURTIN_VMTEST_IMAGE_SYNC', False) logger.debug("Image sync = %s", image_store.sync) img_verstr, (boot_img, boot_kernel, boot_initrd, tarball) = ( image_store.get_image(cls.release, cls.arch, cls.krel)) logger.debug("Image %s\n boot=%s\n kernel=%s\n initrd=%s\n" " tarball=%s\n", img_verstr, boot_img, boot_kernel, boot_initrd, tarball) # set up tempdir cls.td = TempDir( name=cls.__name__, user_data=generate_user_data(collect_scripts=cls.collect_scripts)) logger.info('Using tempdir: %s , Image: %s', cls.td.tmpdir, img_verstr) cls.install_log = os.path.join(cls.td.logs, 'install-serial.log') cls.boot_log = os.path.join(cls.td.logs, 'boot-serial.log') logger.debug('Install console log: {}'.format(cls.install_log)) logger.debug('Boot console log: {}'.format(cls.boot_log)) # if interactive, launch qemu without 'background & wait' if cls.interactive: dowait = "--no-dowait" else: dowait = "--dowait" # create launch cmd cmd = ["tools/launch", "--arch=" + cls.arch, "-v", dowait] if not cls.interactive: cmd.extend(["--silent", "--power=off"]) cmd.extend(["--serial-log=" + cls.install_log]) if cls.extra_kern_args: cmd.extend(["--append=" + cls.extra_kern_args]) # publish the root tarball install_src = "PUBURL/" + os.path.basename(tarball) cmd.append("--publish=%s" % tarball) # check for network configuration cls.network_state = curtin_net.parse_net_config(cls.conf_file) logger.debug("Network state: {}".format(cls.network_state)) # build -n arg list with macaddrs from net_config physical config macs = [] interfaces = {} if cls.network_state: interfaces = cls.network_state.get('interfaces') for ifname in interfaces: logger.debug("Interface name: {}".format(ifname)) iface = interfaces.get(ifname) hwaddr = iface.get('mac_address') if hwaddr: macs.append(hwaddr) netdevs = [] if len(macs) > 0: for mac in macs: netdevs.extend(["--netdev=" + DEFAULT_BRIDGE + ",mac={}".format(mac)]) else: netdevs.extend(["--netdev=" + DEFAULT_BRIDGE]) # build disk arguments disks = [] sc = util.load_file(cls.conf_file) storage_config = yaml.load(sc).get('storage', {}).get('config', {}) cls.disk_wwns = ["wwn=%s" % x.get('wwn') for x in storage_config if 'wwn' in x] cls.disk_serials = ["serial=%s" % x.get('serial') for x in storage_config if 'serial' in x] target_disk = "{}:{}:{}:{}:".format(cls.td.target_disk, "", cls.disk_driver, cls.disk_block_size) if len(cls.disk_wwns): target_disk += cls.disk_wwns[0] if len(cls.disk_serials): target_disk += cls.disk_serials[0] disks.extend(['--disk', target_disk]) # --disk source:size:driver:block_size:devopts for (disk_no, disk_sz) in enumerate(cls.extra_disks): dpath = os.path.join(cls.td.disks, 'extra_disk_%d.img' % disk_no) extra_disk = '{}:{}:{}:{}:'.format(dpath, disk_sz, cls.disk_driver, cls.disk_block_size) if len(cls.disk_wwns): w_index = disk_no + 1 if w_index < len(cls.disk_wwns): extra_disk += cls.disk_wwns[w_index] if len(cls.disk_serials): w_index = disk_no + 1 if w_index < len(cls.disk_serials): extra_disk += cls.disk_serials[w_index] disks.extend(['--disk', extra_disk]) # build nvme disk args if needed for (disk_no, disk_sz) in enumerate(cls.nvme_disks): dpath = os.path.join(cls.td.disks, 'nvme_disk_%d.img' % disk_no) nvme_disk = '{}:{}:nvme:{}:{}'.format(dpath, disk_sz, 
cls.disk_block_size, "serial=nvme-%d" % disk_no) disks.extend(['--disk', nvme_disk]) # proxy config configs = [cls.conf_file] proxy = get_apt_proxy() if get_apt_proxy is not None: proxy_config = os.path.join(cls.td.install, 'proxy.cfg') with open(proxy_config, "w") as fp: fp.write(json.dumps({'apt_proxy': proxy}) + "\n") configs.append(proxy_config) if cls.uefi: logger.debug("Testcase requested launching with UEFI") # always attempt to update target nvram (via grub) grub_config = os.path.join(cls.td.install, 'grub.cfg') with open(grub_config, "w") as fp: fp.write(json.dumps({'grub': {'update_nvram': True}})) configs.append(grub_config) # make our own copy so we can store guest modified values nvram = os.path.join(cls.td.disks, "ovmf_vars.fd") shutil.copy(OVMF_VARS, nvram) cmd.extend(["--uefi", nvram]) if cls.multipath: disks = disks * cls.multipath_num_paths cmd.extend(netdevs + disks + [boot_img, "--kernel=%s" % boot_kernel, "--initrd=%s" % boot_initrd, "--", "curtin", "-vv", "install"] + ["--config=%s" % f for f in configs] + [install_src]) # run vm with installer lout_path = os.path.join(cls.td.logs, "install-launch.out") logger.info('Running curtin installer: {}'.format(cls.install_log)) try: with open(lout_path, "wb") as fpout: cls.boot_system(cmd, timeout=cls.install_timeout, console_log=cls.install_log, proc_out=fpout, purpose="install") except TimeoutExpired: logger.error('Curtin installer failed with timeout') cls.tearDownClass() raise finally: if os.path.exists(cls.install_log): with open(cls.install_log, 'rb') as l: content = l.read().decode('utf-8', errors='replace') logger.debug('install serial console output:\n%s', content) else: logger.warn("Boot for install did not produce a console log.") logger.debug('') try: if os.path.exists(cls.install_log): with open(cls.install_log, 'rb') as l: install_log = l.read().decode('utf-8', errors='replace') errmsg, errors = check_install_log(install_log) if errmsg: for e in errors: logger.error(e) logger.error(errmsg) raise Exception(cls.__name__ + ":" + errmsg) else: logger.info('Install OK') else: logger.info('Install Failed') raise Exception("No install log was produced") except: cls.tearDownClass() raise # create --disk params for nvme disks bsize_args = "logical_block_size={}".format(cls.disk_block_size) bsize_args += ",physical_block_size={}".format(cls.disk_block_size) bsize_args += ",min_io_size={}".format(cls.disk_block_size) target_disks = [] for (disk_no, disk) in enumerate([cls.td.target_disk]): disk = '--disk={},driver={},format={},{}'.format( disk, cls.disk_driver, TARGET_IMAGE_FORMAT, bsize_args) if len(cls.disk_wwns): disk += ",%s" % cls.disk_wwns[0] if len(cls.disk_serials): disk += ",%s" % cls.disk_serials[0] target_disks.extend([disk]) extra_disks = [] for (disk_no, disk_sz) in enumerate(cls.extra_disks): dpath = os.path.join(cls.td.disks, 'extra_disk_%d.img' % disk_no) disk = '--disk={},driver={},format={},{}'.format( dpath, cls.disk_driver, TARGET_IMAGE_FORMAT, bsize_args) if len(cls.disk_wwns): w_index = disk_no + 1 if w_index < len(cls.disk_wwns): disk += ",%s" % cls.disk_wwns[w_index] if len(cls.disk_serials): w_index = disk_no + 1 if w_index < len(cls.disk_serials): disk += ",%s" % cls.disk_serials[w_index] extra_disks.extend([disk]) nvme_disks = [] disk_driver = 'nvme' for (disk_no, disk_sz) in enumerate(cls.nvme_disks): dpath = os.path.join(cls.td.disks, 'nvme_disk_%d.img' % disk_no) disk = '--disk={},driver={},format={},{}'.format( dpath, disk_driver, TARGET_IMAGE_FORMAT, bsize_args) nvme_disks.extend([disk]) if 
cls.multipath: target_disks = target_disks * cls.multipath_num_paths extra_disks = extra_disks * cls.multipath_num_paths nvme_disks = nvme_disks * cls.multipath_num_paths # output disk is always virtio-blk, with serial of output_disk.img output_disk = '--disk={},driver={},format={},{},{}'.format( cls.td.output_disk, 'virtio-blk', TARGET_IMAGE_FORMAT, bsize_args, 'serial=%s' % os.path.basename(cls.td.output_disk)) target_disks.extend([output_disk]) # create xkvm cmd cmd = (["tools/xkvm", "-v", dowait] + netdevs + target_disks + extra_disks + nvme_disks + ["--", "-drive", "file=%s,if=virtio,media=cdrom" % cls.td.seed_disk, "-m", "1024"]) if not cls.interactive: if cls.arch == 's390x': cmd.extend([ "-nographic", "-nodefaults", "-chardev", "file,path=%s,id=charconsole0" % cls.boot_log, "-device", "sclpconsole,chardev=charconsole0,id=console0"]) else: cmd.extend(["-nographic", "-serial", "file:" + cls.boot_log]) if cls.uefi: logger.debug("Testcase requested booting with UEFI") uefi_opts = ["-drive", "if=pflash,format=raw,file=" + nvram] if OVMF_CODE != OVMF_VARS: # reorder opts, code then writable space uefi_opts = (["-drive", "if=pflash,format=raw,readonly,file=" + OVMF_CODE] + uefi_opts) cmd.extend(uefi_opts) # run vm with installed system, fail if timeout expires try: logger.info('Booting target image: {}'.format(cls.boot_log)) logger.debug('{}'.format(" ".join(cmd))) xout_path = os.path.join(cls.td.logs, "boot-xkvm.out") with open(xout_path, "wb") as fpout: cls.boot_system(cmd, console_log=cls.boot_log, proc_out=fpout, timeout=cls.boot_timeout, purpose="first_boot") except Exception as e: logger.error('Booting after install failed: %s', e) cls.tearDownClass() raise e finally: if os.path.exists(cls.boot_log): with open(cls.boot_log, 'rb') as l: content = l.read().decode('utf-8', errors='replace') logger.debug('boot serial console output:\n%s', content) else: logger.warn("Booting after install not produce" " a console log.") # mount output disk try: cls.td.collect_output() except: cls.tearDownClass() raise logger.info( "%s: setUpClass finished. took %.02f seconds. Running testcases.", cls.__name__, time.time() - setup_start) @classmethod def tearDownClass(cls): success = False sfile = os.path.exists(cls.td.success_file) efile = os.path.exists(cls.td.errors_file) if not (sfile or efile): logger.warn("class %s had no status. 
Possibly no tests run.", cls.__name__) elif (sfile and efile): logger.warn("class %s had success and fail.", cls.__name__) elif sfile: success = True clean_working_dir(cls.td.tmpdir, success, keep_pass=KEEP_DATA['pass'], keep_fail=KEEP_DATA['fail']) @classmethod def expected_interfaces(cls): expected = [] interfaces = {} if cls.network_state: interfaces = cls.network_state.get('interfaces') # handle interface aliases when subnets have multiple entries for iface in interfaces.values(): subnets = iface.get('subnets', {}) if subnets: for index, subnet in zip(range(0, len(subnets)), subnets): if index == 0: expected.append(iface) else: expected.append("{}:{}".format(iface, index)) else: expected.append(iface) return expected @classmethod def parse_deb_config(cls, path): return curtin_net.parse_deb_config(path) @classmethod def get_network_state(cls): return cls.network_state @classmethod def get_expected_etc_network_interfaces(cls): return curtin_net.render_interfaces(cls.network_state) @classmethod def get_expected_etc_resolvconf(cls): ifaces = {} eni = curtin_net.render_interfaces(cls.network_state) curtin_net.parse_deb_config_data(ifaces, eni, None, None) return ifaces @classmethod def boot_system(cls, cmd, console_log, proc_out, timeout, purpose): # this is separated for easy override in Psuedo classes def myboot(): check_call(cmd, timeout=timeout, stdout=proc_out, stderr=subprocess.STDOUT) return True return boot_log_wrap(cls.__name__, myboot, cmd, console_log, timeout, purpose) # Misc functions that are useful for many tests def output_files_exist(self, files): for f in files: self.assertTrue(os.path.exists(os.path.join(self.td.collect, f))) def check_file_strippedline(self, filename, search): with open(os.path.join(self.td.collect, filename), "r") as fp: data = list(i.strip() for i in fp.readlines()) self.assertIn(search, data) def check_file_regex(self, filename, regex): with open(os.path.join(self.td.collect, filename), "r") as fp: data = fp.read() self.assertRegex(data, regex) # To get rid of deprecation warning in python 3. def assertRegex(self, s, r): try: # Python 3. super(VMBaseClass, self).assertRegex(s, r) except AttributeError: # Python 2. 
self.assertRegexpMatches(s, r) def get_blkid_data(self, blkid_file): with open(os.path.join(self.td.collect, blkid_file)) as fp: data = fp.read() ret = {} for line in data.splitlines(): if line == "": continue val = line.split('=') ret[val[0]] = val[1] return ret def test_fstab(self): if (os.path.exists(self.td.collect + "fstab") and self.fstab_expected is not None): with open(os.path.join(self.td.collect, "fstab")) as fp: fstab_lines = fp.readlines() fstab_entry = None for line in fstab_lines: for device, mntpoint in self.fstab_expected.items(): if device in line: fstab_entry = line self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], mntpoint) def test_dname(self): fpath = os.path.join(self.td.collect, "ls_dname") if (os.path.exists(fpath) and self.disk_to_check is not None): with open(fpath, "r") as fp: contents = fp.read().splitlines() for diskname, part in self.disk_to_check: if part is not 0: link = diskname + "-part" + str(part) self.assertIn(link, contents) self.assertIn(diskname, contents) def test_interfacesd_eth0_removed(self): """ Check that curtin has removed /etc/network/interfaces.d/eth0.cfg by examining the output of a find /etc/network > find_interfaces.d """ fpath = os.path.join(self.td.collect, "find_interfacesd") interfacesd = util.load_file(fpath) self.assertNotIn("/etc/network/interfaces.d/eth0.cfg", interfacesd.split("\n")) def run(self, result): super(VMBaseClass, self).run(result) self.record_result(result) def record_result(self, result): # record the result of this test to the class log dir # 'passed' gets False on failure, None (not set) on success. passed = result.test.passed in (True, None) all_good = passed and not os.path.exists(self.td.errors_file) if all_good: with open(self.td.success_file, "w") as fp: fp.write("all good\n") if not passed: data = {'errors': 1} if os.path.exists(self.td.success_file): os.unlink(self.td.success_file) if os.path.exists(self.td.errors_file): with open(self.td.errors_file, "r") as fp: data = json.loads(fp.read()) data['errors'] += 1 with open(self.td.errors_file, "w") as fp: fp.write(json.dumps(data, indent=2, sort_keys=True, separators=(',', ': ')) + "\n") class PsuedoImageStore(object): def __init__(self, source_url, base_dir): self.source_url = source_url self.base_dir = base_dir def get_image(self, release, arch, krel=None): """Return tuple of version info, and paths for root image, kernel, initrd, tarball.""" names = ['psuedo-root-image', 'psuedo-kernel', 'psuedo-initrd', 'psuedo-tarball'] return ( "psuedo-%s %s/hwe-P 20160101" % (release, arch), [os.path.join(self.base_dir, release, arch, f) for f in names]) class PsuedoVMBaseClass(VMBaseClass): # This mimics much of the VMBaseClass just with faster setUpClass # The tests here will fail only if CURTIN_VMTEST_DEBUG_ALLOW_FAIL # is set to a true value. This allows the code to mostly run # during a 'make vmtest' (keeping it running) but not to break test. # # boot_timeouts is a dict of {'purpose': 'mesg'} image_store_class = PsuedoImageStore # boot_results controls what happens when boot_system is called # a dictionary with key of the 'purpose' # inside each dictionary: # timeout_msg: a message for a TimeoutException to raise. 
# timeout: the value to pass as timeout to exception # exit: the exit value of a CalledProcessError to raise # console_log: what to write into the console log for that boot boot_results = { 'install': {'timeout': 0, 'exit': 0}, 'first_boot': {'timeout': 0, 'exit': 0}, } console_log_pass = '\n'.join([ 'Psuedo console log', INSTALL_PASS_MSG]) allow_test_fails = get_env_var_bool('CURTIN_VMTEST_DEBUG_ALLOW_FAIL', False) def collect_output(self): logger.debug('Psuedo extracting output disk') with open(os.path.join(self.td.collect, "fstab")) as fp: fp.write('\n'.join(("# psuedo fstab", "LABEL=root / ext4 defaults 0 1"))) @classmethod def boot_system(cls, cmd, console_log, proc_out, timeout, purpose): # this is separated for easy override in Psuedo classes data = {'timeout_msg': None, 'timeout': 0, 'exit': 0, 'log': cls.console_log_pass} data.update(cls.boot_results.get(purpose, {})) def myboot(): with open(console_log, "w") as fpout: fpout.write(data['log']) if data['timeout_msg']: raise TimeoutExpired(data['timeout_msg'], timeout=data['timeout']) if data['exit'] != 0: raise subprocess.CalledProcessError(cmd=cmd, returncode=data['exit']) return True return boot_log_wrap(cls.__name__, myboot, cmd, console_log, timeout, purpose) def test_fstab(self): pass def test_dname(self): pass def test_interfacesd_eth0_removed(self): pass def _maybe_raise(self, exc): if self.allow_test_fails: raise exc def check_install_log(install_log): # look if install is OK via curtin 'Installation ok" # if we dont find that, scan for known error messages and report # if we don't see any errors, fail with general error errors = [] errmsg = None # regexps expected in curtin output install_pass = INSTALL_PASS_MSG install_fail = "({})".format("|".join([ 'Installation\ failed', 'ImportError: No module named.*', 'Unexpected error while running command', 'E: Unable to locate package.*'])) install_is_ok = re.findall(install_pass, install_log) if len(install_is_ok) == 0: errors = re.findall(install_fail, install_log) if len(errors) > 0: for e in errors: logger.error(e) errmsg = ('Errors during curtin installer') else: errmsg = ('Failed to verify Installation is OK') return errmsg, errors def get_apt_proxy(): # get setting for proxy. should have same result as in tools/launch apt_proxy = os.environ.get('apt_proxy') if apt_proxy: return apt_proxy get_apt_config = textwrap.dedent(""" command -v apt-config >/dev/null 2>&1 out=$(apt-config shell x Acquire::HTTP::Proxy) out=$(sh -c 'eval $1 && echo $x' -- "$out") [ -n "$out" ] echo "$out" """) try: out = subprocess.check_output(['sh', '-c', get_apt_config]) if isinstance(out, bytes): out = out.decode() out = out.rstrip() return out except subprocess.CalledProcessError: pass return None def generate_user_data(collect_scripts=None, apt_proxy=None): # this returns the user data for the *booted* system # its a cloud-config-archive type, which is # just a list of parts. the 'x-shellscript' parts # will be executed in the order they're put in if collect_scripts is None: collect_scripts = [] parts = [] base_cloudconfig = { 'password': 'passw0rd', 'chpasswd': {'expire': False}, 'power_state': {'mode': 'poweroff'}, 'network': {'config': 'disabled'}, } ssh_keys, _err = util.subp(['tools/ssh-keys-list', 'cloud-config'], capture=True) # precises' cloud-init version has limited support for cloud-config-archive # and expects cloud-config pieces to be appendable to a single file and # yaml.load()'able. 
Resolve this by using yaml.dump() when generating # a list of parts parts = [{'type': 'text/cloud-config', 'content': yaml.dump(base_cloudconfig, indent=1)}, {'type': 'text/cloud-config', 'content': ssh_keys}] output_dir = '/mnt/output' output_dir_macro = 'OUTPUT_COLLECT_D' output_device = '/dev/disk/by-id/virtio-%s' % OUTPUT_DISK_NAME collect_prep = textwrap.dedent("mkdir -p " + output_dir) collect_post = textwrap.dedent( 'tar -C "%s" -cf "%s" .' % (output_dir, output_device)) # failsafe poweroff runs on precise only, where power_state does # not exist. precise_poweroff = textwrap.dedent("""#!/bin/sh -x [ "$(lsb_release -sc)" = "precise" ] || exit 0; shutdown -P now "Shutting down on precise" """) scripts = ([collect_prep] + collect_scripts + [collect_post] + [precise_poweroff]) for part in scripts: if not part.startswith("#!"): part = "#!/bin/sh -x\n" + part part = part.replace(output_dir_macro, output_dir) logger.debug('Cloud config archive content (pre-json):' + part) parts.append({'content': part, 'type': 'text/x-shellscript'}) return '#cloud-config-archive\n' + json.dumps(parts, indent=1) def clean_working_dir(tdir, result, keep_pass, keep_fail): if result: keep = keep_pass else: keep = keep_fail # result, keep-mode rkm = 'result=%s keep=%s' % ('pass' if result else 'fail', keep) if len(keep) == 1 and keep[0] == "all": logger.debug('Keeping tmpdir %s [%s]', tdir, rkm) elif len(keep) == 1 and keep[0] == "none": logger.debug('Removing tmpdir %s [%s]', tdir, rkm) shutil.rmtree(tdir) else: to_clean = [d for d in os.listdir(tdir) if d not in keep] logger.debug('Pruning dirs in %s [%s]: %s', tdir, rkm, ','.join(to_clean)) for d in to_clean: cpath = os.path.join(tdir, d) if os.path.isdir(cpath): shutil.rmtree(os.path.join(tdir, d)) else: os.unlink(cpath) return def apply_keep_settings(success=None, fail=None): data = {} flist = ( (success, "CURTIN_VMTEST_KEEP_DATA_PASS", "pass", "none"), (fail, "CURTIN_VMTEST_KEEP_DATA_FAIL", "fail", "all"), ) allowed = ("boot", "collect", "disks", "install", "logs", "none", "all") for val, ename, dname, default in flist: if val is None: val = os.environ.get(ename, default) toks = val.split(",") for name in toks: if name not in allowed: raise ValueError("'%s' had invalid token '%s'" % (ename, name)) data[dname] = toks global KEEP_DATA KEEP_DATA.update(data) def boot_log_wrap(name, func, cmd, console_log, timeout, purpose): logger.debug("%s[%s]: booting with timeout=%s log=%s cmd: %s", name, purpose, timeout, console_log, ' '.join(cmd)) ret = None start = time.time() try: ret = func() finally: end = time.time() logger.info("%s[%s]: boot took %.02f seconds. returned %s", name, purpose, end - start, ret) return ret apply_keep_settings() logger = _initialize_logging() curtin-0.1.0~bzr399/tests/vmtests/helpers.py0000644000000000000000000002615112741214565017222 0ustar 00000000000000#! /usr/bin/env python # Copyright (C) 2015 Canonical Ltd. # # Author: Scott Moser # # Curtin is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Curtin is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with Curtin. If not, see . 
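# Helpers shared by the vmtest suite: a Popen wrapper with timeout and
# signal support (Command / check_call), a TimeoutExpired backport for
# Pythons whose subprocess lacks it, release discovery via the unittest
# loader, and parsers that turn "ifconfig" output into dictionaries.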
import os import subprocess import signal import threading from unittest import TestLoader class Command(object): """ based on https://gist.github.com/kirpit/1306188 """ command = None process = None status = None exception = None returncode = -1 def __init__(self, command, signal=signal.SIGTERM): self.command = command self.signal = signal def run(self, timeout=None, **kwargs): """ Run a command then return: (status, output, error). """ def target(**kwargs): try: self.process = subprocess.Popen(self.command, **kwargs) self.process.communicate() self.status = self.process.returncode except subprocess.CalledProcessError as e: self.exception = e self.returncode = e.returncode except Exception as e: self.exception = e # thread thread = threading.Thread(target=target, kwargs=kwargs) thread.start() thread.join(timeout) if thread.is_alive(): self.process.send_signal(self.signal) thread.join() self.exception = TimeoutExpired( cmd=self.command, timeout=timeout) if self.exception: raise self.exception if self.status != 0: raise subprocess.CalledProcessError(cmd=self.command, returncode=self.status) return 0 try: TimeoutExpired = subprocess.TimeoutExpired except AttributeError: class TimeoutExpired(subprocess.CalledProcessError): def __init__(self, *args, **kwargs): if not kwargs: kwargs = {} if len(args): # if args are given, convert them to kwargs. # *args is a tuple, convert it to a list to use pop args = list(args) for arg in ('cmd', 'output', 'timeout'): kwargs[arg] = args.pop(0) if not len(args): break returncode = -1 if 'timeout' in kwargs: self.timeout = kwargs.pop('timeout') else: self.timeout = -1 # do not use super here as it confuses pylint # https://github.com/PyCQA/pylint/issues/773 subprocess.CalledProcessError.__init__(self, returncode, **kwargs) def check_call(cmd, signal=signal.SIGTERM, **kwargs): # provide a 'check_call' like interface, but kill with a nice signal return Command(cmd, signal).run(**kwargs) def find_releases(): """Return a sorted list of releases defined in test cases.""" # Use the TestLoader to load all tests cases defined within # tests/vmtests/ and figure out which releases they are testing. loader = TestLoader() # dir with the vmtest modules (i.e. tests/vmtests/) tests_dir = os.path.dirname(__file__) # The root_dir for the curtin branch. (i.e. curtin/) root_dir = os.path.split(os.path.split(tests_dir)[0])[0] # Find all test modules defined in curtin/tests/vmtests/ module_test_suites = loader.discover(tests_dir, top_level_dir=root_dir) releases = set() for mts in module_test_suites: for class_test_suite in mts: for test_case in class_test_suite: if getattr(test_case, 'release', ''): releases.add(getattr(test_case, 'release')) return sorted(releases) def _parse_ifconfig_xenial(ifconfig_out): """Parse ifconfig output from xenial or earlier and return a dictionary. 
given content like below, return: {'eth0': {'address': '10.8.1.78', 'broadcast': '10.8.1.255', 'inet6': [{'address': 'fe80::216:3eff:fe63:c05d', 'prefixlen': '64', 'scope': 'Link'}, {'address': 'fdec:2922:2f07:0:216:3eff:fe63:c05d', 'prefixlen': '64', 'scope': 'Global'}], 'interface': 'eth0', 'link_encap': 'Ethernet', 'mac_address': '00:16:3e:63:c0:5d', 'mtu': 1500, 'multicast': True, 'netmask': '255.255.255.0', 'running': True, 'up': True}} eth0 Link encap:Ethernet HWaddr 00:16:3e:63:c0:5d inet addr:10.8.1.78 Bcast:10.8.1.255 Mask:255.255.255.0 inet6 addr: fe80::216:3eff:fe63:c05d/64 Scope:Link inet6 addr: fdec:2922:2f07:0:216:3eff:fe63:c05d/64 Scope:Global UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 RX packets:21503 errors:0 dropped:0 overruns:0 frame:0 TX packets:11346 errors:0 dropped:0 overruns:0 carrier:0 collisions:0 txqueuelen:1000 RX bytes:31556357 (31.5 MB) TX bytes:870943 (870.9 KB) """ ifaces = {} combined_fields = {'addr': 'address', 'Bcast': 'broadcast', 'Mask': 'netmask', 'MTU': 'mtu', 'encap': 'link_encap'} boolmap = {'RUNNING': 'running', 'UP': 'up', 'MULTICAST': 'multicast'} for line in ifconfig_out.splitlines(): if not line: continue if not line.startswith(" "): cur_iface = line.split()[0].rstrip(":") cur_data = {'inet6': [], 'interface': cur_iface} for t in boolmap.values(): cur_data[t] = False ifaces[cur_iface] = cur_data toks = line.split() if toks[0] == "inet6": cidr = toks[2] address, prefixlen = cidr.split("/") scope = toks[3].split(":")[1] cur_ipv6 = {'address': address, 'scope': scope, 'prefixlen': prefixlen} cur_data['inet6'].append(cur_ipv6) continue for i in range(0, len(toks)): cur_tok = toks[i] try: next_tok = toks[i+1] except IndexError: next_tok = None if cur_tok == "HWaddr": cur_data['mac_address'] = next_tok elif ":" in cur_tok: key, _colon, val = cur_tok.partition(":") if key in combined_fields: cur_data[combined_fields[key]] = val elif cur_tok in boolmap: cur_data[boolmap[cur_tok]] = True if 'mtu' in cur_data: cur_data['mtu'] = int(cur_data['mtu']) return ifaces def _parse_ifconfig_yakkety(ifconfig_out): """Parse ifconfig output from yakkety or later(?) and return a dictionary. 
    given ifconfig output like below, return:
    {'ens2': {'address': '10.5.0.78', 'broadcast': '10.5.255.255',
              'broadcast_flag': True,
              'inet6': [{'address': 'fe80::f816:3eff:fe05:9673',
                         'prefixlen': '64', 'scopeid': '0x20'},
                        {'address': 'fe80::f816:3eff:fe05:9673',
                         'prefixlen': '64', 'scopeid': '0x20'}],
              'interface': 'ens2', 'link_encap': 'Ethernet',
              'mac_address': 'fa:16:3e:05:96:73', 'mtu': 1500,
              'multicast': True, 'netmask': '255.255.0.0',
              'running': True, 'up': True}}

    ens2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.5.0.78  netmask 255.255.0.0  broadcast 10.5.255.255
        inet6 fe80::f816:3eff:fe05:9673  prefixlen 64  scopeid 0x20
        inet6 fe80::f816:3eff:fe05:9673  prefixlen 64  scopeid 0x20
        ether fa:16:3e:05:96:73  txqueuelen 1000  (Ethernet)
        RX packets 33196  bytes 48916947 (48.9 MB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 5458  bytes 411486 (411.4 KB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
    """
    fmap = {'mtu': 'mtu', 'inet': 'address',
            'netmask': 'netmask', 'broadcast': 'broadcast',
            'ether': 'mac_address'}
    boolmap = {'RUNNING': 'running', 'UP': 'up', 'MULTICAST': 'multicast',
               'BROADCAST': 'broadcast_flag'}
    ifaces = {}
    for line in ifconfig_out.splitlines():
        if not line:
            continue
        if not line.startswith(" "):
            cur_iface = line.split()[0].rstrip(":")
            cur_data = {'inet6': [], 'interface': cur_iface}
            for t in boolmap.values():
                cur_data[t] = False
            ifaces[cur_iface] = cur_data
        toks = line.split()
        if toks[0] == "inet6":
            cur_ipv6 = {'address': toks[1]}
            cur_data['inet6'].append(cur_ipv6)
        for i in range(0, len(toks)):
            cur_tok = toks[i]
            try:
                next_tok = toks[i+1]
            except IndexError:
                next_tok = None
            if cur_tok in fmap:
                cur_data[fmap[cur_tok]] = next_tok
            elif cur_tok in ('prefixlen', 'scopeid'):
                # cur_ipv6 is already in cur_data['inet6']; update in place.
                cur_ipv6[cur_tok] = next_tok
            elif cur_tok.startswith("flags="):
                # flags=4163<UP,BROADCAST,RUNNING,MULTICAST>
                flags = cur_tok[cur_tok.find("<") + 1:
                                cur_tok.rfind(">")].split(",")
                for flag in flags:
                    if flag in boolmap:
                        cur_data[boolmap[flag]] = True
            elif cur_tok == "(Ethernet)":
                cur_data['link_encap'] = 'Ethernet'
        if 'mtu' in cur_data:
            cur_data['mtu'] = int(cur_data['mtu'])
    return ifaces


def ifconfig_to_dict(ifconfig_a):
    # if the first token of the first line ends in a ':' then assume yakkety
    # parse ifconfig output and return a dictionary.
# # return a dictionary of network information like: # {'ens2': {'address': '10.5.0.78', 'broadcast': '10.5.255.255', # 'broadcast_flag': True, # 'inet6': [{'address': 'fe80::f816:3eff:fe05:9673', # 'prefixlen': '64', 'scopeid': '0x20'}, # {'address': 'fe80::f816:3eff:fe05:9673', # 'prefixlen': '64', 'scopeid': '0x20'}], # 'interface': 'ens2', 'link_encap': 'Ethernet', # 'mac_address': 'fa:16:3e:05:96:73', 'mtu': 1500, # 'multicast': True, 'netmask': '255.255.0.0', # 'running': True, 'up': True}} line = ifconfig_a.lstrip().splitlines()[0] if line.split()[0].endswith(":"): return _parse_ifconfig_yakkety(ifconfig_a) else: return _parse_ifconfig_xenial(ifconfig_a) curtin-0.1.0~bzr399/tests/vmtests/image_sync.py0000644000000000000000000004165212741214565017701 0ustar 00000000000000#!/usr/bin/python3 from simplestreams import util as sutil from simplestreams import contentsource from simplestreams import objectstores from simplestreams import log from simplestreams.log import LOG from simplestreams import mirrors from simplestreams import filters import argparse import errno import hashlib import os import shutil import signal import sys import tempfile from curtin import util IMAGE_SRC_URL = os.environ.get( 'IMAGE_SRC_URL', "http://maas.ubuntu.com/images/ephemeral-v2/daily/streams/v1/index.sjson") KEYRING = '/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg' ITEM_NAME_FILTERS = ['ftype~(root-image.gz|boot-initrd|boot-kernel)'] FORMAT_JSON = 'JSON' VMTEST_CONTENT_ID = 'com.ubuntu.maas:daily:v2:download' VMTEST_JSON_PATH = "streams/v1/vmtest.json" DEFAULT_OUTPUT_FORMAT = ( "%(release)-7s %(arch)s/%(subarch)s %(version_name)-10s %(item_name)s") DEFAULT_ARCHES = { 'i386': ['i386'], 'i586': ['i386'], 'i686': ['i386'], 'x86_64': ['amd64'], 'ppc64le': ['ppc64el'], 'armhf': ['armhf'], 'aarch64': ['arm64'], 's390x': ['s390x'], } def get_file_info(path, sums=None): # return dictionary with size and checksums of existing file LOG.info("getting info for %s" % path) buflen = 1024*1024 if sums is None: sums = ['sha256'] sumers = {k: hashlib.new(k) for k in sums} ret = {'size': os.path.getsize(path)} with open(path, "rb") as fp: while True: buf = fp.read(buflen) for sumer in sumers.values(): sumer.update(buf) if len(buf) != buflen: break ret.update({k: sumers[k].hexdigest() for k in sumers}) LOG.info("done getting ifo for %s: %s" % (path, ret)) return ret def generate_root_derived(path_gz, base_d="/", info_func=get_file_info): fpath_gz = os.path.join(base_d, path_gz) ri_name = 'vmtest.root-image' rtgz_name = 'vmtest.root-tgz' ri_path = os.path.dirname(path_gz) + "/" + ri_name rtgz_path = os.path.dirname(path_gz) + "/" + rtgz_name ri_fpath = os.path.join(base_d, ri_path) rtgz_fpath = os.path.join(base_d, rtgz_path) new_items = {ri_name: {'ftype': ri_name, 'path': ri_path}, rtgz_name: {'ftype': rtgz_name, 'path': rtgz_path}} tmpd = None try: # create tmpdir under output dir tmpd = tempfile.mkdtemp(dir=os.path.dirname(fpath_gz)) tmp_img = ri_fpath tmp_rtgz = rtgz_fpath if not os.path.exists(ri_fpath): # uncompress path_gz to tmpdir/root-image tmp_img = os.path.join(tmpd, ri_name) LOG.info("uncompressing %s to %s" % (fpath_gz, tmp_img)) util.subp(['sh', '-c', 'exec gunzip -c "$0" > "$1"', fpath_gz, tmp_img]) if not os.path.exists(rtgz_fpath): tmp_rtgz = os.path.join(tmpd, rtgz_name) m2r = ['tools/maas2roottar', tmp_img, tmp_rtgz] LOG.info("creating root-tgz from %s" % tmp_img) util.subp(m2r) if tmp_img != ri_fpath: os.rename(tmp_img, ri_fpath) if tmp_rtgz != rtgz_fpath: os.rename(tmp_rtgz, rtgz_fpath) finally: 
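        # Clean up the scratch directory whether or not the uncompress and
        # repack steps above succeeded.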
def generate_root_derived(path_gz, base_d="/", info_func=get_file_info):
    fpath_gz = os.path.join(base_d, path_gz)
    ri_name = 'vmtest.root-image'
    rtgz_name = 'vmtest.root-tgz'
    ri_path = os.path.dirname(path_gz) + "/" + ri_name
    rtgz_path = os.path.dirname(path_gz) + "/" + rtgz_name
    ri_fpath = os.path.join(base_d, ri_path)
    rtgz_fpath = os.path.join(base_d, rtgz_path)

    new_items = {ri_name: {'ftype': ri_name, 'path': ri_path},
                 rtgz_name: {'ftype': rtgz_name, 'path': rtgz_path}}

    tmpd = None
    try:
        # create tmpdir under output dir
        tmpd = tempfile.mkdtemp(dir=os.path.dirname(fpath_gz))
        tmp_img = ri_fpath
        tmp_rtgz = rtgz_fpath
        if not os.path.exists(ri_fpath):
            # uncompress path_gz to tmpdir/root-image
            tmp_img = os.path.join(tmpd, ri_name)
            LOG.info("uncompressing %s to %s" % (fpath_gz, tmp_img))
            util.subp(['sh', '-c', 'exec gunzip -c "$0" > "$1"',
                       fpath_gz, tmp_img])

        if not os.path.exists(rtgz_fpath):
            tmp_rtgz = os.path.join(tmpd, rtgz_name)
            m2r = ['tools/maas2roottar', tmp_img, tmp_rtgz]
            LOG.info("creating root-tgz from %s" % tmp_img)
            util.subp(m2r)

        if tmp_img != ri_fpath:
            os.rename(tmp_img, ri_fpath)
        if tmp_rtgz != rtgz_fpath:
            os.rename(tmp_rtgz, rtgz_fpath)
    finally:
        if tmpd:
            shutil.rmtree(tmpd)

    new_items[ri_name].update(info_func(ri_fpath))
    new_items[rtgz_name].update(info_func(rtgz_fpath))

    return new_items


def remove_empty_dir(dirpath):
    if os.path.exists(dirpath):
        # normpath never returns trailing / (unless '/')
        # so that dirname will always get our parent.
        dirpath = os.path.normpath(dirpath)
        try:
            os.rmdir(dirpath)
            LOG.info("removed empty directory '%s'", dirpath)
            remove_empty_dir(os.path.dirname(dirpath))
        except OSError as e:
            if e.errno == errno.ENOTEMPTY:
                pass


class FakeContentSource(contentsource.ContentSource):
    def __init__(self, path):
        self.url = path

    def open(self):
        raise ValueError(
            "'%s' content source never expected to be read" % self.url)


def products_version_get(tree, pedigree):
    tprod = tree.get('products', {}).get(pedigree[0], {})
    return tprod.get('versions', {}).get(pedigree[1], {})
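# A minimal sketch of products_version_get() on a hand-built products tree;
# the product and version names below are hypothetical:
def _example_products_version_get():
    tree = {'products': {'com.ubuntu.maas:daily:v2:download': {
        'versions': {'20160707': {'items': {}}}}}}
    pedigree = ('com.ubuntu.maas:daily:v2:download', '20160707')
    return products_version_get(tree, pedigree)  # -> {'items': {}}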
class CurtinVmTestMirror(mirrors.ObjectFilterMirror):
    # This class works as a 'target' mirror.
    # it creates the vmtest files as it needs them and
    # writes the maas image files and maas json files intact.
    # but adds a streams/v1/vmtest.json file to the created data.

    def __init__(self, config, out_d, verbosity=0):
        self.config = config
        self.filters = self.config.get('filters', [])
        self.out_d = os.path.abspath(out_d)
        self.objectstore = objectstores.FileStore(
            out_d, complete_callback=self.callback)
        self.file_info = {}
        self.data_path = ".vmtest-data"
        super(CurtinVmTestMirror, self).__init__(
            config=config, objectstore=self.objectstore)
        self.verbosity = verbosity
        self.dlstatus = {'columns': 80, 'total': 0, 'curpath': None}

    def callback(self, path, cur_bytes, tot_bytes):
        # progress written to screen
        if self.verbosity == 0:
            return
        # this is taken logically from simplestreams DotProgress
        if self.dlstatus['curpath'] != path:
            self.dlstatus['printed'] = 0
            self.dlstatus['curpath'] = path
            sys.stderr.write('=> %s [%s]\n' % (path, tot_bytes))
        if cur_bytes == tot_bytes:
            self.dlstatus['total'] += tot_bytes
            sys.stderr.write("\n")
            return

        columns = self.dlstatus['columns']
        printed = self.dlstatus['printed']
        toprint = int(cur_bytes * columns / tot_bytes) - printed
        if toprint <= 0:
            return
        sys.stderr.write('.' * toprint)
        sys.stderr.flush()
        self.dlstatus['printed'] += toprint

    def fpath(self, path):
        # return the full path to a local file in the mirror
        return os.path.join(self.out_d, path)

    def products_data_path(self, content_id):
        # our data path is .vmtest-data rather than .data
        return self.data_path + os.path.sep + content_id

    def _reference_count_data_path(self):
        # overridden from ObjectStoreMirrorWriter
        return self.data_path + os.path.sep + "references.json"

    def load_products(self, path=None, content_id=None):
        # overridden from ObjectStoreMirrorWriter
        # the reason we have copied this here from trunk
        # is bug 1511364, which is not fixed in all ubuntu versions
        if content_id:
            try:
                dpath = self.products_data_path(content_id)
                return sutil.load_content(self.source(dpath).read())
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise
        if path:
            return {}
        raise TypeError("unable to load_products with no path")

    def insert_version(self, data, src, target, pedigree):
        # this is called for any version that had items inserted
        # data is target['products'][pedigree[0]]['versions'][pedigree[1]],
        # a dictionary with possibly some tags and 'items': {'boot-initrd': ...}
        ri_name = 'vmtest.root-image'
        rtgz_name = 'vmtest.root-tgz'
        tver_data = products_version_get(target, pedigree)
        titems = tver_data.get('items')

        if ('root-image.gz' in titems and
                not (ri_name in titems and rtgz_name in titems)):
            # generate the root-image and root-tgz
            derived_items = generate_root_derived(
                titems['root-image.gz']['path'], base_d=self.out_d,
                info_func=self.get_file_info)
            for fname, item in derived_items.items():
                self.insert_item(item, src, target, pedigree + (fname,),
                                 FakeContentSource(item['path']))

    def get_file_info(self, path):
        # check and see if we might know checksum and size
        if path in self.file_info:
            return self.file_info[path]
        found = get_file_info(path)
        self.file_info[path] = found
        return found

    def remove_item(self, data, src, target, pedigree):
        super(CurtinVmTestMirror, self).remove_item(data, src, target,
                                                    pedigree)
        if 'path' in data:
            remove_empty_dir(self.fpath(os.path.dirname(data['path'])))

    def insert_products(self, path, target, content):
        # we override this because the default mirror inserts 'content',
        # whereas we want to insert the rendered 'target' tree.
        # the difference is that 'content' is the original (gpg-signed)
        # data, so our content will no longer have that signature.
        dpath = self.products_data_path(target['content_id'])
        self.store.insert_content(dpath, util.json_dumps(target))
        if not path:
            return
        # this will end up writing the content exactly as it
        # was in the source, leaving the signed data intact
        self.store.insert_content(path, content)

        # for our vmtest content id, we want to write
        # a vmtest.json in streams/v1/vmtest.json that can be queried
        # even though it will not appear in the index
        if target['content_id'] == VMTEST_CONTENT_ID:
            self.store.insert_content(VMTEST_JSON_PATH,
                                      util.json_dumps(target))

    def insert_index_entry(self, data, src, pedigree, contentsource):
        # this is overridden, because the default implementation
        # when syncing an index.json will call insert_products
        # and also insert_index_entry, and both actually end up
        # writing the .[s]json file that they should write.  Since
        # insert_products will do that, we just no-op this.
        return


def set_logging(verbose, log_file):
    vlevel = min(verbose, 2)
    level = (log.ERROR, log.INFO, log.DEBUG)[vlevel]
    log.basicConfig(stream=log_file, level=level)
    return vlevel


def main_mirror(args):
    if len(args.arches) == 0:
        try:
            karch = os.uname()[4]
            arches = DEFAULT_ARCHES[karch]
        except KeyError:
            msg = "No default arch list for kernel arch '%s'. Try '--arches'."
sys.stderr.write(msg % karch + "\n") return False else: arches = [] for f in args.arches: arches.extend(f.split(",")) arch_filter = "arch~(" + "|".join(arches) + ")" mirror_filters = [arch_filter] + ITEM_NAME_FILTERS + args.filters vlevel = set_logging(args.verbose, args.log_file) sys.stderr.write( "summary: \n " + '\n '.join([ "source: %s" % args.source, "output: %s" % args.output_d, "arches: %s" % arches, "filters: %s" % mirror_filters, ]) + '\n') mirror(output_d=args.output_d, source=args.source, mirror_filters=mirror_filters, max_items=args.max_items, keyring=args.keyring, verbosity=vlevel) def mirror(output_d, source=IMAGE_SRC_URL, mirror_filters=None, max_items=1, keyring=KEYRING, verbosity=0): if mirror_filters is None: mirror_filters = [f for f in ITEM_NAME_FILTERS] filter_list = filters.get_filters(mirror_filters) (source_url, initial_path) = sutil.path_from_mirror_url(source, None) def policy(content, path): # pylint: disable=W0613 if initial_path.endswith('sjson'): return sutil.read_signed(content, keyring=keyring) else: return content smirror = mirrors.UrlMirrorReader(source_url, policy=policy) LOG.debug( "summary: \n " + '\n '.join([ "source: %s" % source_url, "path: %s" % initial_path, "output: %s" % output_d, "filters: %s" % filter_list, ]) + '\n') mirror_config = {'max_items': max_items, 'filters': filter_list} tmirror = CurtinVmTestMirror(config=mirror_config, out_d=output_d, verbosity=verbosity) tmirror.sync(smirror, initial_path) def query_ptree(ptree, max_num=None, ifilters=None, path2url=None): results = [] pkey = 'products' verkey = 'versions' for prodname, proddata in sorted(ptree.get(pkey, {}).items()): if verkey not in proddata: continue cur = 0 for vername in sorted(proddata[verkey].keys(), reverse=True): if max_num is not None and cur >= max_num: break verdata = proddata[verkey][vername] cur += 1 for itemname, itemdata in sorted(verdata.get('items', {}).items()): flat = sutil.products_exdata(ptree, (prodname, vername, itemname)) if ifilters is not None and len(ifilters) > 0: if not filters.filter_dict(ifilters, flat): continue if path2url and 'path' in flat: flat['item_url'] = path2url(flat['path']) results.append(flat) return results def query(mirror, max_items=1, filter_list=None, verbosity=0): if filter_list is None: filter_list = [] ifilters = filters.get_filters(filter_list) def fpath(path): # return the full path to a local file in the mirror return os.path.join(mirror, path) try: stree = sutil.load_content(util.load_file(fpath(VMTEST_JSON_PATH))) except OSError: raise results = query_ptree(stree, max_num=max_items, ifilters=ifilters, path2url=fpath) return results def main_query(args): vlevel = set_logging(args.verbose, args.log_file) results = query(args.mirror_url, args.max_items, args.filters, verbosity=vlevel) try: if args.output_format == FORMAT_JSON: print(util.json_dumps(results).decode()) else: output = [] for item in results: try: output.append(args.output_format % item) except KeyError as e: sys.stderr.write("output format failed (%s) for: %s\n" % (e, item)) sys.exit(1) for line in sorted(output): print(line) except IOError as e: if e.errno == errno.EPIPE: sys.exit(0x80 | signal.SIGPIPE) raise def main(): parser = argparse.ArgumentParser() parser.add_argument('--log-file', default=sys.stderr, type=argparse.FileType('w')) parser.add_argument('--verbose', '-v', action='count', default=0) parser.set_defaults(func=None) subparsers = parser.add_subparsers(help='subcommand help') mirror_p = subparsers.add_parser( 'mirror', help='like sstream-mirror but 
for vmtest images') mirror_p.set_defaults(func=main_mirror) mirror_p.add_argument('--max', type=int, default=1, dest='max_items', help='store at most MAX items in the target') mirror_p.add_argument('--verbose', '-v', action='count', default=0) mirror_p.add_argument('--dry-run', action='store_true', default=False, help='only report what would be done') mirror_p.add_argument('--arches', action='append', default=[], help='which arches to mirror.') mirror_p.add_argument('--source', default=IMAGE_SRC_URL, help='maas images mirror') mirror_p.add_argument('--keyring', action='store', default=KEYRING, help='keyring to be specified to gpg via --keyring') mirror_p.add_argument('output_d') mirror_p.add_argument('filters', nargs='*', default=[]) query_p = subparsers.add_parser( 'query', help='like sstream-query but for vmtest mirror') query_p.set_defaults(func=main_query) query_p.add_argument('--max', type=int, default=None, dest='max_items', help='store at most MAX items in the target') query_p.add_argument('--path', default=None, help='sync from index or products file in mirror') fmt_group = query_p.add_mutually_exclusive_group() fmt_group.add_argument('--output-format', '-o', action='store', dest='output_format', default=DEFAULT_OUTPUT_FORMAT, help="specify output format per python str.format") fmt_group.add_argument('--json', action='store_const', const=FORMAT_JSON, dest='output_format', help="output in JSON as a list of dicts.") query_p.add_argument('--verbose', '-v', action='count', default=0) query_p.add_argument('mirror_url') query_p.add_argument('filters', nargs='*', default=[]) args = parser.parse_args() if args.func is None: parser.print_help() sys.exit(1) args.func(args) if __name__ == '__main__': main() sys.exit(0) # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/vmtests/releases.py0000644000000000000000000000217612741214565017364 0ustar 00000000000000from curtin.util import get_platform_arch class _ReleaseBase(object): repo = "maas-daily" arch = get_platform_arch() class _PreciseBase(_ReleaseBase): release = "precise" class _PreciseHWET(_ReleaseBase): release = "precise" krel = "trusty" class _TrustyBase(_ReleaseBase): release = "trusty" class _TrustyHWEU(_ReleaseBase): release = "trusty" krel = "utopic" class _TrustyHWEV(_ReleaseBase): release = "trusty" krel = "vivid" class _TrustyHWEW(_ReleaseBase): release = "trusty" krel = "wily" class _VividBase(_ReleaseBase): release = "vivid" class _WilyBase(_ReleaseBase): release = "wily" class _XenialBase(_ReleaseBase): release = "xenial" class _YakketyBase(_ReleaseBase): release = "yakkety" class _Releases(object): precise = _PreciseBase precise_hwe_t = _PreciseHWET trusty = _TrustyBase trusty_hwe_u = _TrustyHWEU trusty_hwe_v = _TrustyHWEV trusty_hwe_w = _TrustyHWEW vivid = _VividBase wily = _WilyBase xenial = _XenialBase yakkety = _YakketyBase base_vm_classes = _Releases # vi: ts=4 expandtab syntax=python curtin-0.1.0~bzr399/tests/vmtests/test_basic.py0000644000000000000000000003057312741214565017703 0ustar 00000000000000from . 
import ( VMBaseClass, get_apt_proxy) from .releases import base_vm_classes as relbase import os import re import textwrap class TestBasicAbs(VMBaseClass): interactive = False conf_file = "examples/tests/basic.yaml" extra_disks = ['128G', '128G', '4G'] nvme_disks = ['4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2)] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 blkid -o export /dev/vda2 > blkid_output_vda2 btrfs-show-super /dev/vdd > btrfs_show_super_vdd cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid cat /etc/fstab > fstab mkdir -p /dev/disk/by-dname ls /dev/disk/by-dname/ > ls_dname find /etc/network/interfaces.d > find_interfacesd v="" out=$(apt-config shell v Acquire::HTTP::Proxy) eval "$out" echo "$v" > apt-proxy """)] def test_output_files_exist(self): self.output_files_exist( ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", "btrfs_show_super_vdd", "fstab", "ls_dname", "ls_uuid", "proc_partitions"]) def test_ptable(self): blkid_info = self.get_blkid_data("blkid_output_vda") self.assertEquals(blkid_info["PTTYPE"], "dos") def test_partition_numbers(self): # vde should have partitions 1 and 10 disk = "vde" proc_partitions_path = os.path.join(self.td.collect, 'proc_partitions') self.assertTrue(os.path.exists(proc_partitions_path)) found = [] with open(proc_partitions_path, 'r') as fp: for line in fp.readlines(): if disk in line: found.append(line.split()[3]) # /proc/partitions should have 3 lines with 'vde' in them. expected = [disk + s for s in ["", "1", "10"]] self.assertEqual(found, expected) def test_partitions(self): with open(os.path.join(self.td.collect, "fstab")) as fp: fstab_lines = fp.readlines() print("\n".join(fstab_lines)) # Test that vda1 is on / blkid_info = self.get_blkid_data("blkid_output_vda1") fstab_entry = None for line in fstab_lines: if blkid_info['UUID'] in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/") # Test that vda2 is on /home blkid_info = self.get_blkid_data("blkid_output_vda2") fstab_entry = None for line in fstab_lines: if blkid_info['UUID'] in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/home") # Test whole disk vdd is mounted at /btrfs fstab_entry = None for line in fstab_lines: if "/dev/vdd" in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") def test_whole_disk_format(self): # confirm the whole disk format is the expected device with open(os.path.join(self.td.collect, "btrfs_show_super_vdd"), "r") as fp: btrfs_show_super = fp.read() with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp: ls_uuid = fp.read() # extract uuid from btrfs superblock btrfs_fsid = [line for line in btrfs_show_super.split('\n') if line.startswith('fsid\t\t')] self.assertEqual(len(btrfs_fsid), 1) btrfs_uuid = btrfs_fsid[0].split()[1] self.assertTrue(btrfs_uuid is not None) # extract uuid from /dev/disk/by-uuid on /dev/vdd # parsing ls -al output on /dev/disk/by-uuid: # lrwxrwxrwx 1 root root 9 Dec 4 20:02 # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg vdd_uuid = [line.split()[8] for line in ls_uuid.split('\n') if 'vdd' in line] self.assertEqual(len(vdd_uuid), 1) vdd_uuid = vdd_uuid.pop() self.assertTrue(vdd_uuid is not None) # compare them self.assertEqual(vdd_uuid, btrfs_uuid) def test_proxy_set(self): expected = 
get_apt_proxy() with open(os.path.join(self.td.collect, "apt-proxy")) as fp: apt_proxy_found = fp.read().rstrip() if expected: # the proxy should have gotten set through self.assertIn(expected, apt_proxy_found) else: # no proxy, so the output of apt-config dump should be empty self.assertEqual("", apt_proxy_found) class PreciseTestBasic(relbase.precise, TestBasicAbs): __test__ = True collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 blkid -o export /dev/vda2 > blkid_output_vda2 btrfs-show /dev/vdd > btrfs_show_super_vdd cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid cat /etc/fstab > fstab mkdir -p /dev/disk/by-dname ls /dev/disk/by-dname/ > ls_dname find /etc/network/interfaces.d > find_interfacesd v="" out=$(apt-config shell v Acquire::HTTP::Proxy) eval "$out" echo "$v" > apt-proxy """)] def test_whole_disk_format(self): # confirm the whole disk format is the expected device with open(os.path.join(self.td.collect, "btrfs_show_super_vdd"), "r") as fp: btrfs_show_super = fp.read() with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp: ls_uuid = fp.read() # extract uuid from btrfs superblock btrfs_fsid = re.findall('.*uuid:\ (.*)\n', btrfs_show_super) self.assertEqual(len(btrfs_fsid), 1) btrfs_uuid = btrfs_fsid.pop() self.assertTrue(btrfs_uuid is not None) # extract uuid from /dev/disk/by-uuid on /dev/vdd # parsing ls -al output on /dev/disk/by-uuid: # lrwxrwxrwx 1 root root 9 Dec 4 20:02 # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg vdd_uuid = [line.split()[8] for line in ls_uuid.split('\n') if 'vdd' in line] self.assertEqual(len(vdd_uuid), 1) vdd_uuid = vdd_uuid.pop() self.assertTrue(vdd_uuid is not None) # compare them self.assertEqual(vdd_uuid, btrfs_uuid) def test_ptable(self): print("test_ptable does not work for Precise") def test_dname(self): print("test_dname does not work for Precise") class TrustyTestBasic(relbase.trusty, TestBasicAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. 
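    # (A note on the pattern below: overriding an inherited test with a bare
    # print() is how these release classes skip checks the release cannot
    # pass; deleting the override re-enables the inherited assertion.)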
def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class PreciseHWETTestBasic(relbase.precise_hwe_t, PreciseTestBasic): # FIXME: off due to test_whole_disk_format failing __test__ = False class TrustyHWEUTestBasic(relbase.trusty_hwe_u, TrustyTestBasic): # off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEVTestBasic(relbase.trusty_hwe_v, TrustyTestBasic): # off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEWTestBasic(relbase.trusty_hwe_w, TrustyTestBasic): # off by default to safe test suite runtime, covered by bonding __test__ = False class WilyTestBasic(relbase.wily, TestBasicAbs): __test__ = True class XenialTestBasic(relbase.xenial, TestBasicAbs): __test__ = True class YakketyTestBasic(relbase.yakkety, TestBasicAbs): __test__ = True class TestBasicScsiAbs(TestBasicAbs): conf_file = "examples/tests/basic_scsi.yaml" disk_driver = 'scsi-hd' extra_disks = ['128G', '128G', '4G'] nvme_disks = ['4G'] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/sda > blkid_output_sda blkid -o export /dev/sda1 > blkid_output_sda1 blkid -o export /dev/sda2 > blkid_output_sda2 btrfs-show-super /dev/sdc > btrfs_show_super_sdc cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid ls -al /dev/disk/by-id/ > ls_disk_id cat /etc/fstab > fstab mkdir -p /dev/disk/by-dname ls /dev/disk/by-dname/ > ls_dname find /etc/network/interfaces.d > find_interfacesd v="" out=$(apt-config shell v Acquire::HTTP::Proxy) eval "$out" echo "$v" > apt-proxy """)] def test_output_files_exist(self): self.output_files_exist( ["blkid_output_sda", "blkid_output_sda1", "blkid_output_sda2", "btrfs_show_super_sdc", "fstab", "ls_dname", "ls_uuid", "ls_disk_id", "proc_partitions"]) def test_ptable(self): blkid_info = self.get_blkid_data("blkid_output_sda") self.assertEquals(blkid_info["PTTYPE"], "dos") def test_partition_numbers(self): # vde should have partitions 1 and 10 disk = "sdd" proc_partitions_path = os.path.join(self.td.collect, 'proc_partitions') self.assertTrue(os.path.exists(proc_partitions_path)) found = [] with open(proc_partitions_path, 'r') as fp: for line in fp.readlines(): if disk in line: found.append(line.split()[3]) # /proc/partitions should have 3 lines with 'vde' in them. 
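        # (the 'vde' comments above are inherited from the virtio variant;
        # the disk under test here is 'sdd'. Illustrative /proc/partitions
        # lines that satisfy this check:
        #    8       48    4194304  sdd
        #    8       49    2097152  sdd1
        #    8       58    1048576  sdd10)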
expected = [disk + s for s in ["", "1", "10"]] self.assertEqual(found, expected) def test_partitions(self): with open(os.path.join(self.td.collect, "fstab")) as fp: fstab_lines = fp.readlines() print("\n".join(fstab_lines)) # Test that vda1 is on / blkid_info = self.get_blkid_data("blkid_output_sda1") fstab_entry = None for line in fstab_lines: if blkid_info['UUID'] in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/") # Test that vda2 is on /home blkid_info = self.get_blkid_data("blkid_output_sda2") fstab_entry = None for line in fstab_lines: if blkid_info['UUID'] in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/home") # Test whole disk sdc is mounted at /btrfs fstab_entry = None for line in fstab_lines: if "/dev/sdc" in line: fstab_entry = line break self.assertIsNotNone(fstab_entry) self.assertEqual(fstab_entry.split(' ')[1], "/btrfs") def test_whole_disk_format(self): # confirm the whole disk format is the expected device with open(os.path.join(self.td.collect, "btrfs_show_super_sdc"), "r") as fp: btrfs_show_super = fp.read() with open(os.path.join(self.td.collect, "ls_uuid"), "r") as fp: ls_uuid = fp.read() # extract uuid from btrfs superblock btrfs_fsid = [line for line in btrfs_show_super.split('\n') if line.startswith('fsid\t\t')] self.assertEqual(len(btrfs_fsid), 1) btrfs_uuid = btrfs_fsid[0].split()[1] self.assertTrue(btrfs_uuid is not None) # extract uuid from /dev/disk/by-uuid on /dev/sdc # parsing ls -al output on /dev/disk/by-uuid: # lrwxrwxrwx 1 root root 9 Dec 4 20:02 # d591e9e9-825a-4f0a-b280-3bfaf470b83c -> ../../vdg uuid = [line.split()[8] for line in ls_uuid.split('\n') if 'sdc' in line] self.assertEqual(len(uuid), 1) uuid = uuid.pop() self.assertTrue(uuid is not None) # compare them self.assertEqual(uuid, btrfs_uuid) class XenialTestScsiBasic(relbase.xenial, TestBasicScsiAbs): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_bcache_basic.py0000644000000000000000000000347112741214565021165 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import textwrap import os class TestBcacheBasic(VMBaseClass): arch_skip = [ "s390x", # lp:1565029 ] conf_file = "examples/tests/bcache_basic.yaml" extra_disks = ['2G'] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D bcache-super-show /dev/vda2 > bcache_super_vda2 ls /sys/fs/bcache > bcache_ls cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode cat /proc/mounts > proc_mounts cat /proc/partitions > proc_partitions find /etc/network/interfaces.d > find_interfacesd """)] def test_bcache_output_files_exist(self): self.output_files_exist(["bcache_super_vda2", "bcache_ls", "bcache_cache_mode"]) def test_bcache_status(self): bcache_cset_uuid = None fname = os.path.join(self.td.collect, "bcache_super_vda2") with open(fname, "r") as fp: for line in fp.read().splitlines(): if line != "" and line.split()[0] == "cset.uuid": bcache_cset_uuid = line.split()[-1].rstrip() self.assertIsNotNone(bcache_cset_uuid) with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp: self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) def test_bcache_cachemode(self): self.check_file_regex("bcache_cache_mode", r"\[writeback\]") class PreciseHWETBcacheBasic(relbase.precise_hwe_t, TestBcacheBasic): __test__ = True class TrustyBcacheBasic(relbase.trusty, TestBcacheBasic): __test__ = False # covered by test_raid5_bcache class XenialBcacheBasic(relbase.xenial, TestBcacheBasic): __test__ = True class YakketyBcacheBasic(relbase.yakkety, TestBcacheBasic): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_bonding.py0000644000000000000000000001731212741214565020236 0ustar 00000000000000from . import VMBaseClass, logger, helpers from .releases import base_vm_classes as relbase import ipaddress import os import re import textwrap import yaml class TestNetworkAbs(VMBaseClass): interactive = False conf_file = "examples/tests/bonding_network.yaml" extra_disks = [] extra_nics = [] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D ifconfig -a > ifconfig_a cp -av /etc/network/interfaces . cp -av /etc/udev/rules.d/70-persistent-net.rules . 
ip -o route show > ip_route_show route -n > route_n dpkg-query -W -f '${Status}' ifenslave > ifenslave_installed find /etc/network/interfaces.d > find_interfacesd """)] def test_output_files_exist(self): self.output_files_exist(["ifconfig_a", "interfaces", "70-persistent-net.rules", "ip_route_show", "ifenslave_installed", "route_n"]) def test_ifenslave_installed(self): with open(os.path.join(self.td.collect, "ifenslave_installed")) as fp: status = fp.read().strip() logger.debug('ifenslave installed: {}'.format(status)) self.assertEqual('install ok installed', status) def test_etc_network_interfaces(self): with open(os.path.join(self.td.collect, "interfaces")) as fp: eni = fp.read() logger.debug('etc/network/interfaces:\n{}'.format(eni)) expected_eni = self.get_expected_etc_network_interfaces() eni_lines = eni.split('\n') for line in expected_eni.split('\n'): self.assertTrue(line in eni_lines) def test_ifconfig_output(self): '''check ifconfig output with test input''' network_state = self.get_network_state() logger.debug('expected_network_state:\n{}'.format( yaml.dump(network_state, default_flow_style=False, indent=4))) with open(os.path.join(self.td.collect, "ifconfig_a")) as fp: ifconfig_a = fp.read() logger.debug('ifconfig -a:\n{}'.format(ifconfig_a)) ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a) logger.debug('parsed ifcfg dict:\n{}'.format( yaml.dump(ifconfig_dict, default_flow_style=False, indent=4))) with open(os.path.join(self.td.collect, "ip_route_show")) as fp: ip_route_show = fp.read() logger.debug("ip route show:\n{}".format(ip_route_show)) for line in [line for line in ip_route_show.split('\n') if 'src' in line]: m = re.search(r'^(?P\S+)\sdev\s' + r'(?P\S+)\s+' + r'proto kernel\s+scope link' + r'\s+src\s(?P\S+)', line) route_info = m.groupdict('') logger.debug(route_info) with open(os.path.join(self.td.collect, "route_n")) as fp: route_n = fp.read() logger.debug("route -n:\n{}".format(route_n)) interfaces = network_state.get('interfaces') for iface in interfaces.values(): subnets = iface.get('subnets', {}) if subnets: for index, subnet in zip(range(0, len(subnets)), subnets): iface['index'] = index if index == 0: ifname = "{name}".format(**iface) else: ifname = "{name}:{index}".format(**iface) self.check_interface(iface, ifconfig_dict.get(ifname), route_n) else: iface['index'] = 0 self.check_interface(iface, ifconfig_dict.get(iface['name']), route_n) def check_interface(self, iface, ifconfig, route_n): logger.debug( 'testing iface:\n{}\n\nifconfig:\n{}'.format(iface, ifconfig)) subnets = iface.get('subnets', {}) if subnets and iface['index'] != 0: ifname = "{name}:{index}".format(**iface) else: ifname = "{name}".format(**iface) # initial check, do we have the correct iface ? 
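        # (e.g., with two subnets on bond0, index 0 checks 'bond0' itself and
        # index 1 checks the ifconfig alias 'bond0:1' -- illustrative names)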
logger.debug('ifname={}'.format(ifname)) logger.debug("ifconfig['interface']={}".format(ifconfig['interface'])) self.assertEqual(ifname, ifconfig['interface']) # check physical interface attributes # FIXME: can't check mac_addr under bonding since # the bond might change slave mac addrs for key in ['mtu']: if key in iface and iface[key]: self.assertEqual(iface[key], ifconfig[key]) def __get_subnet(subnets, subidx): for index, subnet in zip(range(0, len(subnets)), subnets): if index == subidx: break return subnet # check subnet related attributes, and specifically only # the subnet specified by iface['index'] subnets = iface.get('subnets', {}) if subnets: subnet = __get_subnet(subnets, iface['index']) if 'address' in subnet and subnet['address']: if ':' in subnet['address']: inet_iface = ipaddress.IPv6Interface( subnet['address']) else: inet_iface = ipaddress.IPv4Interface( subnet['address']) # check ip addr self.assertEqual(str(inet_iface.ip), ifconfig['address']) self.assertEqual(str(inet_iface.netmask), ifconfig['netmask']) self.assertEqual( str(inet_iface.network.broadcast_address), ifconfig['broadcast']) # handle gateway by looking at routing table if 'gateway' in subnet and subnet['gateway']: gw_ip = subnet['gateway'] gateways = [line for line in route_n.split('\n') if 'UG' in line and gw_ip in line] logger.debug('matching gateways:\n{}'.format(gateways)) self.assertEqual(len(gateways), 1) [gateways] = gateways (dest, gw, genmask, flags, metric, ref, use, iface) = \ gateways.split() logger.debug('expected gw:{} found gw:{}'.format(gw_ip, gw)) self.assertEqual(gw_ip, gw) class PreciseHWETTestBonding(relbase.precise_hwe_t, TestNetworkAbs): __test__ = True # package names on precise are different, need to check on ifenslave-2.6 collect_scripts = TestNetworkAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D dpkg-query -W -f '${Status}' ifenslave-2.6 > ifenslave_installed """)] class TrustyTestBonding(relbase.trusty, TestNetworkAbs): __test__ = False class TrustyHWEUTestBonding(relbase.trusty_hwe_u, TrustyTestBonding): __test__ = True class TrustyHWEVTestBonding(relbase.trusty_hwe_v, TrustyTestBonding): # Working, but off by default to safe test suite runtime # oldest/newest HWE-* covered above/below __test__ = False class TrustyHWEWTestBonding(relbase.trusty_hwe_w, TrustyTestBonding): __test__ = True class WilyTestBonding(relbase.wily, TestNetworkAbs): __test__ = True class XenialTestBonding(relbase.xenial, TestNetworkAbs): __test__ = True class YakketyTestBonding(relbase.yakkety, TestNetworkAbs): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_lvm.py0000644000000000000000000000442312741214565017413 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import textwrap class TestLvmAbs(VMBaseClass): conf_file = "examples/tests/lvm.yaml" interactive = False extra_disks = [] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab ls /dev/disk/by-dname > ls_dname find /etc/network/interfaces.d > find_interfacesd pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs """)] fstab_expected = { '/dev/vg1/lv1': '/srv/data', '/dev/vg1/lv2': '/srv/backup', } disk_to_check = [('main_disk', 1), ('main_disk', 5), ('main_disk', 6), ('vg1-lv1', 0), ('vg1-lv2', 0)] def test_lvs(self): self.check_file_strippedline("lvs", "lv1=vg1") self.check_file_strippedline("lvs", "lv2=vg1") def test_pvs(self): self.check_file_strippedline("pvs", "vg1=/dev/vda5") self.check_file_strippedline("pvs", "vg1=/dev/vda6") def test_output_files_exist(self): self.output_files_exist( ["fstab", "ls_dname"]) class PreciseTestLvm(relbase.precise, TestLvmAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") class PreciseHWETTestLvm(relbase.precise_hwe_t, PreciseTestLvm): __test__ = True class TrustyTestLvm(relbase.trusty, TestLvmAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") class WilyTestLvm(relbase.wily, TestLvmAbs): __test__ = True class XenialTestLvm(relbase.xenial, TestLvmAbs): __test__ = True class YakketyTestLvm(relbase.yakkety, TestLvmAbs): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_mdadm_bcache.py0000644000000000000000000003315112741214565021164 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import textwrap import os class TestMdadmAbs(VMBaseClass): interactive = False extra_disks = [] active_mdadm = "1" collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab mdadm --detail --scan > mdadm_status mdadm --detail --scan | grep -c ubuntu > mdadm_active1 grep -c active /proc/mdstat > mdadm_active2 ls /dev/disk/by-dname > ls_dname find /etc/network/interfaces.d > find_interfacesd """)] def test_mdadm_output_files_exist(self): self.output_files_exist( ["fstab", "mdadm_status", "mdadm_active1", "mdadm_active2", "ls_dname"]) def test_mdadm_status(self): # ubuntu: is the name assigned to the md array self.check_file_regex("mdadm_status", r"ubuntu:[0-9]*") self.check_file_strippedline("mdadm_active1", self.active_mdadm) self.check_file_strippedline("mdadm_active2", self.active_mdadm) class TestMdadmBcacheAbs(TestMdadmAbs): arch_skip = [ "s390x", # lp:1565029 ] conf_file = "examples/tests/mdadm_bcache.yaml" disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 3), ('main_disk', 4), ('main_disk', 5), ('main_disk', 6), ('md0', 0), ('cached_array', 0), ('cached_array_2', 0)] extra_disks = ['4G', '4G'] collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D bcache-super-show /dev/vda6 > bcache_super_vda6 bcache-super-show /dev/vda7 > bcache_super_vda7 bcache-super-show /dev/md0 > bcache_super_md0 ls /sys/fs/bcache > bcache_ls cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode cat /sys/block/bcache1/bcache/cache_mode >> bcache_cache_mode cat /sys/block/bcache2/bcache/cache_mode >> bcache_cache_mode cat /proc/mounts > proc_mounts find /etc/network/interfaces.d > find_interfacesd """)] fstab_expected = { '/dev/vda1': '/media/sda1', '/dev/vda7': '/boot', '/dev/bcache1': '/media/data', '/dev/bcache0': '/media/bcache_normal', '/dev/bcache2': '/media/bcachefoo_fulldiskascache_storage' } def test_bcache_output_files_exist(self): self.output_files_exist(["bcache_super_vda6", "bcache_super_vda7", "bcache_super_md0", "bcache_ls", "bcache_cache_mode"]) def test_bcache_status(self): bcache_supers = [ "bcache_super_vda6", "bcache_super_vda7", "bcache_super_md0", ] bcache_cset_uuid = None found = {} for bcache_super in bcache_supers: with open(os.path.join(self.td.collect, bcache_super), "r") as fp: for line in fp.read().splitlines(): if line != "" and line.split()[0] == "cset.uuid": bcache_cset_uuid = line.split()[-1].rstrip() if bcache_cset_uuid in found: found[bcache_cset_uuid].append(bcache_super) else: found[bcache_cset_uuid] = [bcache_super] self.assertIsNotNone(bcache_cset_uuid) with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp: self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) # one cset.uuid for all devices self.assertEqual(len(found), 1) # three devices with same cset.uuid self.assertEqual(len(found[bcache_cset_uuid]), 3) # check the cset.uuid in the dict self.assertEqual(list(found.keys()).pop(), bcache_cset_uuid) def test_bcache_cachemode(self): # definition is on order 0->back,1->through,2->around # but after reboot it can be anything since order is not guaranteed # until we find a way to redetect the order we just check that all # three are there self.check_file_regex("bcache_cache_mode", r"\[writeback\]") self.check_file_regex("bcache_cache_mode", r"\[writethrough\]") self.check_file_regex("bcache_cache_mode", r"\[writearound\]") class TrustyTestMdadmBcache(relbase.trusty, TestMdadmBcacheAbs): __test__ = True # 
FIXME(LP: #1523037): dname does not work on trusty # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class TrustyHWEUTestMdadmBcache(relbase.trusty_hwe_u, TrustyTestMdadmBcache): __test__ = True class WilyTestMdadmBcache(relbase.wily, TestMdadmBcacheAbs): __test__ = True class XenialTestMdadmBcache(relbase.xenial, TestMdadmBcacheAbs): __test__ = True class YakketyTestMdadmBcache(relbase.yakkety, TestMdadmBcacheAbs): __test__ = True class TestMirrorbootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/mirrorboot.yaml" # initialize secondary disk extra_disks = ['4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('second_disk', 1), ('md0', 0)] class TrustyTestMirrorboot(relbase.trusty, TestMirrorbootAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class TrustyHWEUTestMirrorboot(relbase.trusty_hwe_u, TrustyTestMirrorboot): # This tests kernel upgrade in target __test__ = True class WilyTestMirrorboot(relbase.wily, TestMirrorbootAbs): __test__ = True class XenialTestMirrorboot(relbase.xenial, TestMirrorbootAbs): __test__ = True class YakketyTestMirrorboot(relbase.yakkety, TestMirrorbootAbs): __test__ = True class TestRaid5bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid5boot.yaml" # initialize secondary disk extra_disks = ['4G', '4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('second_disk', 1), ('third_disk', 1), ('md0', 0)] class TrustyTestRaid5Boot(relbase.trusty, TestRaid5bootAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class TrustyHWEUTestRaid5Boot(relbase.trusty_hwe_u, TrustyTestRaid5Boot): # This tests kernel upgrade in target __test__ = True class WilyTestRaid5boot(relbase.wily, TestRaid5bootAbs): __test__ = True class XenialTestRaid5boot(relbase.xenial, TestRaid5bootAbs): __test__ = True class YakketyTestRaid5boot(relbase.yakkety, TestRaid5bootAbs): __test__ = True class TestRaid6bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid6boot.yaml" # initialize secondary disk extra_disks = ['4G', '4G', '4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('second_disk', 1), ('third_disk', 1), ('fourth_disk', 1), ('md0', 0)] collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D mdadm --detail --scan > mdadm_detail """)] def test_raid6_output_files_exist(self): self.output_files_exist( ["mdadm_detail"]) def test_mdadm_custom_name(self): # the raid6boot.yaml sets this name, check if it was set self.check_file_regex("mdadm_detail", r"ubuntu:foobar") class TrustyTestRaid6boot(relbase.trusty, TestRaid6bootAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty # when dname works on trusty, then we need to re-enable by removing line. 
def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class TrustyHWEUTestRaid6boot(relbase.trusty_hwe_u, TrustyTestRaid6boot): __test__ = True class WilyTestRaid6boot(relbase.wily, TestRaid6bootAbs): __test__ = True class XenialTestRaid6boot(relbase.xenial, TestRaid6bootAbs): __test__ = True class YakketyTestRaid6boot(relbase.yakkety, TestRaid6bootAbs): __test__ = True class TestRaid10bootAbs(TestMdadmAbs): # alternative config for more complex setup conf_file = "examples/tests/raid10boot.yaml" # initialize secondary disk extra_disks = ['4G', '4G', '4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('second_disk', 1), ('third_disk', 1), ('fourth_disk', 1), ('md0', 0)] class TrustyTestRaid10boot(relbase.trusty, TestRaid10bootAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class TrustyHWEUTestRaid10boot(relbase.trusty_hwe_u, TrustyTestRaid10boot): __test__ = True class WilyTestRaid10boot(relbase.wily, TestRaid10bootAbs): __test__ = True class XenialTestRaid10boot(relbase.xenial, TestRaid10bootAbs): __test__ = True class YakketyTestRaid10boot(relbase.yakkety, TestRaid10bootAbs): __test__ = True class TestAllindataAbs(TestMdadmAbs): # more complex, needs more time # alternative config for more complex setup conf_file = "examples/tests/allindata.yaml" # we have to avoid a systemd hang due to the way it handles dmcrypt extra_kern_args = "--- luks=no" active_mdadm = "4" # initialize secondary disk extra_disks = ['5G', '5G', '5G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 3), ('main_disk', 4), ('main_disk', 5), ('second_disk', 1), ('second_disk', 2), ('second_disk', 3), ('second_disk', 4), ('third_disk', 1), ('third_disk', 2), ('third_disk', 3), ('third_disk', 4), ('fourth_disk', 1), ('fourth_disk', 2), ('fourth_disk', 3), ('fourth_disk', 4), ('md0', 0), ('md1', 0), ('md2', 0), ('md3', 0), ('vg1-lv1', 0), ('vg1-lv2', 0)] collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D pvdisplay -C --separator = -o vg_name,pv_name --noheadings > pvs lvdisplay -C --separator = -o lv_name,vg_name --noheadings > lvs cat /etc/crypttab > crypttab yes "testkey" | cryptsetup open /dev/vg1/lv3 dmcrypt0 --type luks ls -laF /dev/mapper/dmcrypt0 > mapper mkdir -p /tmp/xfstest mount /dev/mapper/dmcrypt0 /tmp/xfstest xfs_info /tmp/xfstest/ > xfs_info """)] fstab_expected = { '/dev/vg1/lv1': '/srv/data', '/dev/vg1/lv2': '/srv/backup', } def test_output_files_exist(self): self.output_files_exist(["pvs", "lvs", "crypttab", "mapper", "xfs_info"]) def test_lvs(self): self.check_file_strippedline("lvs", "lv1=vg1") self.check_file_strippedline("lvs", "lv2=vg1") self.check_file_strippedline("lvs", "lv3=vg1") def test_pvs(self): self.check_file_strippedline("pvs", "vg1=/dev/md0") self.check_file_strippedline("pvs", "vg1=/dev/md1") self.check_file_strippedline("pvs", "vg1=/dev/md2") self.check_file_strippedline("pvs", "vg1=/dev/md3") def test_dmcrypt(self): self.check_file_regex("crypttab", r"dmcrypt0.*luks") self.check_file_regex("mapper", r"^lrwxrwxrwx.*/dev/mapper/dmcrypt0") self.check_file_regex("xfs_info", r"^meta-data=/dev/mapper/dmcrypt0") class TrustyTestAllindata(relbase.trusty, TestAllindataAbs): __test__ = False # luks=no does not disable 
# mounting of device

    # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect
    # sda-part2 to exist in /dev/disk/by-dname as we can on other releases
    # when dname works on trusty, then we need to re-enable by removing line.
    def test_dname(self):
        print("test_dname does not work for Trusty")

    def test_ptable(self):
        print("test_ptable does not work for Trusty")


class TrustyHWEUTestAllindata(relbase.trusty_hwe_u, TrustyTestAllindata):
    __test__ = False  # luks=no does not disable mounting of device


class WilyTestAllindata(relbase.wily, TestAllindataAbs):
    __test__ = True


class XenialTestAllindata(relbase.xenial, TestAllindataAbs):
    __test__ = True


class YakketyTestAllindata(relbase.yakkety, TestAllindataAbs):
    __test__ = True

curtin-0.1.0~bzr399/tests/vmtests/test_multipath.py0000644000000000000000000000415112741214565020622 0ustar 00000000000000
from . import VMBaseClass
from .releases import base_vm_classes as relbase

import os
import textwrap


class TestMultipathBasicAbs(VMBaseClass):
    conf_file = "examples/tests/multipath.yaml"
    multipath = True
    disk_driver = 'scsi-hd'
    extra_disks = []
    nvme_disks = []
    collect_scripts = [textwrap.dedent("""
        cd OUTPUT_COLLECT_D
        blkid -o export /dev/sda > blkid_output_sda
        blkid -o export /dev/sda1 > blkid_output_sda1
        blkid -o export /dev/sda2 > blkid_output_sda2
        blkid -o export /dev/sdb > blkid_output_sdb
        blkid -o export /dev/sdb1 > blkid_output_sdb1
        blkid -o export /dev/sdb2 > blkid_output_sdb2
        dmsetup ls > dmsetup_ls
        dmsetup info > dmsetup_info
        cat /proc/partitions > proc_partitions
        multipath -ll > multipath_ll
        multipath -v3 -ll > multipath_v3_ll
        multipath -r > multipath_r
        cp -a /etc/multipath* .
        ls -al /dev/disk/by-uuid/ > ls_uuid
        ls -al /dev/disk/by-id/ > ls_disk_id
        readlink -f /sys/class/block/sda/holders/dm-0 > holders_sda
        readlink /sys/class/block/sdb/holders/dm-0 > holders_sdb
        cat /etc/fstab > fstab
        mkdir -p /dev/disk/by-dname
        ls /dev/disk/by-dname/ > ls_dname
        find /etc/network/interfaces.d > find_interfacesd
        """)]

    def test_multipath_disks_match(self):
        sda = os.path.join(self.td.collect, 'holders_sda')
        sdb = os.path.join(self.td.collect, 'holders_sdb')
        self.assertTrue(os.path.exists(sda))
        self.assertTrue(os.path.exists(sdb))
        with open(sda, 'r') as fp:
            sda_data = fp.read()
            print('sda holders:\n%s' % sda_data)
        # read the sdb holders file (the original opened sda twice and
        # printed sda_data for both, which made the comparison vacuous)
        with open(sdb, 'r') as fp:
            sdb_data = fp.read()
            print('sdb holders:\n%s' % sdb_data)
        self.assertEqual(sda_data, sdb_data)


class TrustyTestMultipathBasic(relbase.trusty, TestMultipathBasicAbs):
    __test__ = True


class XenialTestMultipathBasic(relbase.xenial, TestMultipathBasicAbs):
    __test__ = True


class YakketyTestMultipathBasic(relbase.yakkety, TestMultipathBasicAbs):
    __test__ = True

curtin-0.1.0~bzr399/tests/vmtests/test_network.py0000644000000000000000000004053012741214565020305 0ustar 00000000000000
from . import VMBaseClass, logger, helpers
from .releases import base_vm_classes as relbase

import ipaddress
import os
import re
import subprocess
import textwrap
import yaml


class TestNetworkAbs(VMBaseClass):
    interactive = False
    conf_file = "examples/tests/basic_network.yaml"
    extra_disks = []
    extra_nics = []
    collect_scripts = [textwrap.dedent("""
        cd OUTPUT_COLLECT_D
        ifconfig -a > ifconfig_a
        cp -av /etc/network/interfaces .
        cp -av /etc/network/interfaces.d .
        find /etc/network/interfaces.d > find_interfacesd
        cp /etc/resolv.conf .
        cp -av /etc/udev/rules.d/70-persistent-net.rules .
ip -o route show > ip_route_show route -n > route_n cp -av /run/network ./run_network """)] def test_output_files_exist(self): self.output_files_exist(["ifconfig_a", "interfaces", "resolv.conf", "70-persistent-net.rules", "ip_route_show", "route_n"]) def test_etc_network_interfaces(self): with open(os.path.join(self.td.collect, "interfaces")) as fp: eni = fp.read() logger.debug('etc/network/interfaces:\n{}'.format(eni)) expected_eni = self.get_expected_etc_network_interfaces() eni_lines = eni.split('\n') for line in expected_eni.split('\n'): self.assertTrue(line in eni_lines) def test_etc_resolvconf(self): with open(os.path.join(self.td.collect, "resolv.conf")) as fp: resolvconf = fp.read() logger.debug('etc/resolv.conf:\n{}'.format(resolvconf)) resolv_lines = resolvconf.split('\n') logger.debug('resolv.conf lines:\n{}'.format(resolv_lines)) # resolv.conf ''' nameserver X.Y.Z.A nameserver 1.2.3.4 search foo.bar ''' # eni '''' auto eth1:1 iface eth1:1 inet static dns-nameserver X.Y.Z.A dns-search foo.bar ''' # iface dict '''' eth1:1: dns: nameserver: X.Y.Z.A search: foo.bar ''' expected_ifaces = self.get_expected_etc_resolvconf() logger.debug('parsed eni ifaces:\n{}'.format(expected_ifaces)) for ifname in expected_ifaces.keys(): iface = expected_ifaces.get(ifname) for k, v in iface.get('dns', {}).items(): dns_line = '{} {}'.format( k.replace('nameservers', 'nameserver'), " ".join(v)) logger.debug('dns_line:{}'.format(dns_line)) self.assertTrue(dns_line in resolv_lines) def test_ifconfig_output(self): '''check ifconfig output with test input''' network_state = self.get_network_state() logger.debug('expected_network_state:\n{}'.format( yaml.dump(network_state, default_flow_style=False, indent=4))) with open(os.path.join(self.td.collect, "ifconfig_a")) as fp: ifconfig_a = fp.read() logger.debug('ifconfig -a:\n{}'.format(ifconfig_a)) ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a) logger.debug('parsed ifcfg dict:\n{}'.format( yaml.dump(ifconfig_dict, default_flow_style=False, indent=4))) with open(os.path.join(self.td.collect, "ip_route_show")) as fp: ip_route_show = fp.read() logger.debug("ip route show:\n{}".format(ip_route_show)) for line in [line for line in ip_route_show.split('\n') if 'src' in line]: m = re.search(r'^(?P\S+)\sdev\s' + r'(?P\S+)\s+' + r'proto kernel\s+scope link' + r'\s+src\s(?P\S+)', line) route_info = m.groupdict('') logger.debug(route_info) with open(os.path.join(self.td.collect, "route_n")) as fp: route_n = fp.read() logger.debug("route -n:\n{}".format(route_n)) interfaces = network_state.get('interfaces') for iface in interfaces.values(): subnets = iface.get('subnets', {}) if subnets: for index, subnet in zip(range(0, len(subnets)), subnets): iface['index'] = index if index == 0: ifname = "{name}".format(**iface) else: ifname = "{name}:{index}".format(**iface) self.check_interface(iface, ifconfig_dict.get(ifname), route_n) else: iface['index'] = 0 self.check_interface(iface, ifconfig_dict.get(iface['name']), route_n) def check_interface(self, iface, ifconfig, route_n): logger.debug( 'testing iface:\n{}\n\nifconfig:\n{}'.format(iface, ifconfig)) subnets = iface.get('subnets', {}) if subnets and iface['index'] != 0: ifname = "{name}:{index}".format(**iface) else: ifname = "{name}".format(**iface) # initial check, do we have the correct iface ? 
logger.debug('ifname={}'.format(ifname)) logger.debug("ifconfig['interface']={}".format(ifconfig['interface'])) self.assertEqual(ifname, ifconfig['interface']) # check physical interface attributes for key in ['mac_address', 'mtu']: if key in iface and iface[key]: self.assertEqual(iface[key], ifconfig[key]) def __get_subnet(subnets, subidx): for index, subnet in zip(range(0, len(subnets)), subnets): if index == subidx: break return subnet # check subnet related attributes, and specifically only # the subnet specified by iface['index'] subnets = iface.get('subnets', {}) if subnets: subnet = __get_subnet(subnets, iface['index']) if 'address' in subnet and subnet['address']: if ':' in subnet['address']: inet_iface = ipaddress.IPv6Interface( subnet['address']) else: inet_iface = ipaddress.IPv4Interface( subnet['address']) # check ip addr self.assertEqual(str(inet_iface.ip), ifconfig['address']) self.assertEqual(str(inet_iface.netmask), ifconfig['netmask']) self.assertEqual( str(inet_iface.network.broadcast_address), ifconfig['broadcast']) # handle gateway by looking at routing table if 'gateway' in subnet and subnet['gateway']: gw_ip = subnet['gateway'] gateways = [line for line in route_n.split('\n') if 'UG' in line and gw_ip in line] logger.debug('matching gateways:\n{}'.format(gateways)) self.assertEqual(len(gateways), 1) [gateways] = gateways (dest, gw, genmask, flags, metric, ref, use, iface) = \ gateways.split() logger.debug('expected gw:{} found gw:{}'.format(gw_ip, gw)) self.assertEqual(gw_ip, gw) class TestNetworkStaticAbs(TestNetworkAbs): conf_file = "examples/tests/basic_network_static.yaml" class TestNetworkVlanAbs(TestNetworkAbs): conf_file = "examples/tests/vlan_network.yaml" collect_scripts = TestNetworkAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D dpkg-query -W -f '${Status}' vlan > vlan_installed ip -d link show interface1.2667 > ip_link_show_interface1.2667 ip -d link show interface1.2668 > ip_link_show_interface1.2668 ip -d link show interface1.2669 > ip_link_show_interface1.2669 ip -d link show interface1.2670 > ip_link_show_interface1.2670 """)] def get_vlans(self): network_state = self.get_network_state() logger.debug('get_vlans ns:\n{}'.format( yaml.dump(network_state, default_flow_style=False, indent=4))) interfaces = network_state.get('interfaces') return [iface for iface in interfaces.values() if iface['type'] == 'vlan'] def test_output_files_exist_vlan(self): link_files = ["ip_link_show_{}".format(vlan['name']) for vlan in self.get_vlans()] self.output_files_exist(["vlan_installed"] + link_files) def test_vlan_installed(self): with open(os.path.join(self.td.collect, "vlan_installed")) as fp: status = fp.read().strip() logger.debug('vlan installed?: {}'.format(status)) self.assertEqual('install ok installed', status) def test_vlan_enabled(self): # we must have at least one self.assertGreaterEqual(len(self.get_vlans()), 1) # did they get configured? for vlan in self.get_vlans(): link_file = "ip_link_show_" + vlan['name'] vlan_msg = "vlan protocol 802.1Q id " + str(vlan['vlan_id']) self.check_file_regex(link_file, vlan_msg) class TestNetworkENISource(TestNetworkAbs): """ Curtin now emits a source /etc/network/interfaces.d/*.cfg line. This test exercises this feature by emitting additional network configuration in /etc/network/interfaces.d/eth2.cfg This relies on the network_config.yaml of the TestClass to define a spare nic with no configuration. 
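    For illustration (hypothetical addresses, not the exact file under
    test), an injected /etc/network/interfaces.d/eth2.cfg might contain:

        auto eth2
        iface eth2 inet static
            address 10.0.2.100/24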
This ensures that a udev rule for eth2 is emitted so we can reference the interface in our injected configuration. Note, ifupdown allows multiple stanzas with the same iface name and combines the options together during ifup. We rely on this feature allowing etc/network/interfaces to have an unconfigured iface eth2 inet manual line, and then defer the configuration to /etc/network/interfaces.d/eth2.cfg This testcase then uses curtin.net.deb_parse_config method to extract information about what curtin wrote and compare that with what was actually configured (which we capture via ifconfig) """ conf_file = "examples/tests/network_source.yaml" collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D ifconfig -a > ifconfig_a cp -av /etc/network/interfaces . cp -a /etc/network/interfaces.d . find /etc/network/interfaces.d > find_interfacesd cp /etc/resolv.conf . cp -av /etc/udev/rules.d/70-persistent-net.rules . ip -o route show > ip_route_show route -n > route_n """)] def test_source_cfg_exists(self): """Test that our curthooks wrote our injected config.""" self.output_files_exist(["interfaces.d/interface2.cfg"]) def test_etc_network_interfaces_source_cfg(self): """ Compare injected configuration as parsed by curtin matches how ifup configured the interface.""" # interfaces uses absolute paths, fix for test-case interfaces = os.path.join(self.td.collect, "interfaces") cmd = ['sed', '-i.orig', '-e', 's,/etc/network/,,g', '{}'.format(interfaces)] subprocess.check_call(cmd, stderr=subprocess.STDOUT) curtin_ifaces = self.parse_deb_config(interfaces) logger.debug('parsed eni dict:\n{}'.format( yaml.dump(curtin_ifaces, default_flow_style=False, indent=4))) print('parsed eni dict:\n{}'.format( yaml.dump(curtin_ifaces, default_flow_style=False, indent=4))) with open(os.path.join(self.td.collect, "ifconfig_a")) as fp: ifconfig_a = fp.read() logger.debug('ifconfig -a:\n{}'.format(ifconfig_a)) ifconfig_dict = helpers.ifconfig_to_dict(ifconfig_a) logger.debug('parsed ifconfig dict:\n{}'.format( yaml.dump(ifconfig_dict, default_flow_style=False, indent=4))) print('parsed ifconfig dict:\n{}'.format( yaml.dump(ifconfig_dict, default_flow_style=False, indent=4))) iface = 'interface2' self.assertTrue(iface in curtin_ifaces) expected_address = curtin_ifaces[iface].get('address', None) self.assertIsNotNone(expected_address) # handle CIDR notation def _nocidr(addr): return addr.split("/")[0] actual_address = ifconfig_dict[iface].get('address', "") self.assertEqual(_nocidr(expected_address), _nocidr(actual_address)) class PreciseHWETTestNetwork(relbase.precise_hwe_t, TestNetworkAbs): # FIXME: off due to hang at test: Starting execute cloud user/final scripts __test__ = False class PreciseHWETTestNetworkStatic(relbase.precise_hwe_t, TestNetworkStaticAbs): # FIXME: off due to hang at test: Starting execute cloud user/final scripts __test__ = False class TrustyTestNetwork(relbase.trusty, TestNetworkAbs): __test__ = True class TrustyTestNetworkStatic(relbase.trusty, TestNetworkStaticAbs): __test__ = True class TrustyHWEUTestNetwork(relbase.trusty_hwe_u, TrustyTestNetwork): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEUTestNetworkStatic(relbase.trusty_hwe_u, TestNetworkStaticAbs): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEVTestNetwork(relbase.trusty_hwe_v, TrustyTestNetwork): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class 
TrustyHWEVTestNetworkStatic(relbase.trusty_hwe_v, TestNetworkStaticAbs): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEWTestNetwork(relbase.trusty_hwe_w, TrustyTestNetwork): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class TrustyHWEWTestNetworkStatic(relbase.trusty_hwe_w, TestNetworkStaticAbs): # Working, off by default to safe test suite runtime, covered by bonding __test__ = False class WilyTestNetwork(relbase.wily, TestNetworkAbs): __test__ = True class WilyTestNetworkStatic(relbase.wily, TestNetworkStaticAbs): __test__ = True class XenialTestNetwork(relbase.xenial, TestNetworkAbs): __test__ = True class XenialTestNetworkStatic(relbase.xenial, TestNetworkStaticAbs): __test__ = True class YakketyTestNetwork(relbase.yakkety, TestNetworkAbs): __test__ = True class YakketyTestNetworkStatic(relbase.yakkety, TestNetworkStaticAbs): __test__ = True class PreciseTestNetworkVlan(relbase.precise, TestNetworkVlanAbs): __test__ = True # precise ip -d link show output is different (of course) def test_vlan_enabled(self): # we must have at least one self.assertGreaterEqual(len(self.get_vlans()), 1) # did they get configured? for vlan in self.get_vlans(): link_file = "ip_link_show_" + vlan['name'] vlan_msg = "vlan id " + str(vlan['vlan_id']) self.check_file_regex(link_file, vlan_msg) class TrustyTestNetworkVlan(relbase.trusty, TestNetworkVlanAbs): __test__ = True class WilyTestNetworkVlan(relbase.wily, TestNetworkVlanAbs): __test__ = True class XenialTestNetworkVlan(relbase.xenial, TestNetworkVlanAbs): __test__ = True class YakketyTestNetworkVlan(relbase.yakkety, TestNetworkVlanAbs): __test__ = True class PreciseTestNetworkENISource(relbase.precise, TestNetworkENISource): __test__ = False # not working, still debugging though; possible older ifupdown doesn't # like the multiple iface method. class TrustyTestNetworkENISource(relbase.trusty, TestNetworkENISource): __test__ = True class WilyTestNetworkENISource(relbase.wily, TestNetworkENISource): __test__ = True class XenialTestNetworkENISource(relbase.xenial, TestNetworkENISource): __test__ = True class YakketyTestNetworkENISource(relbase.yakkety, TestNetworkENISource): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_nvme.py0000644000000000000000000000544612741214565017570 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import os import textwrap class TestNvmeAbs(VMBaseClass): arch_skip = [ "s390x", # nvme is a pci device, no pci on s390x ] interactive = False conf_file = "examples/tests/nvme.yaml" install_timeout = 600 boot_timeout = 120 extra_disks = [] nvme_disks = ['4G', '4G'] disk_to_check = [('main_disk', 1), ('main_disk', 2), ('main_disk', 15), ('nvme_disk', 1), ('nvme_disk', 2), ('nvme_disk', 3), ('second_nvme', 1)] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D ls /sys/class/ > sys_class ls /sys/class/nvme/ > ls_nvme ls /dev/nvme* > ls_dev_nvme ls /dev/disk/by-dname/ > ls_dname blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 blkid -o export /dev/vda2 > blkid_output_vda2 btrfs-show-super /dev/vdd > btrfs_show_super_vdd cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid cat /etc/fstab > fstab mkdir -p /dev/disk/by-dname ls /dev/disk/by-dname/ > ls_dname find /etc/network/interfaces.d > find_interfacesd v="" out=$(apt-config shell v Acquire::HTTP::Proxy) eval "$out" echo "$v" > apt-proxy """)] def test_output_files_exist(self): self.output_files_exist(["ls_nvme", "ls_dname", "ls_dev_nvme"]) def test_nvme_device_names(self): ls_nvme = os.path.join(self.td.collect, 'ls_nvme') # trusty and vivid do not have /sys/class/nvme, but # nvme devices do work if os.path.getsize(ls_nvme) > 0: self.check_file_strippedline("ls_nvme", "nvme0") self.check_file_strippedline("ls_nvme", "nvme1") else: self.check_file_strippedline("ls_dev_nvme", "/dev/nvme0") self.check_file_strippedline("ls_dev_nvme", "/dev/nvme1") class PreciseTestNvme(relbase.precise, TestNvmeAbs): __test__ = False # Precise kernel doesn't have NVMe support; a Trusty HWE kernel would class TrustyTestNvme(relbase.trusty, TestNvmeAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class WilyTestNvme(relbase.wily, TestNvmeAbs): __test__ = True class XenialTestNvme(relbase.xenial, TestNvmeAbs): __test__ = True class YakketyTestNvme(relbase.yakkety, TestNvmeAbs): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_raid5_bcache.py0000644000000000000000000000707412741214565021113 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import textwrap import os class TestMdadmAbs(VMBaseClass): interactive = False extra_disks = ['10G', '10G', '10G', '10G'] active_mdadm = "1" collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D cat /etc/fstab > fstab mdadm --detail --scan > mdadm_status mdadm --detail --scan | grep -c ubuntu > mdadm_active1 grep -c active /proc/mdstat > mdadm_active2 ls /dev/disk/by-dname > ls_dname find /etc/network/interfaces.d > find_interfacesd """)] def test_mdadm_output_files_exist(self): self.output_files_exist( ["fstab", "mdadm_status", "mdadm_active1", "mdadm_active2", "ls_dname"]) def test_mdadm_status(self): # ubuntu: is the name assigned to the md array self.check_file_regex("mdadm_status", r"ubuntu:[0-9]*") self.check_file_strippedline("mdadm_active1", self.active_mdadm) self.check_file_strippedline("mdadm_active2", self.active_mdadm) class TestMdadmBcacheAbs(TestMdadmAbs): conf_file = "examples/tests/raid5bcache.yaml" disk_to_check = [('md0', 0), ('sda', 2)] collect_scripts = TestMdadmAbs.collect_scripts + [textwrap.dedent(""" cd OUTPUT_COLLECT_D bcache-super-show /dev/vda2 > bcache_super_vda2 ls /sys/fs/bcache > bcache_ls cat /sys/block/bcache0/bcache/cache_mode > bcache_cache_mode cat /proc/mounts > proc_mounts cat /proc/partitions > proc_partitions find /etc/network/interfaces.d > find_interfacesd """)] fstab_expected = { '/dev/bcache0': '/', '/dev/md0': '/srv/data', } def test_bcache_output_files_exist(self): self.output_files_exist(["bcache_super_vda2", "bcache_ls", "bcache_cache_mode"]) def test_bcache_status(self): bcache_cset_uuid = None fname = os.path.join(self.td.collect, "bcache_super_vda2") with open(fname, "r") as fp: for line in fp.read().splitlines(): if line != "" and line.split()[0] == "cset.uuid": bcache_cset_uuid = line.split()[-1].rstrip() self.assertIsNotNone(bcache_cset_uuid) with open(os.path.join(self.td.collect, "bcache_ls"), "r") as fp: self.assertTrue(bcache_cset_uuid in fp.read().splitlines()) def test_bcache_cachemode(self): self.check_file_regex("bcache_cache_mode", r"\[writeback\]") class PreciseHWETTestRaid5Bcache(relbase.precise_hwe_t, TestMdadmBcacheAbs): # FIXME: off due to failing install: RUN_ARRAY failed: Invalid argument __test__ = False class TrustyTestRaid5Bcache(relbase.trusty, TestMdadmBcacheAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. disk_to_check = [('md0', 0)] class TrustyHWEUTestRaid5Bcache(relbase.trusty_hwe_u, TrustyTestRaid5Bcache): __test__ = True class TrustyHWEVTestRaid5Bcache(relbase.trusty_hwe_v, TrustyTestRaid5Bcache): __test__ = True class TrustyHWEWTestRaid5Bcache(relbase.trusty_hwe_w, TrustyTestRaid5Bcache): __test__ = False class WilyTestRaid5Bcache(relbase.wily, TestMdadmBcacheAbs): __test__ = True class XenialTestRaid5Bcache(relbase.xenial, TestMdadmBcacheAbs): __test__ = True class YakketyTestRaid5Bcache(relbase.yakkety, TestMdadmBcacheAbs): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_simple.py0000644000000000000000000000225012741214565020102 0ustar 00000000000000from . 
import VMBaseClass from .releases import base_vm_classes as relbase import textwrap class TestSimple(VMBaseClass): # Test that curtin with no config does the right thing conf_file = "examples/tests/simple.yaml" extra_disks = [] extra_nics = [] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D sfdisk --list > sfdisk_list for d in /dev/[sv]d[a-z] /dev/xvd?; do [ -b "$d" ] || continue echo == $d == sgdisk --print $d done > sgdisk_list blkid > blkid cat /proc/partitions > proc_partitions cp /etc/network/interfaces interfaces if [ -f /var/log/cloud-init-output.log ]; then cp /var/log/cloud-init-output.log . fi cp /var/log/cloud-init.log . find /etc/network/interfaces.d > find_interfacesd """)] def test_output_files_exist(self): self.output_files_exist(["sfdisk_list", "blkid", "proc_partitions", "interfaces"]) class TrustyTestSimple(relbase.trusty, TestSimple): __test__ = True class XenialTestSimple(relbase.xenial, TestSimple): __test__ = True curtin-0.1.0~bzr399/tests/vmtests/test_uefi_basic.py0000644000000000000000000001046712741214565020713 0ustar 00000000000000from . import (VMBaseClass) from .releases import base_vm_classes as relbase import os import textwrap class TestBasicAbs(VMBaseClass): interactive = False arch_skip = ["s390x"] conf_file = "examples/tests/uefi_basic.yaml" extra_disks = [] uefi = True disk_to_check = [('main_disk', 1), ('main_disk', 2)] collect_scripts = [textwrap.dedent(""" cd OUTPUT_COLLECT_D blkid -o export /dev/vda > blkid_output_vda blkid -o export /dev/vda1 > blkid_output_vda1 blkid -o export /dev/vda2 > blkid_output_vda2 cat /proc/partitions > proc_partitions ls -al /dev/disk/by-uuid/ > ls_uuid cat /etc/fstab > fstab mkdir -p /dev/disk/by-dname ls /dev/disk/by-dname/ > ls_dname find /etc/network/interfaces.d > find_interfacesd ls /sys/firmware/efi/ > ls_sys_firmware_efi cat /sys/class/block/vda/queue/logical_block_size > vda_lbs cat /sys/class/block/vda/queue/physical_block_size > vda_pbs blockdev --getsz /dev/vda > vda_blockdev_getsz blockdev --getss /dev/vda > vda_blockdev_getss blockdev --getpbsz /dev/vda > vda_blockdev_getpbsz blockdev --getbsz /dev/vda > vda_blockdev_getbsz """)] def test_output_files_exist(self): self.output_files_exist( ["blkid_output_vda", "blkid_output_vda1", "blkid_output_vda2", "fstab", "ls_dname", "ls_uuid", "ls_sys_firmware_efi", "proc_partitions"]) def test_sys_firmware_efi(self): sys_efi_expected = [ 'config_table', 'efivars', 'fw_platform_size', 'fw_vendor', 'runtime', 'runtime-map', 'systab', 'vars', ] sys_efi = os.path.join(self.td.collect, "ls_sys_firmware_efi") if os.path.exists(sys_efi): with open(sys_efi) as fp: efi_lines = fp.read().strip().split('\n') self.assertEqual(sorted(sys_efi_expected), sorted(efi_lines)) def test_disk_block_sizes(self): """ Test that the disk logical and physical block sizes match the class disk_block_size. 
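The *4k subclasses at the bottom of this module override disk_block_size to 4096.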
""" for bs in ['lbs', 'pbs']: with open(os.path.join(self.td.collect, 'vda_' + bs), 'r') as fp: size = int(fp.read()) self.assertEqual(self.disk_block_size, size) def test_disk_block_size_with_blockdev(self): """ validate maas setting --getsz get size in 512-byte sectors --getss get logical block (sector) size --getpbsz get physical block (sector) size --getbsz get blocksize """ for syscall in ['getss', 'getpbsz']: with open(os.path.join(self.td.collect, 'vda_blockdev_' + syscall), 'r') as fp: size = int(fp.read()) self.assertEqual(self.disk_block_size, size) class PreciseUefiTestBasic(relbase.precise, TestBasicAbs): __test__ = True def test_ptable(self): print("test_ptable does not work for Precise") def test_dname(self): print("test_dname does not work for Precise") class TrustyUefiTestBasic(relbase.trusty, TestBasicAbs): __test__ = True # FIXME(LP: #1523037): dname does not work on trusty, so we cannot expect # sda-part2 to exist in /dev/disk/by-dname as we can on other releases # when dname works on trusty, then we need to re-enable by removing line. def test_dname(self): print("test_dname does not work for Trusty") def test_ptable(self): print("test_ptable does not work for Trusty") class WilyUefiTestBasic(relbase.wily, TestBasicAbs): __test__ = True class XenialUefiTestBasic(relbase.xenial, TestBasicAbs): __test__ = True class YakketyUefiTestBasic(relbase.yakkety, TestBasicAbs): __test__ = True class PreciseUefiTestBasic4k(PreciseUefiTestBasic): disk_block_size = 4096 class TrustyUefiTestBasic4k(TrustyUefiTestBasic): disk_block_size = 4096 class WilyUefiTestBasic4k(WilyUefiTestBasic): disk_block_size = 4096 class XenialUefiTestBasic4k(XenialUefiTestBasic): disk_block_size = 4096 class YakketyUefiTestBasic4k(YakketyUefiTestBasic): disk_block_size = 4096 curtin-0.1.0~bzr399/tests/vmtests/test_vmtests.py0000644000000000000000000000201412741214565020314 0ustar 00000000000000""" This just tests the vmtest harness. Useful for quickly running multiple tests that can pass or fail. To see these tests fail, run: CURTIN_VMTEST_DEBUG_ALLOW_FAIL=1 nosetests3 tests/vmtests/test_vmtests.py """ from . 
import (PsuedoVMBaseClass) from .releases import base_vm_classes as relbase class PsuedoBase(PsuedoVMBaseClass): # Just present to show structure used in other tests pass class PsuedoTestAllPass(relbase.trusty, PsuedoBase): __test__ = True # These boot_results would cause first_boot failure # boot_results = { # 'install': {'timeout': 0, 'exit': 0}, # 'first_boot': {'timeout': 0, 'exit': 1}, # } def test_pass(self): pass def test_pass2(self): pass class PsuedoTestMixedPassAndFail(relbase.xenial, PsuedoBase): __test__ = True def test_pass(self): pass def test_fail(self): self._maybe_raise(Exception("This failed.")) def test_fail2(self): self._maybe_raise(Exception("This second test failed.")) curtin-0.1.0~bzr399/tools/build-deb0000755000000000000000000000356712741214565015262 0ustar 00000000000000#!/bin/sh set -e sourcename="curtin" TEMP_D="" UNCOMMITTED=${UNCOMMITTED:-0} fail() { echo "$@" 1>&2; exit 1; } cleanup() { [ -z "$TEMP_D" ] || rm -Rf "$TEMP_D" } if [ "$1" = "-h" -o "$1" = "--help" ]; then cat <&2; exit 1; } cleanup() { [ -z "$TEMP_D" ] || rm -Rf "$TEMP_D" } export_uncommitted="" if [ "${UNCOMMITTED:-0}" != "0" ]; then export_uncommitted="--uncommitted" fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; } TEMP_D=$(mktemp -d) trap cleanup EXIT case "${1:-HEAD}" in tag:*) version="${1#tag:}";; HEAD) revno="$(bzr revno)"; revargs="-r $revno";; [0-9]*) revno="$1" ; revargs="-r $1";; esac output="$2" if [ -z "$version" ]; then bzr cat $revargs debian/changelog.trunk > "$TEMP_D/clog" || fail "failed to extract debian/change.log.trunk at $revargs" clogver_o=$(sed -n '1s,.*(\([^)]*\)).*,\1,p' $TEMP_D/clog) clogver_upstream=${clogver_o%%-*} mmm=${clogver_o%%~*} version="$mmm~bzr$revno" fi if [ -z "$output" ]; then output="$sourcename-$version.tar.gz" fi bzr export ${export_uncommitted} \ --format=tgz --root="$sourcename-${version}" $revargs $output echo "wrote $output" curtin-0.1.0~bzr399/tools/jenkins-runner0000755000000000000000000000251012741214565016366 0ustar 00000000000000#!/bin/bash topdir=${CURTIN_VMTEST_TOPDIR:-"${WORKSPACE:-$PWD}/output"} pkeep=${CURTIN_VMTEST_KEEP_DATA_PASS:-logs,collect} fkeep=${CURTIN_VMTEST_KEEP_DATA_FAIL:-logs,collect} export CURTIN_VMTEST_KEEP_DATA_PASS=$pkeep export CURTIN_VMTEST_KEEP_DATA_FAIL=$fkeep export CURTIN_VMTEST_TOPDIR="$topdir" export CURTIN_VMTEST_LOG=${CURTIN_VMTEST_LOG:-"$topdir/debug.log"} export IMAGE_DIR=${IMAGE_DIR:-/srv/images} fail() { echo "$@" 1>&2; exit 1; } if [ -d "$topdir" ]; then fail "topdir '$topdir' existed." fi mkdir -p "$topdir" || fail "failed mkdir $topdir" if [ $# -eq 0 ]; then set -- -vv --nologcapture tests/vmtests/ fi start_s=$(date +%s) # dump CURTIN* variables just for info for v in ${!CURTIN_*}; do echo "$v=${!v}" done # avoid LOG info by running python3 tests/vmtests/image_sync.py # rather than python3 -m tests.vmtests.image_sync (LP: #1594465) echo "Working with images in $IMAGE_DIR" fmt=" %(release)-7s %(arch)s/%(subarch)s %(version_name)-10s" PYTHONPATH="$PWD" python3 tests/vmtests/image_sync.py query \ --output-format="$fmt" "$IMAGE_DIR" ftype=root-image.gz || { echo "WARNING: error querying images in $IMAGE_DIR" 1>&2; } echo "$(date -R): vmtest start: nosetests3 $*" nosetests3 "$@" ret=$? 
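# capture the nosetests3 exit code; elapsed time is reported below before exiting with it so the CI job reflects the test result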
end_s=$(date +%s) echo "$(date -R): vmtest end [$ret] in $(($end_s-$start_s))s" exit $ret # vi: ts=4 expandtab curtin-0.1.0~bzr399/tools/launch0000755000000000000000000005535512741214565014677 0ustar 00000000000000#!/bin/bash VERBOSITY=0 TEMP_D="" HTTP_PID="" XKVM_PID="" HTTP_PORT_TRIES=${HTTP_PORT_TRIES:-5} HTTP_PORT_MIN=${HTTP_PORT_MIN:-12000} HTTP_PORT_MAX=${HTTP_PORT_MAX:-65500} MY_D=$(dirname "$0") DEFAULT_ROOT_ARG="root=LABEL=cloudimg-rootfs" error() { echo "$@" 1>&2; } Usage() { cat <<EOF Usage: ${0##*/} [ options ] boot-image [ cmd-args ] -d | --disk DISK[,<bsize>][,<opts>] bsize defaults to 512b sector size opts is a comma delimited list of property=value elements. Examine qemu-kvm -device scsi-hd,? for details. --uefi N enable uefi boot method, store nvram at N -h | --help show this message -i | --initrd F use initramfs F -k | --kernel F use kernel K --mem K memory in Kb -n | --netdev netdev can be 'user' or a bridge -p | --publish F make file 'F' available in web server --silent use -nographic --vnc D use -vnc D (mutually exclusive with --silent) directly through to qemu-system. Note, qemu adds 5900 to port numbers. (:0 = port 5900) --serial-log F : log to F (default 'serial.log') --root-arg X pass 'X' through as the root= param when booting a kernel. default: $DEFAULT_ROOT_ARG -v | --verbose be more verbose --no-install-deps do not insert '--install-deps' on curtin command invocations use of --kernel/--initrd will seed cloud-init via cmdline rather than the local datasource Example: * boot myboot.img, and install my-root.tar.gz ${0##*/} myboot.img --publish my-root.tar.gz curtin \ install PUBURL/my-root.tar.gz EOF } bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; } cleanup() { local msg="" pid="" [ ! -d "$TEMP_D" ] || msg="${msg:+$msg }remove temp dir ${TEMP_D}." [ -z "$HTTP_PID" ] || msg="${msg:+$msg }kill http pid ${HTTP_PID}." [ -z "$XKVM_PID" ] || msg="${msg:+$msg }kill xkvm pid ${XKVM_PID}." debug 1 "cleaning up [${SECONDS}s].${msg:+ $msg}" [ -z "${TEMP_D}" -o ! 
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" for pid in ${XKVM_PID} ${HTTP_PID}; do kill $pid done } sighandle() { debug 1 "received $1" exit ${2:-1} } register_signal_handlers() { local cur for cur in TERM INT; do trap "sighandle $cur" "SIG$cur" done } debug() { local level=${1}; shift; [ "${level}" -gt "${VERBOSITY}" ] && return error "${@}" } get_my_ip() { [ -z "$IP_ADDR" ] || { _RET="${IP_ADDR}"; return 0; } local Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT local iface ipaddr="" tmpf="" # work around LP: #1483440 cp "/proc/net/route" "${TEMP_D}/current-route" while read Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT; do [ "$Mask" = "00000000" ] && break done < "${TEMP_D}/current-route" iface="$Iface" ipaddr=$(LC_ALL=C /sbin/ip -4 addr list dev "$iface" scope global) || return ipaddr=${ipaddr#* inet } ipaddr=${ipaddr%%/*} _RET="$ipaddr" } write_metadata() { cat < "$config1" < "$config2" <<"EOF" #upstart-job # precise does not do cloud-config poweroff description "power-state for precise" start on stopped cloud-final console output task script [ "$(lsb_release -sc)" = "precise" ] || exit 0 target="PSTATE" msg="precise-powerstate: $target" case "$target" in on) exit 0;; off|poweroff) shutdown -P now "$msg";; reboot) shutdown -r now "$msg";; *) echo "$msg : unknown target"; exit 1;; esac echo "$msg" exit 0 end script EOF } write_userdata() { local x cat </dev/null 2>&1 && return [ -x "$MY_D/xkvm" ] && PATH="$MY_D:$PATH" && return cat 1>&2 < "$pubdir/$statfile" ( set -eb; cd "$pubdir" && exec python -m SimpleHTTPServer "$port" ) >"$pubdir/ws.log" 2>&1 & hpid=$! HTTP_PID=$hpid # set so cleanup cleans up during wget debug 3 "checking web service [pid=$hpid] on $burl" found=$(env -u http_proxy wget -q --waitretry=0.4 --retry-connrefused \ --tries=10 "$burl/$statfile" --timeout=4 -O - 2>/dev/null) && [ "$found" = "$contents" ] && { _RET=$hpid return 0 } ret=$? kill $hpid && HTTP_PID="" return $ret } start_http() { # start_http(pubdir, ip, port="", tries=5) # starts a web service at 'port' that serves files in 'pubdir' # waits until it is verified to be listening at ip # if port is not provided, '$tries' random ports are tried. # # sets HTTP_PID and returns in _RET the port selected. local pubdir="$1" ip="$2" port="$3" tries="${4:-5}" i="" [ -z "$ip" ] && ip="localhost" local pmin="$HTTP_PORT_MIN" pmax="$HTTP_PORT_MAX" ret="" tried="" local ptries="" ptries=( ) if [ -n "$port" ]; then ptries=( $port ) elif [ $(($pmax-$pmin+1)) -le $tries ]; then # if tries spans the whole range, then just try them all local range=$(($pmax-$pmin+1)) for((i=0;i<$range;i++)); do ptries[$i]=$((pmin+$i)) done else for((i=0;i<$tries;i++)); do ptries[$i]="random" done fi for port in "${ptries[@]}"; do [ "$port" = "random" ] && port=$(($pmin+($RANDOM%($pmax+1-$pmin)))) debug 2 "trying http server $ip:$port" _start_http "$pubdir" "$ip" "$port" && HTTP_PID="$_RET" && _RET="$port" && debug 1 "serving $pubdir at http://$ip:$port/ in pid $HTTP_PID" && return 0 ret=$? 
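# this port did not come up (in use, or the server failed verification); note it and try the next candidate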
tried="$tried $port" done error "failed to start http on service on $ip tried ports: ${tried# }" return $ret } find_apt_proxy() { # pick an apt proxy for the guest local out="" # if user set uncommon 'apt_proxy', then trust it [ -n "$apt_proxy" ] && echo "$apt_proxy" && return 0 # see if the host has an apt proxy configured, and use it if command -v apt-config >/dev/null 2>&1; then out=$(apt-config shell x Acquire::HTTP::Proxy) && out=$(sh -c 'eval $1 && echo $x' -- "$out") && [ -n "$out" ] && echo "$out" && return fi return 1 } main() { local short_opts="a:A:d:h:i:k:n:p:v" local long_opts="add:,append:,arch:,bios:,disk:,dowait,help,initrd:,kernel:,mem:,netdev:,no-dowait,power:,publish:,root-arg:,silent,serial-log:,uefi:,verbose,vnc:" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { bad_Usage; return 1; } local seed="" local bootimg="" bootimg_dist="" target="" mem="1024" local udata="" ip="" http_port="${HTTP_PORT}" burl="" local tmp="" top_d local initrd="" kernel="" uappend="" iargs="" disk_args="" local pubs="" disks="" pstate="null" local uefi="" bios="" bsize="512" local netdevs="" install_deps="--install-deps" local arch_hint="" local video="-curses -vga std" serial_log="serial.log" local root_arg="$DEFAULT_ROOT_ARG" # dowait: run xkvm with a '&' and then 'wait' on the pid. # the reason to do this or not do this has to do with interactivity # if detached with &, then user input will not go to xkvm. # if *not* detached, then signal handling is blocked until # the foreground subprocess returns. which means we can't handle # a sigterm and kill xkvm. # We default to dowait=false if input and output are a terminal local dowait="" [ -t 0 -a -t 1 ] && dowait=false || dowait=true pubs=( ) disks=( ) addfiles=( ) netdevs=( ) pt=( ) # if output is to a terminal, then set dowait default to false [ -t 0 ] && dowait=false || dowait=true while [ $# -ne 0 ]; do cur=${1}; next=${2}; case "$cur" in --add) addfiles[${#addfiles[@]}]="$next"; shift;; -a|--append) uappend="$next"; shift;; -A|--arch) arch_hint="$next"; shift;; --bios) bios="$2"; shift;; -d|--disk) disks[${#disks[@]}]="$next"; shift;; --dowait) pt[${#pt[@]}]="$cur"; dowait=true;; -h|--help) Usage ; exit 0;; -i|--initrd) initrd="$next"; shift;; -k|--kernel) kernel="$next"; shift;; --mem) mem="$next"; shift;; -n|--netdev) netdevs[${#netdevs[@]}]="$next"; shift;; --no-dowait) pt[${#pt[@]}]="$cur"; dowait=false;; --no-install-deps) install_deps="";; --power) case "$next" in off) pstate="poweroff";; on|none) pstate="null";; reboot) pstate="$next";; *) error "Invalid power state, must be: off, on, reboot";; esac shift;; -p|--publish) pubs[${#pub[@]}]="$next"; shift;; --root-arg) root_arg="$next";; --serial-log) serial_log="$next"; shift;; --silent) video="-nographic";; --uefi) uefi="$2"; shift;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --vnc) video="-vnc $next" debug 1 "VNC requested - $next" shift;; --) shift; break;; esac shift; done # handle passing through '-v' if given local t="" for((i=0;i<${VERBOSITY};i++)); do t="${t}v"; done [ -n "$t" ] && pt[${#pt[@]}]="-$t" [ $# -ge 0 ] || { bad_Usage "must provide boot-image"; return 1; } bootimg_dist="$1" shift cmdargs=( "$@" ) xkvm_check || return TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || { error "failed to make tempdir"; return 1; } trap cleanup EXIT register_signal_handlers local bios_opts="" bios_opts=( ) if [ -n "$bios" ]; then bios_opts=( -drive 
"if=pflash,format=raw,file=$bios" ) elif [ -n "$uefi" ]; then case `lsb_release -sc` in precise|trusty|vivid) # for non-split UEFI firmware, the code and # var space are in the same file. We must # make a copy so we can retain modifications. local ovmf_code="/usr/share/ovmf/OVMF.fd" local ovmf_var=$ovmf_code ;; *) # anything newer than vivid has split UEFI firmware local ovmf_code="/usr/share/OVMF/OVMF_CODE.fd" local ovmf_var="/usr/share/OVMF/OVMF_VARS.fd" ;; esac [ -f "$ovmf_code" ] || { error "no --uefi requires ovmf bios: apt-get install ovmf" return 1; } # user specified where to write nvram data in --uefi param # pre-populate it with the OVMF_VARS.fd template local nvram=${uefi} cp -a "${ovmf_var}" "${nvram}" || { error "failed to create OVMF nvram file: '$nvram'" return 1; } # default to the rw copy of UEFI code local uefi_opts="-drive file=$nvram,if=pflash,format=raw" # if firmware is split, use readonly-code section if [ "$ovmf_code" != "$ovmf_var" ]; then # to ensure bootability, re-order firmware, code then variables uefi_opts="-drive file=$ovmf_code,if=pflash,format=raw,readonly $uefi_opts" fi bios_opts=( $uefi_opts ) fi if [ "${#disks[@]}" -eq 0 ]; then disks=( "${TEMP_D}/disk1.img" ) fi bootimg_dist=$(readlink -f "$bootimg_dist") || { error "bad bootimg $bootimg_dist"; return 1; } [ -z "$initrd" -o -f "$initrd" ] || { error "initrd not a file: $initrd"; return 1; } [ -z "$kernel" -o -f "$kernel" ] || { error "kernel not a file: $kernel"; return 1; } tmp=$(dirname "$0") && top_d=$(cd "$tmp" && cd .. && pwd) || { error "failed to get dir for $0"; return 1; } local disk="" src="" size="" fmt="" out="" id="" driver="" if="" local split_input="" serial="" disk_args=( ) id=1 for disk in "${disks[@]}"; do ((id++)) # 1=src # 2=src:size # 3=src:size:driver # 4=src:size:driver:bsize # 5=src:size:driver:bsize:devopts src=$(echo $disk | awk -F: '{print $1}') size=$(echo $disk | awk -F: '{print $2}') driver=$(echo $disk | awk -F: '{print $3}') bsize=$(echo $disk | awk -F: '{print $4}') devopts=$(echo $disk | awk -F: '{print $5}') if [ -z "${src}" ]; then error "Failed to provide disk source" exit 1 fi if [ -z "${size}" ]; then size=5G fi if [ -z "${driver}" ]; then driver="virtio-blk" fi if [ -z "${bsize}" ]; then bsize="512" fi if [ ! -f "$src" ]; then qemu-img create -f raw "${src}" "$size" || { error "failed create $src of size $size"; return 1; } fmt="raw" else out=$(LANG=C qemu-img info "$src") && fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') || { error "failed to determine format of $src"; return 1; } fi # prepend comma if passing devopts if [ -n "${devopts}" ]; then devopts=",${devopts}" fi # set logical/physical size blocksz is logical:phys local logbs=$(round_up ${bsize%%:*}) local phybs=$(round_up ${bsize##*:}) local bs_args="logical_block_size=$logbs" bs_args="${bs_args},physical_block_size=$phybs" bs_args="${bs_args},min_io_size=$logbs" disk_args=( "${disk_args[@]}" "-drive" "file=${src},if=none,cache=unsafe,format=$fmt,id=drv${id},index=$id" ) disk_args=( "${disk_args[@]}" "-device" "${driver},drive=drv${id},${bs_args}${devopts}" ) done get_my_ip || { error "failed to get your ip. 
set IP_ADDR"; return 1; } ip=${_RET} local tok src pub fpath # tok in pubs looks like file[:pubname] # link them into the temp dir for publishing for tok in "${pubs[@]}"; do case "$tok" in *:*) src="${tok%:*}"; pub="${tok##*:}";; *) src=${tok}; pub="";; esac fpath=$(readlink -f "$src") || { error "'$src': failed to get path"; return 1; } if [ -z "$pub" ]; then pub="${src##*/}" fi ln -sf "$fpath" "${TEMP_D}/${pub}" done start_http "${TEMP_D}" "$ip" "$http_port" "${HTTP_TRIES}" "${TEMP_D}/install-cmd" || { error "failed to pack"; return 1; } udata="${TEMP_D}/user-data" mdata="${TEMP_D}/meta-data" local ccfiles="" ccfiles=( ) if [ -n "${pstate}" ]; then write_pstate_config "$pstate" "${TEMP_D}/pstate.1" "${TEMP_D}/pstate.2" ccfiles[${#ccfiles[@]}]="${TEMP_D}/pstate.1" ccfiles[${#ccfiles[@]}]="${TEMP_D}/pstate.2" fi if tmp=$(find_apt_proxy); then debug 1 "using $tmp for proxy" printf '#cloud-config\napt_proxy: "%s"\n' "$tmp" > "${TEMP_D}/cc-proxy" ccfiles[${#ccfiles[@]}]="${TEMP_D}/cc-proxy" fi if command -v ssh-keys-list >/dev/null 2>&1; then ssh-keys-list cloud-config > "${TEMP_D}/cc-ssh-keys" && ccfiles[${#ccfiles[@]}]="${TEMP_D}/cc-ssh-keys" || { error "failed to get users ssh keys."; return 1; } fi write_metadata > "$mdata" || { error "failed to write meta-data"; return 1; } write_userdata "${TEMP_D}/install-cmd" "${ccfiles[@]}" > "$udata" || { error "failed to write user-data"; return 1; } bootimg="${TEMP_D}/boot.img" qemu-img create -f qcow2 -b "${bootimg_dist}" "$bootimg" 2G || { error "failed create from ${bootimg_dist}"; return 1; } local seedargs="" seedargs=() if [ -n "$kernel" ]; then local append="" root="" # Note: root_arg defaults to selecting the root device by LABEL. This assumes # a.) our root device is not multipath # b.) no other disks attached will have this LABEL # c.) the LABEL is in fact correct. # all of these assumptions are true under vmtest. if [ -z "$root_arg" ]; then debug 1 "WARN: root_arg is empty with kernel." 
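# with no root= the guest kernel will likely fail to mount its rootfs unless the initramfs can find it on its own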
fi append="${root_arg:+${root_arg} }ds=nocloud-net;seedfrom=$burl" local console_name="" case "${arch_hint}" in s390x) console_name="";; ppc64*) console_name="hvc0";; *) console_name="ttyS0";; esac if [ -n "$console_name" ]; then append="${append} console=${console_name}" fi append="${append} $uappend" seedargs=( "${seedargs[@]}" -kernel "$kernel" ) [ -n "$initrd" ] && seedargs=( "${seedargs[@]}" -initrd "$initrd" ) seedargs=( "${seedargs[@]}" -append "$append" ) else seed="${TEMP_D}/seed.img" cloud-localds "$seed" "$udata" "$mdata" || { error "failed cloud-localds"; return 1; } seedargs=( "-drive" "file=${seed},if=virtio,media=cdrom" ) fi local netargs netargs=( ) for dev in "${netdevs[@]}"; do netargs=( "${netargs[@]}" "--netdev=${dev}" ) done local cmd serial_args="" chardev_arg="" [ "${serial_log}" = "none" ] && serial_log="" if [ -n "${serial_log}" ]; then if [ "${arch_hint}" = "s390x" ]; then if [ "${serial_log}" = "stdio" ]; then chardev_arg="stdio" else chardev_arg="file,path=${serial_log}" fi serial_args="-nodefaults -chardev ${chardev_arg},id=charconsole0 -device sclpconsole,chardev=charconsole0,id=console0" else serial_args="-serial file:${serial_log}" #debug mode serial_args="-serial ${serial_log} -monitor stdio" fi fi # -monitor stdio cmd=( xkvm "${pt[@]}" "${netargs[@]}" -- "${bios_opts[@]}" -m ${mem} ${serial_args} ${video} -drive "file=$bootimg,if=none,cache=unsafe,format=qcow2,id=boot,index=0" -device "virtio-blk,drive=boot" "${disk_args[@]}" "${seedargs[@]}" ) debug 1 "running with dowait=$dowait: ${cmd[*]}" local sstart=$SECONDS if $dowait; then "${cmd[@]}" & XKVM_PID=$! debug 1 "xkvm pid: $XKVM_PID. launch pid: $$" wait "${XKVM_PID}" ret=$? XKVM_PID="" else "${cmd[@]}" ret=$? fi debug 1 "xkvm returned $ret took $(($SECONDS-$sstart))" return $ret } random_wwn() { # wwn must be a int64, less than (1 << 63) - 1 # we achieve this by combining 4 (1 << 15) ints printf "0x%04x%04x%04x%04x" $RANDOM $RANDOM $RANDOM $RANDOM } round_up() { local size="${1}" local multiple="${2:-512}" local max_size=$((32 * 1024)) # 32k max size=$(( (($size + $multiple - 1) / $multiple) * $multiple)) if [ $size -gt $max_size ]; then echo $max_size return elif [ $size -lt $multiple ]; then echo $multiple return fi echo $size } test_start_http() { # run this like: # HTTP_PORT_MIN=59000 HTTP_PORT_MAX=63001 ./tools/launch \ # /tmp/smfoo localhost VERBOSITY=3 trap cleanup EXIT register_signal_handlers echo "mypid: $$" start_http "$@" || { ret=$?; echo "returned $ret"; return $ret; } ret=$? port=$_RET echo "pid $HTTP_PID is serving on $port" sleep ${SLEEPTIME:-3} & XKVM_PID=$! wait $XKVM_PID ret=$? XKVM_PID="" return $ret } main "$@" # vi: ts=4 expandtab curtin-0.1.0~bzr399/tools/maas2roottar0000755000000000000000000001542312741214565016043 0ustar 00000000000000#!/bin/bash VERBOSITY=0 TEMP_D="" error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } Usage() { cat <&2; [ $# -eq 0 ] || error "$@"; return 1; } bad_create_Usage() { create_Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; } cleanup() { [ -z "${TEMP_D}" -o ! 
-d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" } debug() { local level=${1}; shift; [ "${level}" -gt "${VERBOSITY}" ] && return error "${@}" } hascmd() { local cmd="$1" envref="$2" if [ -n "${envref}" -a -n "${!envref}" ]; then cmd="${!envref}" fi if [ -x "$cmd" ]; then _RET="$cmd" elif command -v "$cmd" >/dev/null 2>&1; then _RET="$cmd" else return 1 fi } main() { local short_opts="hv" local long_opts="help,no-patch,delete-temp,krd,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { bad_Usage; return; } local keep=true cur="" next="" local sudo="" krd="" vflags="" while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in -h|--help) Usage ; exit 0;; --delete-temp) keep=false; shift;; --no-patch|--krd) pt[${#pt[@]}]="$cur";; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1)); vflags="${vflags}v";; --) shift; break;; esac shift; done [ $# -eq 1 -o $# -eq 2 ] || { bad_Usage "must provide 1 or 2 arguments"; return; } img_gz="$1" output="$2" if [ -z "$output" ]; then output=${img_gz%.gz} output=${output%.raw} output=${output%.img} output=${output}.tar.gz fi debug 1 "converting ${img_gz} to ${output}" TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || { error "failed to make tempdir"; return 1; } trap cleanup EXIT [ "$(id -u)" = "0" ] && sudo="" || sudo="sudo" [ -z "$vflags" ] || vflags="-${vflags}" [ -f "$img_gz" ] || { error "$img_gz: not a file" return 1 } local micb="" hascmd mount-image-callback MOUNT_IMAGE_CALLBACK && micb="$_RET" || { error "no mount-image-callback. install cloud-image-utils."; return 1; } if $(LANG=C file "$img_gz" | grep -qi "gzip"); then img="${TEMP_D}/root.img" if $keep; then img=${img_gz%.gz} [ "$img" = "$img_gz" ] && img="$img.raw" else img="${TEMP_D}/root.img" fi if [ -f "$img" ]; then debug 1 "re-using existing $img" else debug 1 "uncompressing $img" zcat "$img_gz" > "$img.$$" && mv "$img.$$" "$img" || { rm -f "$img.$$" error "failed uncompress $img" return 1 } fi else keep=true img="$img_gz" fi $sudo mount-image-callback "$img" -- "$0" create-tgz \ ${vflags} "${pt[@]}" _MOUNTPOINT_ "$output" || { error "failed mount-image-callback on $img for create-tgz" return 1 } $keep || rm -f "$img" || { error "failed to remove $img"; return 1; } } chownto() { [ -n "$1" ] || return 0 chown "$@" || { error "failed chown $@"; return 1; } } patch_root() { local rootd="$1" debug 1 "patching $rootd" local eni="$rootd/etc/network/interfaces" if [ -L "$eni" -o -e "$eni" ]; then rm -f "$eni.${0##*/}" mv "$eni" "$eni.${0##*/}" fi ( echo "auto lo"; echo "iface lo inet loopback"; echo "source /etc/network/interfaces.d/*.cfg" ) > "$eni" } create_tgz() { local short_opts="hv" local long_opts="help,no-patch,krd,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { bad_Usage; return; } local cur="" next="" patch=true krd=false local vflags="" while [ $# -ne 0 ]; do cur="$1"; next="$2"; case "$cur" in -h|--help) Usage ; exit 0;; --delete-temp) keep=false; shift;; --no-patch) patch=false;; --krd) krd=true;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1)); vflags="${vflags}v";; --) shift; break;; esac shift; done [ $# -eq 2 ] || { bad_create_Usage "expected 2 args, got $#: $*"; return; } [ "$(id -u)" = "0" ] && sudo="" || sudo="sudo" [ -z "$vflags" ] || vflags="-${vflags}" rootd="$1" output="$2" local chownto="" [ -n "$SUDO_UID" -a -n "$SUDO_GID" ] && chownto="$SUDO_UID:$SUDO_GID" if $patch; 
then patch_root "$rootd" || { error "failed to patch root"; return 1; } fi debug 1 "creating tarball in $output from $rootd" local gzflag="--use-compress-program=gzip" if command -v pigz >/dev/null 2>&1; then gzflag="--use-compress-program=pigz" fi tar -C "$rootd" -cpSf "$output" $gzflag \ --numeric-owner --xattrs "--xattrs-include=*" . || { error "failed tar command" rm -f "$output" return 1 } chownto "$chownto" "$output" || return local kernel initrd f="" if $krd; then local kfile="" ifile="" kernel="${output%.tar.*}-kernel" initrd="${output%.tar.*}-initrd" for f in "$rootd/boot/"*; do [ -f "$f" ] || continue case "${f##*/}" in vmlin*) kfile="$f";; initrd*) ifile="$f";; esac done [ -n "$ifile" ] || { error "failed to find initrd"; return 1; } [ -n "$kfile" ] || { error "failed to find kernel"; return 1; } cp "$ifile" "$initrd" && cp "$kfile" "$kernel" || { error "failed copy initrd or kernel"; return 1; } chownto "$chownto" "$kernel" "$initrd" || return fi debug 1 "wrote $output${kfile:+ $kernel}${initrd:+ $initrd}" } if [ "$1" = "create-tgz" -o "$1" = "create_tgz" ]; then shift create_tgz "$@" else main "$@" fi # vi: ts=4 expandtab curtin-0.1.0~bzr399/tools/noproxy0000644000000000000000000000054312741214565015135 0ustar 00000000000000#!/usr/bin/env python # # clean http_proxy variables from environment as they make httpretty # fail, but may be in the environment for reasons such as pip install import os import sys for k in ('http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY'): if k in os.environ: del os.environ[k] os.execvpe(sys.argv[1], sys.argv[1:], os.environ) curtin-0.1.0~bzr399/tools/report-webhook-logger0000755000000000000000000000506512741214565017652 0ustar 00000000000000#!/usr/bin/python3 try: # python2 import SimpleHTTPServer as http_server import SocketServer as socketserver except ImportError: import http.server as http_server import socketserver import json import sys EXAMPLE_CONFIG = """\ # example config reporting: mypost: type: webhook endpoint: %(endpoint)s install: log_file: /tmp/foo post_files: [/tmp/foo] # example python: from curtin.reporter import events, update_configuration cfg = {'mypost': {'type': 'webhook', 'endpoint': '%(endpoint)s'}} update_configuration(cfg) with events.ReportEventStack(name="myname", description="mydesc", reporting_enabled=True): print("do something") """ if len(sys.argv) > 2: PORT = int(sys.argv[2]) addr = sys.argv[1] elif len(sys.argv) > 1: PORT = int(sys.argv[1]) addr = "" else: PORT = 8000 addr = "" def render_event_string(event_str): return json.dumps(json.loads(event_str), indent=1) class ServerHandler(http_server.SimpleHTTPRequestHandler): def log_request(self, code, size=None): lines = [ "== %s %s ==" % (self.command, self.path), str(self.headers).replace('\r', '')] if self._message: lines.append(self._message) sys.stdout.write('\n'.join(lines) + '\n') sys.stdout.flush() def do_GET(self): self._message = None self.send_response(200) self.end_headers() self.wfile.write("content of %s\n" % self.path) def do_POST(self): length = int(self.headers['Content-Length']) post_data = self.rfile.read(length).decode('utf-8') try: self._message = render_event_string(post_data) except Exception as e: self._message = '\n'.join( ["failed printing event: %s" % e, post_data]) msg = "received post to %s" % self.path self.send_response(200) self.send_header("Content-type", "text/plain") self.end_headers() self.wfile.write(msg.encode('utf-8')) # avoid 'Address already in use' after ctrl-c socketserver.TCPServer.allow_reuse_address = True Handler = 
ServerHandler httpd = socketserver.TCPServer(("", PORT), Handler) httpd.allow_reuse_address = True info = { 'interface': addr or "localhost", 'port': PORT, 'endpoint': "http://" + (addr or "localhost") + ":%s" % PORT } print("Serving at: %(endpoint)s" % info) print("Post to this with:\n%s\n" % (EXAMPLE_CONFIG % info)) try: httpd.serve_forever() except KeyboardInterrupt: sys.stdout.flush() pass httpd.server_close() sys.exit(0) curtin-0.1.0~bzr399/tools/run-pep80000755000000000000000000000060412741214565015076 0ustar 00000000000000#!/bin/bash pycheck_dirs=( "curtin/" "tests/" ) bin_files=( ) CR=" " [ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose="" set -f if [ $# -eq 0 ]; then unset IFS IFS="$CR" files=( "${bin_files[@]}" "${pycheck_dirs[@]}" ) unset IFS else files=( "$@" ) fi myname=${0##*/} cmd=( "${myname#run-}" $verbose "${files[@]}" ) echo "Running: " "${cmd[@]}" 1>&2 exec "${cmd[@]}" curtin-0.1.0~bzr399/tools/run-pyflakes0000755000000000000000000000061412741214565016041 0ustar 00000000000000#!/bin/bash PYTHON_VERSION=${PYTHON_VERSION:-2} CR=" " vmtests="" if [ "$PYTHON_VERSION" = "3" ]; then vmtests="tests/vmtests/" fi pycheck_dirs=( "curtin/" "tests/unittests/" $vmtests ) set -f if [ $# -eq 0 ]; then files=( "${pycheck_dirs[@]}" ) else files=( "$@" ) fi cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" ) echo "Running: " "${cmd[@]}" 1>&2 exec "${cmd[@]}" curtin-0.1.0~bzr399/tools/run-pyflakes30000755000000000000000000000007412741214565016124 0ustar 00000000000000#!/bin/sh PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@" curtin-0.1.0~bzr399/tools/ssh-keys-list0000755000000000000000000000403712741214565016143 0ustar 00000000000000#!/usr/bin/python3 import argparse import subprocess import os.path import sys import json VALID_MODES = ( # one key per line 'dump', # suitable for use in cloud-config 'cloud-config', # suitable for use in cloud-localds meta-data 'meta-data', # simple json list of strings keys 'json' ) def jdump(data): return json.dumps(data, indent=2, sort_keys=True, separators=(',', ': ')) def read_pubkeys(): keys = [] try: out = subprocess.check_output(['ssh-add', '-L'], stderr=subprocess.STDOUT) if isinstance(out, bytes): out = out.decode() keys += out.splitlines() except Exception: pass ssh_dir = os.path.expanduser('~' + os.path.sep + '.ssh') keyfiles = ['id_ecdsa.pub', 'id_ed25519.pub', 'id_rsa.pub'] for f in [os.path.join(ssh_dir, f) for f in keyfiles]: if os.path.isfile(f): with open(f, "r") as fp: keys += fp.read().splitlines() return list(set(keys)) def format_pubkeys(mode, keys): if mode not in VALID_MODES: raise ValueError("unknown mode '%s'. 
Expected one of: %s" % (mode, ' '.join(VALID_MODES))) if mode == 'dump': if len(keys) != 0: data = '\n'.join(keys) + "\n" elif mode == "cloud-config" or mode == "meta-data": data = "" if mode == "cloud-config": data = "#cloud-config\n" name = 'ssh_authorized_keys' else: name = 'public-keys' data += name + ": " + jdump(keys) + "\n" elif mode == "json": return jdump(keys) + "\n" return data def main(): parser = argparse.ArgumentParser( description='Dump users public ssh keys in a variety of formats') parser.add_argument('mode', nargs='?', help='output mode', choices=VALID_MODES, default='dump') args = parser.parse_args() sys.stdout.write(format_pubkeys(args.mode, read_pubkeys())) if __name__ == '__main__': sys.exit(main()) curtin-0.1.0~bzr399/tools/tox-venv0000755000000000000000000000722312741214565015212 0ustar 00000000000000#!/bin/sh # https://gist.github.com/smoser/2d4100a6a5d230ca937f CR=' ' error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } get_env_dirs() { # read 'tox --showconfig'. return list of # envname:dir local key="" equal="" val="" curenv="" out="" while read key equal val; do case "$key" in "[testenv:"*) curenv=${key#*:}; curenv=${curenv%%"]"*}; continue;; esac if [ "${key#*=}" != "$key" ]; then # older tox shows key=value or key= value # newer tox shows: key = value key=${key%%=*} val=${equal} fi [ "$key" = "envdir" ] || continue out="${out:+${out}${CR}}${curenv}:$val" done echo "$out" } load_config() { local tox_ini="$1" out="" envs="" if [ "$tox_ini" = "${CACHED_ENVS_INI}" ]; then _RET="$CACHED_ENVS" return fi out=$(tox -c "$tox_ini" --showconfig) || return 1 envs=$(echo "$out" | get_env_dirs) || return 1 CACHED_ENVS="$envs" CACHED_ENVS_INI="$tox_ini" _RET="$envs" } list_environments() { local tox_ini="$1" prefix=" " out="" envs="" oifs="$IFS" load_config "$tox_ini" || return 1 envs="${_RET}" IFS="$CR" for d in ${envs}; do env=${d%%:*} dir=${d#*:} [ -f "$dir/bin/activate" ] && s="*" || s="" echo "${prefix}$env$s"; done IFS="$oifs" } get_env_dir() { local tox_ini="$1" env="$2" oifs="$IFS" t="" d="" envs="" if [ "${TOX_VENV_SHORTCUT:-1}" != "0" ]; then local stox_d="${tox_ini%/*}/.tox/${env}" if [ -e "${stox_d}/bin/activate" ]; then _RET="${stox_d}" return fi fi load_config "$tox_ini" && envs="$_RET" || return 1 IFS="$CR" for t in $envs; do [ "$env" = "${t%%:*}" ] && d="${t#*:}" && break done IFS=${oifs} [ -n "$d" ] || return 1 _RET="$d" } Usage() { local tox_ini="$1" cat <&2; exit 1; } [ "$1" = "-h" -o "$1" = "--help" ] && { Usage "$tox_ini"; exit 0; } [ -f "$tox_ini" ] || fail "$tox_ini: did not find tox.ini" if [ "$1" = "-l" -o "$1" = "--list" ]; then list_environments "$tox_ini" exit fi nocreate="false" if [ "$1" = "--no-create" ]; then nocreate="true" shift fi env="$1" shift get_env_dir "$tox_ini" "$env" && activate="$_RET/bin/activate" || activate="" if [ -z "$activate" -o ! -f "$activate" ]; then if $nocreate; then fail "tox env '$env' did not exist, and no-create specified" elif [ -n "$activate" ]; then error "attempting to create $env:" error " tox -c $tox_ini --recreate --notest -e $env" tox -c "$tox_ini" --recreate --notest -e "$env" || fail "failed creation of env $env" else error "$env: not a valid tox environment?" error "found tox_ini=$tox_ini" error "try one of:" list_environments "$tox_ini" 1>&2 fail fi fi . 
"$activate" [ "$#" -gt 0 ] || set -- ${SHELL:-/bin/bash} debian_chroot="tox:$env" exec "$@" # vi: ts=4 expandtab curtin-0.1.0~bzr399/tools/usquery0000755000000000000000000000571612741214565015146 0ustar 00000000000000#!/bin/bash # Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # # # https://github.com/smoser/talk-simplestreams/blob/master/bin/u-stool declare -A sdata CIU_COM="http://cloud-images.ubuntu.com" CIU_COM_R="$CIU_COM/releases" MU_COM="http://maas.ubuntu.com/images/ephemeral-v2/" CST="https://swift.canonistack.canonical.com/v1/AUTH_a48765cc0e864be980ee21ae26aaaed4" sdata=( [uc-release]="$CIU_COM_R/streams/v1/index.sjson" [uc-aws]="$CIU_COM_R/streams/v1/com.ubuntu.cloud:released:aws.sjson" [uc-azure]="$CIU_COM_R/streams/v1/com.ubuntu.cloud:released:azure.sjson" [uc-dl]="$CIU_COM_R/streams/v1/com.ubuntu.cloud:released:download.sjson" [uc-daily]="$CIU_COM/daily/streams/v1/index.sjson" [maas-release]="$MU_COM/releases/streams/v1/index.sjson" [maas-daily]="$MU_COM/daily/streams/v1/index.sjson" [cirros]="http://download.cirros-cloud.net/streams/v1/index.json" [cstack]="$CST/simplestreams/data/streams/v1/index.json" [luc-release]="./luc-release/streams/v1/index.json" [luc-aws]="./luc-release/streams/v1/com.ubuntu.cloud:released:aws.json" ) SPROG="sstream-query" case "$0" in *smirror) SPROG="sstream-mirror";; *squery) SPROG="sstream-query";; *) echo "Expect to be called usmirror or usquery, not ${0##*/}"; exit 1;; esac error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } Usage() { cat <&2; exit 1; } keyopt="" case "$url" in *.json) :;; *) keyopt="--keyring=/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg";; esac cmd=( "$SPROG" ${keyopt:+"${keyopt}"} "${opts[@]}" "${args[@]}" ) $dry && echo "${cmd[@]}" && exit "${cmd[@]}" curtin-0.1.0~bzr399/tools/vmtest-sync-images0000755000000000000000000000324112741214565017157 0ustar 00000000000000#!/usr/bin/python3 # This tool keeps a local copy of the maas images used by vmtests. # It keeps only the latest copy of the available images. import os import shutil import sys # Fix path so we can import ImageStore class. 
sys.path.insert(1, os.path.realpath(os.path.join( os.path.dirname(__file__), '..'))) from tests.vmtests import ( IMAGE_DIR, IMAGE_SRC_URL, sync_images) from tests.vmtests.image_sync import ITEM_NAME_FILTERS from tests.vmtests.helpers import find_releases from curtin.util import get_platform_arch DEFAULT_ARCH = get_platform_arch() if __name__ == '__main__': if len(sys.argv) > 1 and sys.argv[1] == "--clean": print("cleaning image dir %s" % IMAGE_DIR) for subd in (".vmtest-data", "streams"): fp = os.path.join(IMAGE_DIR, subd) if os.path.exists(fp): print(" removing %s" % subd) shutil.rmtree(fp) if os.path.exists(IMAGE_DIR): for dirpath, dirnames, filenames in os.walk(IMAGE_DIR): for f in filenames: if f.startswith("vmtest"): fpath = os.path.join(dirpath, f) print(" removing vmtest file %s" % fpath) os.unlink(fpath) arg_releases = [r for r in sys.argv[1:] if r != "--clean"] if len(arg_releases): releases = arg_releases else: releases = find_releases() release_filter = 'release~{}'.format('|'.join(releases)) my_filters = ['arch=' + DEFAULT_ARCH, release_filter] + ITEM_NAME_FILTERS # Sync images. sync_images(IMAGE_SRC_URL, IMAGE_DIR, filters=my_filters, verbosity=1) curtin-0.1.0~bzr399/tools/vmtest-system-setup0000755000000000000000000000275312741214565017431 0ustar 00000000000000#!/bin/bash _APT_UPDATED=false error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 2; } rel="$(lsb_release -sc)" case "$(uname -m)" in i?86|x86_64) qemu="qemu-system-x86";; ppc*) qemu="qemu-system-ppc";; s390x) qemu="qemu-system-s390x";; esac DEPS=( cloud-image-utils make python3 python3-nose python3-simplestreams python3-yaml ovmf simplestreams $qemu ubuntu-cloudimage-keyring ) apt_get() { local ret="" if [ "$1" != "update" ] && ! $_APT_UPDATED; then error "updating apt" apt_get update >/dev/null || { ret=$?; error "failed to update apt [$ret]"; return $ret; } _APT_UPDATED=true fi sudo DEBIAN_FRONTEND=noninteractive apt-get --quiet \ --assume-yes "$@" &1 | awk ' $0 ~ /[Nn]o packages/ { sub("[.]$","",$NF); pkgs[n]=$NF; n=n+1; } $2 == "" { pkgs[n]=$1; n=n+1; }; END { for(p in pkgs) {printf("%s ",pkgs[p])}; printf("\n"); }' n=0 } apt_install() { local needed needed=$(filter_installed_packages "$@") [ -z "$needed" ] && return 0 error "installing: $needed" apt_get install "$@" } apt_install "${DEPS[@]}" # vi: ts=4 expandtab curtin-0.1.0~bzr399/tools/write-curtin0000755000000000000000000000075112741214565016057 0ustar 00000000000000#!/usr/bin/python3 import os import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) from curtin.pack import write_exe_wrapper path = "bin/curtin" if len(sys.argv) > 1: path = sys.argv[1] if path == "-": parg = None else: parg = path ret = write_exe_wrapper(entrypoint="curtin.commands.main", path=parg, deps_check_entry="curtin.deps.check") if path == "-": sys.stdout.write(ret) sys.stderr.write("wrote to %s\n" % path) curtin-0.1.0~bzr399/tools/xkvm0000755000000000000000000004470512741214565014417 0ustar 00000000000000#!/bin/bash set -f VERBOSITY=0 KVM_PID="" DRY_RUN=false TEMP_D="" DEF_BRIDGE="virbr0" TAPDEVS=( ) # OVS_CLEANUP gets populated with bridge:devname pairs used with ovs OVS_CLEANUP=( ) MAC_PREFIX="52:54:00:12:34" KVM="kvm" declare -A KVM_DEVOPTS error() { echo "$@" 1>&2; } fail() { [ $# -eq 0 ] || error "$@"; exit 1; } bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; } randmac() { # return random mac addr within final 3 tokens local random="" random=$(printf "%02x:%02x:%02x" \ "$((${RANDOM}%256))" "$((${RANDOM}%256))" 
"$((${RANDOM}%256))") padmac "$random" } cleanup() { [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" [ -z "${KVM_PID}" ] || kill "$KVM_PID" if [ ${#TAPDEVS[@]} -ne 0 ]; then local name item for item in "${TAPDEVS[@]}"; do [ "${item}" = "skip" ] && continue debug 1 "removing" "$item" name="${item%:*}" if $DRY_RUN; then error ip tuntap del mode tap "$name" else ip tuntap del mode tap "$name" fi [ $? -eq 0 ] || error "failed removal of $name" done if [ ${#OVS_CLEANUP[@]} -ne 0 ]; then # with linux bridges, there seems to be no harm in just deleting # the device (not detaching from the bridge). However, with # ovs, you have to remove them from the bridge, or later it # will refuse to add the same name. error "cleaning up ovs ports: ${OVS_CLEANUP[@]}" if ${DRY_RUN}; then error sudo "$0" tap-control ovs-cleanup "${OVS_CLEANUP[@]}" else sudo "$0" tap-control ovs-cleanup "${OVS_CLEANUP[@]}" fi fi fi } debug() { local level=${1}; shift; [ "${level}" -gt "${VERBOSITY}" ] && return error "${@}" } Usage() { cat <&1) && out=$(echo "$out" | sed -e "s,[^.]*[.],," -e 's,=.*,,') && KVM_DEVOPTS[$model]="$out" || { error "bad device model $model?"; exit 1; } fi opts=( ${KVM_DEVOPTS[$model]} ) for opt in "${opts[@]}"; do [ "$input" = "$opt" ] && return 0 done return 1 } padmac() { # return a full mac, given a subset. # assume whatever is input is the last portion to be # returned, and fill it out with entries from MAC_PREFIX local mac="$1" num="$2" prefix="${3:-$MAC_PREFIX}" itoks="" ptoks="" # if input is empty set to :$num [ -n "$mac" ] || mac=$(printf "%02x" "$num") || return itoks=( ${mac//:/ } ) ptoks=( ${prefix//:/ } ) rtoks=( ) for r in ${ptoks[@]:0:6-${#itoks[@]}} ${itoks[@]}; do rtoks[${#rtoks[@]}]="0x$r" done _RET=$(printf "%02x:%02x:%02x:%02x:%02x:%02x" "${rtoks[@]}") } make_nics_Usage() { cat <: for each tap created # type is one of "ovs" or "brctl" local short_opts="v" local long_opts="--verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/} make-nics" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { make_nics_Usage 1>&2; return 1; } local cur="" next="" while [ $# -ne 0 ]; do cur=${1}; next=${2}; case "$cur" in -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --) shift; break;; esac shift; done [ $# -ne 0 ] || { make_nics_Usage 1>&2; error "must give bridge"; return 1; } local owner="" ovsbrs="" tap="" tapnum="0" brtype="" bridge="" [ "$(id -u)" = "0" ] || { error "must be root for make-nics"; return 1; } owner="${SUDO_USER:-root}" ovsbrs="" if command -v ovs-vsctl >/dev/null 2>&1; then out=$(ovs-vsctl list-br) out=$(echo "$out" | sed "s/\n/,/") ovsbrs=",$out," fi for bridge in "$@"; do [ "$bridge" = "user" ] && echo skip && continue [ "${ovsbrs#*,${bridge},}" != "$ovsbrs" ] && btype="ovs" || btype="brctl" tapnum=0; while [ -e /sys/class/net/tapvm$tapnum ]; do tapnum=$(($tapnum+1)); done tap="tapvm$tapnum" debug 1 "creating $tap:$btype on $bridge" 1>&2 ip tuntap add mode tap user "$owner" "$tap" || { error "failed to create tap '$tap' for '$owner'"; return 1; } ip link set "$tap" up 1>&2 || { error "failed to bring up $tap"; ip tuntap del mode tap "$tap"; return 1; } if [ "$btype" = "ovs" ]; then ovs-vsctl add-port "$bridge" "$tap" 1>&2 || { error "failed: ovs-vsctl add-port $bridge $tap"; ovs-vsctl del-port "$bridge" "$tap" return 1; } else ip link set "$tap" master "$bridge" 1>&2 || { error "failed to add tap '$tap' to '$bridge'" ip tuntap del mode tap "$tap"; return 1 } fi echo "$tap:$btype" done } ovs_cleanup() { [ "$(id -u)" 
= "0" ] || { error "must be root for ovs-cleanup"; return 1; } local item="" errors=0 # TODO: if get owner (SUDO_USERNAME) and if that isn't # the owner, then do not delete. for item in "$@"; do name=${item#*:} bridge=${item%:*} ovs-vsctl del-port "$bridge" "$name" || errors=$((errors+1)) done return $errors } quote_cmd() { local quote='"' x="" vline="" for x in "$@"; do if [ "${x#* }" != "${x}" ]; then if [ "${x#*$quote}" = "${x}" ]; then x="\"$x\"" else x="'$x'" fi fi vline="${vline} $x" done echo "$vline" } main() { local short_opts="hd:n:v" local long_opts="help,dowait,disk:,dry-run,kvm:,no-dowait,netdev:,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { bad_Usage; return 1; } local bridge="$DEF_BRIDGE" oifs="$IFS" local netdevs="" need_tap="" ret="" p="" i="" pt="" cur="" conn="" local kvm="" kvmcmd="" archopts="" local def_disk_driver=${DEF_DISK_DRIVER:-"virtio-blk"} local def_netmodel=${DEF_NETMODEL:-"virtio-net-pci"} archopts=( ) kvmcmd=( ) netdevs=( ) addargs=( ) diskdevs=( ) diskargs=( ) # dowait: run qemu-system with a '&' and then 'wait' on the pid. # the reason to do this or not do this has to do with interactivity # if detached with &, then user input will not go to xkvm. # if *not* detached, then signal handling is blocked until # the foreground subprocess returns. which means we can't handle # a sigterm and kill the qemu-system process. # We default to dowait=false if input and output are a terminal local dowait="" [ -t 0 -a -t 1 ] && dowait=false || dowait=true while [ $# -ne 0 ]; do cur=${1}; next=${2}; case "$cur" in -h|--help) Usage; exit 0;; -d|--disk) diskdevs[${#diskdevs[@]}]="$next"; shift;; --dry-run) DRY_RUN=true;; --kvm) kvm="$next"; shift;; -n|--netdev) netdevs[${#netdevs[@]}]=$next; shift;; -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; --dowait) dowait=true;; --no-dowait) dowait=false;; --) shift; break;; esac shift; done [ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" ) pt=( "$@" ) local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" [ -n "$kvm" ] && kvm_pkg="none" case $(uname -m) in i?86) [ -n "$kvm" ] || { kvm="qemu-system-i386"; kvm_pkg="qemu-system-x86"; } ;; x86_64) [ -n "$kvm" ] || { kvm="qemu-system-x86_64"; kvm_pkg="qemu-system-x86"; } ;; s390x) [ -n "$kvm" ] || { kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; } def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"} virtio_scsi_bus="virtio-scsi-ccw" ;; ppc64*) [ -n "$kvm" ] || { kvm="qemu-system-ppc64"; kvm_pkg="qemu-system-ppc"; } def_netmodel="virtio-net-pci" # virtio seems functional on in 14.10, but might want scsi here #def_diskif="scsi" archopts=( "${archopts[@]}" -machine pseries,usb=off ) archopts=( "${archopts[@]}" -device spapr-vscsi ) ;; *) kvm=qemu-system-$(uname -m);; esac KVM="$kvm" kvmcmd=( $kvm -enable-kvm ) local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts="" local busorindex="" driveopts="" cur="" val="" file="" for((i=0;i<${#diskdevs[@]};i++)); do cur=${diskdevs[$i]} IFS=","; set -- $cur; IFS="$oifs" driver="" id=$(printf "disk%02d" "$i") file="" fmt="" bus="" unit="" index="" serial="" for tok in "$@"; do [ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] && file="$tok" val=${tok#*=} case "$tok" in driver=*) driver=$val;; if=virtio) driver=virtio-blk;; if=scsi) driver=scsi-hd;; if=pflash) driver=;; if=sd|if=mtd|floppy) fail "do not know what to do with $tok on $cur";; id=*) id=$val;; file=*) file=$val;; fmt=*|format=*) fmt=$val;; serial=*) 
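# an explicit serial= token overrides the default serial (the image file basename) applied below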
main() {
    local short_opts="hd:n:v"
    local long_opts="help,dowait,disk:,dry-run,kvm:,no-dowait,netdev:,verbose"
    local getopt_out=""
    getopt_out=$(getopt --name "${0##*/}" \
        --options "${short_opts}" --long "${long_opts}" -- "$@") &&
        eval set -- "${getopt_out}" ||
        { bad_Usage; return 1; }

    local bridge="$DEF_BRIDGE" oifs="$IFS"
    local netdevs="" need_tap="" ret="" p="" i="" pt="" cur="" conn=""
    local kvm="" kvmcmd="" archopts=""
    local def_disk_driver=${DEF_DISK_DRIVER:-"virtio-blk"}
    local def_netmodel=${DEF_NETMODEL:-"virtio-net-pci"}

    archopts=( )
    kvmcmd=( )
    netdevs=( )
    addargs=( )
    diskdevs=( )
    diskargs=( )

    # dowait: run qemu-system with '&' and then 'wait' on the pid.
    # Whether or not to do this is about interactivity:
    #  - if detached with '&', user input will not go to xkvm.
    #  - if *not* detached, signal handling is blocked until the
    #    foreground subprocess returns, so we cannot handle a SIGTERM
    #    and kill the qemu-system process.
    # We default to dowait=false if stdin and stdout are a terminal.
    local dowait=""
    [ -t 0 -a -t 1 ] && dowait=false || dowait=true

    while [ $# -ne 0 ]; do
        cur=${1}; next=${2};
        case "$cur" in
            -h|--help) Usage; exit 0;;
            -d|--disk) diskdevs[${#diskdevs[@]}]="$next"; shift;;
            --dry-run) DRY_RUN=true;;
            --kvm) kvm="$next"; shift;;
            -n|--netdev) netdevs[${#netdevs[@]}]=$next; shift;;
            -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
            --dowait) dowait=true;;
            --no-dowait) dowait=false;;
            --) shift; break;;
        esac
        shift;
    done

    [ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" )

    pt=( "$@" )

    local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci"
    [ -n "$kvm" ] && kvm_pkg="none"
    case $(uname -m) in
        i?86)
            [ -n "$kvm" ] ||
                { kvm="qemu-system-i386"; kvm_pkg="qemu-system-x86"; }
            ;;
        x86_64)
            [ -n "$kvm" ] ||
                { kvm="qemu-system-x86_64"; kvm_pkg="qemu-system-x86"; }
            ;;
        s390x)
            [ -n "$kvm" ] ||
                { kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; }
            def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"}
            virtio_scsi_bus="virtio-scsi-ccw"
            ;;
        ppc64*)
            [ -n "$kvm" ] ||
                { kvm="qemu-system-ppc64"; kvm_pkg="qemu-system-ppc"; }
            def_netmodel="virtio-net-pci"
            # virtio seems functional in 14.10, but might want scsi here
            #def_diskif="scsi"
            archopts=( "${archopts[@]}" -machine pseries,usb=off )
            archopts=( "${archopts[@]}" -device spapr-vscsi )
            ;;
        *) kvm=qemu-system-$(uname -m);;
    esac
    KVM="$kvm"
    kvmcmd=( $kvm -enable-kvm )

    local out="" fmt="" bus="" unit="" index="" serial="" driver=""
    local devopts="" busorindex="" driveopts="" cur="" val="" file=""
    for((i=0;i<${#diskdevs[@]};i++)); do
        cur=${diskdevs[$i]}
        IFS=","; set -- $cur; IFS="$oifs"
        driver=""
        id=$(printf "disk%02d" "$i")
        file=""
        fmt=""
        bus=""
        unit=""
        index=""
        serial=""
        for tok in "$@"; do
            # a bare token naming an existing file is the disk image
            [ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] &&
                file="$tok"
            val=${tok#*=}
            case "$tok" in
                driver=*) driver=$val;;
                if=virtio) driver=virtio-blk;;
                if=scsi) driver=scsi-hd;;
                if=pflash) driver=;;
                if=sd|if=mtd|floppy)
                    fail "do not know what to do with $tok on $cur";;
                id=*) id=$val;;
                file=*) file=$val;;
                fmt=*|format=*) fmt=$val;;
                serial=*) serial=$val;;
                bus=*) bus=$val;;
                unit=*) unit=$val;;
                index=*) index=$val;;
            esac
        done
        [ -z "$file" ] && fail "did not read a file from $cur"
        if [ -f "$file" -a -z "$fmt" ]; then
            out=$(LANG=C qemu-img info "$file") &&
                fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') ||
                { error "failed to determine format of $file"; return 1; }
        elif [ -z "$fmt" ]; then
            fmt=raw
        fi
        if [ -z "$driver" ]; then
            driver="$def_disk_driver"
        fi
        if [ -z "$serial" ]; then
            serial="${file##*/}"
        fi

        # make sure we add either bus= or index=
        if [ -n "$bus" -o -n "$unit" ] && [ -n "$index" ]; then
            fail "bus and index cannot be specified together: $cur"
        elif [ -z "$bus" -a -z "$unit" -a -z "$index" ]; then
            index=$i
        elif [ -n "$bus" -a -z "$unit" ]; then
            unit=$i
        fi

        busorindex="${bus:+bus=$bus,unit=$unit}${index:+index=${index}}"
        diskopts="file=${file},id=$id,if=none,format=$fmt,$busorindex"
        devopts="$driver,drive=$id${serial:+,serial=${serial}}"
        for tok in "$@"; do
            case "$tok" in
                id=*|if=*|driver=*|$file|file=*) continue;;
                fmt=*|format=*) continue;;
                serial=*|bus=*|unit=*|index=*) continue;;
            esac
            isdevopt "$driver" "$tok" && devopts="${devopts},$tok" ||
                diskopts="${diskopts},${tok}"
        done

        diskargs=( "${diskargs[@]}" -drive "$diskopts" -device "$devopts" )
    done
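    # Illustrative expansion of the loop above (assumed, not captured
    # output): '--disk disk.img' for an existing raw image becomes:
    #   -drive file=disk.img,id=disk00,if=none,format=raw,index=0
    #   -device virtio-blk,drive=disk00,serial=disk.img
    # Tokens that isdevopt accepts for the driver are appended to the
    # -device options; all other tokens stay on -drive.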
    local mnics_vflag=""
    for((i=0;i<${VERBOSITY}-1;i++)); do mnics_vflag="${mnics_vflag}v"; done
    [ -n "$mnics_vflag" ] && mnics_vflag="-${mnics_vflag}"

    # now go through and split out options
    # -device virtio-net-pci,netdev=virtnet0,mac=52:54:31:15:63:02
    # -netdev type=tap,id=virtnet0,vhost=on,script=/etc/kvm/kvm-ifup.br0,downscript=no
    local netopts="" devopts="" id="" need_taps=0 model=""
    local device_args netdev_args
    device_args=( )
    netdev_args=( )
    connections=( )
    for((i=0;i<${#netdevs[@]};i++)); do
        id=$(printf "net%02d" "$i")
        netopts=""
        devopts=""
        # mac=auto is 'unspecified' (let qemu assign one)
        mac="auto"
        #vhost="off"

        IFS=","; set -- ${netdevs[$i]}; IFS="$oifs"
        bridge=$1; shift;
        if [ "$bridge" = "user" ]; then
            netopts="type=user"
            ntype="user"
            connections[$i]="user"
        else
            need_taps=1
            ntype="tap"
            netopts="type=tap"
            connections[$i]="$bridge"
        fi
        netopts="${netopts},id=$id"
        [ "$ntype" = "tap" ] && netopts="${netopts},script=no,downscript=no"

        model="${def_netmodel}"
        for tok in "$@"; do
            [ "${tok#model=}" = "${tok}" ] && continue
            case "${tok#model=}" in
                virtio) model=virtio-net-pci;;
                *) model=${tok#model=};;
            esac
        done

        for tok in "$@"; do
            case "$tok" in
                mac=*) mac="${tok#mac=}"; continue;;
                macaddr=*) mac=${tok#macaddr=}; continue;;
                model=*) continue;;
            esac
            isdevopt "$model" "$tok" && devopts="${devopts},$tok" ||
                netopts="${netopts},${tok}"
        done

        devopts=${devopts#,}
        netopts=${netopts#,}

        if [ "$mac" != "auto" ]; then
            [ "$mac" = "random" ] && randmac && mac="$_RET"
            padmac "$mac" "$i"
            devopts="${devopts:+${devopts},}mac=$_RET"
        fi
        devopts="$model,netdev=$id${devopts:+,${devopts}}"
        #netopts="${netopts},vhost=${vhost}"

        device_args[$i]="$devopts"
        netdev_args[$i]="$netopts"
    done

    trap cleanup EXIT

    reqs=( "$kvm" )
    pkgs=( "$kvm_pkg" )
    for((i=0;i<${#reqs[@]};i++)); do
        req=${reqs[$i]}
        pkg=${pkgs[$i]}
        [ "$pkg" = "none" ] && continue
        command -v "$req" >/dev/null || {
            missing="${missing:+${missing} }${req}"
            missing_pkgs="${missing_pkgs:+${missing_pkgs} }$pkg"
        }
    done
    if [ -n "$missing" ]; then
        local reply cmd=""
        cmd=( sudo apt-get --quiet install ${missing_pkgs} )
        error "missing prereqs: $missing"
        error "install them now with the following?: ${cmd[*]}"
        read reply && [ "$reply" = "y" -o "$reply" = "Y" ] ||
            { error "run: apt-get install ${missing_pkgs}"; return 1; }
        "${cmd[@]}" || { error "failed to install packages"; return 1; }
    fi

    if [ $need_taps -ne 0 ]; then
        local missing="" missing_pkgs="" reqs="" req="" pkgs="" pkg=""
        for i in "${connections[@]}"; do
            [ "$i" = "user" -o -e "/sys/class/net/$i" ] ||
                missing="${missing} $i"
        done
        [ -z "$missing" ] || {
            error "cannot create connection on ${missing# }."
            error "bridges do not exist."
            return 1;
        }
        error "creating tap devices: ${connections[*]}"
        if $DRY_RUN; then
            error "sudo $0 tap-control make-nics" \
                $mnics_vflag "${connections[@]}"
            taps=""
            for((i=0;i<${#connections[@]};i++)); do
                if [ "${connections[$i]}" = "user" ]; then
                    taps="${taps} skip"
                else
                    taps="${taps} dryruntap$i:brctl"
                fi
            done
        else
            taps=$(sudo "$0" tap-control make-nics \
                ${mnics_vflag} "${connections[@]}") ||
                { error "failed to make-nics ${connections[*]}"; return 1; }
        fi
        TAPDEVS=( ${taps} )

        for((i=0;i<${#TAPDEVS[@]};i++)); do
            cur=${TAPDEVS[$i]}
            [ "${cur#*:}" = "ovs" ] || continue
            conn=${connections[$i]}
            OVS_CLEANUP[${#OVS_CLEANUP[@]}]="${conn}:${cur%:*}"
        done

        debug 2 "tapdevs='${TAPDEVS[*]}'"
        [ ${#OVS_CLEANUP[@]} -eq 0 ] || error "OVS_CLEANUP='${OVS_CLEANUP[*]}'"

        for((i=0;i<${#TAPDEVS[@]};i++)); do
            cur=${TAPDEVS[$i]}
            [ "$cur" = "skip" ] && continue
            netdev_args[$i]="${netdev_args[$i]},ifname=${cur%:*}"
        done
    fi

    netargs=()
    for((i=0;i<${#device_args[@]};i++)); do
        netargs=( "${netargs[@]}" -device "${device_args[$i]}"
            -netdev "${netdev_args[$i]}" )
    done

    local bus_devices
    bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
    cmd=( "${kvmcmd[@]}" "${archopts[@]}"
        "${bus_devices[@]}"
        "${netargs[@]}"
        "${diskargs[@]}"
        "${pt[@]}" )
    local pcmd=$(quote_cmd "${cmd[@]}")
    error "$pcmd"
    ${DRY_RUN} && return 0

    if $dowait; then
        "${cmd[@]}" &
        KVM_PID=$!
        debug 1 "kvm pid=$KVM_PID. my pid=$$"
        wait
        ret=$?
        KVM_PID=""
    else
        "${cmd[@]}"
        ret=$?
    fi
    return $ret
}

if [ "$1" = "tap-control" ]; then
    shift
    mode=$1
    shift || fail "must give mode to tap-control"
    case "$mode" in
        make-nics) make_nics "$@";;
        ovs-cleanup) ovs_cleanup "$@";;
        *) fail "tap mode must be either make-nics or ovs-cleanup";;
    esac
else
    main "$@"
fi

# vi: ts=4 expandtab
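# Example invocations (illustrative; bridge and image names are assumptions):
#   xkvm --netdev br0,mac=random --disk disk.img -- -m 1024 -curses
#   xkvm --dry-run --netdev user -- -m 512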