RBTools-0.7.11/ez_setup.py

#!python
"""Bootstrap setuptools installation

If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::

    from ez_setup import use_setuptools
    use_setuptools()

If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.

This file can also be run as a script to install or upgrade setuptools.
"""
import sys

DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]

md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}

import sys, os
try:
    from hashlib import md5
except ImportError:
    from md5 import md5


def _validate_md5(egg_name, data):
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed!  (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data


def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where setuptools will be
    downloaded, if it is not already available.

    If `download_delay` is specified, it should be the number of seconds that
    will be paused before initiating a download, should one be required.

    If an older version of setuptools is installed, this routine will print a
    message to ``sys.stderr`` and raise SystemExit in an attempt to abort the
    calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules

    def do_download():
        egg = download_setuptools(version, download_base, to_dir,
                                  download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg

    try:
        import pkg_resources
    except ImportError:
        return do_download()

    try:
        pkg_resources.require("setuptools>=" + version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            print >>sys.stderr, (
                "The required version of setuptools (>=%s) is not available, and\n"
                "can't be installed while this script is running. Please install\n"
                " a more recent version first, using 'easy_install -U setuptools'."
                "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
    except pkg_resources.DistributionNotFound:
        pass

    del pkg_resources, sys.modules['pkg_resources']  # reload ok
    return do_download()


def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay=15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    import urllib2
    egg_name = "setuptools-%s-py%s.egg" % (version, sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None

    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display help).
I will attempt to download it for you (from %s), but you may need to
enable firewall access for this script first.

I will start the download in %d seconds.

(Note: if this machine does not have network access, please obtain the file

   %s

and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()

    return os.path.realpath(saveto)


def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0, egg)
            from setuptools.command.easy_install import main
            return main(list(argv) + [egg])  # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
                "You have an obsolete version of setuptools installed. Please\n"
                "remove it from your system entirely before rerunning this "
                "script."
            )
            sys.exit(2)

    req = "setuptools>=" + version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv) + [download_setuptools(delay=0)])
        sys.exit(0)  # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version", version, "or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'


def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re

    for name in filenames:
        base = os.path.basename(name)
        f = open(name, 'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb')
    src = f.read()
    f.close()

    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile, 'w')
    f.write(src)
    f.close()


if __name__ == '__main__':
    if len(sys.argv) > 2 and sys.argv[1] == '--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])

RBTools-0.7.11/README.md

About RBTools
=============

RBTools is a set of command line tools and a rich Python API for use with
[Review Board](https://www.reviewboard.org/).

These tools make it easy to post changes for review, keep them up-to-date,
land reviewed changes, test other people's changes in your own tree, check
your workload, and much more.

When using RBTools, you'll do almost everything through the
[rbt](https://www.reviewboard.org/docs/rbtools/latest/#rbt-command) command,
which supports a number of
[sub-commands](https://www.reviewboard.org/docs/rbtools/latest/rbt/commands/),
like
[post](https://www.reviewboard.org/docs/rbtools/latest/rbt/commands/post/#rbt-post),
[diff](https://www.reviewboard.org/docs/rbtools/latest/rbt/commands/diff/#rbt-diff),
[land](https://www.reviewboard.org/docs/rbtools/latest/rbt/commands/land/#rbt-land),
and
[patch](https://www.reviewboard.org/docs/rbtools/latest/rbt/commands/patch/#rbt-patch).


Installing RBTools
------------------

We provide native installers for Windows and macOS, along with Python packages
for Linux and other platforms. See the
[RBTools Downloads](https://www.reviewboard.org/downloads/rbtools/) page for
downloads and installation instructions.

See the
[RBTools documentation](https://www.reviewboard.org/docs/rbtools/latest/) for
more information.
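Once installed, a quick way to check that `rbt` can talk to your Review Board
server is to run a couple of the core sub-commands from a repository checkout.
This is a minimal sketch rather than an official install step; the review
request ID `42` is a placeholder for one on your own server:

```
$ rbt post          # Post your current change as a new review request
$ rbt post -r 42    # Upload a new diff to existing review request #42
$ rbt patch 42      # Apply review request #42's diff to your local tree
```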
Using the Python API
--------------------

The included Python API can be used to write scripts and applications that
interface with Review Board, and can also be used to write new commands for
RBTools. There's very little that you can't do with the Python API.

To learn more, see the
[RBTools Python API documentation](https://www.reviewboard.org/docs/rbtools/latest/api/)
and the
[Review Board API documentation](https://www.reviewboard.org/docs/manual/latest/webapi/).


Getting Support
---------------

We can help you get going with Review Board and RBTools, and diagnose any
issues that may come up. There are two levels of support: public community
support, and private premium support.

The public community support is available on our main
[discussion list](http://groups.google.com/group/reviewboard/). We generally
respond to requests within a couple of days. This support works well for
general, non-urgent questions that don't need to expose confidential
information.

We can also provide more
[dedicated, private support](https://www.beanbaginc.com/support/contracts/)
for your organization through a support contract. We offer same-day responses
(generally within a few hours, if not sooner), confidential communications,
installation/upgrade assistance, emergency database repair, phone/chat (by
appointment), priority fixes for urgent bugs, and backports of urgent fixes
to older releases (when possible).


Our Happy Users
---------------

There are thousands of companies and organizations using Review Board and
RBTools today. We respect the privacy of our users, but some have asked to be
featured on the [Happy Users page](https://www.reviewboard.org/users/).

If you're using Review Board, and you're a happy user,
[let us know!](https://groups.google.com/group/reviewboard/)


Reporting Bugs
--------------

Hit a bug? Let us know by
[filing a bug report](https://www.reviewboard.org/bugs/new/).

You can also look through the
[existing bug reports](https://www.reviewboard.org/bugs/) to see if anyone
else has already filed the bug.


Contributing
------------

Are you a developer? Do you want to integrate with RBTools, or work on
RBTools itself? Great! Let's help you get started.

First off, read through our
[contributor guide](https://www.reviewboard.org/docs/codebase/dev/).

We accept patches to Review Board, RBTools, and other related projects on
[reviews.reviewboard.org](https://reviews.reviewboard.org/). (Please note
that we do not accept pull requests.)

Got any questions about anything related to RBTools and development? Head on
over to our
[development discussion list](https://groups.google.com/group/reviewboard-dev/).


### Testing RBTools

If you're writing patches for RBTools, you'll need to know how to run our
test suite. First, make sure you have the necessary dependencies.

To run all the tests, you will need to install hgsubversion:

```
$ easy_install hgsubversion
```

hgsubversion may need apr-config (also known as apr-1-config) to run. This is
part of the Apache distribution. On Ubuntu, you can get it via:

```
$ sudo apt-get install libapr1-dev  # also try apache2-dev or httpd-dev
```

hgsubversion also requires that you set up an `.hgrc` in your home directory
with the following contents:

```
[extensions]
hgsvn = /path/to/hgsubversion
```

The path will be something like
`/usr/local/lib/python2.7/dist-packages/hgsubversion`. If you already have an
`[extensions]` section in your `.hgrc`, just add the hgsvn line to it.
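To confirm that Mercurial actually loaded the extension, asking for its help
text is a quick sanity check. This is a suggested check (assuming the path
above is correct), not an official setup step:

```
$ hg help hgsubversion    # Prints the extension's help if it loaded correctly
```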
You will also need nose:

```
$ easy_install nose
```


#### Running the Tests

Running the test suite is easy. Simply run:

```
$ nosetests -v
```

from the top of the `rbtools` directory.

You can also run a particular set of tests. For instance:

```
$ nosetests -v rbtools.api.tests
```

See `nosetests --help` for more options.


Related Projects
----------------

* [Review Board](https://github.com/reviewboard/reviewboard/) -
  Our powerful, open source code review tool.
* [Djblets](https://github.com/djblets/djblets/) -
  Our pack of Django utilities for datagrids, API, extensions, and more.
  Used by Review Board.
* [ReviewBot](https://github.com/reviewboard/ReviewBot/) -
  Pluggable, automated code review for Review Board.
* [rb-gateway](https://github.com/reviewboard/rb-gateway/) -
  Manages Git repositories, providing a full API enabling all of Review
  Board's features.

RBTools-0.7.11/contrib/installers/windows/scripts/get-version.py

from __future__ import print_function, unicode_literals

from rbtools import VERSION

# MSI files only use the first 3 version fields, and have no concept of
# alphas/betas/RCs/patch levels.
print('%s.%s.%s' % (VERSION[0], VERSION[1], VERSION[2]))

RBTools-0.7.11/contrib/P4Tool.txt

P4Win Tools for Review Board
>>post-review
python "" %C --p4-client $c --p4-port $p 1 0 1 0 0 1 0 0

RBTools-0.7.11/contrib/internal/release.py

#!/usr/bin/env python
#
# Performs a release of RBTools. This can only be run by the core
# developers with release permissions.
# import hashlib import mimetools import os import shutil import subprocess import sys import tempfile import urllib2 from fabazon.s3 import S3Bucket sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) from rbtools import __version__, __version_info__, is_release PY_VERSIONS = ["2.6", "2.7"] LATEST_PY_VERSION = PY_VERSIONS[-1] PACKAGE_NAME = 'RBTools' RELEASES_BUCKET_NAME = 'downloads.reviewboard.org' RELEASES_BUCKET_KEY = '/releases/%s/%s.%s/' % (PACKAGE_NAME, __version_info__[0], __version_info__[1]) RBWEBSITE_API_URL = 'http://www.reviewboard.org/api/' RELEASES_API_URL = '%sproducts/rbtools/releases/' % RBWEBSITE_API_URL built_files = [] def load_config(): filename = os.path.join(os.path.expanduser('~'), '.rbwebsiterc') if not os.path.exists(filename): sys.stderr.write("A .rbwebsiterc file must exist in the form of:\n") sys.stderr.write("\n") sys.stderr.write("USERNAME = ''\n") sys.stderr.write("PASSWORD = ''\n") sys.exit(1) user_config = {} try: execfile(filename, user_config) except SyntaxError, e: sys.stderr.write('Syntax error in config file: %s\n' 'Line %i offset %i\n' % (filename, e.lineno, e.offset)) sys.exit(1) auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler.add_password(realm='Web API', uri=RBWEBSITE_API_URL, user=user_config['USERNAME'], passwd=user_config['PASSWORD']) opener = urllib2.build_opener(auth_handler) urllib2.install_opener(opener) def execute(cmdline): if isinstance(cmdline, list): print ">>> %s" % subprocess.list2cmdline(cmdline) else: print ">>> %s" % cmdline p = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE) s = '' for data in p.stdout.readlines(): s += data sys.stdout.write(data) rc = p.wait() if rc != 0: print "!!! Error invoking command." sys.exit(1) return s def run_setup(target, pyver=LATEST_PY_VERSION): execute("python%s ./setup.py release %s" % (pyver, target)) def clone_git_tree(git_dir): new_git_dir = tempfile.mkdtemp(prefix='rbtools-release.') os.chdir(new_git_dir) execute('git clone %s .' % git_dir) return new_git_dir def build_targets(): for pyver in PY_VERSIONS: run_setup('bdist_egg', pyver) built_files.append(('dist/%s-%s-py%s.egg' % (PACKAGE_NAME, __version__, pyver), 'application/octet-stream')) run_setup('sdist') built_files.append(('dist/%s-%s.tar.gz' % (PACKAGE_NAME, __version__), 'application/x-tar')) def build_checksums(): sha_filename = 'dist/%s-%s.sha256sum' % (PACKAGE_NAME, __version__) out_f = open(sha_filename, 'w') for filename, mimetype in built_files: m = hashlib.sha256() in_f = open(filename, 'r') m.update(in_f.read()) in_f.close() out_f.write('%s %s\n' % (m.hexdigest(), os.path.basename(filename))) out_f.close() built_files.append((sha_filename, 'text/plain')) def upload_files(): bucket = S3Bucket(RELEASES_BUCKET_NAME) for filename, mimetype in built_files: bucket.upload(filename, '%s/%s' % (RELEASES_BUCKET_KEY, filename.split('/')[-1]), mimetype=mimetype, public=True) bucket.upload_directory_index(RELEASES_BUCKET_KEY) # This may be a new directory, so rebuild the parent as well. 
parent_key = '/'.join(RELEASES_BUCKET_KEY.split('/')[:-2]) bucket.upload_directory_index(parent_key) def tag_release(): execute("git tag release-%s" % __version__) def register_release(): if __version_info__[4] == 'final': run_setup("register") scm_revision = execute(['git rev-parse', 'release-%s' % __version__]) data = { 'major_version': __version_info__[0], 'minor_version': __version_info__[1], 'micro_version': __version_info__[2], 'patch_version': __version_info__[3], 'release_type': __version_info__[4], 'release_num': __version_info__[5], 'scm_revision': scm_revision, } boundary = mimetools.choose_boundary() content = '' for key, value in data.iteritems(): content += '--%s\r\n' % boundary content += 'Content-Disposition: form-data; name="%s"\r\n' % key content += '\r\n' content += str(value) + '\r\n' content += '--%s--\r\n' % boundary content += '\r\n' headers = { 'Content-Type': 'multipart/form-data; boundary=%s' % boundary, 'Content-Length': str(len(content)), } print 'Posting release to reviewboard.org' try: f = urllib2.urlopen(urllib2.Request(url=RELEASES_API_URL, data=content, headers=headers)) f.read() except urllib2.HTTPError, e: print "Error uploading. Got HTTP code %d:" % e.code print e.read() except urllib2.URLError, e: try: print "Error uploading. Got URL error:" % e.code print e.read() except AttributeError: pass def main(): if not os.path.exists("setup.py"): sys.stderr.write("This must be run from the root of the " "RBTools tree.\n") sys.exit(1) load_config() if not is_release(): sys.stderr.write("This version is not listed as a release.\n") sys.exit(1) cur_dir = os.getcwd() git_dir = clone_git_tree(cur_dir) build_targets() build_checksums() upload_files() os.chdir(cur_dir) shutil.rmtree(git_dir) tag_release() register_release() if __name__ == "__main__": main() RBTools-0.7.11/contrib/README.P4Tool0000644000232200023220000000113113230242633017124 0ustar debalancedebalanceAbout P4Tool ------------ P4Tool.txt is an extension to P4win that adds support for invoking post-review from the UI. Installation ------------ 1) Make a copy of P4Tool.txt and modify it for your setup. Specifically, you'll need to replace "" with the path to the post-review script on your system. If using a compiled post-review.exe, place the path to this file and remove "python" before the file path. 2) Import P4Tool.txt into P4win. Usage ----- To post a change for review, right-click on the change and select "post-review". RBTools-0.7.11/COPYING0000644000232200023220000000212513230242633014523 0ustar debalancedebalanceCopyright (c) 2007-2010 Christian Hammond Copyright (c) 2007-2010 David Trowbridge Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. RBTools-0.7.11/rbtools/0000755000232200023220000000000013230242636015157 5ustar debalancedebalanceRBTools-0.7.11/rbtools/testing/0000755000232200023220000000000013230242636016634 5ustar debalancedebalanceRBTools-0.7.11/rbtools/testing/testcase.py0000644000232200023220000000144213230242633021017 0ustar debalancedebalancefrom __future__ import unicode_literals import re import unittest class TestCase(unittest.TestCase): """The base class for RBTools test cases. Unlike the standard unittest.TestCase, this allows the test case description (generally the first line of the docstring) to wrap multiple lines. """ ws_re = re.compile(r'\s+') def shortDescription(self): """Returns the description of the current test. This changes the default behavior to replace all newlines with spaces, allowing a test description to span lines. It should still be kept short, though. """ doc = self._testMethodDoc if doc is not None: doc = doc.split('\n\n', 1)[0] doc = self.ws_re.sub(' ', doc).strip() return doc RBTools-0.7.11/rbtools/testing/__init__.py0000644000232200023220000000016013230242633020737 0ustar debalancedebalancefrom __future__ import unicode_literals from rbtools.testing.testcase import TestCase __all__ = ['TestCase'] RBTools-0.7.11/rbtools/commands/0000755000232200023220000000000013230242636016760 5ustar debalancedebalanceRBTools-0.7.11/rbtools/commands/publish.py0000644000232200023220000000221213230242633020772 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals from rbtools.api.errors import APIError from rbtools.commands import Command, CommandError from rbtools.utils.commands import get_review_request class Publish(Command): """Publish a specific review request from a draft.""" name = 'publish' author = 'The Review Board Project' args = '' option_list = [ Command.server_options, Command.repository_options, ] def main(self, request_id): """Run the command.""" repository_info, tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) request = get_review_request(request_id, api_root) try: draft = request.get_draft() draft = draft.update(public=True) except APIError as e: raise CommandError('Error publishing review request (it may ' 'already be published): %s' % e) print('Review request #%s is published.' % request_id) RBTools-0.7.11/rbtools/commands/post.py0000644000232200023220000011577213230242633020331 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import os import re import sys from rbtools.api.errors import APIError from rbtools.commands import Command, CommandError, Option, OptionGroup from rbtools.utils.commands import (AlreadyStampedError, get_review_request, stamp_commit_with_review_url) from rbtools.utils.console import confirm from rbtools.utils.process import execute from rbtools.utils.review_request import (get_draft_or_current_value, get_revisions, guess_existing_review_request) class Post(Command): """Create and update review requests.""" name = 'post' author = 'The Review Board Project' description = 'Uploads diffs to create and update review requests.' 
args = '[revisions]' #: Reserved built-in fields that can be set using the ``--field`` argument. reserved_fields = ('description', 'testing-done', 'summary') GUESS_AUTO = 'auto' GUESS_YES = 'yes' GUESS_NO = 'no' GUESS_YES_INPUT_VALUES = (True, 'yes', 1, '1') GUESS_NO_INPUT_VALUES = (False, 'no', 0, '0') GUESS_CHOICES = (GUESS_AUTO, GUESS_YES, GUESS_NO) option_list = [ OptionGroup( name='Posting Options', description='Controls the behavior of a post, including what ' 'review request gets posted and how, and what ' 'happens after it is posted.', option_list=[ Option('-u', '--update', dest='update', action='store_true', default=False, help='Automatically determines the existing review ' 'request to update.', added_in='0.5.3'), Option('-r', '--review-request-id', dest='rid', metavar='ID', default=None, help='Specifies the existing review request ID to ' 'update.'), Option('-p', '--publish', dest='publish', action='store_true', default=False, config_key='PUBLISH', help='Publishes the review request immediately after ' 'posting.' '\n' 'All required fields must already be filled in ' 'on the review request or must be provided when ' 'posting.'), Option('-o', '--open', dest='open_browser', action='store_true', config_key='OPEN_BROWSER', default=False, help='Opens a web browser to the review request ' 'after posting.'), Option('-s', '--stamp', dest='stamp_when_posting', action='store_true', config_key='STAMP_WHEN_POSTING', default=False, help='Stamps the commit message with the review ' 'request URL while posting the review.', added_in='0.7.3'), Option('--submit-as', dest='submit_as', metavar='USERNAME', config_key='SUBMIT_AS', default=None, help='The username to use as the author of the ' 'review request, instead of the logged in user.', extended_help=( "This is useful when used in a repository's " "post-commit script to update or create review " "requests. See :ref:`automating-rbt-post` for " "more information on this use case." )), Option('--change-only', dest='change_only', action='store_true', default=False, help='Updates fields from the change description, ' 'but does not upload a new diff ' '(Perforce/Plastic only).'), Option('--diff-only', dest='diff_only', action='store_true', default=False, help='Uploads a new diff, but does not automatically ' 'update fields from the commit message/change ' 'description. Fields explicitly provided by ' 'other options will be ignored.'), ] ), Command.server_options, Command.repository_options, OptionGroup( name='Review Request Field Options', description='Options for setting the contents of fields in the ' 'review request.', option_list=[ Option('-f', '--field', dest='fields', action='append', default=None, metavar='FIELD_NAME=VALUE', help='Sets custom fields into the extra_data of a ' 'review request. Can also be used to set ' 'built-in fields like description, summary, ' 'testing-done.'), Option('-g', '--guess-fields', dest='guess_fields', action='store', config_key='GUESS_FIELDS', nargs='?', default=GUESS_AUTO, const=GUESS_YES, choices=GUESS_CHOICES, help='Equivalent to setting both --guess-summary ' 'and --guess-description.', extended_help=( 'This can optionally take a value to control the ' 'guessing behavior. See :ref:`guessing-behavior` ' 'for more information.' 
)), Option('--guess-summary', dest='guess_summary', action='store', config_key='GUESS_SUMMARY', nargs='?', default=None, const=GUESS_YES, choices=GUESS_CHOICES, help='Generates the Summary field based on the ' 'commit messages (Bazaar/Git/Mercurial only).', extended_help=( 'This can optionally take a value to control the ' 'guessing behavior. See :ref:`guessing-behavior` ' 'for more information.' )), Option('--guess-description', dest='guess_description', action='store', config_key='GUESS_DESCRIPTION', nargs='?', default=None, const=GUESS_YES, choices=GUESS_CHOICES, help='Generates the Description field based on the ' 'commit messages (Bazaar/Git/Mercurial only).', extended_help=( 'This can optionally take a value to control the ' 'guessing behavior. See :ref:`guessing-behavior` ' 'for more information.' )), Option('--change-description', default=None, metavar='TEXT', help='A description of what changed in this update ' 'of the review request. This is ignored for new ' 'review requests.'), Option('--summary', dest='summary', metavar='TEXT', default=None, help='The new contents for the Summary field.'), Option('--description', dest='description', metavar='TEXT', default=None, help='The new contents for the Description field.'), Option('--description-file', dest='description_file', default=None, metavar='FILENAME', help='A text file containing the new contents for the ' 'Description field.'), Option('--testing-done', dest='testing_done', metavar='TEXT', default=None, help='The new contents for the Testing Done field.'), Option('--testing-done-file', dest='testing_file', default=None, metavar='FILENAME', help='A text file containing the new contents for the ' 'Testing Done field.'), Option('--branch', dest='branch', config_key='BRANCH', metavar='BRANCH', default=None, help='The branch the change will be committed on or ' 'affects. This is a free-form field and does not ' 'control any behavior.'), Option('--bugs-closed', dest='bugs_closed', metavar='BUG_ID[,...]', default=None, help='The comma-separated list of bug IDs closed.'), Option('--target-groups', dest='target_groups', config_key='TARGET_GROUPS', metavar='NAME[,...]', default=None, help='The names of the groups that should perform the ' 'review.'), Option('--target-people', dest='target_people', metavar='USERNAME[,...]', config_key='TARGET_PEOPLE', default=None, help='The usernames of the people who should perform ' 'the review.'), Option('--depends-on', dest='depends_on', config_key='DEPENDS_ON', metavar='ID[,...]', default=None, help='A comma-separated list of review request IDs ' 'that this review request will depend on.', added_in='0.6.1'), Option('--markdown', dest='markdown', action='store_true', config_key='MARKDOWN', default=False, help='Specifies if the summary and description should ' 'be interpreted as Markdown-formatted text.' '\n' 'This is only supported in Review Board 2.0+.', added_in='0.6'), ] ), Command.diff_options, Command.branch_options, Command.perforce_options, Command.subversion_options, Command.tfs_options, ] def post_process_options(self): super(Post, self).post_process_options() extra_fields = {} if self.options.fields is None: self.options.fields = [] for field in self.options.fields: key_value_pair = field.split('=', 1) if len(key_value_pair) != 2: raise CommandError( 'The --field argument should be in the form of: ' '--field name=value; got "%s" instead.' 
% field ) key, value = key_value_pair if key in self.reserved_fields: key_var = key.replace('-', '_') if getattr(self.options, key_var): raise CommandError( 'The "{0}" field was provided by both --{0}= ' 'and --field {0}=. Please use --{0} instead.' .format(key) ) setattr(self.options, key_var, value) else: extra_fields['extra_data.%s' % key] = value self.options.extra_fields = extra_fields # -g implies --guess-summary and --guess-description self.options.guess_fields = self.normalize_guess_value( self.options.guess_fields, '--guess-fields') for field_name in ('guess_summary', 'guess_description'): # We want to ensure we only override --guess-{field} with # --guess-fields when --guess-{field} is not provided. # to the default (auto). if getattr(self.options, field_name) is None: setattr(self.options, field_name, self.options.guess_fields) if self.options.revision_range: raise CommandError( 'The --revision-range argument has been removed. To post a ' 'diff for one or more specific revisions, pass those ' 'revisions as arguments. For more information, see the ' 'RBTools 0.6 Release Notes.') if self.options.svn_changelist: raise CommandError( 'The --svn-changelist argument has been removed. To use a ' 'Subversion changelist, pass the changelist name as an ' 'additional argument after the command.') # Only one of --description and --description-file can be used if self.options.description and self.options.description_file: raise CommandError('The --description and --description-file ' 'options are mutually exclusive.') # If --description-file is used, read that file if self.options.description_file: if os.path.exists(self.options.description_file): with open(self.options.description_file, 'r') as fp: self.options.description = fp.read() else: raise CommandError( 'The description file %s does not exist.' % self.options.description_file) # Only one of --testing-done and --testing-done-file can be used if self.options.testing_done and self.options.testing_file: raise CommandError('The --testing-done and --testing-done-file ' 'options are mutually exclusive.') # If --testing-done-file is used, read that file if self.options.testing_file: if os.path.exists(self.options.testing_file): with open(self.options.testing_file, 'r') as fp: self.options.testing_done = fp.read() else: raise CommandError('The testing file %s does not exist.' % self.options.testing_file) # If we have an explicitly specified summary, override # --guess-summary if self.options.summary: self.options.guess_summary = self.GUESS_NO else: self.options.guess_summary = self.normalize_guess_value( self.options.guess_summary, '--guess-summary') # If we have an explicitly specified description, override # --guess-description if self.options.description: self.options.guess_description = self.GUESS_NO else: self.options.guess_description = self.normalize_guess_value( self.options.guess_description, '--guess-description') # If the --diff-filename argument is used, we can't do automatic # updating. 
if self.options.diff_filename and self.options.update: raise CommandError('The --update option cannot be used when ' 'using --diff-filename.') # If we have an explicitly specified review request ID, override # --update if self.options.rid and self.options.update: self.options.update = False def normalize_guess_value(self, guess, arg_name): if guess in self.GUESS_YES_INPUT_VALUES: return self.GUESS_YES elif guess in self.GUESS_NO_INPUT_VALUES: return self.GUESS_NO elif guess == self.GUESS_AUTO: return guess else: raise CommandError('Invalid value "%s" for argument "%s"' % (guess, arg_name)) def get_repository_path(self, repository_info, api_root): """Get the repository path from the server. This will compare the paths returned by the SCM client with those one the server, and return the first match. """ if isinstance(repository_info.path, list): repositories = api_root.get_repositories( only_fields='path,mirror_path', only_links='') for repo in repositories.all_items: if repo['path'] in repository_info.path: repository_info.path = repo['path'] break elif repo['mirror_path'] in repository_info.path: repository_info.path = repo['mirror_path'] break if isinstance(repository_info.path, list): error_str = [ 'There was an error creating this review request.\n', '\n', 'There was no matching repository path found on the server.\n', 'Unknown repository paths found:\n', ] for foundpath in repository_info.path: error_str.append('\t%s\n' % foundpath) error_str += [ 'Ask the administrator to add one of these repositories\n', 'to the Review Board server.\n', ] raise CommandError(''.join(error_str)) return repository_info.path def post_request(self, repository_info, repository, server_url, api_root, review_request_id=None, changenum=None, diff_content=None, parent_diff_content=None, commit_id=None, base_commit_id=None, submit_as=None, retries=3, base_dir=None): """Creates or updates a review request, and uploads a diff. On success the review request id and url are returned. """ supports_posting_commit_ids = \ self.tool.capabilities.has_capability('review_requests', 'commit_ids') if review_request_id: review_request = get_review_request( review_request_id, api_root, only_fields='absolute_url,bugs_closed,id,status', only_links='diffs,draft') if review_request.status == 'submitted': raise CommandError( 'Review request %s is marked as %s. In order to update ' 'it, please reopen the review request and try again.' % (review_request_id, review_request.status)) else: # No review_request_id, so we will create a new review request. try: # Until we are Python 2.7+ only, the keys in request_data have # to be bytes. See bug 3753 for details. request_data = { b'repository': repository } if changenum: request_data[b'changenum'] = changenum elif commit_id and supports_posting_commit_ids: request_data[b'commit_id'] = commit_id if submit_as: request_data[b'submit_as'] = submit_as review_requests = api_root.get_review_requests( only_fields='', only_links='create') review_request = review_requests.create(**request_data) except APIError as e: if e.error_code == 204 and changenum: # The change number is already in use. Get the review # request for that change and update it instead. 
rid = e.rsp['review_request']['id'] review_request = api_root.get_review_request( review_request_id=rid, only_fields='absolute_url,bugs_closed,id,status', only_links='diffs,draft') else: raise CommandError('Error creating review request: %s' % e) if (not repository_info.supports_changesets or not self.options.change_only): try: diff_kwargs = { 'parent_diff': parent_diff_content, 'base_dir': base_dir, } if (base_commit_id and self.tool.capabilities.has_capability('diffs', 'base_commit_ids')): # Both the Review Board server and SCMClient support # base commit IDs, so pass that along when creating # the diff. diff_kwargs['base_commit_id'] = base_commit_id review_request.get_diffs(only_fields='').upload_diff( diff_content, **diff_kwargs) except APIError as e: error_msg = [ u'Error uploading diff\n\n', ] if e.error_code == 101 and e.http_status == 403: error_msg.append( u'You do not have permissions to modify ' u'this review request\n') elif e.error_code == 219: error_msg.append( u'The generated diff file was empty. This ' u'usually means no files were\n' u'modified in this change.\n') else: error_msg.append(str(e).decode('utf-8') + u'\n') error_msg.append( u'Your review request still exists, but the diff is ' u'not attached.\n') error_msg.append(u'%s\n' % review_request.absolute_url) raise CommandError(u'\n'.join(error_msg)) try: draft = review_request.get_draft(only_fields='commit_id') except APIError as e: raise CommandError('Error retrieving review request draft: %s' % e) # Stamp the commit message with the review request URL before posting # the review, so that we can use the stamped commit message when # guessing the description. This enables the stamped message to be # present on the review if the user has chosen to publish immediately # upon posting. if self.options.stamp_when_posting: if not self.tool.can_amend_commit: print('Cannot stamp review URL onto the commit message; ' 'stamping is not supported with %s.' % self.tool.name) else: try: stamp_commit_with_review_url(self.revisions, review_request.absolute_url, self.tool) print('Stamped review URL onto the commit message.') except AlreadyStampedError: print('Commit message has already been stamped') except Exception as e: logging.debug('Caught exception while stamping the ' 'commit message. Proceeding to post ' 'without stamping.', exc_info=True) print('Could not stamp review URL onto the commit ' 'message.') # Update the review request draft fields based on options set # by the user, or configuration. update_fields = {} if self.options.publish: update_fields['public'] = True if not self.options.diff_only: # If the user has requested to guess the summary or description, # get the commit message and override the summary and description # options, which we'll fill in below. The guessing takes place # after stamping so that the guessed description matches the commit # when rbt exits. if not self.options.diff_filename: self.check_guess_fields() update_fields.update(self.options.extra_fields) if self.options.target_groups: update_fields['target_groups'] = self.options.target_groups if self.options.target_people: update_fields['target_people'] = self.options.target_people if self.options.depends_on: update_fields['depends_on'] = self.options.depends_on if self.options.summary: update_fields['summary'] = self.options.summary if self.options.branch: update_fields['branch'] = self.options.branch if self.options.bugs_closed: # Append to the existing list of bugs. 
self.options.bugs_closed = self.options.bugs_closed.strip(', ') bug_set = (set(re.split('[, ]+', self.options.bugs_closed)) | set(review_request.bugs_closed)) self.options.bugs_closed = ','.join(bug_set) update_fields['bugs_closed'] = self.options.bugs_closed if self.options.description: update_fields['description'] = self.options.description if self.options.testing_done: update_fields['testing_done'] = self.options.testing_done if ((self.options.description or self.options.testing_done) and self.options.markdown and self.tool.capabilities.has_capability('text', 'markdown')): # The user specified that their Description/Testing Done are # valid Markdown, so tell the server so it won't escape the text. update_fields['text_type'] = 'markdown' if self.options.change_description: update_fields['changedescription'] = \ self.options.change_description if supports_posting_commit_ids and commit_id != draft.commit_id: update_fields['commit_id'] = commit_id or '' if update_fields: try: draft = draft.update(**update_fields) except APIError as e: raise CommandError( 'Error updating review request draft: %s\n\n' 'Your review request still exists, but the diff is not ' 'attached.\n\n' '%s\n' % (e, review_request.absolute_url)) return review_request.id, review_request.absolute_url def check_guess_fields(self): """Checks and handles field guesses for the review request. This will attempt to guess the values for the summary and description fields, based on the contents of the commit message at the provided revisions, if requested by the caller. If the backend doesn't support guessing, or if guessing isn't requested, or if explicit values were set in the options, nothing will be set for the fields. """ is_new_review_request = (not self.options.rid and not self.options.update) guess_summary = ( self.options.guess_summary == self.GUESS_YES or (self.options.guess_summary == self.GUESS_AUTO and is_new_review_request)) guess_description = ( self.options.guess_description == self.GUESS_YES or (self.options.guess_description == self.GUESS_AUTO and is_new_review_request)) if self.revisions and (guess_summary or guess_description): try: commit_message = self.tool.get_commit_message(self.revisions) if commit_message: guessed_summary = commit_message['summary'] guessed_description = commit_message['description'] if guess_summary and guess_description: self.options.summary = guessed_summary self.options.description = guessed_description elif guess_summary: self.options.summary = guessed_summary elif guess_description: # If we're guessing the description but not the summary # (for example, if --summary was included), we probably # don't want to strip off the summary line of the # commit message. if guessed_description.startswith(guessed_summary): self.options.description = guessed_description else: self.options.description = \ guessed_summary + '\n\n' + guessed_description except NotImplementedError: # The SCMClient doesn't support getting commit messages, # so we can't provide the guessed versions. pass def _ask_review_request_match(self, review_request): question = ("Update Review Request #%s: '%s'? " % (review_request.id, get_draft_or_current_value( 'summary', review_request))) return confirm(question) def main(self, *args): """Create and update review requests.""" # The 'args' tuple must be made into a list for some of the # SCM Clients code. The way arguments were structured in # post-review meant this was a list, and certain parts of # the code base try and concatenate args to the end of # other lists. 
Until the client code is restructured and # cleaned up we will satisfy the assumption here. self.cmd_args = list(args) self.post_process_options() origcwd = os.path.abspath(os.getcwd()) repository_info, self.tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, self.tool) api_client, api_root = self.get_api(server_url) self.setup_tool(self.tool, api_root=api_root) if (self.options.exclude_patterns and not self.tool.supports_diff_exclude_patterns): raise CommandError( 'The %s backend does not support excluding files via the ' '-X/--exclude commandline options or the EXCLUDE_PATTERNS ' '.reviewboardrc option.' % self.tool.name) # Check if repository info on reviewboard server match local ones. repository_info = repository_info.find_server_repository_info(api_root) if self.options.diff_filename: self.revisions = None parent_diff = None base_commit_id = None commit_id = None if self.options.diff_filename == '-': if hasattr(sys.stdin, 'buffer'): # Make sure we get bytes on Python 3.x diff = sys.stdin.buffer.read() else: diff = sys.stdin.read() else: try: diff_path = os.path.join(origcwd, self.options.diff_filename) with open(diff_path, 'rb') as fp: diff = fp.read() except IOError as e: raise CommandError('Unable to open diff filename: %s' % e) else: self.revisions = get_revisions(self.tool, self.cmd_args) if self.revisions: extra_args = None else: extra_args = self.cmd_args # Generate a diff against the revisions or arguments, filtering # by the requested files if provided. diff_info = self.tool.diff( revisions=self.revisions, include_files=self.options.include_files or [], exclude_patterns=self.options.exclude_patterns or [], extra_args=extra_args) diff = diff_info['diff'] parent_diff = diff_info.get('parent_diff') base_commit_id = diff_info.get('base_commit_id') commit_id = diff_info.get('commit_id') logging.debug('Generated diff size: %d bytes', len(diff)) if parent_diff: logging.debug('Generated parent diff size: %d bytes', len(parent_diff)) repository = ( self.options.repository_name or self.options.repository_url or self.get_repository_path(repository_info, api_root)) base_dir = self.options.basedir or repository_info.base_path if repository is None: raise CommandError('Could not find the repository on the Review ' 'Board server.') if len(diff) == 0: raise CommandError("There don't seem to be any diffs!") # Validate the diffs to ensure that they can be parsed and that # all referenced files can be found. # # Review Board 2.0.14+ (with the diffs.validation.base_commit_ids # capability) is required to successfully validate against hosting # services that need a base_commit_id. This is basically due to # the limitations of a couple Git-specific hosting services # (Beanstalk, Bitbucket, and Unfuddle). # # In order to validate, we need to either not be dealing with a # base commit ID (--diff-filename), or be on a new enough version # of Review Board, or be using a non-Git repository. can_validate_base_commit_ids = \ self.tool.capabilities.has_capability('diffs', 'validation', 'base_commit_ids') if (not base_commit_id or can_validate_base_commit_ids or self.tool.name != 'Git'): # We can safely validate this diff before posting it, but we # need to ensure we only pass base_commit_id if the capability # is set. 
validate_kwargs = {} if can_validate_base_commit_ids: validate_kwargs['base_commit_id'] = base_commit_id try: diff_validator = api_root.get_diff_validation() diff_validator.validate_diff( repository, diff, parent_diff=parent_diff, base_dir=base_dir, **validate_kwargs) except APIError as e: msg_prefix = '' if e.error_code == 207: msg_prefix = '%s: ' % e.rsp['file'] raise CommandError('Error validating diff\n\n%s%s' % (msg_prefix, e)) except AttributeError: # The server doesn't have a diff validation resource. Post as # normal. pass if (repository_info.supports_changesets and not self.options.diff_filename and 'changenum' in diff_info): changenum = diff_info['changenum'] else: changenum = self.tool.get_changenum(self.revisions) # Not all scm clients support get_changenum, so if get_changenum # returns None (the default for clients that don't have changenums), # we'll prefer the existing commit_id. commit_id = changenum or commit_id if self.options.update and self.revisions: review_request = guess_existing_review_request( repository_info, self.options.repository_name, api_root, api_client, self.tool, self.revisions, guess_summary=False, guess_description=False, is_fuzzy_match_func=self._ask_review_request_match) if not review_request or not review_request.id: raise CommandError('Could not determine the existing review ' 'request to update.') self.options.rid = review_request.id # If only certain files within a commit are being submitted for review, # do not include the commit id. This prevents conflicts if multiple # files from the same commit are posted for review separately. if self.options.include_files or self.options.exclude_patterns: commit_id = None request_id, review_url = self.post_request( repository_info, repository, server_url, api_root, self.options.rid, changenum=changenum, diff_content=diff, parent_diff_content=parent_diff, commit_id=commit_id, base_commit_id=base_commit_id, submit_as=self.options.submit_as, base_dir=base_dir) diff_review_url = review_url + 'diff/' print('Review request #%s posted.' % request_id) print() print(review_url) print(diff_review_url) # Load the review up in the browser if requested to. if self.options.open_browser: if sys.platform == 'darwin': # The 'webbrowser' module currently does a bunch of stuff with # AppleScript, which is broken on macOS 10.12.5. See # https://bugs.python.org/issue30392 for more discussion. try: execute(['open', review_url]) except Exception as e: logging.exception('Error opening review URL %s: %s', review_url, e) else: try: import webbrowser if 'open_new_tab' in dir(webbrowser): # open_new_tab is only in python 2.5+ webbrowser.open_new_tab(review_url) elif 'open_new' in dir(webbrowser): webbrowser.open_new(review_url) else: os.system('start %s' % review_url) except Exception as e: logging.exception('Error opening review URL %s: %s', review_url, e) RBTools-0.7.11/rbtools/commands/login.py0000644000232200023220000000301013230242633020431 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging from rbtools.commands import Command from rbtools.utils.users import get_authenticated_session class Login(Command): """Logs into a Review Board server. The user will be prompted for a username and password, unless otherwise passed on the command line, allowing the user to log in and save a session cookie without needing to be in a repository or posting to the server. If the user is already logged in, this won't do anything. 
""" name = 'login' author = 'The Review Board Project' option_list = [ Command.server_options, ] def main(self): """Run the command.""" server_url = self.get_server_url(None, None) api_client, api_root = self.get_api(server_url) session = api_root.get_session(expand='user') was_authenticated = session.authenticated if not was_authenticated: session = get_authenticated_session(api_client, api_root, auth_required=True, session=session) if session.authenticated: if not was_authenticated or (self.options.username and self.options.password): logging.info('Successfully logged in to Review Board.') else: logging.info('You are already logged in to Review Board at %s', api_client.domain) RBTools-0.7.11/rbtools/commands/land.py0000644000232200023220000002360113230242633020247 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import subprocess from rbtools.clients.errors import MergeError, PushError from rbtools.commands import Command, CommandError, Option, RB_MAIN from rbtools.utils.commands import (build_rbtools_cmd_argv, extract_commit_message, get_review_request) from rbtools.utils.console import confirm from rbtools.utils.review_request import (get_draft_or_current_value, get_revisions, guess_existing_review_request) class Land(Command): """Land changes from a review request onto the remote repository. This command takes a review request, applies it to a feature branch, merges it with the specified destination branch, and pushes the changes to an upstream repository. Notes: The review request needs to be approved first. ``--local`` option can be used to skip the patching step. """ name = 'land' author = 'The Review Board Project' args = '[]' option_list = [ Option( '--dest', dest='destination_branch', default=None, config_key='LAND_DEST_BRANCH', help='Specifies the destination branch to land changes on.'), Option( '-r', '--review-request-id', dest='rid', metavar='ID', default=None, help='Specifies the review request ID.'), Option( '--local', dest='is_local', action='store_true', default=None, help='Forces the change to be merged without patching, if ' 'merging a local branch. Defaults to true unless ' '--review-request-id is used.'), Option( '-p', '--push', dest='push', action='store_true', default=False, config_key='LAND_PUSH', help='Pushes the branch after landing the change.'), Option( '-n', '--no-push', dest='push', action='store_false', default=False, config_key='LAND_PUSH', help='Prevents pushing the branch after landing the change, ' 'if pushing is enabled by default.'), Option( '--squash', dest='squash', action='store_true', default=False, config_key='LAND_SQUASH', help='Squashes history into a single commit.'), Option( '--no-squash', dest='squash', action='store_false', default=False, config_key='LAND_SQUASH', help='Disables squashing history into a single commit, choosing ' 'instead to merge the branch, if squashing is enabled by ' 'default.'), Option( '-e', '--edit', dest='edit', action='store_true', default=False, help='Invokes the editor to edit the commit message before ' 'landing the change.'), Option( '--delete-branch', dest='delete_branch', action='store_true', config_key='LAND_DELETE_BRANCH', default=True, help="Deletes the local branch after it's landed. Only used if " "landing a local branch. 
This is the default."), Option( '--no-delete-branch', dest='delete_branch', action='store_false', config_key='LAND_DELETE_BRANCH', default=True, help="Prevents the local branch from being deleted after it's " "landed."), Option( '--dry-run', dest='dry_run', action='store_true', default=False, help='Simulates the landing of a change, without actually ' 'making any changes to the tree.'), Command.server_options, Command.repository_options, Command.branch_options, ] def patch(self, review_request_id): patch_command = [RB_MAIN, 'patch'] patch_command.extend(build_rbtools_cmd_argv(self.options)) if self.options.edit: patch_command.append('-c') else: patch_command.append('-C') patch_command.append(review_request_id) p = subprocess.Popen(patch_command) rc = p.wait() if rc: raise CommandError('Failed to execute command: %s' % patch_command) def main(self, branch_name=None, *args): """Run the command.""" self.cmd_args = list(args) if branch_name: self.cmd_args.insert(0, branch_name) repository_info, self.tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, self.tool) api_client, api_root = self.get_api(server_url) self.setup_tool(self.tool, api_root=api_root) dry_run = self.options.dry_run # Check if repository info on reviewboard server match local ones. repository_info = repository_info.find_server_repository_info(api_root) if (not self.tool.can_merge or not self.tool.can_push_upstream or not self.tool.can_delete_branch): raise CommandError( "This command does not support %s repositories." % self.tool.name) if self.tool.has_pending_changes(): raise CommandError('Working directory is not clean.') if self.options.rid: request_id = self.options.rid is_local = branch_name is not None else: request = guess_existing_review_request( repository_info, self.options.repository_name, api_root, api_client, self.tool, get_revisions(self.tool, self.cmd_args), guess_summary=False, guess_description=False, is_fuzzy_match_func=self._ask_review_request_match) if not request or not request.id: raise CommandError('Could not determine the existing review ' 'request URL to land.') request_id = request.id is_local = True if self.options.is_local is not None: is_local = self.options.is_local destination_branch = self.options.destination_branch if not destination_branch: raise CommandError('Please specify a destination branch.') if is_local: if branch_name is None: branch_name = self.tool.get_current_branch() if branch_name == destination_branch: raise CommandError('The local branch cannot be merged onto ' 'itself. Try a different local branch or ' 'destination branch.') review_request = get_review_request(request_id, api_root) try: is_rr_approved = review_request.approved approval_failure = review_request.approval_failure except AttributeError: # The Review Board server is an old version (pre-2.0) that # doesn't support the `approved` field. Determining it manually. 
if review_request.ship_it_count == 0: is_rr_approved = False approval_failure = \ 'The review request has not been marked "Ship It!"' else: is_rr_approved = True except Exception as e: logging.exception( 'Unexpected error while looking up review request ' 'approval state: %s', e) raise CommandError( 'An error was encountered while executing the land ' 'command.') finally: if not is_rr_approved: raise CommandError(approval_failure) if is_local: review_commit_message = extract_commit_message(review_request) author = review_request.get_submitter() if self.options.squash: print('Squashing branch "%s" into "%s"' % (branch_name, destination_branch)) else: print('Merging branch "%s" into "%s"' % (branch_name, destination_branch)) if not dry_run: try: self.tool.merge( branch_name, destination_branch, review_commit_message, author, self.options.squash, self.options.edit) except MergeError as e: raise CommandError(str(e)) if self.options.delete_branch: print('Deleting merged branch "%s"' % branch_name) if not dry_run: self.tool.delete_branch(branch_name, merged_only=False) else: print('Applying patch from review request %s' % request_id) if not dry_run: self.patch(request_id) if self.options.push: print('Pushing branch "%s" upstream' % destination_branch) if not dry_run: try: self.tool.push_upstream(destination_branch) except PushError as e: raise CommandError(str(e)) print('Review request %s has landed on "%s".' % (request_id, destination_branch)) def _ask_review_request_match(self, review_request): return confirm( 'Land Review Request #%s: "%s"? ' % (review_request.id, get_draft_or_current_value('summary', review_request))) RBTools-0.7.11/rbtools/commands/attach.py0000644000232200023220000000345713230242633020604 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import os from rbtools.api.errors import APIError from rbtools.commands import Command, CommandError, Option from rbtools.utils.commands import get_review_request class Attach(Command): """Attach a file to a review request.""" name = 'attach' author = 'The Review Board Project' args = ' ' option_list = [ Option('--filename', dest='filename', default=None, help='Custom filename for the file attachment.'), Option('--caption', dest='caption', default=None, help='Caption for the file attachment.'), Command.server_options, Command.repository_options, ] def main(self, request_id, path_to_file): self.repository_info, self.tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(self.repository_info, self.tool) api_client, api_root = self.get_api(server_url) request = get_review_request(request_id, api_root) try: with open(path_to_file, 'rb') as f: content = f.read() except IOError: raise CommandError('%s is not a valid file.' % path_to_file) # Check if the user specified a custom filename, otherwise # use the original filename. filename = self.options.filename or os.path.basename(path_to_file) try: request.get_file_attachments().upload_attachment( filename, content, self.options.caption) except APIError as e: raise CommandError('Error uploading file: %s' % e) print('Uploaded %s to review request %s.' 
% (path_to_file, request_id)) RBTools-0.7.11/rbtools/commands/stamp.py0000644000232200023220000001321613230242633020456 0ustar debalancedebalanceimport logging from rbtools.commands import Command, CommandError, Option, OptionGroup from rbtools.utils.commands import (get_review_request, stamp_commit_with_review_url) from rbtools.utils.console import confirm from rbtools.utils.review_request import (find_review_request_by_change_id, get_draft_or_current_value, get_revisions, guess_existing_review_request) class Stamp(Command): """Add the review request URL to the commit message. Stamps the review request URL onto the commit message of the revision specified. The revisions argument behaves like it does in rbt post, where it is required for some SCMs (e.g. Perforce) and unnecessary/ignored for others (e.g. Git). Normally, this command will guess the review request (based on the revision number if provided, and the commit summary and description otherwise). However, if a review request ID is specified by the user, it stamps the URL of that review request instead of guessing. """ name = 'stamp' author = 'The Review Board Project' description = 'Adds the review request URL to the commit message.' args = '[revisions]' option_list = [ OptionGroup( name='Stamp Options', description='Controls the behavior of a stamp, including what ' 'review request URL gets stamped.', option_list=[ Option('-r', '--review-request-id', dest='rid', metavar='ID', default=None, help='Specifies the existing review request ID to ' 'be stamped.'), ] ), Command.server_options, Command.repository_options, Command.diff_options, Command.branch_options, Command.perforce_options, ] def no_commit_error(self): raise CommandError('No existing commit to stamp on.') def _ask_review_request_match(self, review_request): question = ("Stamp with Review Request #%s: '%s'? " % (review_request.id, get_draft_or_current_value( 'summary', review_request))) return confirm(question) def determine_review_request(self, api_client, api_root, repository_info, repository_name, revisions): """Determine the correct review request for a commit. A tuple (review request ID, review request absolute URL) is returned. If no review request ID is found by any of the strategies, (None, None) is returned. """ # First, try to match the changeset to a review request directly. if repository_info.supports_changesets: review_request = find_review_request_by_change_id( api_client, api_root, repository_info, repository_name, revisions) if review_request and review_request.id: return review_request.id, review_request.absolute_url # Fall back on guessing based on the description. This may return None # if no suitable review request is found. 
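        # guess_existing_review_request() compares the commit's summary and
        # description against the user's pending review requests; near
        # matches are confirmed interactively through
        # self._ask_review_request_match() before being accepted.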
logging.debug('Attempting to guess review request based on ' 'summary and description') review_request = guess_existing_review_request( repository_info, repository_name, api_root, api_client, self.tool, revisions, guess_summary=False, guess_description=False, is_fuzzy_match_func=self._ask_review_request_match, no_commit_error=self.no_commit_error) if review_request: logging.debug('Found review request ID %d' % review_request.id) return review_request.id, review_request.absolute_url else: logging.debug('Could not find a matching review request') return None, None def main(self, *args): """Add the review request URL to a commit message.""" self.cmd_args = list(args) repository_info, self.tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, self.tool) api_client, api_root = self.get_api(server_url) self.setup_tool(self.tool, api_root=api_root) if not self.tool.can_amend_commit: raise NotImplementedError('rbt stamp is not supported with %s.' % self.tool.name) try: if self.tool.has_pending_changes(): raise CommandError('Working directory is not clean.') except NotImplementedError: pass revisions = get_revisions(self.tool, self.cmd_args) # Use the ID from the command line options if present. if self.options.rid: review_request = get_review_request(self.options.rid, api_root) review_request_id = self.options.rid review_request_url = review_request.absolute_url else: review_request_id, review_request_url = \ self. determine_review_request( api_client, api_root, repository_info, self.options.repository_name, revisions) if not review_request_url: raise CommandError('Could not determine the existing review ' 'request URL to stamp with.') stamp_commit_with_review_url(revisions, review_request_url, self.tool) print('Successfully stamped change with the URL:') print(review_request_url) RBTools-0.7.11/rbtools/commands/status.py0000644000232200023220000000502613230242633020655 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging from rbtools.commands import Command, Option from rbtools.utils.repository import get_repository_id from rbtools.utils.users import get_username class Status(Command): """Display review requests for the current repository.""" name = 'status' author = 'The Review Board Project' description = 'Output a list of your pending review requests.' args = '' option_list = [ Option('--all', dest='all_repositories', action='store_true', default=False, help='Shows review requests for all repositories instead ' 'of just the detected repository.'), Command.server_options, Command.repository_options, Command.perforce_options, Command.tfs_options, ] def output_request(self, request): print(' r/%s - %s' % (request.id, request.summary)) def output_draft(self, request, draft): print(' * r/%s - %s' % (request.id, draft.summary)) def main(self): repository_info, tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) self.setup_tool(tool, api_root=api_root) username = get_username(api_client, api_root, auth_required=True) # Check if repository info on reviewboard server match local ones. 
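        # find_server_repository_info() may swap the locally-detected
        # repository path for the one registered on the server, so that the
        # repository ID lookup below lines up with what the server knows.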
repository_info = repository_info.find_server_repository_info(api_root) query_args = { 'from_user': username, 'status': 'pending', 'expand': 'draft', } if not self.options.all_repositories: repo_id = get_repository_id( repository_info, api_root, repository_name=self.options.repository_name) if repo_id: query_args['repository'] = repo_id else: logging.warning('The repository detected in the current ' 'directory was not found on\n' 'the Review Board server. Displaying review ' 'requests from all repositories.') requests = api_root.get_review_requests(**query_args) for request in requests.all_items: if request.draft: self.output_draft(request, request.draft[0]) else: self.output_request(request) RBTools-0.7.11/rbtools/commands/setup_repo.py0000644000232200023220000001174613230242633021525 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import difflib import os import six from six.moves import input from rbtools.commands import Command, CommandError from rbtools.utils.console import confirm from rbtools.utils.filesystem import CONFIG_FILE class SetupRepo(Command): """Configure a repository to point to a Review Board server. Interactively creates the configuration file .reviewboardrc in the current working directory. The user is prompted for the Review Board server url if it's not supplied as an option. Upon a successful server connection, an attempt is made to match the local repository to a repository on the Review Board server. If no match is found or if the user declines the match, the user is prompted to choose from other repositories on the Review Board server. If the client supports it, it attempts to guess the branch name on the server. """ name = 'setup-repo' author = 'The Review Board Project' description = ('Configure a repository to point to a Review Board server ' 'by generating the configuration file %s' % CONFIG_FILE) args = '' option_list = [ Command.server_options, Command.perforce_options, Command.tfs_options, ] def prompt_rb_repository(self, tool_name, repository_info, api_root): """Interactively prompt to select a matching repository. The user is prompted to choose a matching repository found on the Review Board server. """ # Go through each matching repo and prompt for a selection. If a # selection is made, immediately return the selected repo. for repository_page in api_root.get_repositories().all_pages: repo_paths = {} for repository in repository_page: if repository.tool != tool_name: continue repo_paths[repository['path']] = repository if 'mirror_path' in repository: repo_paths[repository['mirror_path']] = repository closest_path = difflib.get_close_matches(repository_info.path, six.iterkeys(repo_paths), n=4, cutoff=0.4) for path in closest_path: repo = repo_paths[path] question = ('Use the %s repository "%s" (%s)?' 
% (tool_name, repo['name'], repo['path'])) if confirm(question): return repo return None def _get_output(self, config): """Returns a string output based on the the provided config.""" settings = [] for setting, value in config: settings.append('%s = "%s"' % (setting, value)) settings.append('') return '\n'.join(settings) def generate_config_file(self, file_path, config): """Generates the config file in the current working directory.""" try: with open(file_path, 'w') as outfile: output = self._get_output(config) outfile.write(output) except IOError as e: raise CommandError('I/O error generating config file (%s): %s' % (e.errno, e.strerror)) print('Config written to %s' % file_path) def main(self, *args): server = self.options.server if not server: server = input('Enter the Review Board server URL: ') repository_info, tool = self.initialize_scm_tool() api_client, api_root = self.get_api(server) self.setup_tool(tool, api_root=api_root) # Check if repository info on reviewboard server match local ones. repository_info = repository_info.find_server_repository_info(api_root) selected_repo = self.prompt_rb_repository( tool.name, repository_info, api_root) if not selected_repo: print('No %s repository found or selected for %s. %s not created.' % (tool.name, server, CONFIG_FILE)) return config = [ ('REVIEWBOARD_URL', server), ('REPOSITORY', selected_repo['name']), ('REPOSITORY_TYPE', tool.entrypoint_name), ] try: branch = tool.get_current_branch() config.append(('BRANCH', branch)) config.append(('LAND_DEST_BRANCH', branch)) except NotImplementedError: pass outfile_path = os.path.join(os.getcwd(), CONFIG_FILE) output = self._get_output(config) if not os.path.exists(outfile_path): question = ('Create "%s" with the following?\n\n%s\n' % (outfile_path, output)) else: question = ('"%s" exists. Overwrite with the following?\n\n%s\n' % (outfile_path, output)) if not confirm(question): return self.generate_config_file(outfile_path, config) RBTools-0.7.11/rbtools/commands/list_repo_types.py0000644000232200023220000000062513230242633022556 0ustar debalancedebalancefrom __future__ import unicode_literals from rbtools.clients import print_clients from rbtools.commands import Command class ListRepoTypes(Command): """List available repository types.""" name = 'list-repo-types' author = 'The Review Board Project' description = 'Print a list of supported repository types.' def main(self, *args): print_clients(self.config, self.options) RBTools-0.7.11/rbtools/commands/tests/0000755000232200023220000000000013230242636020122 5ustar debalancedebalanceRBTools-0.7.11/rbtools/commands/tests/test_post.py0000644000232200023220000000667213230242633022530 0ustar debalancedebalance"""Test for RBTools post command.""" from __future__ import unicode_literals from rbtools.commands import CommandError from rbtools.commands.post import Post from rbtools.utils.testbase import RBTestBase class PostCommandTests(RBTestBase): """Tests for rbt post command.""" def _create_post_command(self, fields): """Create an argument parser with the given extra fields. Args: fields (list of unicode): A list of key-value pairs for the field argument. Each pair should be of the form key=value. 
Returns: argparse.ArgumentParser: Argument parser for commandline arguments """ post = Post() argv = ['rbt', 'post'] parser = post.create_arg_parser(argv) post.options = parser.parse_args(argv[2:]) post.options.fields = fields return post def test_post_one_extra_fields(self): """Testing one extra field argument with rbt post --field foo=bar""" post = self._create_post_command(['foo=bar']) post.post_process_options() self.assertEqual( post.options.extra_fields, {'extra_data.foo': 'bar'}) def test_post_multiple_extra_fields(self): """Testing multiple extra field arguments with rbt post --field foo=bar --field desc=new """ post = self._create_post_command(['foo=bar', 'desc=new']) post.post_process_options() self.assertEqual( post.options.extra_fields, { 'extra_data.foo': 'bar', 'extra_data.desc': 'new', }) def test_native_fields_through_extra_fields(self): """Testing built-in fields through extra_fields with rbt post --field description=testing --field summary='native testing' --field testing-done='No tests' """ post = self._create_post_command([ 'description=testing', 'summary=native testing', 'testing-done=No tests', ]) post.post_process_options() self.assertEqual(post.options.description, 'testing') self.assertEqual(post.options.summary, 'native testing') self.assertEqual(post.options.testing_done, 'No tests') def test_wrong_argument_entry(self): """Testing built-in fields through extra_fields with rbt post --field description and rbt post --field testing_done='No tests' """ post = self._create_post_command(['testing_done=No tests']) self.assertEqual(post.options.testing_done, None) post = self._create_post_command(['description']) self.assertRaises(CommandError, post.post_process_options) def test_multiple_delimiter(self): """Testing multiple delimiters with rbt post --field myField=this=string=has=equals=signs """ post = self._create_post_command( ['myField=this=string=has=equals=signs']) post.post_process_options() self.assertEqual( post.options.extra_fields, {'extra_data.myField': 'this=string=has=equals=signs'}) def test_arg_field_set_again_by_custom_fields(self): """Testing argument duplication with rbt post --field myField=test --description test """ post = self._create_post_command(['description=test']) post.options.description = 'test' self.assertRaises(CommandError, post.post_process_options) RBTools-0.7.11/rbtools/commands/tests/__init__.py0000644000232200023220000000000013230242633022216 0ustar debalancedebalanceRBTools-0.7.11/rbtools/commands/main.py0000644000232200023220000001163713230242633020263 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import argparse import os import pkg_resources import signal import subprocess import sys from rbtools import get_version_string from rbtools.commands import Option, RB_MAIN from rbtools.utils.aliases import run_alias from rbtools.utils.filesystem import load_config GLOBAL_OPTIONS = [ Option('-v', '--version', action='version', version='RBTools %s' % get_version_string()), Option('-h', '--help', action='store_true', dest='help', default=False), Option('command', nargs=argparse.REMAINDER, help='The RBTools command to execute, and any arguments. ' '(See below)'), ] def build_help_text(command_class): """Generate help text from a command class.""" command = command_class() parser = command.create_parser({}) return parser.format_help() def help(args, parser): if args: # TODO: First check for static help text file before # generating it at run time. 
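        # Look the named command up among the entry points registered by
        # the rbtools package itself; ep.load() imports the command class
        # so build_help_text() can render its argument parser's help.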
ep = pkg_resources.get_entry_info('rbtools', 'rbtools_commands', args[0]) if ep: help_text = build_help_text(ep.load()) print(help_text) sys.exit(0) print('No help found for %s' % args[0]) sys.exit(0) parser.print_help() # We cast to a set to de-dupe the list, since third-parties may # try to override commands by using the same name, and then cast # back to a list for easy sorting. entrypoints = pkg_resources.iter_entry_points('rbtools_commands') commands = list(set([entrypoint.name for entrypoint in entrypoints])) common_commands = ['post', 'patch', 'close', 'diff'] print('\nThe most commonly used commands are:') for command in common_commands: print(' %s' % command) print('\nOther commands:') for command in sorted(commands): if command not in common_commands: print(' %s' % command) print("See '%s help ' for more information on a specific " "command." % RB_MAIN) sys.exit(0) def main(): """Execute a command.""" def exit_on_int(sig, frame): sys.exit(128 + sig) signal.signal(signal.SIGINT, exit_on_int) parser = argparse.ArgumentParser( prog=RB_MAIN, usage='%(prog)s [--version] [options] []', add_help=False) for option in GLOBAL_OPTIONS: option.add_to(parser) opt = parser.parse_args() if not opt.command: help([], parser) command_name = opt.command[0] args = opt.command[1:] if command_name == 'help': help(args, parser) elif opt.help or b'--help' in args or b'-h' in args: help(opt.command, parser) # Attempt to retrieve the command class from the entry points. We # first look in rbtools for the commands, and failing that, we look # for third-party commands. ep = pkg_resources.get_entry_info('rbtools', 'rbtools_commands', command_name) if not ep: try: ep = next(pkg_resources.iter_entry_points( 'rbtools_commands', command_name)) except StopIteration: # There aren't any custom entry points defined. pass if ep: try: command = ep.load()() except ImportError: # TODO: It might be useful to actual have the strack # trace here, due to an import somewhere down the import # chain failing. sys.stderr.write('Could not load command entry point %s\n' % ep.name) sys.exit(1) except Exception as e: sys.stderr.write('Unexpected error loading command %s: %s\n' % (ep.name, e)) sys.exit(1) command.run_from_argv([RB_MAIN, command_name] + args) else: # A command class could not be found, so try and execute # the "rb-" on the system. try: sys.exit( subprocess.call(['%s-%s' % (RB_MAIN, command_name)] + args, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, env=os.environ.copy())) except OSError: # OSError is only raised in this scenario when subprocess.call # cannot find an executable with the name rbt-. If # this command doesn't exist, we will check if an alias exists # with the name before printing an error message. 
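            # (e.g. a hypothetical "rbt-foo" executable that is not
            # installed simply falls through to the ALIASES lookup below.)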
pass aliases = load_config().get('ALIASES', {}) if command_name in aliases: sys.exit(run_alias(aliases[command_name], args)) else: parser.error('"%s" is not a command' % command_name) if __name__ == '__main__': main() RBTools-0.7.11/rbtools/commands/diff.py0000644000232200023220000000505013230242633020237 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals from rbtools.clients.errors import InvalidRevisionSpecError from rbtools.commands import Command, CommandError class Diff(Command): """Prints a diff to the terminal.""" name = 'diff' author = 'The Review Board Project' args = '[revisions]' option_list = [ Command.server_options, Command.diff_options, Command.branch_options, Command.repository_options, Command.perforce_options, Command.subversion_options, Command.tfs_options, ] def main(self, *args): """Print the diff to terminal.""" # The 'args' tuple must be made into a list for some of the # SCM Clients code. See comment in post. args = list(args) if self.options.revision_range: raise CommandError( 'The --revision-range argument has been removed. To create a ' 'diff for one or more specific revisions, pass those ' 'revisions as arguments. For more information, see the ' 'RBTools 0.6 Release Notes.') if self.options.svn_changelist: raise CommandError( 'The --svn-changelist argument has been removed. To use a ' 'Subversion changelist, pass the changelist name as an ' 'additional argument after the command.') repository_info, tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) self.setup_tool(tool, api_root=api_root) try: revisions = tool.parse_revision_spec(args) extra_args = None except InvalidRevisionSpecError: if not tool.supports_diff_extra_args: raise revisions = None extra_args = args if (self.options.exclude_patterns and not tool.supports_diff_exclude_patterns): raise CommandError( 'The %s backend does not support excluding files via the ' '-X/--exclude commandline options or the EXCLUDE_PATTERNS ' '.reviewboardrc option.' % tool.name) diff_info = tool.diff( revisions=revisions, include_files=self.options.include_files or [], exclude_patterns=self.options.exclude_patterns or [], extra_args=extra_args) diff = diff_info['diff'] if diff: print(diff) RBTools-0.7.11/rbtools/commands/__init__.py0000644000232200023220000010375413230242633021100 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import argparse import getpass import inspect import logging import platform import os import subprocess import sys from six.moves import input from six.moves.urllib.parse import urlparse from rbtools import get_version_string from rbtools.api.capabilities import Capabilities from rbtools.api.client import RBClient from rbtools.api.errors import APIError, ServerInterfaceError from rbtools.clients import scan_usable_client from rbtools.clients.errors import OptionsCheckError from rbtools.utils.filesystem import (cleanup_tempfiles, get_home_path, load_config) # NOTE: This needs to be a byte string, since it's going to go in argv, # which are all expected to be byte strings. 
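# (On Python 2, sys.argv entries are byte strings, so a unicode value
# here could provoke implicit str/unicode conversions elsewhere.)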
RB_MAIN = b'rbt' class CommandExit(Exception): def __init__(self, exit_code=0): super(CommandExit, self).__init__('Exit with code %s' % exit_code) self.exit_code = exit_code class CommandError(Exception): pass class ParseError(CommandError): pass class SmartHelpFormatter(argparse.HelpFormatter): """Smartly formats help text, preserving paragraphs.""" def _split_lines(self, text, width): # NOTE: This function depends on overriding _split_lines's behavior. # It is clearly documented that this function should not be # considered public API. However, given that the width we need # is calculated by HelpFormatter, and HelpFormatter has no # blessed public API, we have no other choice but to override # it here. lines = [] for line in text.splitlines(): lines += super(SmartHelpFormatter, self)._split_lines(line, width) lines.append('') return lines[:-1] class Option(object): """Represents an option for a command. The arguments to the constructor should be treated like those to argparse's add_argument, with the exception that the keyword argument 'config_key' is also valid. If config_key is provided it will be used to retrieve the config value as a default if the option is not specified. This will take precedence over the default argument. Serves as a wrapper around the ArgumentParser options, allowing us to specify defaults which will be grabbed from the configuration after it is loaded. """ def __init__(self, *opts, **attrs): self.opts = opts self.attrs = attrs def add_to(self, parent, config={}, argv=[]): """Adds the option to the parent parser or group. If the option maps to a configuration key, this will handle figuring out the correct default. Once we've determined the right set of flags, the option will be added to the parser. """ attrs = self.attrs.copy() if 'config_key' in attrs: config_key = attrs.pop('config_key') if config_key in config: attrs['default'] = config[config_key] if 'deprecated_in' in attrs: attrs['help'] += '\n[Deprecated since %s]' % attrs['deprecated_in'] # These are used for other purposes, and are not supported by # argparse. for attr in ('added_in', 'deprecated_in', 'extended_help', 'versions_changed'): attrs.pop(attr, None) parent.add_argument(*self.opts, **attrs) class OptionGroup(object): """Represents a named group of options. Each group has a name, an optional description, and a list of options. It serves as a way to organize related options, making it easier for users to scan for the options they want. This works like argparse's argument groups, but is designed to work with our special Option class. """ def __init__(self, name=None, description=None, option_list=[]): self.name = name self.description = description self.option_list = option_list def add_to(self, parser, config={}, argv=[]): """Adds the group and all its contained options to the parser.""" group = parser.add_argument_group(self.name, self.description) for option in self.option_list: option.add_to(group, config, argv) class LogLevelFilter(logging.Filter): """Filters log messages of a given level. Only log messages that have the specified level will be allowed by this filter. This prevents propagation of higher level types to lower log handlers. """ def __init__(self, level): self.level = level def filter(self, record): return record.levelno == self.level class Command(object): """Base class for rb commands. This class will handle retrieving the configuration, and parsing command line options. 
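    Subclasses override the attributes described below and implement
    ``main()``, which receives the command's positional arguments. A
    minimal (hypothetical) subclass looks like::

        class Hello(Command):
            name = 'hello'
            author = 'Example Author'

            def main(self):
                print('Hello!')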
``description`` is a string containing a short description of the command which is suitable for display in usage text. ``usage`` is a list of usage strings each showing a use case. These should not include the main rbt command or the command name; they will be added automatically. ``args`` is a string containing the usage text for what arguments the command takes. ``option_list`` is a list of command line options for the command. Each list entry should be an Option or OptionGroup instance. """ name = '' author = '' description = '' args = '' option_list = [] _global_options = [ Option('-d', '--debug', action='store_true', dest='debug', config_key='DEBUG', default=False, help='Displays debug output.', extended_help='This information can be valuable when debugging ' 'problems running the command.'), ] server_options = OptionGroup( name='Review Board Server Options', description='Options necessary to communicate and authenticate ' 'with a Review Board server.', option_list=[ Option('--server', dest='server', metavar='URL', config_key='REVIEWBOARD_URL', default=None, help='Specifies the Review Board server to use.'), Option('--username', dest='username', metavar='USERNAME', config_key='USERNAME', default=None, help='The user name to be supplied to the Review Board ' 'server.'), Option('--password', dest='password', metavar='PASSWORD', config_key='PASSWORD', default=None, help='The password to be supplied to the Review Board ' 'server.'), Option('--ext-auth-cookies', dest='ext_auth_cookies', metavar='EXT_AUTH_COOKIES', config_key='EXT_AUTH_COOKIES', default=None, help='Use an external cookie store with pre-fetched ' 'authentication data. This is useful with servers ' 'that require extra web authentication to access ' 'Review Board, e.g. on single sign-on enabled sites.', added_in='0.7.5'), Option('--api-token', dest='api_token', metavar='TOKEN', config_key='API_TOKEN', default=None, help='The API token to use for authentication, instead of ' 'using a username and password.', added_in='0.7'), Option('--disable-proxy', action='store_false', dest='enable_proxy', config_key='ENABLE_PROXY', default=True, help='Prevents requests from going through a proxy ' 'server.'), Option('--disable-ssl-verification', action='store_true', dest='disable_ssl_verification', config_key='DISABLE_SSL_VERIFICATION', default=False, help='Disable SSL certificate verification. This is useful ' 'with servers that have self-signed certificates.', added_in='0.7.3'), Option('--disable-cookie-storage', config_key='SAVE_COOKIES', dest='save_cookies', action='store_false', default=True, help='Use an in-memory cookie store instead of writing ' 'them to a file. No credentials will be saved or ' 'loaded.', added_in='0.7.3'), Option('--disable-cache', dest='disable_cache', config_key='DISABLE_CACHE', action='store_true', default=False, help='Disable the HTTP cache completely. 
This will ' 'result in slower requests.', added_in='0.7.3'), Option('--disable-cache-storage', dest='in_memory_cache', config_key='IN_MEMORY_CACHE', action='store_true', default=False, help='Disable storing the API cache on the filesystem, ' 'instead keeping it in memory temporarily.', added_in='0.7.3'), Option('--cache-location', dest='cache_location', metavar='FILE', config_key='CACHE_LOCATION', default=None, help='The file to use for the API cache database.', added_in='0.7.3'), ] ) repository_options = OptionGroup( name='Repository Options', option_list=[ Option('--repository', dest='repository_name', metavar='NAME', config_key='REPOSITORY', default=None, help='The name of the repository configured on ' 'Review Board that matches the local repository.'), Option('--repository-url', dest='repository_url', metavar='URL', config_key='REPOSITORY_URL', default=None, help='The URL for a repository.' '\n' 'When generating diffs, this can be used for ' 'creating a diff outside of a working copy ' '(currently only supported by Subversion with ' 'specific revisions or --diff-filename, and by ' 'ClearCase with relative paths outside the view).' '\n' 'For Git, this specifies the origin URL of the ' 'current repository, overriding the origin URL ' 'supplied by the client.', versions_changed={ '0.6': 'Prior versions used the `REPOSITORY` setting ' 'in .reviewboardrc, and allowed a ' 'repository name to be passed to ' '--repository-url. This is no ' 'longer supported in 0.6 and higher. You ' 'may need to update your configuration and ' 'scripts appropriately.', }), Option('--repository-type', dest='repository_type', metavar='TYPE', config_key='REPOSITORY_TYPE', default=None, help='The type of repository in the current directory. ' 'In most cases this should be detected ' 'automatically, but some directory structures ' 'containing multiple repositories require this ' 'option to select the proper type. The ' '`rbt list-repo-types` command can be used to ' 'list the supported values.'), ] ) diff_options = OptionGroup( name='Diff Generation Options', description='Options for choosing what gets included in a diff, ' 'and how the diff is generated.', option_list=[ Option('--revision-range', dest='revision_range', metavar='REV1:REV2', default=None, help='Generates a diff for the given revision range.', deprecated_in='0.6'), Option('-I', '--include', metavar='FILENAME', dest='include_files', action='append', help='Includes only the specified file in the diff. ' 'This can be used multiple times to specify ' 'multiple files.' '\n' 'Supported by: Bazaar, CVS, Git, Mercurial, ' 'Perforce, and Subversion.', added_in='0.6'), Option('-X', '--exclude', metavar='PATTERN', dest='exclude_patterns', action='append', config_key='EXCLUDE_PATTERNS', help='Excludes all files that match the given pattern ' 'from the diff. This can be used multiple times to ' 'specify multiple patterns. UNIX glob syntax is used ' 'for pattern matching.' '\n' 'Supported by: Bazaar, CVS, Git, Mercurial, ' 'Perforce, and Subversion.', extended_help=( 'Patterns that begin with a path separator (/ on Mac ' 'OS and Linux, \\ on Windows) will be treated as being ' 'relative to the root of the repository. All other ' 'patterns are treated as being relative to the current ' 'working directory.' '\n' 'For example, to exclude all ".txt" files from the ' 'resulting diff, you would use "-X /\'*.txt\'".' '\n' 'When working with Mercurial, the patterns are ' 'provided directly to "hg" and are not limited to ' 'globs. 
For more information on advanced pattern ' 'syntax in Mercurial, run "hg help patterns"' '\n' 'When working with CVS all diffs are generated ' 'relative to the current working directory so ' 'patterns beginning with a path separator are treated ' 'as relative to the current working directory.' '\n' 'When working with Perforce, an exclude pattern ' 'beginning with `//` will be matched against depot ' 'paths; all other patterns will be matched against ' 'local paths.'), added_in='0.7'), Option('--parent', dest='parent_branch', metavar='BRANCH', config_key='PARENT_BRANCH', default=None, help='The parent branch this diff should be generated ' 'against (Bazaar/Git/Mercurial only).'), Option('--diff-filename', dest='diff_filename', default=None, metavar='FILENAME', help='Uploads an existing diff file, instead of ' 'generating a new diff.'), ] ) branch_options = OptionGroup( name='Branch Options', description='Options for selecting branches.', option_list=[ Option('--tracking-branch', dest='tracking', metavar='BRANCH', config_key='TRACKING_BRANCH', default=None, help='The remote tracking branch from which your local ' 'branch is derived (Git/Mercurial only).' '\n' 'For Git, the default is to use the remote branch ' 'that the local branch is tracking, if any, falling ' 'back on `origin/master`.' '\n' 'For Mercurial, the default is one of: ' '`reviewboard`, `origin`, `parent`, or `default`.'), ] ) perforce_options = OptionGroup( name='Perforce Options', description='Perforce-specific options for selecting the ' 'Perforce client and communicating with the ' 'repository.', option_list=[ Option('--p4-client', dest='p4_client', config_key='P4_CLIENT', default=None, metavar='CLIENT_NAME', help='The Perforce client name for the repository.'), Option('--p4-port', dest='p4_port', config_key='P4_PORT', default=None, metavar='PORT', help='The IP address for the Perforce server.'), Option('--p4-passwd', dest='p4_passwd', config_key='P4_PASSWD', default=None, metavar='PASSWORD', help='The Perforce password or ticket of the user ' 'in the P4USER environment variable.'), ] ) subversion_options = OptionGroup( name='Subversion Options', description='Subversion-specific options for controlling diff ' 'generation.', option_list=[ Option('--basedir', dest='basedir', config_key='BASEDIR', default=None, metavar='PATH', help='The path within the repository where the diff ' 'was generated. This overrides the detected path. ' 'Often used when passing --diff-filename.'), Option('--svn-username', dest='svn_username', default=None, metavar='USERNAME', help='The username for the SVN repository.'), Option('--svn-password', dest='svn_password', default=None, metavar='PASSWORD', help='The password for the SVN repository.'), Option('--svn-prompt-password', dest='svn_prompt_password', config_key='SVN_PROMPT_PASSWORD', default=False, action='store_true', help="Prompt for the user's svn password. This option " "overrides the password provided by the " "--svn-password option.", added_in='0.7.3'), Option('--svn-show-copies-as-adds', dest='svn_show_copies_as_adds', metavar='y|n', default=None, help='Treat copied or moved files as new files.' 
'\n' 'This is only supported in Subversion 1.7+.', added_in='0.5.2'), Option('--svn-changelist', dest='svn_changelist', default=None, metavar='ID', help='Generates the diff for review based on a ' 'local changelist.', deprecated_in='0.6'), ] ) tfs_options = OptionGroup( name='TFS Options', description='Team Foundation Server specific options for ' 'communicating with the TFS server.', option_list=[ Option('--tfs-login', dest='tfs_login', default=None, metavar='TFS_LOGIN', help='Logs in to TFS as a specific user (ie.' 'user@domain,password). Visit https://msdn.microsoft.' 'com/en-us/library/hh190725.aspx to learn about ' 'saving credentials for reuse.'), Option('--tf-cmd', dest='tf_cmd', default=None, metavar='TF_CMD', config_key='TF_CMD', help='The full path of where to find the tf command. This ' 'overrides any detected path.'), Option('--tfs-shelveset-owner', dest='tfs_shelveset_owner', default=None, metavar='TFS_SHELVESET_OWNER', help='When posting a shelveset name created by another ' 'user (other than the one who owns the current ' 'workdir), look for that shelveset using this ' 'username.'), ] ) def __init__(self): self.log = logging.getLogger('rb.%s' % self.name) def create_parser(self, config, argv=[]): """Create and return the argument parser for this command.""" parser = argparse.ArgumentParser( prog=RB_MAIN, usage=self.usage(), add_help=False, formatter_class=SmartHelpFormatter) for option in self.option_list: option.add_to(parser, config, argv) for option in self._global_options: option.add_to(parser, config, argv) return parser def post_process_options(self): if self.options.disable_ssl_verification: try: import ssl ssl._create_unverified_context() except: raise CommandError('The --disable-ssl-verification flag is ' 'only available with Python 2.7.9+') def usage(self): """Return a usage string for the command.""" usage = '%%(prog)s %s [options] %s' % (self.name, self.args) if self.description: return '%s\n\n%s' % (usage, self.description) else: return usage def init_logging(self): """Initializes logging for the command. This will set up different log handlers based on the formatting we want for the given levels. The INFO log handler will just show the text, like a print statement. WARNING and higher will show the level name as a prefix, in the form of "LEVEL: message". If debugging is enabled, a debug log handler will be set up showing debug messages in the form of ">>> message", making it easier to distinguish between debugging and other messages. """ root = logging.getLogger() if self.options.debug: handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('>>> %(message)s')) handler.setLevel(logging.DEBUG) handler.addFilter(LogLevelFilter(logging.DEBUG)) root.addHandler(handler) root.setLevel(logging.DEBUG) else: root.setLevel(logging.INFO) # Handler for info messages. We'll treat these like prints. handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(message)s')) handler.setLevel(logging.INFO) handler.addFilter(LogLevelFilter(logging.INFO)) root.addHandler(handler) # Handler for warnings, errors, and criticals. They'll show the # level prefix and the message. 
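        # Unlike the DEBUG/INFO handlers above, no LogLevelFilter is attached
        # here, so a single handler covers WARNING, ERROR, and CRITICAL.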
handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) handler.setLevel(logging.WARNING) root.addHandler(handler) logging.debug('RBTools %s', get_version_string()) logging.debug('Python %s', sys.version) logging.debug('Running on %s', platform.platform()) logging.debug('Home = %s', get_home_path()) logging.debug('Current directory = %s', os.getcwd()) def create_arg_parser(self, argv): """Create and return the argument parser. Args: argv (list of unicode): A list of command line arguments Returns: argparse.ArgumentParser: Argument parser for commandline arguments """ self.config = load_config() parser = self.create_parser(self.config, argv) parser.add_argument('args', nargs=argparse.REMAINDER) return parser def run_from_argv(self, argv): """Execute the command using the provided arguments. The options and commandline arguments will be parsed from ``argv`` and the commands ``main`` method will be called. """ parser = self.create_arg_parser(argv) self.options = parser.parse_args(argv[2:]) args = self.options.args # Check that the proper number of arguments have been provided. argspec = inspect.getargspec(self.main) minargs = len(argspec[0]) - 1 maxargs = minargs # Arguments that have a default value are considered optional. if argspec[3] is not None: minargs -= len(argspec[3]) if argspec[1] is not None: maxargs = None if len(args) < minargs or (maxargs is not None and len(args) > maxargs): parser.error('Invalid number of arguments provided') sys.exit(1) self.init_logging() logging.debug('Command line: %s', subprocess.list2cmdline(argv).decode('utf8')) try: exit_code = self.main(*args) or 0 except CommandError as e: if isinstance(e, ParseError): parser.error(e) elif self.options.debug: raise logging.error(e) exit_code = 1 except CommandExit as e: exit_code = e.exit_code except Exception as e: # If debugging is on, we'll let python spit out the # stack trace and report the exception, otherwise # we'll suppress the trace and print the exception # manually. if self.options.debug: raise logging.critical(e) exit_code = 1 cleanup_tempfiles() sys.exit(exit_code) def initialize_scm_tool(self, client_name=None): """Initialize the SCM tool for the current working directory.""" repository_info, tool = scan_usable_client(self.config, self.options, client_name=client_name) try: tool.check_options() except OptionsCheckError as e: raise CommandError('%s\n' % e) return repository_info, tool def setup_tool(self, tool, api_root=None): """Performs extra initialization on the tool. If api_root is not provided we'll assume we want to initialize the tool using only local information """ tool.capabilities = self.get_capabilities(api_root) def get_server_url(self, repository_info, tool): """Return the Review Board server url. Args: repository_info (rbtools.clients.RepositoryInfo, optional): Information about the current repository tool (rbtools.clients.SCMClient, optional): The repository client. Returns: unicode: The server URL. """ if self.options.server: server_url = self.options.server elif tool: server_url = tool.scan_for_server(repository_info) else: server_url = None if not server_url: raise CommandError('Unable to find a Review Board server for this ' 'source code tree.') return server_url def credentials_prompt(self, realm, uri, username=None, password=None, *args, **kwargs): """Prompt the user for credentials using the command line. This will prompt the user, and then return the provided username and password. 
This is used as a callback in the API when the user requires authorization. """ if username is None or password is None: if getattr(self.options, 'diff_filename', None) == '-': raise CommandError('HTTP authentication is required, but ' 'cannot be used with --diff-filename=-') # Interactive prompts don't work correctly when input doesn't come # from a terminal. This could seem to be a rare case not worth # worrying about, but this is what happens when using native # Python in Cygwin terminal emulator under Windows and it's very # puzzling to the users, especially because stderr is also _not_ # flushed automatically in this case, so the program just appears # to hang. if not sys.stdin.isatty(): logging.error('Authentication is required but input is not a ' 'tty.') if sys.platform == 'win32': logging.info('Check that you are not running this script ' 'from a Cygwin terminal emulator (or use ' 'Cygwin Python to run it).') raise CommandError('Unable to log in to Review Board.') print() print('Please log in to the Review Board server at %s.' % urlparse(uri)[1]) # getpass will write its prompt to stderr but input # writes to stdout. See bug 2831. if username is None: sys.stderr.write('Username: ') username = input() if password is None: password = getpass.getpass(b'Password: ') return username, password def otp_token_prompt(self, uri, token_method, *args, **kwargs): """Prompt the user for a one-time password token. Their account is configured with two-factor authentication. The server will have sent a token to their configured mobile device or application. The user will be prompted for this token. """ if getattr(self.options, 'diff_filename', None) == '-': raise CommandError('A two-factor authentication token is ' 'required, but cannot be used with ' '--diff-filename=-') print() print('Please enter your two-factor authentication token for Review ' 'Board.') if token_method == 'sms': print('You should be getting a text message with ' 'an authentication token.') print('Enter the token below.') elif token_method == 'call': print('You should be getting an automated phone call with ' 'an authentication token.') print('Enter the token below.') elif token_method == 'generator': print('Enter the token shown on your token generator app below.') print() return getpass.getpass(b'Token: ') def _make_api_client(self, server_url): """Return an RBClient object for the server. The RBClient will be instantiated with the proper arguments for talking to the provided Review Board server url. """ return RBClient( server_url, username=self.options.username, password=self.options.password, api_token=self.options.api_token, auth_callback=self.credentials_prompt, otp_token_callback=self.otp_token_prompt, disable_proxy=not self.options.enable_proxy, verify_ssl=not self.options.disable_ssl_verification, allow_caching=not self.options.disable_cache, cache_location=self.options.cache_location, in_memory_cache=self.options.in_memory_cache, save_cookies=self.options.save_cookies, ext_auth_cookies=self.options.ext_auth_cookies) def get_api(self, server_url): """Returns an RBClient instance and the associated root resource. Commands should use this method to gain access to the API, instead of instantianting their own client. 
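        If ``server_url`` has no URL scheme, ``http://`` is assumed.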
""" if not urlparse(server_url).scheme: server_url = '%s%s' % ('http://', server_url) api_client = self._make_api_client(server_url) try: api_root = api_client.get_root() except ServerInterfaceError as e: raise CommandError('Could not reach the Review Board ' 'server at %s: %s' % (server_url, e)) except APIError as e: raise CommandError('Unexpected API Error: %s' % e) return api_client, api_root def get_capabilities(self, api_root): """Retrieve Capabilities from the server and return them.""" if 'capabilities' in api_root: # Review Board 2.0+ provides capabilities in the root resource. return Capabilities(api_root.capabilities) info = api_root.get_info() if 'capabilities' in info: return Capabilities(info.capabilities) else: return Capabilities({}) def main(self, *args): """The main logic of the command. This method should be overridden to implement the commands functionality. """ raise NotImplementedError() RBTools-0.7.11/rbtools/commands/install.py0000644000232200023220000001634713230242633021010 0ustar debalancedebalancefrom __future__ import division, print_function, unicode_literals import hashlib import logging import os import shutil import tempfile import zipfile import tqdm from six.moves.urllib.error import HTTPError, URLError from six.moves.urllib.request import urlopen from rbtools.commands import Command, CommandError from rbtools.utils.appdirs import user_data_dir from rbtools.utils.checks import check_install from rbtools.utils.process import execute class Install(Command): """Install a dependency. This allows RBTools to install external dependencies that may be needed for some features. """ name = 'install' author = 'The Review Board Project' description = 'Install an optional dependency.' args = '' option_list = [] package_urls = { 'tfs': 'http://downloads.beanbaginc.com/rb-tfs/rb-tfs.zip' } def main(self, package): """Run the command. Args: package (unicode): The name of the package to install. Raises: rbtools.commands.CommandError: An error occurred during installation. """ try: url = self.package_urls[package] except KeyError: err = 'Package "%s" not found. Available packages are:\n' % package err += '\n'.join( ' %s' % package_name for package_name in self.package_urls.keys() ) raise CommandError(err) label = 'Downloading %s' % package zip_filename = self.download_file(url, label=label) try: self.check_download(url, zip_filename) self.unzip( zip_filename, os.path.join(user_data_dir('rbtools'), 'packages', package)) finally: os.unlink(zip_filename) def check_download(self, url, zip_filename): """Check to see if the file was successfully downloaded. If the user has :command:`gpg` installed on their system, use that to check that the package was signed. Otherwise, check the sha256sum. Args: url (unicode): The URL that the file came from. zip_filename (unicode): The filename of the downloaded copy. Raises: rbtools.commands.CommandError: The authenticity of the file could not be verified. """ if check_install('gpg'): execute(['gpg', '--recv-keys', '4ED1F993']) sig_filename = self.download_file('%s.asc' % url) try: retcode, output, errors = execute( ['gpg', '--verify', sig_filename, zip_filename], with_errors=False, ignore_errors=True, return_error_code=True, return_errors=True) if retcode == 0: logging.debug('Verified file signature') else: raise CommandError( 'Unable to verify authenticity of file downloaded ' 'from %s:\n%s' % (url, errors)) finally: os.unlink(sig_filename) else: logging.info('"gpg" not installed. 
Skipping signature validation.') try: sha_url = '%s.sha256sum' % url logging.debug('Downloading %s', sha_url) response = urlopen(sha_url) real_sha = response.read().split(' ')[0] except (HTTPError, URLError) as e: raise CommandError('Error when downloading file: %s' % e) with open(zip_filename, 'rb') as f: our_sha = hashlib.sha256(f.read()).hexdigest() if real_sha == our_sha: logging.debug('Verified SHA256 hash') else: logging.debug('SHA256 hash does not match!') logging.debug(' Downloaded file hash was: %s', our_sha) logging.debug(' Expected hash was: %s', real_sha) raise CommandError( 'Unable to verify the checksum of the downloaded copy of ' '%s.\n' 'This could be due to an invasive proxy or an attempted ' 'man-in-the-middle attack.' % url) def unzip(self, zip_filename, package_dir): """Unzip a .zip file. This method will unpack the contents of a .zip file into a target directory. If that directory already exists, it will first be removed. Args: zip_filename (unicode): The absolute path to the .zip file to unpack. package_dir (unicode): The directory to unzip the files into. Raises: rbtools.commands.CommandError: The file could not be unzipped. """ logging.debug('Extracting %s to %s', zip_filename, package_dir) try: if os.path.exists(package_dir): if os.path.isdir(package_dir): shutil.rmtree(package_dir) else: os.remove(package_dir) os.makedirs(package_dir) except (IOError, OSError) as e: raise CommandError('Failed to set up package directory %s: %s' % (package_dir, e)) zip_file = zipfile.ZipFile(zip_filename, 'r') try: zip_file.extractall(package_dir) except Exception as e: raise CommandError('Failed to extract file: %s' % e) finally: zip_file.close() def download_file(self, url, label=None): """Download the given file. This is intended to be used as a context manager, and the bound value will be the filename of the downloaded file. Args: url (unicode): The URL of the file to download. label (unicode, optional): The label to use for the progress bar. If this is not specified, no progress bar will be shown. Yields: unicode: The filename of the downloaded file. Raises: rbtools.commands.CommandError: An error occurred while downloading the file. """ logging.debug('Downloading %s', url) try: response = urlopen(url) total_bytes = int( response.info().getheader('Content-Length').strip()) read_bytes = 0 bar_format = '{desc} {bar} {percentage:3.0f}% [{remaining}]' with tqdm.tqdm(total=total_bytes, desc=label or '', ncols=80, disable=label is None, bar_format=bar_format) as bar: try: f = tempfile.NamedTemporaryFile(delete=False) while read_bytes != total_bytes: chunk = response.read(8192) chunk_length = len(chunk) read_bytes += chunk_length f.write(chunk) bar.update(chunk_length) finally: f.close() return f.name except (HTTPError, URLError) as e: raise CommandError('Error when downloading file: %s' % e) RBTools-0.7.11/rbtools/commands/close.py0000644000232200023220000000500613230242633020435 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals from rbtools.commands import Command, CommandError, Option from rbtools.utils.commands import get_review_request SUBMITTED = 'submitted' DISCARDED = 'discarded' class Close(Command): """Close a specific review request as discarded or submitted. By default, the command will change the status to submitted. The user can provide an optional description for this action. 
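    For example (hypothetical request ID)::

        rbt close --close-type discarded --description "Obsolete change" 123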
""" name = 'close' author = 'The Review Board Project' args = '' option_list = [ Option('--close-type', dest='close_type', default=SUBMITTED, help='Either `submitted` or `discarded`.'), Option('--description', dest='description', default=None, help='An optional description accompanying the change.'), Command.server_options, Command.repository_options, ] def check_valid_type(self, close_type): """Check if the user specificed a proper type. Type must either be 'discarded' or 'submitted'. If the type is wrong, the command will stop and alert the user. """ if close_type not in (SUBMITTED, DISCARDED): raise CommandError("%s is not valid type. Try '%s' or '%s'" % ( self.options.close_type, SUBMITTED, DISCARDED)) def main(self, request_id): """Run the command.""" close_type = self.options.close_type self.check_valid_type(close_type) if self.options.server: # Bypass getting the scm_tool to discover the server since it was # specified with --server or in .reviewboardrc repository_info, tool = None, None else: repository_info, tool = self.initialize_scm_tool( client_name=self.options.repository_type) server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) request = get_review_request(request_id, api_root) if request.status == close_type: raise CommandError('Review request #%s is already %s.' % ( request_id, close_type)) if self.options.description: request = request.update(status=close_type, description=self.options.description) else: request = request.update(status=close_type) print('Review request #%s is set to %s.' % (request_id, request.status)) RBTools-0.7.11/rbtools/commands/clearcache.py0000644000232200023220000000143113230242633021400 0ustar debalancedebalancefrom rbtools.api.cache import clear_cache from rbtools.commands import Command, Option class ClearCache(Command): """Delete the HTTP cache used for the API.""" name = 'clear-cache' author = 'The Review Board Project' description = 'Delete the HTTP cache used for the API.' option_list = [ Option('--cache-location', dest='cache_location', metavar='FILE', config_key='CACHE_LOCATION', default=None, help='The file to use for the API cache database.', added_in='0.7.3'), ] def main(self): """Unlink the API cache's path.""" if self.options.cache_location: clear_cache(self.options.cache_location) else: clear_cache() RBTools-0.7.11/rbtools/commands/api_get.py0000644000232200023220000000423513230242633020743 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import json import re from rbtools.api.errors import APIError from rbtools.commands import (Command, CommandError, CommandExit, Option, ParseError) class APIGet(Command): name = 'api-get' author = 'The Review Board Project' description = 'Retrieve raw API resource payloads.' 
args = '<path> [--<query-arg>=<value> ...]' option_list = [ Option('--pretty', action='store_true', dest='pretty_print', config_key='API_GET_PRETTY_PRINT', default=False, help='Pretty prints the resulting API payload.'), Command.server_options, ] def _dumps(self, payload): if self.options.pretty_print: return json.dumps(payload, sort_keys=True, indent=4) else: return json.dumps(payload) def main(self, path, *args): query_args = {} query_arg_re = re.compile('^--(?P<name>.*)=(?P<value>.*)$') for arg in args: m = query_arg_re.match(arg) if m: query_args[m.group('name')] = m.group('value') else: raise ParseError('Unexpected query argument %s' % arg) if self.options.server: server_url = self.options.server else: repository_info, tool = self.initialize_scm_tool() server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) try: if path.startswith('http://') or path.startswith('https://'): resource = api_client.get_url(path, **query_args) else: resource = api_client.get_path(path, **query_args) except APIError as e: if e.rsp: print(self._dumps(e.rsp)) raise CommandExit(1) else: raise CommandError('Could not retrieve the requested ' 'resource: %s' % e) print(self._dumps(resource.rsp)) RBTools-0.7.11/rbtools/commands/patch.py0000644000232200023220000002062413230242633020432 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals from rbtools.api.errors import APIError from rbtools.commands import Command, CommandError, Option from rbtools.utils.commands import extract_commit_message from rbtools.utils.filesystem import make_tempfile class Patch(Command): """Applies a specific patch from an RB server. The patch file indicated by the request id is downloaded from the server and then applied locally.""" name = 'patch' author = 'The Review Board Project' args = '<review-request-id>' option_list = [ Option('-c', '--commit', dest='commit', action='store_true', default=False, help='Commits using information fetched ' 'from the review request (Git/Mercurial only).', added_in='0.5.3'), Option('-C', '--commit-no-edit', dest='commit_no_edit', action='store_true', default=False, help='Commits using information fetched ' 'from the review request (Git/Mercurial only). ' 'This differs from --commit by not invoking the editor ' 'to modify the commit message.'), Option('--diff-revision', dest='diff_revision', metavar='REVISION', default=None, help='The Review Board diff revision ID to use for the patch.'), Option('--px', dest='px', metavar='NUM', default=None, help="Strips the given number of paths from filenames in the " "diff. Equivalent to patch's `-p` argument."), Option('--print', dest='patch_stdout', action='store_true', default=False, help='Prints the patch to standard output instead of applying ' 'it to the tree.', added_in='0.5.3'), Option('-R', '--revert', dest='revert_patch', action='store_true', default=False, help='Revert the given patch instead of applying it.\n' 'This feature does not work with Bazaar or Mercurial ' 'repositories.', added_in='0.7.3'), Command.server_options, Command.repository_options, ] def get_patch(self, request_id, api_root, diff_revision=None): """Return the diff as a string, the used diff revision and its basedir. If a diff revision is not specified, then this will look at the most recent diff. """ try: diffs = api_root.get_diffs(review_request_id=request_id) except APIError as e: raise CommandError('Error getting diffs: %s' % e) # Use the latest diff if a diff revision was not given.
# Since diff revisions start at 1, increment by one, and # never skip a number, the latest diff revision's number # should be equal to the number of diffs. if diff_revision is None: diff_revision = diffs.total_results try: diff = diffs.get_item(diff_revision) diff_body = diff.get_patch().data base_dir = getattr(diff, 'basedir', None) or '' except APIError: raise CommandError('The specified diff revision does not exist.') return diff_body, diff_revision, base_dir def apply_patch(self, repository_info, tool, request_id, diff_revision, diff_file_path, base_dir, revert=False): """Apply the patch at diff_file_path and display the results to the user.""" if revert: print('Patch is being reverted from request %s with diff revision ' '%s.' % (request_id, diff_revision)) else: print('Patch is being applied from request %s with diff revision ' '%s.' % (request_id, diff_revision)) result = tool.apply_patch(diff_file_path, repository_info.base_path, base_dir, self.options.px, revert=revert) if result.patch_output: print() print(result.patch_output.strip()) print() if not result.applied: if revert: raise CommandError( 'Unable to revert the patch. The patch may be invalid, or ' 'there may be conflicts that could not be resolved.') else: raise CommandError( 'Unable to apply the patch. The patch may be invalid, or ' 'there may be conflicts that could not be resolved.') if result.has_conflicts: if result.conflicting_files: if revert: print('The patch was partially reverted, but there were ' 'conflicts in:') else: print('The patch was partially applied, but there were ' 'conflicts in:') print() for filename in result.conflicting_files: print(' %s' % filename) print() elif revert: print('The patch was partially reverted, but there were ' 'conflicts.') else: print('The patch was partially applied, but there were ' 'conflicts.') return False else: if revert: print('Successfully reverted patch.') else: print('Successfully applied patch.') return True def main(self, request_id): """Run the command.""" if self.options.patch_stdout and self.options.server: server_url = self.options.server else: repository_info, tool = self.initialize_scm_tool( client_name=self.options.repository_type) if self.options.revert_patch and not tool.supports_patch_revert: raise CommandError('The %s backend does not support reverting ' 'patches.' % tool.name) server_url = self.get_server_url(repository_info, tool) api_client, api_root = self.get_api(server_url) if not self.options.patch_stdout: self.setup_tool(tool, api_root=api_root) # Check that the repository info on the Review Board server matches the local one. repository_info = repository_info.find_server_repository_info( api_root) # Get the patch, the diff revision used, and the base dir for the diff. diff_body, diff_revision, base_dir = self.get_patch( request_id, api_root, self.options.diff_revision) if self.options.patch_stdout: print(diff_body) else: try: if tool.has_pending_changes(): message = 'Working directory is not clean.'
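# A dirty working tree is only fatal when we were also asked to commit # the result; otherwise we just warn and apply the patch anyway.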
if not self.options.commit: print('Warning: %s' % message) else: raise CommandError(message) except NotImplementedError: pass tmp_patch_file = make_tempfile(diff_body) success = self.apply_patch(repository_info, tool, request_id, diff_revision, tmp_patch_file, base_dir, revert=self.options.revert_patch) if success and (self.options.commit or self.options.commit_no_edit): try: review_request = api_root.get_review_request( review_request_id=request_id, force_text_type='plain') except APIError as e: raise CommandError('Error getting review request %s: %s' % (request_id, e)) message = extract_commit_message(review_request) author = review_request.get_submitter() try: tool.create_commit(message, author, not self.options.commit_no_edit) print('Changes committed to current branch.') except NotImplementedError: raise CommandError('--commit is not supported with %s' % tool.name) RBTools-0.7.11/rbtools/commands/logout.py0000644000232200023220000000173313230242633020644 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging from rbtools.commands import Command class Logout(Command): """Logs out of a Review Board server. The session cookie will be removed from the .rbtools-cookies file. The next RBTools command you run will then prompt for credentials. """ name = 'logout' author = 'The Review Board Project' option_list = [ Command.server_options, ] def main(self): """Run the command.""" server_url = self.get_server_url(None, None) api_client, api_root = self.get_api(server_url) session = api_root.get_session(expand='user') if session.authenticated: api_client.logout() logging.info('You are now logged out of Review Board at %s', api_client.domain) else: logging.info('You are already logged out of Review Board at %s', api_client.domain) RBTools-0.7.11/rbtools/hooks/0000755000232200023220000000000013230242636016302 5ustar debalancedebalanceRBTools-0.7.11/rbtools/hooks/git.py0000644000232200023220000001022713230242633017436 0ustar debalancedebalanceimport six from collections import defaultdict from copy import deepcopy from rbtools.hooks.common import execute, get_review_request_id def get_branch_name(ref_name): """Returns the branch name corresponding to the specified ref name.""" branch_ref_prefix = 'refs/heads/' if ref_name.startswith(branch_ref_prefix): return ref_name[len(branch_ref_prefix):] def get_commit_hashes(old_rev, new_rev): """Returns a list of abbreviated commit hashes from old_rev to new_rev.""" git_command = ['git', 'rev-list', '--abbrev-commit', '--reverse', '%s..%s' % (old_rev, new_rev)] return execute(git_command).split('\n') def get_unique_commit_hashes(ref_name, new_rev): """Returns a list of abbreviated commit hashes unique to ref_name.""" git_command = ['git', 'rev-list', new_rev, '--abbrev-commit', '--reverse', '--not'] git_command.extend(get_excluded_branches(ref_name)) return execute(git_command).strip().split('\n') def get_excluded_branches(ref_name): """Returns a list of all branches, excluding the specified branch.""" git_command = ['git', 'for-each-ref', 'refs/heads/', '--format=%(refname)'] all_branches = execute(git_command).strip().split('\n') return [branch.strip() for branch in all_branches if branch != ref_name] def get_branches_containing_commit(commit_hash): """Returns a list of all branches containing the specified commit.""" git_command = ['git', 'branch', '--contains', commit_hash] branches = execute(git_command).replace('*', '').split('\n') return [branch.strip() for branch in branches] def
get_commit_message(commit): """Returns the specified commit's commit message.""" git_command = ['git', 'show', '-s', '--pretty=format:%B', commit] return execute(git_command).strip() def get_review_id_to_commits_map(lines, regex): """Returns a dictionary, mapping a review request ID to a list of commits. The commits must be in the form: oldrev newrev refname (separated by newlines), as given by a Git pre-receive or post-receive hook. If a commit's commit message does not contain a review request ID, we append the commit to the key 0. """ review_id_to_commits_map = defaultdict(list) # Store a list of new branches (which have an all-zero old_rev value) # created in this push to handle them specially. new_branches = [] null_sha1 = '0' * 40 for line in lines: old_rev, new_rev, ref_name = line.split() branch_name = get_branch_name(ref_name) if not branch_name or new_rev == null_sha1: continue if old_rev == null_sha1: new_branches.append(branch_name) commit_hashes = get_unique_commit_hashes(ref_name, new_rev) else: commit_hashes = get_commit_hashes(old_rev, new_rev) for commit_hash in commit_hashes: if commit_hash: commit_message = get_commit_message(commit_hash) review_request_id = get_review_request_id(regex, commit_message) commit = '%s (%s)' % (branch_name, commit_hash) review_id_to_commits_map[review_request_id].append(commit) # If there are new branches, check every commit in the dictionary # (corresponding to only old branches) to see if the new branches also # contain that commit. if new_branches: review_id_to_commits_map_copy = deepcopy(review_id_to_commits_map) for review_id, commit_list in six.iteritems( review_id_to_commits_map_copy): for commit in commit_list: commit_branch = commit[:commit.find('(') - 1] if commit_branch in new_branches: continue commit_hash = commit[commit.find('(') + 1:-1] commit_branches = get_branches_containing_commit(commit_hash) for branch in set(new_branches).intersection(commit_branches): new_commit = '%s (%s)' % (branch, commit_hash) review_id_to_commits_map[review_id].append(new_commit) return review_id_to_commits_map RBTools-0.7.11/rbtools/hooks/common.py0000644000232200023220000001000613230242633020136 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import subprocess from rbtools.api.client import RBClient from rbtools.api.errors import APIError, ServerInterfaceError SUBMITTED = 'submitted' class HookError(Exception): pass def get_api(server_url, **kwargs): """Returns an RBClient instance and the associated root resource. Hooks should use this method to gain access to the API, instead of instantiating their own client. Args: server_url (unicode): The server URL to retrieve. **kwargs (dict): Additional keyword arguments to pass to the :py:class:`~rbtools.api.client.RBClient` constructor. See :py:meth:`SyncTransport.__init__() ` for arguments that are accepted. Returns: tuple: This returns a 2-tuple of the :py:class:`~rbtools.api.client.RBClient` and :py:class:` rbtools.api.resource.Resource`. 
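Example:
    A minimal hook sketch, assuming a hypothetical server URL and
    request ID::

        api_client, api_root = get_api('https://reviews.example.com/')
        review_request = get_review_request(42, api_root)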
""" api_client = RBClient(server_url, **kwargs) try: api_root = api_client.get_root() except ServerInterfaceError as e: raise HookError('Could not reach the Review Board server at %s: %s' % (server_url, e)) except APIError as e: raise HookError('Unexpected API Error: %s' % e) return api_client, api_root def execute(command): """Executes the specified command and returns the stdout output.""" process = subprocess.Popen(command, stdout=subprocess.PIPE) output = process.communicate()[0].strip() if process.returncode: logging.warning('Failed to execute command: %s', command) return None return output def initialize_logging(): """Sets up a log handler to format log messages. Warning, error, and critical messages will show the level name as a prefix, followed by the message. """ root = logging.getLogger() handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) handler.setLevel(logging.WARNING) root.addHandler(handler) def get_review_request_id(regex, commit_message): """Returns the review request ID referenced in the commit message. We assume there is at most one review request associated with each commit. If a matching review request cannot be found, we return 0. """ match = regex.search(commit_message) return (match and int(match.group('id'))) or 0 def get_review_request(review_request_id, api_root): """Returns the review request resource for the given ID.""" try: review_request = api_root.get_review_request( review_request_id=review_request_id) except APIError as e: raise HookError('Error getting review request: %s' % e) return review_request def close_review_request(server_url, username, password, review_request_id, description): """Closes the specified review request as submitted.""" api_client, api_root = get_api(server_url, username, password) review_request = get_review_request(review_request_id, api_root) if review_request.status == SUBMITTED: logging.warning('Review request #%s is already %s.', review_request_id, SUBMITTED) return if description: review_request = review_request.update(status=SUBMITTED, description=description) else: review_request = review_request.update(status=SUBMITTED) print('Review request #%s is set to %s.' % (review_request_id, review_request.status)) def get_review_request_approval(server_url, username, password, review_request_id): """Returns the approval information for the given review request.""" api_client, api_root = get_api(server_url, username, password) review_request = get_review_request(review_request_id, api_root) return review_request.approved, review_request.approval_failure RBTools-0.7.11/rbtools/hooks/__init__.py0000644000232200023220000000000013230242633020376 0ustar debalancedebalanceRBTools-0.7.11/rbtools/api/0000755000232200023220000000000013230242636015730 5ustar debalancedebalanceRBTools-0.7.11/rbtools/api/transport/0000755000232200023220000000000013230242636017764 5ustar debalancedebalanceRBTools-0.7.11/rbtools/api/transport/sync.py0000644000232200023220000001033213230242633021306 0ustar debalancedebalanceimport logging from rbtools.api.decode import decode_response from rbtools.api.factory import create_resource from rbtools.api.request import HttpRequest, ReviewBoardServer from rbtools.api.transport import Transport class SyncTransport(Transport): """A synchronous transport layer for the API client. The file provided in cookie_file is used to store and retrieve the authentication cookies for the API. The optional agent parameter can be used to specify a custom User-Agent string for the API. 
If not provided, the default RBTools User-Agent will be used. The optional session can be used to specify an 'rbsessionid' to use when authenticating with reviewboard. """ def __init__(self, url, cookie_file=None, username=None, password=None, api_token=None, agent=None, session=None, disable_proxy=False, auth_callback=None, otp_token_callback=None, verify_ssl=True, allow_caching=True, cache_location=None, in_memory_cache=False, save_cookies=True, ext_auth_cookies=None, *args, **kwargs): super(SyncTransport, self).__init__(url, *args, **kwargs) self.allow_caching = allow_caching self.cache_location = cache_location self.in_memory_cache = in_memory_cache self.server = ReviewBoardServer( self.url, cookie_file=cookie_file, username=username, password=password, api_token=api_token, session=session, disable_proxy=disable_proxy, auth_callback=auth_callback, otp_token_callback=otp_token_callback, verify_ssl=verify_ssl, save_cookies=save_cookies, ext_auth_cookies=ext_auth_cookies) def get_root(self): return self._execute_request(HttpRequest(self.server.url)) def get_path(self, path, *args, **kwargs): if not path.endswith('/'): path = path + '/' if path.startswith('/'): path = path[1:] return self._execute_request( HttpRequest(self.server.url + path, query_args=kwargs)) def get_url(self, url, *args, **kwargs): if not url.endswith('/'): url = url + '/' return self._execute_request(HttpRequest(url, query_args=kwargs)) def login(self, username, password): self.server.login(username, password) def logout(self): self.server.logout() def execute_request_method(self, method, *args, **kwargs): request = method(*args, **kwargs) if isinstance(request, HttpRequest): return self._execute_request(request) return request def _execute_request(self, request): """Execute an HTTPRequest and construct a resource from the payload""" logging.debug('Making HTTP %s request to %s' % (request.method, request.url)) rsp = self.server.make_request(request) info = rsp.info() mime_type = info['Content-Type'] item_content_type = info.get('Item-Content-Type', None) if request.method == 'DELETE': # DELETE calls don't return any data. Everything else should. return None else: payload = rsp.read() payload = decode_response(payload, mime_type) return create_resource(self, payload, request.url, mime_type=mime_type, item_mime_type=item_content_type) def enable_cache(self): """Enable caching for all future HTTP requests. The cache will be created at the default location if none is provided. If the in_memory parameter is True, the cache will be created in memory instead of on disk. This overrides the cache_location parameter. """ if self.allow_caching: self.server.enable_cache(cache_location=self.cache_location, in_memory=self.in_memory_cache) def __repr__(self): return '<%s(url=%r, cookie_file=%r, agent=%r)>' % ( self.__class__.__name__, self.url, self.server.cookie_file, self.server.agent) RBTools-0.7.11/rbtools/api/transport/__init__.py0000644000232200023220000000442113230242633022073 0ustar debalancedebalanceclass Transport(object): """Base class for API Transport layers. An API Transport layer acts as an intermediary between the API user and the Resource objects. All access to a resource's data, and all communication with the Review Board server are handled by the Transport. This allows for Transport implementations with unique interfaces which operate on the same underlying resource classes. Specifically, this allows for both a synchronous, and an asynchronous implementation of the transport. 
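As a sketch of the pattern, ``transport_cls`` on RBClient is the hook for
substituting an implementation; the subclass and URL below are
hypothetical::

    class VerboseSyncTransport(SyncTransport):
        def get_path(self, path, *args, **kwargs):
            logging.debug('Fetching %s', path)
            return super(VerboseSyncTransport, self).get_path(
                path, *args, **kwargs)

    client = RBClient('https://reviews.example.com/',
                      transport_cls=VerboseSyncTransport)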
""" def __init__(self, url, *args, **kwargs): self.url = url def get_root(self, *args, **kwargs): """Retrieve the root api resource.""" raise NotImplementedError def get_path(self, path, *args, **kwargs): """Retrieve the api resource at the provided path.""" raise NotImplementedError def get_url(self, url, *args, **kwargs): """Retrieve the resource at the provided URL. The URL is not guaranteed to be part of the configured Review Board domain. """ raise NotImplementedError def login(self, username, password, *args, **kwargs): """Reset login information to be populated on next request. The transport should override this method and provide a way to reset the username and password which will be populated in the next request. """ raise NotImplementedError def logout(self): """Logs out of a Review Board session on the server. The transport should override this method and provide a way to reset the username and password which will be populated in the next request. """ raise NotImplementedError def execute_request_method(self, method, *args, **kwargs): """Execute a method and carry out the returned HttpRequest.""" return method(*args, **kwargs) def enable_cache(self, cache_location=None, in_memory=False): """Enable caching for all future HTTP requests. The cache will be created at the default location if none is provided. If the in_memory parameter is True, the cache will be created in memory instead of on disk. This overrides the cache_location parameter. """ raise NotImplementedError RBTools-0.7.11/rbtools/api/__init__.py0000644000232200023220000000000013230242633020024 0ustar debalancedebalanceRBTools-0.7.11/rbtools/api/decorators.py0000644000232200023220000000241013230242633020441 0ustar debalancedebalancefrom __future__ import unicode_literals def request_method_decorator(f): """Wraps methods returned from a resource to capture HttpRequests. When a method which returns HttpRequests is called, it will pass the method and arguments off to the transport to be executed. This wrapping allows the transport to skim arguments off the top of the method call, and modify any return values (such as executing a returned HttpRequest). However, if called with the ``internal`` argument set to True, the method itself will be executed and the value returned as-is. Thus, any method calls embedded inside the code for another method should use the ``internal`` argument to access the expected value. """ def request_method(self, *args, **kwargs): if kwargs.pop('internal', False): return f(self, *args, **kwargs) else: def method_wrapper(*args, **kwargs): return f(self, *args, **kwargs) return self._transport.execute_request_method(method_wrapper, *args, **kwargs) request_method.__name__ = f.__name__ request_method.__doc__ = f.__doc__ request_method.__dict__.update(f.__dict__) return request_method RBTools-0.7.11/rbtools/api/client.py0000644000232200023220000000211413230242633017553 0ustar debalancedebalancefrom __future__ import unicode_literals from six.moves.urllib.parse import urlparse from rbtools.api.transport.sync import SyncTransport class RBClient(object): """Entry point for accessing RB resources through the web API. By default the synchronous transport will be used. To use a different transport, provide the transport class in the 'transport_cls' parameter. 
""" def __init__(self, url, transport_cls=SyncTransport, *args, **kwargs): self.url = url self.domain = urlparse(url)[1] self._transport = transport_cls(url, *args, **kwargs) def get_root(self, *args, **kwargs): return self._transport.get_root(*args, **kwargs) def get_path(self, path, *args, **kwargs): return self._transport.get_path(path, *args, **kwargs) def get_url(self, url, *args, **kwargs): return self._transport.get_url(url, *args, **kwargs) def login(self, *args, **kwargs): return self._transport.login(*args, **kwargs) def logout(self, *args, **kwargs): return self._transport.logout(*args, **kwargs) RBTools-0.7.11/rbtools/api/utils.py0000644000232200023220000000206413230242633017441 0ustar debalancedebalancefrom __future__ import unicode_literals def parse_mimetype(mime_type): """Parse the mime type in to it's component parts.""" types = mime_type.split(';')[0].split('/') ret_val = { 'type': mime_type, 'main_type': types[0], 'sub_type': types[1] } sub_type = types[1].split('+') ret_val['vendor'] = '' if len(sub_type) == 1: ret_val['format'] = sub_type[0] else: ret_val['format'] = sub_type[1] ret_val['vendor'] = sub_type[0] vendor = ret_val['vendor'].split('.') if len(vendor) > 1: ret_val['resource'] = vendor[-1].replace('-', '_') else: ret_val['resource'] = '' return ret_val def rem_mime_format(mime_type): """Strip the subtype from a mimetype, leaving vendor specific information. Removes the portion of the subtype after a +, or the entire subtype if no vendor specific type information is present. """ if mime_type.rfind('+') != 0: return mime_type.rsplit('+', 1)[0] else: return mime_type.rsplit('/', 1)[0] RBTools-0.7.11/rbtools/api/factory.py0000644000232200023220000000335513230242633017754 0ustar debalancedebalancefrom __future__ import unicode_literals from rbtools.api.resource import (CountResource, ItemResource, ListResource, RESOURCE_MAP) from rbtools.api.utils import rem_mime_format SPECIAL_KEYS = set(('links', 'total_results', 'stat', 'count')) def create_resource(transport, payload, url, mime_type=None, item_mime_type=None, guess_token=True): """Construct and return a resource object. The mime type will be used to find a resource specific base class. Alternatively, if no resource specific base class exists, one of the generic base classes, Resource or ResourceList, will be used. If an item mime type is provided, it will be used by list resources to construct item resources from the list. If 'guess_token' is True, we will try and guess what key the resources body lives under. If False, we assume that the resource body is the body of the payload itself. This is important for constructing Item resources from a resource list. """ # Determine the key for the resources data. token = None if guess_token: other_keys = set(payload.keys()).difference(SPECIAL_KEYS) if len(other_keys) == 1: token = other_keys.pop() # Select the base class for the resource. 
if 'count' in payload: resource_class = CountResource elif mime_type and rem_mime_format(mime_type) in RESOURCE_MAP: resource_class = RESOURCE_MAP[rem_mime_format(mime_type)] elif token and isinstance(payload[token], list): resource_class = ListResource else: resource_class = ItemResource return resource_class(transport, payload, url, token=token, item_mime_type=item_mime_type) RBTools-0.7.11/rbtools/api/decode.py0000644000232200023220000000176013230242633017526 0ustar debalancedebalancefrom __future__ import unicode_literals import json from rbtools.api.utils import parse_mimetype DECODER_MAP = {} def DefaultDecoder(payload): """Default decoder for API payloads. The default decoder is used when a decoder is not found in the DECODER_MAP. This will stick the body of the response into the 'data' field. """ return { 'resource': { 'data': payload, }, } DEFAULT_DECODER = DefaultDecoder def JsonDecoder(payload): return json.loads(payload) DECODER_MAP['application/json'] = JsonDecoder def decode_response(payload, mime_type): """Decode a Web API response. The body of a Web API response will be decoded into a dictionary, according to the provided mime_type. """ mime = parse_mimetype(mime_type) format = '%s/%s' % (mime['main_type'], mime['format']) if format in DECODER_MAP: decoder = DECODER_MAP[format] else: decoder = DEFAULT_DECODER return decoder(payload) RBTools-0.7.11/rbtools/api/request.py0000644000232200023220000005110513230242633017771 0ustar debalancedebalancefrom __future__ import unicode_literals import base64 import logging import mimetypes import os import random import shutil import sys from io import BytesIO from json import loads as json_loads import six from six.moves.http_client import UNAUTHORIZED, NOT_MODIFIED from six.moves.http_cookiejar import Cookie, CookieJar, MozillaCookieJar from six.moves.urllib.error import HTTPError, URLError from six.moves.urllib.parse import parse_qsl, urlencode, urlparse, urlunparse from six.moves.urllib.request import ( BaseHandler, HTTPBasicAuthHandler, HTTPCookieProcessor, HTTPDigestAuthHandler, HTTPErrorProcessor, HTTPPasswordMgr, ProxyHandler, Request as URLRequest, build_opener, install_opener, urlopen) from rbtools import get_package_version from rbtools.api.cache import APICache from rbtools.api.errors import APIError, create_api_error, ServerInterfaceError from rbtools.utils.filesystem import get_home_path # Python 2.7.9+ added strict HTTPS certificate validation (finally). These APIs # don't exist everywhere so soft-import them. try: import ssl from six.moves.urllib.request import HTTPSHandler except ImportError: ssl = None HTTPSHandler = None RBTOOLS_COOKIE_FILE = '.rbtools-cookies' RB_COOKIE_NAME = 'rbsessionid' class HttpRequest(object): """High-level HTTP-request object.""" def __init__(self, url, method='GET', query_args={}): self.method = method self.headers = {} self._fields = {} self._files = {} # Replace all underscores in each query argument # key with dashes. 
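# e.g. a query_args value of {'counts_only': 1} is sent as # '?counts-only=1' on the wire.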
query_args = dict([ (key.replace('_', '-'), value) for key, value in six.iteritems(query_args) ]) # Add the query arguments to the url url_parts = list(urlparse(url)) query = dict(parse_qsl(url_parts[4])) query.update(query_args) url_parts[4] = urlencode(query) self.url = urlunparse(url_parts) def add_field(self, name, value): self._fields[name] = value def add_file(self, name, filename, content): self._files[name] = { 'filename': filename, 'content': content, } def del_field(self, name): del self._fields[name] def del_file(self, filename): del self._files[filename] def encode_multipart_formdata(self): """Encodes data for use in an HTTP request. Parameters: fields - the fields to be encoded. This should be a dict in a key:value format files - the files to be encoded. This should be a dict in a key:dict, filename:value and content:value format """ if not (self._fields or self._files): return None, None NEWLINE = b'\r\n' BOUNDARY = self._make_mime_boundary() content = BytesIO() for key in self._fields: content.write(b'--' + BOUNDARY + NEWLINE) content.write(b'Content-Disposition: form-data; ' b'name="%s"' % key.encode('utf-8')) content.write(NEWLINE + NEWLINE) if isinstance(self._fields[key], six.binary_type): content.write(self._fields[key] + NEWLINE) else: content.write(six.text_type(self._fields[key]).encode('utf-8') + NEWLINE) for key in self._files: filename = self._files[key]['filename'] value = self._files[key]['content'] mime_type = mimetypes.guess_type(filename)[0] if mime_type: mime_type = mime_type.encode('utf-8') else: mime_type = b'application/octet-stream' content.write(b'--' + BOUNDARY + NEWLINE) content.write(b'Content-Disposition: form-data; name="%s"; ' % key.encode('utf-8')) content.write(b'filename="%s"' % filename.encode('utf-8') + NEWLINE) content.write(b'Content-Type: %s' % mime_type + NEWLINE) content.write(NEWLINE) if isinstance(value, six.text_type): content.write(value.encode('utf-8')) else: content.write(value) content.write(NEWLINE) content.write(b'--' + BOUNDARY + b'--' + NEWLINE + NEWLINE) content_type = b'multipart/form-data; boundary=%s' % BOUNDARY return content_type, content.getvalue() def _make_mime_boundary(self): """Create a mime boundary. This exists because mimetools.choose_boundary() is gone in Python 3.x, and email.generator._make_boundary isn't really appropriate to use here. """ fmt = '%%0%dd' % len(repr(sys.maxsize - 1)) token = random.randrange(sys.maxsize) return (b'=' * 15) + (fmt % token).encode('utf-8') + b'==' class Request(URLRequest): """A request which contains a method attribute.""" def __init__(self, url, body='', headers={}, method='PUT'): URLRequest.__init__(self, url, body, headers) self.method = method def get_method(self): return self.method class PresetHTTPAuthHandler(BaseHandler): """Handler that presets the use of HTTP Basic Auth.""" handler_order = 480 # After Basic auth AUTH_HEADER = 'Authorization' def __init__(self, url, password_mgr): self.url = url self.password_mgr = password_mgr self.used = False def reset(self, username, password): self.password_mgr.rb_user = username self.password_mgr.rb_pass = password self.used = False def http_request(self, request): if not self.used: if self.password_mgr.api_token: request.add_header(self.AUTH_HEADER, 'token %s' % self.password_mgr.api_token) self.used = True elif self.password_mgr.rb_user: # Note that we call password_mgr.find_user_password to get the # username and password we're working with. 
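# For the special 'Web API' realm this returns the cached # rb_user/rb_pass pair (see ReviewBoardHTTPPasswordMgr below), after # first prompting through auth_callback if one was provided.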
username, password = \ self.password_mgr.find_user_password('Web API', self.url) raw = '%s:%s' % (username, password) request.add_header(self.AUTH_HEADER, 'Basic %s' % base64.b64encode(raw).strip()) self.used = True return request https_request = http_request class ReviewBoardHTTPErrorProcessor(HTTPErrorProcessor): """Processes HTTP error codes. Python 2.6 gets HTTP error code processing right, but 2.4 and 2.5 only accept HTTP 200 and 206 as success codes. This handler ensures that anything in the 200 range, as well as 304, is a success. """ def http_response(self, request, response): if not (200 <= response.code < 300 or response.code == NOT_MODIFIED): response = self.parent.error('http', request, response, response.code, response.msg, response.info()) return response https_response = http_response class ReviewBoardHTTPBasicAuthHandler(HTTPBasicAuthHandler): """Custom Basic Auth handler that doesn't retry excessively. urllib's HTTPBasicAuthHandler retries over and over, which is useless. This subclass only retries once to make sure we've attempted with a valid username and password. It will then fail so we can use our own retry handler. This also supports two-factor auth, for Review Board servers that support it. When requested by the server, the client will be prompted for a one-time password token, which is generally sent through a mobile device. In this case, the client will prompt up to a set number of times until a valid token is entered. """ OTP_TOKEN_HEADER = 'X-ReviewBoard-OTP' MAX_OTP_TOKEN_ATTEMPTS = 5 def __init__(self, *args, **kwargs): HTTPBasicAuthHandler.__init__(self, *args, **kwargs) self._retried = False self._lasturl = "" self._needs_otp_token = False self._otp_token_attempts = 0 def retry_http_basic_auth(self, host, request, realm, *args, **kwargs): if self._lasturl != host: self._retried = False self._lasturl = host if self._retried: return None self._retried = True response = self._do_http_basic_auth(host, request, realm) if response and response.code != UNAUTHORIZED: self._retried = False return response def _do_http_basic_auth(self, host, request, realm): user, password = self.passwd.find_user_password(realm, host) if password is None: return None raw = '%s:%s' % (user, password) auth = 'Basic %s' % base64.b64encode(raw).strip() if (request.headers.get(self.auth_header, None) == auth and (not self._needs_otp_token or self._otp_token_attempts > self.MAX_OTP_TOKEN_ATTEMPTS)): # We've already tried with these credentials. No point # trying again. return None request.add_unredirected_header(self.auth_header, auth) try: response = self.parent.open(request, timeout=request.timeout) return response except HTTPError as e: if e.code == UNAUTHORIZED: headers = e.info() otp_header = headers.get(self.OTP_TOKEN_HEADER, '') if otp_header.startswith('required'): self._needs_otp_token = True # The server has requested a one-time password token, sent # through an external channel (cell phone or application). # Request this token from the user. required, token_method = otp_header.split(';') token = self.passwd.get_otp_token(request.get_full_url(), token_method.strip()) if not token: return None request.add_unredirected_header(self.OTP_TOKEN_HEADER, token) self._otp_token_attempts += 1 return self._do_http_basic_auth(host, request, realm) raise return None class ReviewBoardHTTPPasswordMgr(HTTPPasswordMgr): """Adds HTTP authentication support for URLs. Python 2.4's password manager has a bug in http authentication when the target server uses a non-standard port.
This works around that bug on Python 2.4 installs. See: http://bugs.python.org/issue974757 """ def __init__(self, reviewboard_url, rb_user=None, rb_pass=None, api_token=None, auth_callback=None, otp_token_callback=None): HTTPPasswordMgr.__init__(self) self.passwd = {} self.rb_url = reviewboard_url self.rb_user = rb_user self.rb_pass = rb_pass self.api_token = api_token self.auth_callback = auth_callback self.otp_token_callback = otp_token_callback def find_user_password(self, realm, uri): if realm == 'Web API': if self.auth_callback: username, password = self.auth_callback(realm, uri, username=self.rb_user, password=self.rb_pass) self.rb_user = username self.rb_pass = password return self.rb_user, self.rb_pass else: # If this is an auth request for some other domain (since HTTP # handlers are global), fall back to standard password management. return HTTPPasswordMgr.find_user_password(self, realm, uri) def get_otp_token(self, uri, method): if self.otp_token_callback: return self.otp_token_callback(uri, method) def create_cookie_jar(cookie_file=None): """Return a cookie jar backed by cookie_file. If cookie_file is not provided, we will default it. If the cookie_file does not exist, we will create it with the proper permissions. In the case where we default cookie_file, and it does not exist, we will attempt to copy the .post-review-cookies.txt file. """ home_path = get_home_path() if not cookie_file: cookie_file = os.path.join(home_path, RBTOOLS_COOKIE_FILE) post_review_cookies = os.path.join(home_path, '.post-review-cookies.txt') if (not os.path.isfile(cookie_file) and os.path.isfile(post_review_cookies)): try: shutil.copyfile(post_review_cookies, cookie_file) os.chmod(cookie_file, 0o600) except IOError as e: logging.warning("There was an error while copying " "post-review's cookies: %s", e) if not os.path.isfile(cookie_file): try: open(cookie_file, 'w').close() os.chmod(cookie_file, 0o600) except IOError as e: logging.warning('There was an error while creating a ' 'cookie file: %s', e) return MozillaCookieJar(cookie_file), cookie_file class ReviewBoardServer(object): """Represents a Review Board server we are communicating with. Provides methods for executing HTTP requests on a Review Board server's Web API. The ``auth_callback`` parameter can be used to specify a callable which will be called when authentication fails. This callable will be passed the realm and URL of the Review Board server, and should return a 2-tuple of (username, password). The user can be prompted for their credentials using this mechanism. """ def __init__(self, url, cookie_file=None, username=None, password=None, api_token=None, agent=None, session=None, disable_proxy=False, auth_callback=None, otp_token_callback=None, verify_ssl=True, save_cookies=True, ext_auth_cookies=None): if not url.endswith('/'): url += '/' self.url = url + 'api/' self.save_cookies = save_cookies self.ext_auth_cookies = ext_auth_cookies if self.save_cookies: self.cookie_jar, self.cookie_file = create_cookie_jar( cookie_file=cookie_file) try: self.cookie_jar.load(ignore_expires=True) except IOError: pass else: self.cookie_jar = CookieJar() self.cookie_file = None if self.ext_auth_cookies: try: self.cookie_jar.load(ext_auth_cookies, ignore_expires=True) except IOError as e: logging.critical('There was an error while loading a ' 'cookie file: %s', e) pass # Get the cookie domain from the url. If the domain # does not contain a '.' (e.g. 'localhost'), we assume # it is a local domain and suffix it (See RFC 2109).
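# e.g. 'http://localhost:8080/' yields the cookie domain # 'localhost.local', while 'https://reviews.example.com/' stays # 'reviews.example.com'.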
parsed_url = urlparse(url) self.domain = parsed_url[1].partition(':')[0] # Remove Port. if self.domain.count('.') < 1: self.domain = '%s.local' % self.domain if session: cookie = Cookie( version=0, name=RB_COOKIE_NAME, value=session, port=None, port_specified=False, domain=self.domain, domain_specified=True, domain_initial_dot=True, path=parsed_url[2], path_specified=True, secure=False, expires=None, discard=False, comment=None, comment_url=None, rest={'HttpOnly': None}) self.cookie_jar.set_cookie(cookie) if self.save_cookies: self.cookie_jar.save() if username: # If the username parameter is given, we have to clear the session # cookie manually or it will override the username:password # combination retrieved from the authentication callback. try: self.cookie_jar.clear(self.domain, parsed_url[2], RB_COOKIE_NAME) except KeyError: pass # Set up the HTTP libraries to support all of the features we need. password_mgr = ReviewBoardHTTPPasswordMgr(self.url, username, password, api_token, auth_callback, otp_token_callback) self.preset_auth_handler = PresetHTTPAuthHandler(self.url, password_mgr) handlers = [] if not verify_ssl: context = ssl._create_unverified_context() handlers.append(HTTPSHandler(context=context)) if disable_proxy: handlers.append(ProxyHandler({})) handlers += [ HTTPCookieProcessor(self.cookie_jar), ReviewBoardHTTPBasicAuthHandler(password_mgr), HTTPDigestAuthHandler(password_mgr), self.preset_auth_handler, ReviewBoardHTTPErrorProcessor(), ] if agent: self.agent = agent else: self.agent = ('RBTools/' + get_package_version()).encode('utf-8') opener = build_opener(*handlers) opener.addheaders = [ (b'User-agent', self.agent), ] install_opener(opener) self._cache = None self._urlopen = urlopen def enable_cache(self, cache_location=None, in_memory=False): """Enable caching for all future HTTP requests. The cache will be created at the default location if none is provided. If the in_memory parameter is True, the cache will be created in memory instead of on disk. This overrides the cache_location parameter. """ if not self._cache: self._cache = APICache(create_db_in_memory=in_memory, db_location=cache_location) self._urlopen = self._cache.make_request def login(self, username, password): """Reset the user information""" self.preset_auth_handler.reset(username, password) def logout(self): """Logs the user out of the session.""" self.preset_auth_handler.reset(None, None) self.make_request(HttpRequest('%ssession/' % self.url, method='DELETE')) self.cookie_jar.clear(self.domain) if self.save_cookies: self.cookie_jar.save() def process_error(self, http_status, data): """Processes an error, raising an APIError with the information.""" try: rsp = json_loads(data) assert rsp['stat'] == 'fail' logging.debug('Got API Error %d (HTTP code %d): %s' % (rsp['err']['code'], http_status, rsp['err']['msg'])) logging.debug('Error data: %r' % rsp) raise create_api_error(http_status, rsp['err']['code'], rsp, rsp['err']['msg']) except ValueError: logging.debug('Got HTTP error: %s: %s' % (http_status, data)) raise APIError(http_status, None, None, data) def make_request(self, request): """Perform an http request. The request argument should be an instance of 'rbtools.api.request.HttpRequest'. 
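For example, logout() above issues a bare DELETE against the session
resource this way (``server`` being a ReviewBoardServer instance)::

    server.make_request(HttpRequest('%ssession/' % server.url,
                                    method='DELETE'))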
""" try: content_type, body = request.encode_multipart_formdata() headers = request.headers if body: headers.update({ b'Content-Type': content_type, b'Content-Length': str(len(body)), }) else: headers[b'Content-Length'] = '0' r = Request(request.url.encode('utf-8'), body, headers, request.method.encode('utf-8')) rsp = self._urlopen(r) except HTTPError as e: self.process_error(e.code, e.read()) except URLError as e: raise ServerInterfaceError('%s' % e.reason) if self.save_cookies: try: self.cookie_jar.save() except IOError: pass return rsp RBTools-0.7.11/rbtools/api/tests.py0000644000232200023220000007712213230242633017452 0ustar debalancedebalancefrom __future__ import unicode_literals import datetime import locale import re import six from rbtools.api.cache import APICache, CacheEntry, CachedHTTPResponse from rbtools.api.capabilities import Capabilities from rbtools.api.factory import create_resource from rbtools.api.request import HttpRequest, Request from rbtools.api.resource import (CountResource, ItemResource, ListResource, ResourceDictField, ResourceLinkField, ReviewRequestResource, RootResource) from rbtools.api.transport import Transport from rbtools.testing import TestCase class CapabilitiesTests(TestCase): """Tests for rbtools.api.capabilities.Capabilities""" def test_has_capability(self): """Testing Capabilities.has_capability with supported capability""" caps = Capabilities({ 'foo': { 'bar': { 'value': True, } } }) self.assertTrue(caps.has_capability('foo', 'bar', 'value')) def test_has_capability_with_unknown_capability(self): """Testing Capabilities.has_capability with unknown capability""" caps = Capabilities({}) self.assertFalse(caps.has_capability('mycap')) def test_has_capability_with_partial_path(self): """Testing Capabilities.has_capability with partial capability path""" caps = Capabilities({ 'foo': { 'bar': { 'value': True, } } }) self.assertFalse(caps.has_capability('foo', 'bar')) class MockTransport(Transport): """Mock transport which returns HttpRequests without executing them""" def __init__(self): pass class TestWithPayloads(TestCase): transport = MockTransport() item_payload = { 'resource_token': { 'field1': 1, 'field2': 2, 'nested_field': { 'nested1': 1, 'nested2': 2, }, 'nested_list': [ { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, ], 'link_field': { 'href': 'http://localhost:8080/api/', 'method': 'GET', 'title': 'Link Field' }, }, 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, 'update': { 'href': 'http://localhost:8080/api/', 'method': 'PUT', }, 'delete': { 'href': 'http://localhost:8080/api/', 'method': 'DELETE', }, 'other_link': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, }, 'stat': 'ok', } list_payload = { 'resource_token': [ { 'field1': 1, 'field2': 2, 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, }, }, { 'field1': 1, 'field2': 2, 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, }, }, ], 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, 'create': { 'href': 'http://localhost:8080/api/', 'method': 'POST', }, 'other_link': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, }, 'total_results': 10, 'stat': 'ok', } count_payload = { 'count': 10, 'stat': 'ok' } root_payload = { 'uri_templates': { 'reviews': ('http://localhost:8080/api/review-requests/' '{review_request_id}/reviews/'), }, 'links': { 'self': { 'href': 
'http://localhost:8080/api/', 'method': 'GET', }, 'groups': { 'href': 'http://localhost:8080/api/groups', 'method': 'GET', }, }, 'stat': 'ok', } class ResourceFactoryTests(TestWithPayloads): def test_token_guessing(self): """Testing guessing the resource's token.""" r = create_resource(self.transport, self.item_payload, '') self.assertTrue('resource_token' not in r._fields) for field in self.item_payload['resource_token']: self.assertTrue(field in r) r = create_resource(self.transport, self.count_payload, '') self.assertTrue('count' in r) def test_no_token_guessing(self): """Testing constructing without guessing the resource token.""" r = create_resource(self.transport, self.item_payload, '', guess_token=False) self.assertTrue('resource_token' in r) self.assertTrue('field1' not in r) self.assertTrue('field1' in r.resource_token) r = create_resource(self.transport, self.list_payload, '', guess_token=False) self.assertTrue('resource_token' in r) def test_item_construction(self): """Testing constructing an item resource.""" r = create_resource(self.transport, self.item_payload, '') self.assertTrue(isinstance(r, ItemResource)) self.assertEqual(r.field1, self.item_payload['resource_token']['field1']) self.assertEqual(r.field2, self.item_payload['resource_token']['field2']) def test_list_construction(self): """Testing constructing a list resource.""" r = create_resource(self.transport, self.list_payload, '') self.assertTrue(isinstance(r, ListResource)) def test_count_construction(self): """Testing constructing a count resource.""" r = create_resource(self.transport, self.count_payload, '') self.assertTrue(isinstance(r, CountResource)) self.assertEqual(r.count, self.count_payload['count']) def test_resource_specific_base_class(self): """Testing constructing a resource with a specific base class.""" r = create_resource(self.transport, self.root_payload, '') self.assertFalse(isinstance(r, RootResource)) r = create_resource( self.transport, self.root_payload, '', mime_type='application/vnd.reviewboard.org.root+json') self.assertTrue(isinstance(r, RootResource)) class ResourceTests(TestWithPayloads): def test_item_resource_fields(self): """Testing item resource fields.""" r = create_resource(self.transport, self.item_payload, '') for field in self.item_payload['resource_token']: self.assertTrue(field in r) self.assertTrue(hasattr(r, field)) def test_item_resource_links(self): """Testing item resource link generation.""" r = create_resource(self.transport, self.item_payload, '') self.assertTrue(hasattr(r, 'get_self')) self.assertTrue(callable(r.get_self)) request = r.get_self() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'GET') self.assertEqual(request.url, self.item_payload['links']['self']['href']) self.assertTrue(hasattr(r, 'update')) self.assertTrue(callable(r.update)) request = r.update() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'PUT') self.assertEqual(request.url, self.item_payload['links']['update']['href']) self.assertTrue(hasattr(r, 'delete')) self.assertTrue(callable(r.delete)) request = r.delete() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'DELETE') self.assertEqual(request.url, self.item_payload['links']['delete']['href']) self.assertTrue(hasattr(r, 'get_other_link')) self.assertTrue(callable(r.get_other_link)) request = r.get_other_link() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'GET') self.assertEqual(request.url, 
self.item_payload['links']['other_link']['href']) self.assertFalse(hasattr(r, 'create')) def test_list_resource_list(self): """Testing list resource lists.""" r = create_resource(self.transport, self.list_payload, '') self.assertEqual(r.num_items, len(self.list_payload['resource_token'])) self.assertEqual(r.total_results, self.list_payload['total_results']) for index in range(r.num_items): for field in r[index].iterfields(): self.assertEqual( r[index][field], self.list_payload['resource_token'][index][field]) def test_list_resource_links(self): """Testing link resource link generation.""" r = create_resource(self.transport, self.list_payload, '') self.assertTrue(hasattr(r, 'get_self')) self.assertTrue(callable(r.get_self)) request = r.get_self() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'GET') self.assertEqual(request.url, self.list_payload['links']['self']['href']) self.assertTrue(hasattr(r, 'create')) self.assertTrue(callable(r.create)) request = r.create() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'POST') self.assertEqual(request.url, self.list_payload['links']['create']['href']) self.assertTrue(hasattr(r, 'get_other_link')) self.assertTrue(callable(r.get_other_link)) request = r.get_other_link() self.assertTrue(isinstance(request, HttpRequest)) self.assertEqual(request.method, 'GET') self.assertEqual(request.url, self.list_payload['links']['other_link']['href']) self.assertFalse(hasattr(r, 'update')) self.assertFalse(hasattr(r, 'delete')) def test_root_resource_templates(self): """Testing generation of methods for the root resource uri templates.""" r = create_resource( self.transport, self.root_payload, '', mime_type='application/vnd.reviewboard.org.root+json') for template_name in self.root_payload['uri_templates']: method_name = "get_%s" % template_name self.assertTrue(hasattr(r, method_name)) self.assertTrue(callable(getattr(r, method_name))) def test_resource_dict_field(self): """Testing access of a dictionary field.""" r = create_resource(self.transport, self.item_payload, '') field = r.nested_field self.assertTrue(isinstance(field, ResourceDictField)) self.assertEqual( field.nested1, self.item_payload['resource_token']['nested_field']['nested1']) def test_resource_dict_field_iteration(self): """Testing iterating sub-fields of a dictionary field.""" r = create_resource(self.transport, self.item_payload, '') field = r.nested_field iterated_fields = set(f for f in field.iterfields()) nested_fields = set( f for f in self.item_payload['resource_token']['nested_field']) self.assertEqual(set(), nested_fields.symmetric_difference(iterated_fields)) def test_link_field(self): """Testing access of a link field.""" r = create_resource(self.transport, self.item_payload, '') field = r.link_field self.assertTrue(isinstance(field, ResourceLinkField)) request = field.get() self.assertEqual(request.method, 'GET') self.assertEqual( request.url, self.item_payload['resource_token']['link_field']['href']) class HttpRequestTests(TestCase): def setUp(self): self.request = HttpRequest('/') def test_default_values(self): """Testing the default values.""" self.assertEqual(self.request.url, '/') self.assertEqual(self.request.method, 'GET') content_type, content = self.request.encode_multipart_formdata() self.assertTrue(content_type is None) self.assertTrue(content is None) def _get_fields_as_dict(self, ctype, content): """Extract the fields of a HTTP multipart request as a dictionary.""" m = re.match(b'^multipart/form-data; 
boundary=(.*)$', ctype) self.assertFalse(m is None) fields = [l.strip() for l in content.split(b'--' + m.group(1))][1:-1] d = {} disposition_re = re.compile( b'Content-Disposition: form-data; name="(.*?)"$') for f in fields: lst = f.split(b'\r\n\r\n') self.assertEqual(len(lst), 2) k, v = lst m = disposition_re.match(k) self.assertFalse(m is None) d[m.group(1)] = v return d def test_post_form_data(self): """Testing the multipart form data generation.""" request = HttpRequest('/', 'POST') request.add_field('foo', 'bar') request.add_field('bar', 42) request.add_field('err', 'must-be-deleted') request.add_field('name', 'somestring') request.del_field('err') ctype, content = request.encode_multipart_formdata() d = self._get_fields_as_dict(ctype, content) self.assertEqual( d, {b'foo': b'bar', b'bar': b'42', b'name': b'somestring'}) def test_post_unicode_data(self): """Testing the encoding of multipart form data with unicode and binary field data """ konnichiwa = '\u3053\u3093\u306b\u3061\u306f' request = HttpRequest('/', 'POST') request.add_field('foo', konnichiwa) request.add_field('bar', konnichiwa.encode('utf-8')) request.add_field('baz', b'\xff') ctype, content = request.encode_multipart_formdata() fields = self._get_fields_as_dict(ctype, content) self.assertTrue('foo' in fields) self.assertEqual(fields['foo'], konnichiwa.encode('utf-8')) self.assertEqual(fields['bar'], konnichiwa.encode('utf-8')) self.assertEqual(fields['baz'], b'\xff') class ReviewRequestResourceTests(TestCase): def setUp(self): self.transport = MockTransport() def test_absolute_url_with_absolute_url_field(self): """Testing ReviewRequestResource.absolute_url with 'absolute_url' field """ payload = { 'review_request': { 'id': 123, 'absolute_url': 'https://example.com/r/123/', }, 'stat': 'ok', } r = create_resource( transport=self.transport, payload=payload, url='https://api.example.com/', mime_type='application/vnd.reviewboard.org.review-request') self.assertTrue(isinstance(r, ReviewRequestResource)) self.assertEqual(r.absolute_url, 'https://example.com/r/123/') def test_absolute_url_with_url_field(self): """Testing ReviewRequestResource.absolute_url with 'url' field""" payload = { 'review_request': { 'id': 123, 'url': '/r/123/', }, 'stat': 'ok', } r = create_resource( transport=self.transport, payload=payload, url='https://example.com/', mime_type='application/vnd.reviewboard.org.review-request') self.assertTrue(isinstance(r, ReviewRequestResource)) self.assertEqual(r.absolute_url, 'https://example.com/r/123/') def test_absolute_url_with_fallback(self): """Testing ReviewRequestResource.absolute_url with generated fallback URL """ payload = { 'review_request': { 'id': 123, }, 'stat': 'ok', } r = create_resource( transport=self.transport, payload=payload, url='https://example.com/', mime_type='application/vnd.reviewboard.org.review-request') self.assertTrue(isinstance(r, ReviewRequestResource)) self.assertEqual(r.absolute_url, 'https://example.com/r/123/') class MockResponse(object): """A mock up for a response from urllib2.""" def __init__(self, code, headers, body): """Create a new MockResponse.""" self.code = code self.headers = headers self.body = body if self.body: self.headers['Content-Length'] = len(body) if 'Content-Type' not in self.headers: self.headers['Content-Type'] = 'text/plain' def info(self): """Get the response headers.""" return self.headers def read(self): """Get the response body.""" return self.body def getcode(self): """Get the response code.""" return self.code class MockUrlOpener(object): """A mock 
url opener that records the number of hits it gets to URL.""" CONTENT = 'foobar' def __init__(self, endpoints): """Create a new MockUrlOpener given the endpoints: headers mapping.""" self.endpoints = {} for url, headers in six.iteritems(endpoints): self.endpoints[url] = { 'hit_count': 0, 'headers': headers } def __call__(self, request): """Call the URL opener to return a MockResponse for the URL.""" url = request.get_full_url() self.endpoints[url]['hit_count'] += 1 headers = self.endpoints[url]['headers'].copy() headers['Date'] = datetime.datetime.now() if 'If-none-match' in request.headers and 'ETag' in headers: # If the request includes an If-None-Match header, we should check # if the ETag in our headers matches. if headers.get('ETag') == request.headers['If-none-match']: resp = MockResponse(304, headers, None) else: resp = MockResponse(200, headers, self.CONTENT) elif 'If-modified-since' in request.headers: if 'max-age=0' in headers.get('Cache-Control', ''): # We are only testing the case for when max-age is 0 and when # max-age is very large because it is impractical to require # the tests to sleep(). resp = MockResponse(200, headers, self.CONTENT) else: request_datetime = datetime.datetime.strptime( request.headers['If-modified-since'], CacheEntry.DATE_FORMAT) header_datetime = datetime.datetime.strptime( headers['Last-Modified'], CacheEntry.DATE_FORMAT) if request_datetime < header_datetime: # The content has been modified resp = MockResponse(200, headers, self.CONTENT) else: resp = MockResponse(304, headers, None) else: resp = MockResponse(200, headers, self.CONTENT) return resp def get_hit_count(self, url): return self.endpoints[url]['hit_count'] class APICacheTests(TestCase): """Test cases for the APICache class.""" content = 'foobar' request_headers = { 'http://high_max_age': { 'Cache-Control': 'max-age=10000' }, 'http://zero_max_age': { 'Cache-Control': 'max-age=0', }, 'http://no_cache_etag': { 'Cache-Control': 'no-cache', 'ETag': 'etag', }, 'http://no_cache': { 'Cache-Control': 'no-cache', }, 'http://no_cache_date': { 'Cache-Control': 'no-cache', 'Last-Modified': '1999-12-31T00:00:00', }, 'http://no_store': { 'Cache-Control': 'no-store', }, 'http://must_revalidate': { 'Cache-Control': 'must-revalidate', 'ETag': 'etag' }, 'http://vary': { 'Cache-control': 'max-age=1000', 'Vary': 'User-agent' }, 'http://pragma': { 'Pragma': 'no-cache' }, 'http://expired': { 'Expires': 'Thu, 01 Dec 1983 20:00:00 GMT', }, 'http://expires_override': { 'Expires': 'Thu, 01 Dec 1983 20:00:00 GMT', 'Cache-Control': 'max-age=10000', }, } def setUp(self): """Create a MockUrlOpener and an instance of the APICache using it.""" self.urlopener = MockUrlOpener(self.request_headers) self.cache = APICache(create_db_in_memory=True, urlopen=self.urlopener) def test_cache_control_header_max_age_high(self): """Testing the cache with a high max-age value""" request = Request('http://high_max_age', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual( self.urlopener.get_hit_count('http://high_max_age'), 1) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_max_age_zero(self): """Testing the cache with a zero max-age value""" request = Request('http://zero_max_age', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://zero_max_age'), 2) 
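# A zero max-age means the stored cache entry is stale the moment it is
# written, so both responses below must come from the network rather than
# from the cache.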
self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_nocache(self): """Testing the cache with the no-cache control""" request = Request('http://no_cache', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://no_cache'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_nocache_with_etag(self): """Testing the cache with the no-cache control and a specified ETag""" request = Request('http://no_cache_etag', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://no_cache_etag'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_nocache_with_etag_updated(self): """Testing the cache with the no-cache control and an updated ETag""" request = Request('http://no_cache_etag', method='GET') first_resp = self.cache.make_request(request) # Pretend the end point has been updated since the last request. self.urlopener.endpoints['http://no_cache_etag']['headers']['ETag'] = ( 'new-etag') second_resp = self.cache.make_request(request) third_resp = self.cache.make_request(request) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) self.assertTrue(isinstance(third_resp, CachedHTTPResponse)) self.assertEqual(self.urlopener.get_hit_count('http://no_cache_etag'), 3) def test_cache_control_header_nocache_with_last_modified(self): """Testing the cache with the no-cache control and a specified Last-Modified header """ request = Request('http://no_cache_date', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://no_cache_date'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_nocache_with_last_modified_updated(self): """Testing the cache with the no-cache control and an updated Last-Modified header """ endpoint = 'http://no_cache_lastmodified_updated' future_date = datetime.datetime.utcnow() + datetime.timedelta(days=1) self.urlopener.endpoints[endpoint] = { 'hit_count': 0, 'headers': { 'Cache-Control': 'no-cache', 'Last-Modified': '1999-12-31T00:00:00' }, } request = Request(endpoint, method='GET') first_resp = self.cache.make_request(request) self.urlopener.endpoints[endpoint]['headers']['Last-Modified'] = ( future_date.strftime(CacheEntry.DATE_FORMAT)) second_resp = self.cache.make_request(request) third_resp = self.cache.make_request(request) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) self.assertTrue(isinstance(third_resp, CachedHTTPResponse)) self.assertEqual(self.urlopener.get_hit_count(endpoint), 3) def test_cache_control_header_no_store(self): """Testing the cache with the no-store control""" request = Request('http://no_store', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://no_store'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse))
self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_cache_control_header_must_revalidate(self): """Testing the cache with the must-revalidate control""" request = Request('http://must_revalidate', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual( self.urlopener.get_hit_count('http://must_revalidate'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_vary_header(self): """Testing the cache with the Vary header""" request = Request('http://vary', headers={'User-agent': 'foo'}, method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://vary'), 1) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_vary_header_different_requests(self): """Testing the cache with the Vary header and different requests""" first_request = Request('http://vary', headers={'User-agent': 'foo'}, method='GET') second_request = Request('http://vary', headers={'User-agent': 'bar'}, method='GET') first_resp = self.cache.make_request(first_request) second_resp = self.cache.make_request(second_request) self.assertEqual(self.urlopener.get_hit_count('http://vary'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_pragma_header(self): """Testing the cache with the Pragma: no-cache header""" request = Request('http://pragma', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://pragma'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_expires_header_expired(self): """Testing the cache with the Expires header in the past""" request = Request('http://expired', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://expired'), 2) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertFalse(isinstance(second_resp, CachedHTTPResponse)) def test_expires_header_future(self): """Testing the cache with the Expires header in the future""" # We generate the future date in the C locale so that it is properly # formatted. 
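# (APICache.EXPIRES_FORMAT, '%a, %d %b %Y %H:%M:%S %Z', uses English
# day and month names, e.g. "Thu, 01 Dec 1983 20:00:00 GMT"; under a
# non-English LC_TIME, strftime() would emit localized names that the
# cache's C-locale parsing of the 'Expires' header could not match.)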
locale.setlocale(locale.LC_TIME, str('C')) future_date = datetime.datetime.utcnow() + datetime.timedelta(days=1) future_date = future_date.strftime(APICache.EXPIRES_FORMAT) + 'UTC' locale.resetlocale(locale.LC_TIME) self.urlopener.endpoints['http://expires_future'] = { 'hit_count': 0, 'headers': { 'Expires': future_date, }, } request = Request('http://expires_future', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual(self.urlopener.get_hit_count('http://expires_future'), 1) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_expires_header_overriden_by_max_age(self): """Testing the cache with an Expires header that is overridden""" request = Request('http://expires_override', method='GET') first_resp = self.cache.make_request(request) second_resp = self.cache.make_request(request) self.assertEqual( self.urlopener.get_hit_count('http://expires_override'), 1) self.assertFalse(isinstance(first_resp, CachedHTTPResponse)) self.assertTrue(isinstance(second_resp, CachedHTTPResponse)) def test_saving_non_ascii_data(self): """Testing writing to the cache with non-ASCII data""" # "Hello world" in Japanese as unicode characters. hello_world = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c' entry = CacheEntry( url='http://unicode-example', vary_headers={}, max_age=0, etag='etag', local_date=datetime.datetime.now(), last_modified='Sat, 21 Mar 2015 05:33:22 GMT', mime_type='text/plain', item_mime_type=None, response_body=hello_world.encode('utf-8')) try: self.cache._save_entry(entry) except: self.fail('Could not write binary data to the API cache.') try: self.cache._save_entry(entry) except: self.fail('Could not update binary data in the API cache.') RBTools-0.7.11/rbtools/api/resource.py0000644000232200023220000006112013230242633020126 0ustar debalancedebalancefrom __future__ import unicode_literals import re import six from pkg_resources import parse_version from six.moves import range from six.moves.urllib.parse import urljoin from rbtools.api.cache import MINIMUM_VERSION from rbtools.api.decorators import request_method_decorator from rbtools.api.request import HttpRequest RESOURCE_MAP = {} LINKS_TOK = 'links' LINK_KEYS = set(['href', 'method', 'title']) _EXCLUDE_ATTRS = [LINKS_TOK, 'stat'] @request_method_decorator def _create(resource, data=None, query_args={}, *args, **kwargs): """Generate a POST request on a resource. Unlike other methods, any additional query args must be passed in using the 'query_args' parameter, since kwargs is used for the fields which will be sent. """ request = HttpRequest(resource._links['create']['href'], method=b'POST', query_args=query_args) if data is None: data = {} kwargs.update(data) for name, value in six.iteritems(kwargs): request.add_field(name, value) return request @request_method_decorator def _delete(resource, *args, **kwargs): """Generate a DELETE request on a resource.""" return HttpRequest(resource._links['delete']['href'], method='DELETE', query_args=kwargs) @request_method_decorator def _get_self(resource, *args, **kwargs): """Generate a request for a resource's 'self' link.""" return HttpRequest(resource._links['self']['href'], query_args=kwargs) @request_method_decorator def _update(resource, data=None, query_args={}, *args, **kwargs): """Generate a PUT request on a resource. 
Unlike other methods, any additional query args must be passed in using the 'query_args' parameter, since kwargs is used for the fields which will be sent. """ request = HttpRequest(resource._links['update']['href'], method='PUT', query_args=query_args) if data is None: data = {} kwargs.update(data) for name, value in six.iteritems(kwargs): request.add_field(name, value) return request # This dictionary is a mapping of special keys in a resources links, # to a name and method used for generating a request for that link. # This is used to special case the REST operation links. Any link # included in this dictionary will be generated separately, and links # with a None for the method will be ignored. SPECIAL_LINKS = { 'create': ['create', _create], 'delete': ['delete', _delete], 'next': ['get_next', None], 'prev': ['get_prev', None], 'self': ['get_self', _get_self], 'update': ['update', _update], } class Resource(object): """Defines common functionality for Item and List Resources. Resources are able to make requests to the Web API by returning an HttpRequest object. When an HttpRequest is returned from a method call, the transport layer will execute this request and return the result to the user. Methods for constructing requests to perform each of the supported REST operations will be generated automatically. These methods will have names corresponding to the operation (e.g. 'update()'). An additional method for re-requesting the resource using the 'self' link will be generated with the name 'get_self'. Each additional link will have a method generated which constructs a request for retrieving the linked resource. """ _excluded_attrs = [] def __init__(self, transport, payload, url, token=None, **kwargs): self._url = url self._transport = transport self._token = token self._payload = payload self._excluded_attrs = self._excluded_attrs + _EXCLUDE_ATTRS # Determine where the links live in the payload. This # can either be at the root, or inside the resources # token. if LINKS_TOK in self._payload: self._links = self._payload[LINKS_TOK] elif (token and isinstance(self._payload[token], dict) and LINKS_TOK in self._payload[token]): self._links = self._payload[token][LINKS_TOK] else: self._payload[LINKS_TOK] = {} self._links = {} # Add a method for each supported REST operation, and # for retrieving 'self'. for link, method in six.iteritems(SPECIAL_LINKS): if link in self._links and method[1]: setattr(self, method[0], lambda resource=self, meth=method[1], **kwargs: ( meth(resource, **kwargs))) # Generate request methods for any additional links # the resource has. for link, body in six.iteritems(self._links): if link not in SPECIAL_LINKS: setattr(self, 'get_%s' % link, lambda resource=self, url=body['href'], **kwargs: ( self._get_url(url, **kwargs))) def _wrap_field(self, field): if isinstance(field, dict): dict_keys = set(field.keys()) if ('href' in dict_keys and len(dict_keys.difference(LINK_KEYS)) == 0): return ResourceLinkField(self, field) else: return ResourceDictField(self, field) elif isinstance(field, list): return ResourceListField(self, field) else: return field @property def links(self): """Get the resource's links. This is a special property which allows direct access to the links dictionary for a resource. 
Unlike other properties which come from the resource fields, this one is only accessible as a property, and not using array syntax.""" return ResourceDictField(self, self._links) @request_method_decorator def _get_url(self, url, **kwargs): return HttpRequest(url, query_args=kwargs) @property def rsp(self): """Return the response payload used to create the resource.""" return self._payload class ResourceDictField(object): """Wrapper for dictionaries returned from a resource. Any dictionary returned from a resource will be wrapped using this class. Attribute access will correspond to accessing the dictionary key with the name of the attribute. """ def __init__(self, resource, fields): self._resource = resource self._fields = fields def __getattr__(self, name): if name in self._fields: return self._resource._wrap_field(self._fields[name]) else: raise AttributeError def __getitem__(self, key): try: return self.__getattr__(key) except AttributeError: raise KeyError def __contains__(self, key): return key in self._fields def iterfields(self): for field in self._fields: yield field def iteritems(self): for key, value in six.iteritems(self._fields): yield key, self._resource._wrap_field(value) def __repr__(self): return '%s(resource=%r, fields=%r)' % ( self.__class__.__name__, self._resource, self._fields) class ResourceLinkField(ResourceDictField): """Wrapper for link dictionaries returned from a resource. In order to support operations on links found outside of a resource's links dictionary, detected links are wrapped with this class. A links fields (href, method, and title) are accessed as attributes, and link operations are supported through method calls. Currently the only supported method is "GET", which can be invoked using the 'get' method. """ def __init__(self, resource, fields): super(ResourceLinkField, self).__init__(resource, fields) self._transport = resource._transport @request_method_decorator def get(self): return HttpRequest(self._fields['href']) class ResourceListField(list): """Wrapper for lists returned from a resource. Acts as a normal list, but wraps any returned items. """ def __init__(self, resource, list_field): super(ResourceListField, self).__init__(list_field) self._resource = resource def __getitem__(self, key): item = super(ResourceListField, self).__getitem__(key) return self._resource._wrap_field(item) def __iter__(self): for item in super(ResourceListField, self).__iter__(): yield self._resource._wrap_field(item) def __repr__(self): return '%s(resource=%r, list_field=%s)' % ( self.__class__.__name__, self._resource, super(ResourceListField, self).__repr__()) class ItemResource(Resource): """The base class for Item Resources. Any resource specific base classes for Item Resources should inherit from this class. If a resource specific base class does not exist for an Item Resource payload, this class will be used to create the resource. The body of the resource is copied into the fields dictionary. The Transport is responsible for providing access to this data, preferably as attributes for the wrapping class. """ _excluded_attrs = [] def __init__(self, transport, payload, url, token=None, **kwargs): super(ItemResource, self).__init__(transport, payload, url, token=token, **kwargs) self._fields = {} # Determine the body of the resource's data. 
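# For a payload shaped like {'stat': 'ok', 'review_request': {...}},
# the token would be 'review_request' and the body is that nested
# dict; with no token, the payload itself is used. (The payload shape
# shown here is an illustrative example.)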
if token is not None: data = self._payload[token] else: data = self._payload for name, value in six.iteritems(data): if name not in self._excluded_attrs: self._fields[name] = value def __getattr__(self, name): if name in self._fields: return self._wrap_field(self._fields[name]) else: raise AttributeError def __getitem__(self, key): try: return self.__getattr__(key) except AttributeError: raise KeyError def __contains__(self, key): return key in self._fields def iterfields(self): for key in self._fields: yield key def iteritems(self): for key, value in six.iteritems(self._fields): yield (key, self._wrap_field(value)) def __repr__(self): return '%s(transport=%r, payload=%r, url=%r, token=%r)' % ( self.__class__.__name__, self._transport, self._payload, self._url, self._token) class CountResource(ItemResource): """Resource returned by a query with 'counts-only' true. When a resource is requested using 'counts-only', the payload will not contain the regular fields for the resource. In order to special case all payloads of this form, this class is used for resource construction. """ def __init__(self, transport, payload, url, **kwargs): super(CountResource, self).__init__(transport, payload, url, token=None) @request_method_decorator def get_self(self, **kwargs): """Generate an GET request for the resource list. This will return an HttpRequest to retrieve the list resource which this resource is a count for. Any query arguments used in the request for the count will still be present, only the 'counts-only' argument will be removed """ # TODO: Fix this. It is generating a new request # for a URL with 'counts-only' set to False, but # RB treats the argument being set to any value # as true. kwargs.update({'counts_only': False}) return HttpRequest(self._url, query_args=kwargs) class ListResource(Resource): """The base class for List Resources. Any resource specific base classes for List Resources should inherit from this class. If a resource specific base class does not exist for a List Resource payload, this class will be used to create the resource. Instances of this class will act as a sequence, providing access to the payload for each Item resource in the list. Iteration is over the page of item resources returned by a single request, and not the entire list of resources. To iterate over all item resources 'get_next()' or 'get_prev()' should be used to grab additional pages of items. """ def __init__(self, transport, payload, url, token=None, item_mime_type=None, **kwargs): super(ListResource, self).__init__(transport, payload, url, token=token, **kwargs) self._item_mime_type = item_mime_type if token: self._item_list = payload[self._token] else: self._item_list = payload self.num_items = len(self._item_list) self.total_results = payload['total_results'] def __len__(self): return self.num_items def __nonzero__(self): return self.__bool__() def __bool__(self): return True def __getitem__(self, key): payload = self._item_list[key] # TODO: Should try and guess the url based on the parent url, # and the id number if the self link doesn't exist. try: url = payload['links']['self']['href'] except KeyError: url = '' # We need to import this here because of the mutual imports. 
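# (rbtools.api.factory imports RESOURCE_MAP from this module, so a
# module-level import of create_resource here would be circular.)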
from rbtools.api.factory import create_resource return create_resource(self._transport, payload, url, mime_type=self._item_mime_type, guess_token=False) def __iter__(self): for i in range(self.num_items): yield self[i] @request_method_decorator def get_next(self, **kwargs): if 'next' not in self._links: raise StopIteration() return HttpRequest(self._links['next']['href'], query_args=kwargs) @request_method_decorator def get_prev(self, **kwargs): if 'prev' not in self._links: raise StopIteration() return HttpRequest(self._links['prev']['href'], query_args=kwargs) @request_method_decorator def get_item(self, pk, **kwargs): """Retrieve the item resource with the corresponding primary key.""" return HttpRequest(urljoin(self._url, '%s/' % pk), query_args=kwargs) @property def all_pages(self): """Yield all pages of item resources. Each page of resources is itself an instance of the same ``ListResource`` class. """ page = self while True: yield page page = page.get_next() @property def all_items(self): """Yield all item resources in all pages of this resource.""" for page in self.all_pages: for item in page: yield item def __repr__(self): return ('%s(transport=%r, payload=%r, url=%r, token=%r, ' 'item_mime_type=%r)' % (self.__class__.__name__, self._transport, self._payload, self._url, self._token, self._item_mime_type)) class RootResource(ItemResource): """The Root resource specific base class. Provides additional methods for fetching any resource directly using the uri templates. A method of the form "get_<name>" is called to retrieve the HttpRequest corresponding to the resource. Template replacement values should be passed in as a dictionary to the values parameter. """ _excluded_attrs = ['uri_templates'] _TEMPLATE_PARAM_RE = re.compile('\{(?P<key>[A-Za-z_0-9]*)\}') def __init__(self, transport, payload, url, **kwargs): super(RootResource, self).__init__(transport, payload, url, token=None) # Generate methods for accessing resources directly using # the uri-templates. for name, url in six.iteritems(payload['uri_templates']): attr_name = 'get_%s' % name if not hasattr(self, attr_name): setattr(self, attr_name, lambda resource=self, url=url, **kwargs: ( self._get_template_request(url, **kwargs))) server_version = payload.get('product', {}).get('package_version') if (server_version is not None and parse_version(server_version) >= parse_version(MINIMUM_VERSION)): transport.enable_cache() @request_method_decorator def _get_template_request(self, url_template, values={}, **kwargs): """Generate an HttpRequest from a uri-template. This will replace each '{variable}' in the template with the value from kwargs['variable'], or if it does not exist, the value from values['variable']. The resulting url is used to create an HttpRequest. """ def get_template_value(m): try: return str(kwargs.pop(m.group('key'), None) or values[m.group('key')]) except KeyError: raise ValueError('Template was not provided a value for "%s"' % m.group('key')) url = self._TEMPLATE_PARAM_RE.sub(get_template_value, url_template) return HttpRequest(url, query_args=kwargs) RESOURCE_MAP['application/vnd.reviewboard.org.root'] = RootResource class DiffListResource(ListResource): """The Diff List resource specific base class. Provides additional functionality to assist in the uploading of new diffs. """ @request_method_decorator def upload_diff(self, diff, parent_diff=None, base_dir=None, base_commit_id=None, **kwargs): """Uploads a new diff. The diff and parent_diff arguments should be strings containing the diff output.
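A hypothetical usage sketch (identifiers are illustrative, not part
of this API's documented surface):

    diffs = review_request.get_diffs()
    diffs.upload_diff(diff_content, base_dir='/trunk')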
""" # TODO: This method should be unified with validate_diff() method of # ValidateDiffResource, since they both perform the same operation. request = HttpRequest(self._url, method=b'POST', query_args=kwargs) request.add_file('path', 'diff', diff) if parent_diff: request.add_file('parent_diff_path', 'parent_diff', parent_diff) if base_dir: request.add_field('basedir', base_dir) if base_commit_id: request.add_field('base_commit_id', base_commit_id) return request RESOURCE_MAP['application/vnd.reviewboard.org.diffs'] = DiffListResource class DiffResource(ItemResource): """The Diff resource specific base class. Provides the 'get_patch' method for retrieving the content of the actual diff file itself. """ @request_method_decorator def get_patch(self, **kwargs): """Retrieves the actual diff file contents.""" request = HttpRequest(self._url, query_args=kwargs) request.headers['Accept'] = 'text/x-patch' return request RESOURCE_MAP['application/vnd.reviewboard.org.diff'] = DiffResource class FileDiffResource(ItemResource): """The File Diff resource specific base class.""" @request_method_decorator def get_patch(self, **kwargs): """Retrieves the actual diff file contents.""" request = HttpRequest(self._url, query_args=kwargs) request.headers['Accept'] = 'text/x-patch' return request @request_method_decorator def get_diff_data(self, **kwargs): """Retrieves the actual raw diff data for the file.""" request = HttpRequest(self._url, query_args=kwargs) request.headers['Accept'] = \ 'application/vnd.reviewboard.org.diff.data+json' return request RESOURCE_MAP['application/vnd.reviewboard.org.file'] = FileDiffResource class FileAttachmentListResource(ListResource): """The File Attachment List resource specific base class.""" @request_method_decorator def upload_attachment(self, filename, content, caption=None, **kwargs): """Uploads a new attachment. The content argument should contain the body of the file to be uploaded, in string format. """ request = HttpRequest(self._url, method=b'POST', query_args=kwargs) request.add_file('path', filename, content) if caption: request.add_field('caption', caption) return request RESOURCE_MAP['application/vnd.reviewboard.org.file-attachments'] = \ FileAttachmentListResource RESOURCE_MAP['application/vnd.reviewboard.org.user-file-attachments'] = \ FileAttachmentListResource class DraftFileAttachmentListResource(FileAttachmentListResource): """The Draft File Attachment List resource specific base class.""" pass RESOURCE_MAP['application/vnd.reviewboard.org.draft-file-attachments'] = \ DraftFileAttachmentListResource class ScreenshotListResource(ListResource): """The Screenshot List resource specific base class.""" @request_method_decorator def upload_screenshot(self, filename, content, caption=None, **kwargs): """Uploads a new screenshot. The content argument should contain the body of the screenshot to be uploaded, in string format. 
""" request = HttpRequest(self._url, method=b'POST', query_args=kwargs) request.add_file('path', filename, content) if caption: request.add_field('caption', caption) return request RESOURCE_MAP['application/vnd.reviewboard.org.screenshots'] = \ ScreenshotListResource class DraftScreenshotListResource(ScreenshotListResource): """The Draft Screenshot List resource specific base class.""" pass RESOURCE_MAP['application/vnd.reviewboard.org.draft-screenshots'] = \ DraftScreenshotListResource class ReviewRequestResource(ItemResource): """The Review Request resource specific base class.""" @property def absolute_url(self): """Returns the absolute URL for the Review Request. The value of absolute_url is returned if it's defined. Otherwise the absolute URL is generated and returned. """ if 'absolute_url' in self._fields: return self._fields['absolute_url'] else: base_url = self._url.split('/api/')[0] return urljoin(base_url, self.url) @property def url(self): """Returns the relative URL to the Review Request. The value of 'url' is returned if it's defined. Otherwise, a relative URL is generated and returned. This provides compatibility with versions of Review Board older than 1.7.8, which do not have a 'url' field. """ return self._fields.get('url', '/r/%s/' % self.id) @request_method_decorator def submit(self, description=None, changenum=None): """Submit a review request""" data = { 'status': 'submitted', } if description: data['description'] = description if changenum: data['changenum'] = changenum return self.update(data=data, internal=True) @request_method_decorator def get_or_create_draft(self, **kwargs): request = self.get_draft(internal=True) request.method = b'POST' for name, value in six.iteritems(kwargs): request.add_field(name, value) return request RESOURCE_MAP['application/vnd.reviewboard.org.review-request'] = \ ReviewRequestResource class ValidateDiffResource(ItemResource): """The Validate Diff resource specific base class. Provides additional functionality to assist in the validation of diffs. """ @request_method_decorator def validate_diff(self, repository, diff, parent_diff=None, base_dir=None, base_commit_id=None, **kwargs): """Validates a diff. The diff and parent_diff arguments should be strings containing the diff output. """ # TODO: This method should be unified with upload_diff() method of # DiffListResource, since they both perform the same operation. 
request = HttpRequest(self._url, method=b'POST', query_args=kwargs) request.add_field('repository', repository) request.add_file('path', 'diff', diff) if parent_diff: request.add_file('parent_diff_path', 'parent_diff', parent_diff) if base_dir: request.add_field('basedir', base_dir) if base_commit_id: request.add_field('base_commit_id', base_commit_id) return request RESOURCE_MAP['application/vnd.reviewboard.org.diff-validation'] = \ ValidateDiffResource RBTools-0.7.11/rbtools/api/errors.py0000644000232200023220000000302113230242633017607 0ustar debalancedebalancefrom __future__ import unicode_literals import six class APIError(Exception): def __init__(self, http_status, error_code, rsp=None, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.http_status = http_status self.error_code = error_code self.rsp = rsp def __str__(self): code_str = 'HTTP %d' % self.http_status if self.error_code: code_str += ', API Error %d' % self.error_code if self.rsp and 'err' in self.rsp: return '%s (%s)' % (self.rsp['err']['msg'], code_str) else: return code_str class AuthorizationError(APIError): pass class BadRequestError(APIError): def __str__(self): lines = [super(BadRequestError, self).__str__()] if self.rsp and 'fields' in self.rsp: lines.append('') for field, error in six.iteritems(self.rsp['fields']): lines.append(' %s: %s' % (field, '; '.join(error))) return '\n'.join(lines) class CacheError(Exception): """An exception for caching errors.""" class ServerInterfaceError(Exception): def __init__(self, msg, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.msg = msg def __str__(self): return self.msg API_ERROR_TYPE = { 400: BadRequestError, 401: AuthorizationError, } def create_api_error(http_status, *args, **kwargs): error_type = API_ERROR_TYPE.get(http_status, APIError) return error_type(http_status, *args, **kwargs) RBTools-0.7.11/rbtools/api/capabilities.py0000644000232200023220000000142613230242633020733 0ustar debalancedebalancefrom __future__ import unicode_literals class Capabilities(object): """Stores and retrieves Review Board server capabilities.""" def __init__(self, capabilities): self.capabilities = capabilities def has_capability(self, *args): caps = self.capabilities try: for arg in args: caps = caps[arg] # If only part of a capability path is specified, we don't want # to evaluate to True just because it has contents. We want to # only say we have a capability if it is indeed 'True'. return caps is True except (TypeError, KeyError): # The server either doesn't support the capability, # or returned no capabilities at all. return False RBTools-0.7.11/rbtools/api/cache.py0000644000232200023220000005467113230242633017357 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import contextlib import datetime import json import locale import logging import os import sqlite3 import threading import six from six.moves.urllib.request import urlopen from rbtools.api.errors import CacheError from rbtools.utils.appdirs import user_cache_dir MINIMUM_VERSION = '2.0.14' # Minimum server version to enable the API cache. _locale_lock = threading.Lock() # Lock for getting / setting locale. 
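# A quick sketch of the date round-trip used by this module's cache
# entries (the format string is CacheEntry.DATE_FORMAT, defined below):
#
#     >>> datetime.datetime(2015, 3, 21, 5, 33, 22).strftime(
#     ...     '%Y-%m-%dT%H:%M:%S')
#     '2015-03-21T05:33:22'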
class CacheEntry(object): """An entry in the API Cache.""" DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' # ISO Date format def __init__(self, url, vary_headers, max_age, etag, local_date, last_modified, mime_type, item_mime_type, response_body): """Create a new cache entry.""" self.url = url self.vary_headers = vary_headers self.max_age = max_age self.etag = etag self.local_date = local_date self.last_modified = last_modified self.mime_type = mime_type self.item_mime_type = item_mime_type self.response_body = response_body def matches_request(self, request): """Determine if the cache entry matches the given request. This is done by comparing the value of the headers field to the headers in the request """ if self.vary_headers: for header, value in six.iteritems(self.vary_headers): if request.headers.get(header) != value: return False return True def up_to_date(self): """Determine if the cache entry is up to date.""" if self.max_age is not None: max_age = datetime.timedelta(seconds=self.max_age) return self.local_date + max_age > datetime.datetime.now() return True class HTTPResponse(object): """An uncached HTTP response that can be read() more than once. This is intended to be API-compatible with a urllib2 response object. This allows a response to be read more than once. """ def __init__(self, response): """Extract the data from a urllib2 HTTP response.""" self.headers = response.info() self.content = response.read() self.code = response.getcode() def info(self): """Get the headers associated with the response.""" return self.headers def read(self): """Get the content associated with the response.""" return self.content def getcode(self): """Get the associated HTTP response code.""" return self.code class CachedHTTPResponse(object): """A response returned from the APICache. This is intended to be API-compatible with a urllib2 response object. """ def __init__(self, cache_entry): """Create a new CachedResponse from the given CacheEntry.""" self.headers = { 'Content-Type': cache_entry.mime_type, 'Item-Content-Type': cache_entry.item_mime_type, } self.content = cache_entry.response_body def info(self): """Get the headers associated with the response.""" return self.headers def read(self): """Get the content associated with the response.""" return self.content def getcode(self): """Get the associated HTTP response code, which is always 200. This method returns 200 because it is pretending that it made a successful HTTP request. """ return 200 class APICache(object): """An API cache backed by a SQLite database.""" # The format for the Expires: header. Requires an English locale. EXPIRES_FORMAT = '%a, %d %b %Y %H:%M:%S %Z' DEFAULT_CACHE_DIR = user_cache_dir('rbtools') DEFAULT_CACHE_PATH = os.path.join(DEFAULT_CACHE_DIR, 'apicache.db') # The API Cache's schema version. If the schema is updated, update this # value. SCHEMA_VERSION = 2 def __init__(self, create_db_in_memory=False, db_location=None, urlopen=urlopen): """Create a new instance of the APICache If the db_path is provided, it will be used as the path to the SQLite database; otherwise, the default cache (in the CACHE_DIR) will be used. The urlopen parameter determines the method that is used to open URLs. 
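A hypothetical construction sketch (mirroring the tests above, which
additionally pass a mock URL opener):

    cache = APICache(create_db_in_memory=True)
    response = cache.make_request(request)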
""" self.urlopen = urlopen if create_db_in_memory: logging.debug('Creating API cache in memory.') self.db = sqlite3.connect(':memory:') self.cache_path = None self._create_schema() else: self.cache_path = db_location or self.DEFAULT_CACHE_PATH try: cache_exists = os.path.exists(self.cache_path) create_schema = True if not cache_exists: cache_dir = os.path.dirname(self.cache_path) if not os.path.exists(cache_dir): logging.debug('Cache directory "%s" does not exist; ' 'creating.', cache_dir) os.makedirs(cache_dir) logging.debug('API cache "%s" does not exist; creating.', self.cache_path) self.db = sqlite3.connect(self.cache_path) if cache_exists: try: with contextlib.closing(self.db.cursor()) as c: c.execute('SELECT version FROM cache_info') row = c.fetchone() if row and row[0] == self.SCHEMA_VERSION: create_schema = False except sqlite3.Error as e: self._die( 'Could not get the HTTP cache schema version', e) if create_schema: self._create_schema() except (OSError, sqlite3.Error): # OSError will be thrown if we cannot create the directory or # file for the API cache. sqlite3.Error will be thrown if # connect fails. In either case, HTTP requests can still be # made, they will just passed through to the URL opener without # attempting to interact with the API cache. logging.warn('Could not create or access API cache "%s". Try ' 'running "rbt clear-cache" to clear the HTTP ' 'cache for the API.', self.cache_path) if self.db is not None: self.db.row_factory = APICache._row_factory def make_request(self, request): """Perform the specified request. If there is an up-to-date cached entry in our store, a CachedResponse will be returned. Otherwise, The urlopen method will be used to execute the request and a CachedResponse (if our entry is still up to date) or a Response (if it is not) will be returned. """ if self.db is None or request.method != 'GET': # We can only cache HTTP GET requests and only if we were able to # access the API cache database. return self.urlopen(request) entry = self._get_entry(request) if entry: if entry.up_to_date(): logging.debug('Cached response for HTTP GET %s up to date', request.get_full_url()) response = CachedHTTPResponse(entry) else: if entry.etag: request.add_header(b'If-None-Match', entry.etag) if entry.last_modified: request.add_header(b'If-Modified-Since', entry.last_modified) response = HTTPResponse(self.urlopen(request)) if response.getcode() == 304: logging.debug('Cached response for HTTP GET %s expired ' 'and was not modified', request.get_full_url()) entry.local_date = datetime.datetime.now() self._save_entry(entry) response = CachedHTTPResponse(entry) elif 200 <= response.getcode() < 300: logging.debug('Cached response for HTTP GET %s expired ' 'and was modified', request.get_full_url()) response_headers = response.info() cache_info = self._get_caching_info(request.headers, response_headers) if cache_info: entry.max_age = cache_info['max_age'] entry.etag = cache_info['etag'] entry.local_date = datetime.datetime.now() entry.last_modified = cache_info['last_modified'] entry.mime_type = response_headers['Content-Type'] entry.item_mime_type = \ response_headers.get('Item-Content-Type') entry.response_body = response.read() if entry.vary_headers != cache_info['vary_headers']: # The Vary: header has changed since the last time # we retrieved the resource so we need to remove # the old cache entry and save the new one. 
self._delete_entry(entry) entry.vary_headers = cache_info['vary_headers'] self._save_entry(entry) else: # This resource is no longer cache-able so we should # delete our cached version. logging.debug('Cached response for HTTP GET request ' 'to %s is no longer cacheable', request.get_full_url()) self._delete_entry(entry) else: response = HTTPResponse(self.urlopen(request)) response_headers = response.info() cache_info = self._get_caching_info(request.headers, response_headers) if cache_info: self._save_entry(CacheEntry( request.get_full_url(), cache_info['vary_headers'], cache_info['max_age'], cache_info['etag'], datetime.datetime.now(), cache_info['last_modified'], response_headers.get('Content-Type'), response_headers.get('Item-Content-Type'), response.read())) logging.debug('Added cache entry for HTTP GET request to %s', request.get_full_url()) else: logging.debug('HTTP GET request to %s cannot be cached', request.get_full_url()) return response def _get_caching_info(self, request_headers, response_headers): """Get the caching info for the response to the given request. A dictionary with caching information is returned, or None if the response cannot be cached. """ max_age = None no_cache = False expires = response_headers.get('Expires') if expires: # We switch to the C locale to parse the 'Expires' header because # the formatting specifiers are locale specific and the header # *must* be provided in English. After parsing the header, we # restore the locale to the user's previous locale. # # We also note that changing the locale is not thread-safe so we # use a lock around this. with _locale_lock: old_locale = locale.setlocale(locale.LC_TIME) try: # 'setlocale' requires the second parameter to be a 'str' # in both Python 2.x and Python 3+. locale.setlocale(locale.LC_TIME, str('C')) expires = datetime.datetime.strptime(expires, self.EXPIRES_FORMAT) # We assign to max_age because the value of max-age in the # Cache-Control header overrides the behaviour of the # 'Expires' header. now = datetime.datetime.now() if expires < now: max_age = 0 else: max_age = (expires - now).seconds except ValueError: logging.error('The format of the "Expires" header (value ' '%s) does not match the expected format.', expires) except locale.Error: logging.error('The C locale is unavailable on this ' 'system. The "Expires" header cannot be ' 'parsed.') finally: locale.setlocale(locale.LC_TIME, old_locale) # The value of the Cache-Control header is a list of comma separated # values. We only care about some of them, notably max-age, no-cache, # no-store, and must-revalidate. The other values are only applicable # to intermediaries. for kvp in self._split_csv(response_headers.get('Cache-Control', '')): if kvp.startswith('max-age'): max_age = int(kvp.split('=')[1].strip()) elif kvp.startswith('no-cache'): # The no-cache specifier optionally has an associated header # that we shouldn't cache. However, the *only* headers we are # caching are headers that describe the the cached content: # Content-Type, and Item-Content-Type. no_cache = True elif kvp == 'no-store': # If no-store is specified, we cannot cache anything about this # resource. return None elif kvp == 'must-revalidate': # We treat must-revalidate identical to no-cache because we are # not an intermediary. no_cache = True # The Pragma: header is an obsolete header that may contain the value # no-cache, which is equivalent to Cache-Control: no-cache. We check # for it for posterity's sake. 
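# For example, a response carrying "Pragma: no-cache" is handled the
# same as one carrying "Cache-Control: no-cache".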
if 'no-cache' in response_headers.get('Pragma', ''): no_cache = True etag = response_headers.get('ETag') last_modified = response_headers.get('Last-Modified') vary_headers = response_headers.get('Vary') # The Vary header specifies a list of headers that *may* alter the # returned response. The cached response can only be used when these # headers have the same value as those provided in the request. if vary_headers: vary_headers = dict( (header, request_headers.get(header)) for header in self._split_csv(vary_headers) ) else: vary_headers = {} if no_cache: # If no-cache is specified, the resource must always be requested, # so we will treat this as if the max_age is zero. max_age = 0 if no_cache and not etag and not last_modified: # We have no information with which to provide the server to check # if our content is up to date. Therefore, the information cannot # be cached. return None return { 'max_age': max_age, 'etag': etag, 'last_modified': last_modified, 'vary_headers': vary_headers } def _create_schema(self): """Create the schema for the API cache database.""" try: with contextlib.closing(self.db.cursor()) as c: c.execute('DROP TABLE IF EXISTS api_cache') c.execute('DROP TABLE IF EXISTS cache_info') c.execute('''CREATE TABLE api_cache( url TEXT, vary_headers TEXT, max_age INTEGER, etag TEXT, local_date TEXT, last_modified TEXT, mime_type TEXT, item_mime_type TEXT, response_body BLOB, PRIMARY KEY(url, vary_headers) )''') c.execute('CREATE TABLE cache_info(version INTEGER)') c.execute('INSERT INTO cache_info(version) VALUES(?)', (self.SCHEMA_VERSION,)) self._write_db() except sqlite3.Error as e: self._die('Could not create database schema for the HTTP cache', e) def _get_entry(self, request): """Find an entry in the API cache store that matches the request. If no such cache entry exists, this returns None. """ url = request.get_full_url() try: with contextlib.closing(self.db.cursor()) as c: for row in c.execute('SELECT * FROM api_cache WHERE url=?', (url,)): if row.matches_request(request): return row except sqlite3.Error as e: self._die('Could not retrieve an entry from the HTTP cache', e) return None def _save_entry(self, entry): """Save the entry into the store. If the entry already exists in the store, do an UPDATE; otherwise do an INSERT. This does not commit to the database. """ vary_headers = json.dumps(entry.vary_headers) local_date = entry.local_date.strftime(entry.DATE_FORMAT) try: with contextlib.closing(self.db.cursor()) as c: try: c.execute('''INSERT INTO api_cache (url, vary_headers, max_age, etag, local_date, last_modified, mime_type, item_mime_type, response_body) VALUES(?,?,?,?,?,?,?,?,?)''', (entry.url, vary_headers, entry.max_age, entry.etag, local_date, entry.last_modified, entry.mime_type, entry.item_mime_type, sqlite3.Binary(entry.response_body))) except sqlite3.IntegrityError: c.execute('''UPDATE api_cache SET max_age=?, etag=?, local_date=?, last_modified=?, mime_type=?, item_mime_type=?, response_body=? WHERE url=? AND vary_headers=?''', (entry.max_age, entry.etag, local_date, entry.last_modified, entry.mime_type, entry.item_mime_type, sqlite3.Binary(entry.response_body), entry.url, vary_headers)) self._write_db() except sqlite3.Error as e: self._die('Could not write entry to the HTTP cache for the API', e) def _delete_entry(self, entry): """Remove the entry from the store.""" try: with contextlib.closing(self.db.cursor()) as c: c.execute( 'DELETE FROM api_cache WHERE URL=? 
AND vary_headers=?', (entry.url, json.dumps(entry.vary_headers))) self._write_db() except sqlite3.Error as e: self._die('Could not delete entry from the HTTP cache for the API', e) @staticmethod def _row_factory(cursor, row): """A factory for creating individual Cache Entries from db rows.""" return CacheEntry( url=row[0], vary_headers=json.loads(row[1]), max_age=row[2], etag=row[3], local_date=datetime.datetime.strptime(row[4], CacheEntry.DATE_FORMAT), last_modified=row[5], mime_type=row[6], item_mime_type=row[7], response_body=six.binary_type(row[8]), ) def _write_db(self): """Flush the contents of the DB to the disk.""" if self.db: try: self.db.commit() except sqlite3.Error as e: self._die('Could not write database to disk', e) def _die(self, message, inner_exception): """Build an appropriate CacheError and raise it.""" message = '%s: %s.' % (message, inner_exception) if self.cache_path: if self.cache_path == APICache.DEFAULT_CACHE_PATH: cache_args = '' else: cache_args = ' --cache-location %s' % self.cache_path message += (' Try running "rbt clear-cache%s" to manually clear ' 'the HTTP Cache for the API.' % cache_args) raise CacheError(message) def _split_csv(self, csvline): """Split a line of comma-separated values into a list.""" return [ s.strip() for s in csvline.split(',') ] def clear_cache(cache_path=APICache.DEFAULT_CACHE_PATH): """Delete the HTTP cache used for the API.""" try: os.unlink(cache_path) print("Cleared cache in '%s'" % cache_path) except Exception as e: logging.error('Could not clear cache in "%s": %s. Try manually ' 'removing it if it exists.', cache_path, e) RBTools-0.7.11/rbtools/__init__.py0000644000232200023220000000441613230242633017272 0ustar debalancedebalance# # __init__.py -- Basic version and package information # # Copyright (c) 2007-2009 Christian Hammond # Copyright (c) 2007-2009 David Trowbridge # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# from __future__ import unicode_literals # The version of RBTools # # This is in the format of: # # (Major, Minor, Micro, Patch, alpha/beta/rc/final, Release Number, Released) # VERSION = (0, 7, 11, 0, 'final', 0, True) def get_version_string(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2] or VERSION[3]: version += '.%s' % VERSION[2] if VERSION[3]: version += '.%s' % VERSION[3] if VERSION[4] != 'final': if VERSION[4] == 'rc': version += ' RC%s' % VERSION[5] else: version += ' %s %s' % (VERSION[4], VERSION[5]) if not is_release(): version += ' (dev)' return version def get_package_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2] or VERSION[3]: version += '.%s' % VERSION[2] if VERSION[3]: version += '.%s' % VERSION[3] if VERSION[4] != 'final': version += '%s%s' % (VERSION[4], VERSION[5]) return version def is_release(): return VERSION[6] __version_info__ = VERSION[:-1] __version__ = get_package_version() RBTools-0.7.11/rbtools/utils/0000755000232200023220000000000013230242636016317 5ustar debalancedebalanceRBTools-0.7.11/rbtools/utils/checks.py0000644000232200023220000000454513230242633020136 0ustar debalancedebalancefrom __future__ import unicode_literals import os import subprocess import sys from rbtools.utils.process import execute GNU_DIFF_WIN32_URL = 'http://gnuwin32.sourceforge.net/packages/diffutils.htm' def check_install(command): """Check if the given command is installed. Try executing an external command and return a boolean indicating whether that command is installed or not. The 'command' argument should be something that executes quickly, without hitting the network (for instance, 'svn help' or 'git --version'). """ try: subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return True except (OSError, ValueError): # We catch ValueError exceptions here to work around bug in the # version of Python that ships with OS X 10.11. I don't know if the # logic is 100% reliable but if we get a ValueError here, it typically # means the command we are trying to run doesn't exist. See # http://bugs.python.org/issue26083 return False def check_gnu_diff(): """Checks if GNU diff is installed, and informs the user if it's not.""" has_gnu_diff = False try: if hasattr(os, 'uname') and os.uname()[0] == 'SunOS': diff_cmd = 'gdiff' else: diff_cmd = 'diff' result = execute([diff_cmd, '--version'], ignore_errors=True) has_gnu_diff = 'GNU diffutils' in result except OSError: pass if not has_gnu_diff: error = ('GNU diff is required in order to generate diffs. ' 'Make sure it is installed and in the path.\n') if os.name == 'nt': error += ('On Windows, you can install this from %s\n' % GNU_DIFF_WIN32_URL) raise Exception(error) def is_valid_version(actual, expected): """ Takes two tuples, both in the form: (major_version, minor_version, micro_version) Returns true if the actual version is greater than or equal to the expected version, and false otherwise. """ return ((actual[0] > expected[0]) or (actual[0] == expected[0] and actual[1] > expected[1]) or (actual[0] == expected[0] and actual[1] == expected[1] and actual[2] >= expected[2])) RBTools-0.7.11/rbtools/utils/console.py0000644000232200023220000000245313230242633020334 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import os import subprocess from distutils.util import strtobool from six.moves import input from rbtools.utils.filesystem import make_tempfile def confirm(question): """Interactively prompt for a Yes/No answer. 
Accepted values (case-insensitive) depend on distutils.util.strtobool(): 'Yes' values: y, yes, t, true, on, 1 'No' values: n, no, f, false, off, 0 """ while True: full_question = '%s [Yes/No]: ' % question answer = input(full_question.encode('utf-8')).lower() try: return strtobool(answer) except ValueError: print('%s is not a valid answer.' % answer) def edit_text(content): """Allows a user to edit a block of text and returns the saved result. The environment's default text editor is used if available, otherwise vi is used. """ tempfile = make_tempfile(content.encode('utf8')) editor = os.environ.get('VISUAL') or os.environ.get('EDITOR') or 'vi' try: subprocess.call(editor.split() + [tempfile]) except OSError: print('No editor found. Set EDITOR environment variable or install ' 'vi.') raise f = open(tempfile) result = f.read() f.close() return result.decode('utf8') RBTools-0.7.11/rbtools/utils/match_score.py0000644000232200023220000000257713230242633021170 0ustar debalancedebalancefrom __future__ import unicode_literals from difflib import SequenceMatcher class Score(object): """Encapsulates ranking information for matching existing requests. This is currently used with 'rbt post -u' to match the new change with existing review requests. The 'get_match' method will return a new Score, and then multiple scores can be ranked against each other.""" EXACT_MATCH_SCORE = 1.0 def __init__(self, summary_score, description_score): self.summary_score = summary_score self.description_score = description_score def is_exact_match(self): return (self.summary_score == self.EXACT_MATCH_SCORE and self.description_score == self.EXACT_MATCH_SCORE) @staticmethod def get_match(summary_pair, description_pair): """Get a score based on a pair of summaries and a pair of descriptions. The scores for summary and description pairs are calculated independently using SequenceMatcher, and returned as part of a Score object. """ if not summary_pair or not description_pair: return None summary_score = SequenceMatcher( None, summary_pair[0], summary_pair[1]).ratio() description_score = SequenceMatcher( None, description_pair[0], description_pair[1]).ratio() return Score(summary_score, description_score) RBTools-0.7.11/rbtools/utils/appdirs.py0000755000232200023220000005354613230242633020340 0ustar debalancedebalance#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2005-2010 ActiveState Software Inc. # Copyright (c) 2013 Eddy Petrișor """Utilities for determining application-specific dirs. See <http://github.com/ActiveState/appdirs> for details and usage. """ # Dev Notes: # - MSDN on where to store app data files: # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html # - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html __version_info__ = (1, 4, 0) __version__ = '.'.join(map(str, __version_info__)) import sys import os PY3 = sys.version_info[0] == 3 if PY3: unicode = str if sys.platform.startswith('java'): import platform os_name = platform.java_ver()[3][0] if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. system = 'win32' elif os_name.startswith('Mac'): # "Mac OS X", etc. system = 'darwin' else: # "Linux", "SunOS", "FreeBSD", etc. # Setting this to "linux2" is not ideal, but only Windows or Mac # are actually checked for and the rest of the module expects # *sys.platform* style strings.
system = 'linux2' else: system = sys.platform def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: ~/Library/Application Support/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\Application Data\\ Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ Win 7 (not roaming): C:\Users\\AppData\Local\\ Win 7 (roaming): C:\Users\\AppData\Roaming\\ For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/". """ if system == "win32": if appauthor is None: appauthor = appname const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.normpath(_get_win_folder(const)) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('~/Library/Application Support/') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): """Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/', if XDG_DATA_DIRS is not set Typical user data directories are: Mac OS X: /Library/Application Support/ Unix: /usr/local/share/ or /usr/share/ Win XP: C:\Documents and Settings\All Users\Application Data\\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. 
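For example, on a typical Linux system with XDG_DATA_DIRS unset
('MyApp' is a hypothetical application name):

    >>> site_data_dir('MyApp')
    '/usr/local/share/MyApp'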
""" if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('/Library/Application Support') if appname: path = os.path.join(path, appname) else: # XDG default for $XDG_DATA_DIRS # only first, if multipath is False path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share'])) pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path if appname and version: path = os.path.join(path, version) return path def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See for a discussion of issues. Typical user data directories are: Mac OS X: same as user_data_dir Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by deafult "~/.config/". """ if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): """Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of config dirs should be returned. By default, the first item from XDG_CONFIG_DIRS is returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set Typical user data directories are: Mac OS X: same as site_data_dir Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in $XDG_CONFIG_DIRS Win *: same as site_data_dir Vista: (Fail! 
"C:\ProgramData" is a hidden *system* directory on Vista.) For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """ if system in ["win32", "darwin"]: path = site_data_dir(appname, appauthor) if appname and version: path = os.path.join(path, version) else: # XDG default for $XDG_CONFIG_DIRS # only first, if multipath is False path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific cache dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Cache" to the base app data dir for Windows. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache Vista: C:\Users\\AppData\Local\\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir` above). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. This can be disabled with the `opinion=False` option. """ if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) if opinion: path = os.path.join(path, "Cache") elif system == 'darwin': path = os.path.expanduser('~/Library/Caches') if appname: path = os.path.join(path, appname) else: path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): r"""Return full path to the user-specific log dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be ".". 
Only applied when appname is present. "opinion" (boolean) can be False to disable the appending of "Logs" to the base app data dir for Windows, and "log" to the base cache dir for Unix. See discussion below. Typical user cache directories are: Mac OS X: ~/Library/Logs/ Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs Vista: C:\Users\\AppData\Local\\\Logs On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in examples of what some windows apps use for a logs dir.) OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` value for Windows and appends "log" to the user cache dir for Unix. This can be disabled with the `opinion=False` option. """ if system == "darwin": path = os.path.join( os.path.expanduser('~/Library/Logs'), appname) elif system == "win32": path = user_data_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "Logs") else: path = user_cache_dir(appname, appauthor, version) version = False if opinion: path = os.path.join(path, "log") if appname and version: path = os.path.join(path, version) return path class AppDirs(object): """Convenience wrapper for getting application dirs.""" def __init__(self, appname, appauthor=None, version=None, roaming=False, multipath=False): self.appname = appname self.appauthor = appauthor self.version = version self.roaming = roaming self.multipath = multipath @property def user_data_dir(self): return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_data_dir(self): return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_config_dir(self): return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming) @property def site_config_dir(self): return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath) @property def user_cache_dir(self): return user_cache_dir(self.appname, self.appauthor, version=self.version) @property def user_log_dir(self): return user_log_dir(self.appname, self.appauthor, version=self.version) #---- internal support stuff def _get_win_folder_from_registry(csidl_name): """This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. """ import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) dir, type = _winreg.QueryValueEx(key, shell_folder_name) return dir def _get_win_folder_with_pywin32(csidl_name): from win32com.shell import shellcon, shell dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) # Try to make this a unicode path because SHGetFolderPath does # not return unicode strings when there is unicode data in the # path. try: dir = unicode(dir) # Downgrade to short path name if have highbit chars. See # . 
has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: try: import win32api dir = win32api.GetShortPathName(dir) except ImportError: pass except UnicodeError: pass return dir def _get_win_folder_with_ctypes(csidl_name): import ctypes csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # . has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def _get_win_folder_with_jna(csidl_name): import array from com.sun import jna from com.sun.jna.platform import win32 buf_size = win32.WinDef.MAX_PATH * 2 buf = array.zeros('c', buf_size) shell = win32.Shell32.INSTANCE shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) dir = jna.Native.toString(buf.tostring()).rstrip("\0") # Downgrade to short path name if have highbit chars. See # . has_high_char = False for c in dir: if ord(c) > 255: has_high_char = True break if has_high_char: buf = array.zeros('c', buf_size) kernel = win32.Kernel32.INSTANCE if kernel.GetShortPathName(dir, buf, buf_size): dir = jna.Native.toString(buf.tostring()).rstrip("\0") return dir if system == "win32": try: import win32com.shell _get_win_folder = _get_win_folder_with_pywin32 except ImportError: try: from ctypes import windll _get_win_folder = _get_win_folder_with_ctypes except ImportError: try: import com.sun.jna _get_win_folder = _get_win_folder_with_jna except ImportError: _get_win_folder = _get_win_folder_from_registry #---- self test code if __name__ == "__main__": appname = "MyApp" appauthor = "MyCompany" props = ("user_data_dir", "site_data_dir", "user_config_dir", "site_config_dir", "user_cache_dir", "user_log_dir") print("-- app dirs (with optional 'version')") dirs = AppDirs(appname, appauthor, version="1.0") for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'version')") dirs = AppDirs(appname, appauthor) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (without optional 'appauthor')") dirs = AppDirs(appname) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) print("\n-- app dirs (with disabled 'appauthor')") dirs = AppDirs(appname, appauthor=False) for prop in props: print("%s: %s" % (prop, getattr(dirs, prop))) RBTools-0.7.11/rbtools/utils/testbase.py0000644000232200023220000000507013230242633020502 0ustar debalancedebalancefrom __future__ import unicode_literals import os import sys import uuid from six.moves import cStringIO as StringIO from rbtools.utils.filesystem import cleanup_tempfiles, make_tempdir from rbtools.testing import TestCase class RBTestBase(TestCase): """Base class for RBTools tests. Its side effect in that it change home directory before test suit will run. 
This is because RBTools actively works with files and almost all tests employ file I/O operations.""" def setUp(self): self._old_cwd = os.getcwd() self.set_user_home_tmp() def tearDown(self): os.chdir(self._old_cwd) cleanup_tempfiles() def create_tmp_dir(self): """Creates and returns a temporary directory.""" return make_tempdir() def chdir_tmp(self, dir=None): """Changes current directory to a temporary directory.""" dirname = make_tempdir(parent=dir) os.chdir(dirname) return dirname def gen_uuid(self): """Generates UUID value which can be useful where some unique value is required.""" return str(uuid.uuid4()) def get_user_home(self): """Returns current user's home directory.""" return os.environ['HOME'] def is_exe_in_path(self, name): """Checks whether an executable is in the user's search path. This expects a name without any system-specific executable extension. It will append the proper extension as necessary. For example, use "myapp" and not "myapp.exe". This will return True if the app is in the path, or False otherwise. Taken from djblets.util.filesystem to avoid an extra dependency """ if sys.platform == 'win32' and not name.endswith('.exe'): name += '.exe' for dir in os.environ['PATH'].split(os.pathsep): if os.path.exists(os.path.join(dir, name)): return True return False def reset_cl_args(self, values=[]): """Replaces command-line arguments with new ones. Useful for testing program's command-line options. """ sys.argv = values def set_user_home(self, path): """Set home directory of current user.""" os.environ['HOME'] = path def set_user_home_tmp(self): """Set temporary directory as current user's home.""" self.set_user_home(make_tempdir()) def catch_output(self, func): stdout = sys.stdout outbuf = StringIO() sys.stdout = outbuf func() sys.stdout = stdout return outbuf.getvalue() RBTools-0.7.11/rbtools/utils/__init__.py0000644000232200023220000000000013230242633020413 0ustar debalancedebalanceRBTools-0.7.11/rbtools/utils/filesystem.py0000644000232200023220000001217713230242633021062 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import os import shutil import tempfile CONFIG_FILE = '.reviewboardrc' tempfiles = [] tempdirs = [] builtin = {} def cleanup_tempfiles(): for tmpfile in tempfiles: try: os.unlink(tmpfile) except: pass for tmpdir in tempdirs: shutil.rmtree(tmpdir, ignore_errors=True) def _load_python_file(filename, config): with open(filename) as f: exec(compile(f.read(), filename, 'exec'), config) return config def make_tempfile(content=None): """Creates a temporary file and returns the path. The path is stored in an array for later cleanup. """ fd, tmpfile = tempfile.mkstemp() if content: os.write(fd, content) os.close(fd) tempfiles.append(tmpfile) return tmpfile def make_tempdir(parent=None): """Creates a temporary directory and returns the path. The path is stored in an array for later cleanup. """ tmpdir = tempfile.mkdtemp(dir=parent) tempdirs.append(tmpdir) return tmpdir def make_empty_files(files): """Creates each file in the given list and any intermediate directories.""" for f in files: path = os.path.dirname(f) if path and not os.path.exists(path): try: os.makedirs(path) except OSError as e: logging.error('Unable to create directory %s: %s', path, e) continue try: with open(f, 'w'): # Set the file access and modified times to the current time. 
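# (Opening the file with 'w' creates or truncates it; passing None as
# the times argument to os.utime() stamps it with the current time.)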
os.utime(f, None) except IOError as e: logging.error('Unable to create empty file %s: %s', f, e) def walk_parents(path): """Walks up the tree to the root directory.""" while os.path.splitdrive(path)[1] != os.sep: yield path path = os.path.dirname(path) def get_home_path(): """Retrieve the home path.""" if 'HOME' in os.environ: return os.environ['HOME'] elif 'APPDATA' in os.environ: return os.environ['APPDATA'] else: return '' def get_config_paths(): """Return the paths to each :file:`.reviewboardrc` influencing the cwd. A list of paths to :file:`.reviewboardrc` files will be returned, where each subsequent list entry has lower precedence than the previous; i.e., configuration found in files further up the list takes precedence. Configuration in the paths set in :envvar:`$RBTOOLS_CONFIG_PATH` will take precedence over files found in the current working directory or its parents. """ config_paths = [] # Apply config files from $RBTOOLS_CONFIG_PATH first, ... for path in os.environ.get('RBTOOLS_CONFIG_PATH', '').split(os.pathsep): # Filter out empty paths; this also handles the case where # $RBTOOLS_CONFIG_PATH is unset or empty. if not path: continue filename = os.path.realpath(os.path.join(path, CONFIG_FILE)) if (os.path.exists(filename) and filename not in config_paths): config_paths.append(filename) # ... then config files from the current or parent directories. for path in walk_parents(os.getcwd()): filename = os.path.realpath(os.path.join(path, CONFIG_FILE)) if (os.path.exists(filename) and filename not in config_paths): config_paths.append(filename) # Finally, the user's own config file. home_config_path = os.path.realpath(os.path.join(get_home_path(), CONFIG_FILE)) if (os.path.exists(home_config_path) and home_config_path not in config_paths): config_paths.append(home_config_path) return config_paths def parse_config_file(filename): """Parse a .reviewboardrc file. Returns a dictionary containing the configuration from the file. The ``filename`` argument should contain a full path to a .reviewboardrc file. """ config = { 'TREES': {}, 'ALIASES': {}, } try: config = _load_python_file(filename, config) except SyntaxError as e: raise Exception('Syntax error in config file: %s\n' 'Line %i offset %i\n' % (filename, e.lineno, e.offset)) return dict((k, config[k]) for k in set(config.keys()) - set(builtin.keys())) def load_config(): """Load configuration from .reviewboardrc files. This will read all of the .reviewboardrc files influencing the cwd and return a dictionary containing the configuration. """ config = {} trees = {} aliases = {} for filename in reversed(get_config_paths()): parsed_config = parse_config_file(filename) trees.update(parsed_config.pop('TREES')) aliases.update(parsed_config.pop('ALIASES')) config.update(parsed_config) config['TREES'] = trees config['ALIASES'] = aliases return config # This extracts a dictionary of the built-in globals in order to have a clean # dictionary of settings, consisting of only what has been specified in the # config file. exec('True', builtin) RBTools-0.7.11/rbtools/utils/tests.py0000644000232200023220000001141513230242633020032 0ustar debalancedebalancefrom __future__ import unicode_literals import os import re import shutil import sys from rbtools.utils import aliases, checks, filesystem, process from rbtools.utils.testbase import RBTestBase class UtilitiesTest(RBTestBase): """Tests for rbtools.utils units. Any new modules created under rbtools/utils should be tested here.
""" def test_check_install(self): """Testing 'check_install' method.""" self.assertTrue(checks.check_install([sys.executable, ' --version'])) self.assertFalse(checks.check_install([self.gen_uuid()])) def test_make_tempfile(self): """Testing 'make_tempfile' method.""" fname = filesystem.make_tempfile() self.assertTrue(os.path.isfile(fname)) self.assertEqual(os.stat(fname).st_uid, os.geteuid()) self.assertTrue(os.access(fname, os.R_OK | os.W_OK)) def test_make_empty_files(self): """Testing 'make_empty_files' method.""" # Use make_tempdir to get a unique directory name tmpdir = filesystem.make_tempdir() self.assertTrue(os.path.isdir(tmpdir)) filesystem.cleanup_tempfiles() fname = os.path.join(tmpdir, 'file') filesystem.make_empty_files([fname]) self.assertTrue(os.path.isdir(tmpdir)) self.assertTrue(os.path.isfile(fname)) self.assertEqual(os.stat(fname).st_uid, os.geteuid()) self.assertTrue(os.access(fname, os.R_OK | os.W_OK)) shutil.rmtree(tmpdir, ignore_errors=True) def test_execute(self): """Testing 'execute' method.""" self.assertTrue(re.match('.*?%d.%d.%d' % sys.version_info[:3], process.execute([sys.executable, '-V']))) def test_is_valid_version(self): """Testing 'is_valid_version' method.""" self.assertTrue(checks.is_valid_version((1, 0, 0), (1, 0, 0))) self.assertTrue(checks.is_valid_version((1, 1, 0), (1, 0, 0))) self.assertTrue(checks.is_valid_version((1, 0, 1), (1, 0, 0))) self.assertTrue(checks.is_valid_version((1, 1, 0), (1, 1, 0))) self.assertTrue(checks.is_valid_version((1, 1, 1), (1, 1, 0))) self.assertTrue(checks.is_valid_version((1, 1, 1), (1, 1, 1))) self.assertFalse(checks.is_valid_version((0, 9, 9), (1, 0, 0))) self.assertFalse(checks.is_valid_version((1, 0, 9), (1, 1, 0))) self.assertFalse(checks.is_valid_version((1, 1, 0), (1, 1, 1))) class AliasTest(RBTestBase): """Tests for parameter substitution in rbtools aliases.""" def _replace_arguments(self, cmd, args): """Convenience method to return a list instead of generator. This allows us to compare with self.assertEqual to another list. 
""" return list(aliases.replace_arguments(cmd, args)) def test_alias_substitution_basic(self): """Testing variable substitution in rbtools aliases""" self.assertEqual(self._replace_arguments('$1', ['HEAD']), ['HEAD']) def test_alias_subtitution_multiple(self): """Testing variable substitution where multiple variables appear""" self.assertEqual(self._replace_arguments('$1..$2', ['a', 'b']), ['a..b']) def test_alias_substitution_blank(self): """Testing variable substitution where the argument isn't supplied""" self.assertEqual(self._replace_arguments('rbt post $1', []), ['rbt', 'post', '']) def test_alias_substitution_append(self): """Testing variable substitution where no variables are supplied""" self.assertEqual(self._replace_arguments('echo', ['a', 'b', 'c']), ['echo', 'a', 'b', 'c']) def test_alias_dont_substitute_alphabetic_variables(self): """Testing variable substitution with alphabetic variables""" self.assertEqual(self._replace_arguments('$1 $test', ['f']), ['f', '$test']) def test_alias_substitution_star(self): """Testing variable substitution with the $* variable""" self.assertEqual(self._replace_arguments('$*', ['a', 'b', 'c']), ['a', 'b', 'c']) def test_alias_substitution_star_whitespace(self): """Testing $* variable substitution with whitespace-containing args""" self.assertEqual(self._replace_arguments('$*', ['a', 'b', 'c d e']), ['a', 'b', 'c d e']) def test_alias_substitution_bad_quotes(self): """Testing alias substitution with bad quotes.""" self.assertRaises(ValueError, lambda: self._replace_arguments('"$1 $2\\"', [])) def test_alias_substition_unescaped_quotes(self): """Testing alias substitution with a slash at the end of the string""" self.assertEqual(self._replace_arguments('"$1 \\\\"', ['a']), ['a \\']) RBTools-0.7.11/rbtools/utils/repository.py0000644000232200023220000000165413230242633021113 0ustar debalancedebalancefrom __future__ import unicode_literals def get_repository_id(repository_info, api_root, repository_name=None): """Get the repository ID from the server. This will compare the paths returned by the SCM client with those on the server, and return the id of the first match. """ detected_paths = repository_info.path if not isinstance(detected_paths, list): detected_paths = [detected_paths] repositories = api_root.get_repositories( only_fields='id,name,mirror_path,path', only_links='') for repo in repositories.all_items: # NOTE: Versions of Review Board prior to 1.7.19 didn't include a # 'mirror_path' parameter, so we have to conditionally fetch it. if (repo.name == repository_name or repo.path in detected_paths or getattr(repo, 'mirror_path', None) in detected_paths): return repo.id return None RBTools-0.7.11/rbtools/utils/users.py0000644000232200023220000000550013230242633020027 0ustar debalancedebalancefrom __future__ import unicode_literals import getpass import logging import sys from six.moves import input, range from rbtools.api.errors import AuthorizationError from rbtools.commands import CommandError def get_authenticated_session(api_client, api_root, auth_required=False, session=None, num_retries=3): """Return an authenticated session. None will be returned if the user is not authenticated, unless the 'auth_required' parameter is True, in which case the user will be prompted to login. """ if not session: session = api_root.get_session(expand='user') if not session.authenticated: if not auth_required: return None # Interactive prompts don't work correctly when input doesn't come # from a terminal. 
This could seem to be a rare case not worth # worrying about, but this is what happens when using native # Python in Cygwin terminal emulator under Windows and it's very # puzzling to the users, especially because stderr is also _not_ # flushed automatically in this case, so the program just appears # to hang. if not sys.stdin.isatty(): logging.error('Authentication is required but input is not a tty.') if sys.platform == 'win32': logging.info('Check that you are not running this script ' 'from a Cygwin terminal emulator (or use ' 'Cygwin Python to run it).') raise CommandError('Unable to log in to Review Board.') logging.info('Please log in to the Review Board server at %s', api_client.domain) for i in range(num_retries): sys.stderr.write('Username: ') username = input() password = getpass.getpass(b'Password: ') api_client.login(username, password) try: session = session.get_self() break except AuthorizationError: sys.stderr.write('\n') if i < num_retries - 1: logging.error('The username or password was incorrect. ' 'Please try again.') else: raise CommandError('Unable to log in to Review Board.') return session def get_user(api_client, api_root, auth_required=False): """Return the user resource for the current session.""" session = get_authenticated_session(api_client, api_root, auth_required) if session: return session.user return None def get_username(api_client, api_root, auth_required=False): """Return the username for the current session.""" user = get_user(api_client, api_root, auth_required) if user: return user.username return None RBTools-0.7.11/rbtools/utils/review_request.py0000644000232200023220000002052613230242633021744 0ustar debalancedebalanceimport logging from rbtools.api.errors import APIError from rbtools.clients.errors import InvalidRevisionSpecError from rbtools.commands import CommandError from rbtools.utils.match_score import Score from rbtools.utils.repository import get_repository_id from rbtools.utils.users import get_user def get_draft_or_current_value(field_name, review_request): """Returns the draft or current field value from a review request. If a draft exists for the supplied review request, return the draft's field value for the supplied field name, otherwise return the review request's field value for the supplied field name. """ if review_request.draft: fields = review_request.draft[0] else: fields = review_request return fields[field_name] def get_possible_matches(review_requests, summary, description, limit=5): """Returns a sorted list of tuples of score and review request. Each review request is given a score based on the summary and description provided. The result is a sorted list of tuples containing the score and the corresponding review request, sorted by the highest scoring review request first. """ candidates = [] # Get all potential matches. for review_request in review_requests.all_items: summary_pair = (get_draft_or_current_value('summary', review_request), summary) description_pair = (get_draft_or_current_value('description', review_request), description) score = Score.get_match(summary_pair, description_pair) candidates.append((score, review_request)) # Sort by summary and description on descending rank. sorted_candidates = sorted( candidates, key=lambda m: (m[0].summary_score, m[0].description_score), reverse=True ) return sorted_candidates[:limit] def get_revisions(tool, cmd_args): """Returns the parsed revisions from the command line arguments. These revisions are used for diff generation and commit message extraction. 
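For example (values illustrative), a Git client typically returns a
dict such as {'base': 'abc123', 'tip': 'def456'} for the parsed
revisions.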
They will be cached for future calls. """ # Parse the provided revisions from the command line and generate # a spec or set of specialized extra arguments that the SCMClient # can use for diffing and commit lookups. try: revisions = tool.parse_revision_spec(cmd_args) except InvalidRevisionSpecError: if not tool.supports_diff_extra_args: raise revisions = None return revisions def find_review_request_by_change_id(api_client, api_root, repository_info, repository_name, revisions): """Ask Review Board for the review request ID for the tip revision. Note that this function calls the Review Board API with the only_fields parameter, so the returned review request will contain only the fields specified by the only_fields variable. If no review request is found, None will be returned instead. """ only_fields = 'id,commit_id,changenum,status,url,absolute_url' change_id = revisions['tip'] logging.debug('Attempting to find review request from tip revision ID: %s' % change_id) # Strip off any prefix that might have been added by the SCM. change_id = change_id.split(':', 1)[1] optional_args = {} if change_id.isdigit(): # Populate the integer-only changenum field as well, for # compatibility with older API versions. optional_args['changenum'] = int(change_id) user = get_user(api_client, api_root, auth_required=True) repository_id = get_repository_id( repository_info, api_root, repository_name) # Don't limit the query to only pending requests, because it's okay to # stamp a submitted review. review_requests = api_root.get_review_requests(repository=repository_id, from_user=user.username, commit_id=change_id, only_links='self', only_fields=only_fields, **optional_args) if review_requests: count = review_requests.total_results # Only one review can be associated with a specific commit ID. if count > 0: assert count == 1, '%d review requests were returned' % count review_request = review_requests[0] logging.debug('Found review request %s with status %s' % (review_request.id, review_request.status)) if review_request.status != 'discarded': return review_request return None def guess_existing_review_request(repository_info, repository_name, api_root, api_client, tool, revisions, guess_summary, guess_description, is_fuzzy_match_func=None, no_commit_error=None, submit_as=None): """Try to guess the existing review request ID if it is available. The existing review request is guessed by comparing the existing summary and description to the current post's summary and description, respectively. The current post's summary and description are guessed if they are not provided. If the summary and description exactly match those of an existing review request, that request is immediately returned. Otherwise, the user is prompted to select from a list of potential matches, sorted by the highest ranked match first. Note that this function calls the Review Board API with the only_fields parameter, so the returned review request will contain only the fields specified by the only_fields variable. """ only_fields = 'id,summary,description,draft,url,absolute_url' if submit_as: username = submit_as else: user = get_user(api_client, api_root, auth_required=True) username = user.username repository_id = get_repository_id( repository_info, api_root, repository_name) try: # Get only pending requests by the current user for this # repository.
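# (The only_fields/only_links values above keep the payload small,
# while expand='draft' returns any unpublished draft data in the same
# request.)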
review_requests = api_root.get_review_requests( repository=repository_id, from_user=username, status='pending', expand='draft', only_fields=only_fields, only_links='draft', show_all_unpublished=True) if not review_requests: raise CommandError('No existing review requests to update for ' 'user %s.' % user.username) except APIError as e: raise CommandError('Error getting review requests for user ' '%s: %s' % (user.username, e)) summary = None description = None if not guess_summary or not guess_description: try: commit_message = tool.get_commit_message(revisions) if commit_message: if not guess_summary: summary = commit_message['summary'] if not guess_description: description = commit_message['description'] elif callable(no_commit_error): no_commit_error() except NotImplementedError: raise CommandError('--summary and --description are required.') if not summary and not description: return None possible_matches = get_possible_matches(review_requests, summary, description) exact_match_count = num_exact_matches(possible_matches) for score, review_request in possible_matches: # If the score is the only exact match, return the review request # ID without confirmation, otherwise prompt. if ((score.is_exact_match() and exact_match_count == 1) or (callable(is_fuzzy_match_func) and is_fuzzy_match_func(review_request))): return review_request return None def num_exact_matches(possible_matches): """Returns the number of exact matches in the possible match list.""" count = 0 for score, request in possible_matches: if score.is_exact_match(): count += 1 return count RBTools-0.7.11/rbtools/utils/aliases.py0000644000232200023220000000422213230242633020307 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import re import shlex import sys import subprocess import six from rbtools.commands import RB_MAIN # Regular expression for matching argument replacement _arg_re = re.compile(r'\$(\d+)') # Prior to Python 2.7.3, the shlex module could not accept unicode input. _SHLEX_SUPPORTS_UNICODE = sys.version_info >= (2, 7, 3) def replace_arguments(cmd, args): """Do parameter substitution for the given command. The special variable $* is expanded to contain all filenames. """ def arg_sub(m): """Replace a positional variable with the appropriate argument.""" index = int(m.group(1)) - 1 try: return args[index] except IndexError: return '' did_replacement = False shlex_convert_text_type = (not _SHLEX_SUPPORTS_UNICODE and isinstance(cmd, six.text_type)) if shlex_convert_text_type: cmd = cmd.encode('utf-8') for part in shlex.split(cmd): if part == '$*': did_replacement = True for arg in args: yield arg else: part, subs = _arg_re.subn(arg_sub, part) if subs != 0: did_replacement = True if shlex_convert_text_type: part = part.decode('utf-8') yield part if not did_replacement: for arg in args: yield arg def run_alias(alias, args): """Run the alias with the given arguments, after expanding parameters. Parameter expansion is done by the replace_arguments function. """ use_shell = alias.startswith('!') try: if use_shell: # If we are using the shell, we must provide our program as a # string instead of a sequence. 
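# For example (hypothetical alias), '!echo $1' invoked with
# args=['hi'] expands to the single shell string 'echo hi'.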
cmd = subprocess.list2cmdline(replace_arguments(alias[1:], args)) else: cmd = [RB_MAIN] + list(replace_arguments(alias, args)) return subprocess.call(cmd, shell=use_shell) except ValueError as e: logging.error('Could not execute alias "%s"; it was malformed: %s', alias, e) return 1 RBTools-0.7.11/rbtools/utils/commands.py0000644000232200023220000000726013230242633020474 0ustar debalancedebalancefrom __future__ import unicode_literals import six from rbtools.api.errors import APIError from rbtools.commands import CommandError DEFAULT_OPTIONS_MAP = { 'debug': '--debug', 'server': '--server', 'enable_proxy': '--disable-proxy', 'disable_ssl_verification': '--disable-ssl-verification', 'username': '--username', 'password': '--password', 'api_token': '--api-token', 'repository_name': '--repository', 'repository_url': '--repository-url', 'repository_type': '--repository-type', } #: The format string used to specify a URL to a review request in commits. #: #: Commands that prepare a commit message for pushing, such as rbt stamp, #: rbt patch, and rbt land, must use this format to indicate the URL to the #: matching review request. Review Board will parse the commit messages when #: executing any post-receive hooks, looking for this string and a valid URL. STAMP_STRING_FORMAT = 'Reviewed at %s' class AlreadyStampedError(CommandError): """An error indicating the change has already been stamped.""" def get_review_request(review_request_id, api_root, **kwargs): """Returns the review request resource for the given ID.""" try: review_request = api_root.get_review_request( review_request_id=review_request_id, **kwargs) except APIError as e: raise CommandError('Error getting review request %s: %s' % (review_request_id, e)) return review_request def extract_commit_message(review_request): """Returns a commit message based on the review request. The commit message returned contains the Summary, Description, Bugs, and Testing Done fields from the review request, if available. """ info = [] summary = review_request.summary description = review_request.description testing_done = review_request.testing_done if not description.startswith(summary): info.append(summary) info.append(description) if testing_done: info.append('Testing Done:\n%s' % testing_done) if review_request.bugs_closed: info.append('Bugs closed: %s' % ', '.join(review_request.bugs_closed)) info.append(STAMP_STRING_FORMAT % review_request.absolute_url) return '\n\n'.join(info) def build_rbtools_cmd_argv(options, options_map=DEFAULT_OPTIONS_MAP): """Generates a list of command line arguments from parsed command options. Used for building command line arguments from existing options, when calling another RBTools command. ``options_map`` specifies the options and their corresponding argument names that need to be included. """ argv = [] for option_key, arg_name in six.iteritems(options_map): option_value = getattr(options, option_key, None) if option_value is True and option_key != 'enable_proxy': argv.append(arg_name) elif option_value not in (True, False, None): argv.extend([arg_name, option_value]) # This is a special case where --disable-proxy is stored in # enable_proxy with its value inverted. 
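# For example (hypothetical options), enable_proxy=False with the
# default map appends '--disable-proxy' to argv here.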
if 'enable_proxy' in options_map and not options.enable_proxy: argv.append(options_map['enable_proxy']) return argv def stamp_commit_with_review_url(revisions, review_request_url, tool): """Amend the tip revision message to include review_request_url.""" commit_message = tool.get_raw_commit_message(revisions) stamp_string = STAMP_STRING_FORMAT % review_request_url if stamp_string in commit_message: raise AlreadyStampedError('This change is already stamped.') new_message = (commit_message.rstrip() + '\n\n' + stamp_string) tool.amend_commit_description(new_message, revisions) RBTools-0.7.11/rbtools/utils/process.py0000644000232200023220000001553613230242633020356 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import os import subprocess import sys import six def execute(command, env=None, split_lines=False, ignore_errors=False, extra_ignore_errors=(), translate_newlines=True, with_errors=True, none_on_ignored_error=False, return_error_code=False, log_output_on_error=True, results_unicode=True, return_errors=False): """Utility function to execute a command and return the output. :param command: The command to execute as either a string or a list of strings. :param env: The environment variables to pass to the called executable. These variables will be added to the current set of environment variables. :param split_lines: Determines if the program's output will be split into multiple lines. :param ignore_errors: If ``False``, RBTools will exit if a command returns a non-zero status. :param extra_ignore_errors: The set of return status codes that should be treated as if the program exited with status 0. :param translate_newlines: If ``True``, all line endings will be translated to ``\n``. :param with_errors: If ``True``, the command's standard output and standard error streams will be combined. This parameter is mutually exclusive with the ``return_errors`` parameter. :param none_on_ignored_error: If ``True``, this function will return ``None`` if either ``ignore_errors`` is ``True` and the program returns a non-zero exit status or the program exits with a status code in ``extra_ignored_errors``. :param return_error_code: Determines if the exit status of the executed command will also be returned. :param log_output_on_error: Determines if the output of the executed command will be logged if it returns a non-zero status code. :param results_unicode: Determines if the output will be interpreted as UTF-8. If ``True``, the process's output will be returned as a ``six.text_type``. Otherwise, it will return a ``six.binary_type``. :param return_errors: Determines if the standard error stream will be returned. This parameter is mutually exclusive with the ``with_errors`` parameter. :returns: This function returns either a single value or a 2- or 3-tuple. If ``return_error_code`` is True, the error code of the process will be returned as the first element of the tuple. If ``return_errors`` is True, the process' standard error stream will be returned as the last element of the tuple. If both of ``return_error_code`` and ``return_errors`` are ``False``, then the process' output will be returned. If either or both of them are ``True``, then this is the other element of the returned tuple. """ def post_process_output(output): """Post process the given output to convert it to the desired type.""" # If Popen is called with universal_newlines=True, the resulting data # returned from stdout will be a text stream (and therefore a unicode # object). 
Otherwise, it will be a byte stream. Translate the results # into the desired type. encoding = sys.getfilesystemencoding() if split_lines and len(output) > 0: if results_unicode and isinstance(output[0], six.binary_type): return [line.decode(encoding) for line in output] elif not results_unicode and isinstance(output[0], six.text_type): return [line.encode('utf-8') for line in output] elif not split_lines: if results_unicode and isinstance(output, six.binary_type): return output.decode(encoding) elif not results_unicode and isinstance(output, six.text_type): return output.encode('utf-8') return output assert not (with_errors and return_errors) if isinstance(command, list): logging.debug(b'Running: ' + subprocess.list2cmdline(command)) else: logging.debug(b'Running: ' + command) new_env = os.environ.copy() if env: new_env.update(env) # TODO: This can break on systems that don't have the en_US locale # installed (which isn't very many). Ideally in this case, we could # put something in the config file, but that's not plumbed through to here. new_env['LC_ALL'] = 'en_US.UTF-8' new_env['LANGUAGE'] = 'en_US.UTF-8' if with_errors: errors_output = subprocess.STDOUT else: errors_output = subprocess.PIPE if sys.platform.startswith('win'): # Convert all environment variables to byte strings, so that subprocess # doesn't blow up on Windows. new_env = dict( (six.binary_type(key), six.binary_type(value)) for key, value in six.iteritems(new_env) ) p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=errors_output, shell=False, universal_newlines=translate_newlines, env=new_env) else: p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=errors_output, shell=False, close_fds=True, universal_newlines=translate_newlines, env=new_env) errors = None if split_lines: data = p.stdout.readlines() if return_errors: errors = p.stderr.readlines() else: data = p.stdout.read() if return_errors: errors = p.stderr.read() rc = p.wait() if rc and not ignore_errors and rc not in extra_ignore_errors: raise Exception('Failed to execute command: %s\n%s' % (command, data)) elif rc: if log_output_on_error: logging.debug('Command exited with rc %s: %s\n%s---' % (rc, command, data)) else: logging.debug('Command exited with rc %s: %s' % (rc, command)) if rc and none_on_ignored_error: data = None if data is not None: data = post_process_output(data) if return_errors: errors = post_process_output(errors) if return_error_code or return_errors: if return_error_code and return_errors: return rc, data, errors elif return_error_code: return rc, data else: return data, errors else: return data RBTools-0.7.11/rbtools/utils/diffs.py0000644000232200023220000000557513230242633017775 0ustar debalancedebalancefrom __future__ import unicode_literals import fnmatch import os def filename_match_any_patterns(filename, patterns, base_dir=''): """Check if the given filename matches any of the patterns. If base_dir is not supplied, it will treat the filename as relative to the current working directory. """ if base_dir: filename = os.path.abspath(os.path.join(base_dir, filename)) return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns) def filter_diff(diff, file_index_re, exclude_patterns, base_dir=''): """Filter through the lines of diff to exclude files. This function looks for lines that indicate the start of a new file in the diff and checks if the filename matches any of the given patterns. 
If it does, the diff lines corresponding to that file will not be yielded; if the filename does not match any patterns, the lines will be yielded as normal. The file_index_re parameter is a *compiled* regular expression that matches if and only if a new file's diff is being started. It *must* have one sub-group to match the filename. The base_dir parameter is the directory that the filenames will be relative to, which is the root of the repository in most cases. """ include_file = True for line in diff: m = file_index_re.match(line) if m: filename = m.group(1).decode('utf-8') include_file = not filename_match_any_patterns(filename, exclude_patterns, base_dir) if include_file: yield line def normalize_patterns(patterns, base_dir, cwd=None): """Normalize the patterns so that they are all absolute paths. Paths that begin with a path separator are interpreted as being relative to base_dir. All other paths are interpreted as being relative to the current working directory. """ # Some SCMs (e.g., git) require us to execute git commands from the top # level git directory, so their respective SCMClient's diff method will # provide us with what the cwd was when the command was executed. if cwd is None: cwd = os.getcwd() sep_len = len(os.path.sep) def normalize(p): if p.startswith(os.path.sep): p = os.path.join(base_dir, p[sep_len:]) else: p = os.path.join(cwd, p) return os.path.normpath(p) return [normalize(pattern) for pattern in patterns] def remove_filenames_matching_patterns(filenames, patterns, base_dir): """Return an iterable of all filenames that do not match any patterns. The base_dir parameter is the directory that the filenames will be relative to. """ return ( filename for filename in filenames if not filename_match_any_patterns(filename, patterns, base_dir) ) RBTools-0.7.11/rbtools/clients/0000755000232200023220000000000013230242636016620 5ustar debalancedebalanceRBTools-0.7.11/rbtools/clients/bazaar.py0000644000232200023220000002152113230242633020430 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import os import re from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import TooManyRevisionsError from rbtools.utils.checks import check_install from rbtools.utils.diffs import filter_diff, normalize_patterns from rbtools.utils.process import execute USING_PARENT_PREFIX = 'Using parent branch ' class BazaarClient(SCMClient): """ Bazaar client wrapper that fetches repository information and generates compatible diffs. The :class:`RepositoryInfo` object reports whether the repository supports parent diffs (every branch with a parent supports them). """ name = 'Bazaar' supports_diff_exclude_patterns = True INDEX_FILE_RE = re.compile(b"===.+'(.+?)'\n") # Regular expression that matches the path to the current branch. # # For branches with shared repositories, Bazaar reports # "repository branch: /foo", but for standalone branches it reports # "branch root: /foo". BRANCH_REGEX = ( r'\w*(repository branch|branch root|checkout root|checkout of branch):' r' (?P.+)$') # Revision separator (two ..s without escaping, and not followed by a /). # This is the same regex used in bzrlib/option.py:_parse_revision_spec. REVISION_SEPARATOR_REGEX = re.compile(r'\.\.(?![\\/])') def get_repository_info(self): """ Find out information about the current Bazaar branch (if any) and return it. 
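On success this returns a RepositoryInfo built as in the code below,
for example (path illustrative):

    RepositoryInfo(path='/home/user/branch', base_path='/',
                   supports_parent_diffs=True)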
""" if not check_install(['bzr', 'help']): logging.debug('Unable to execute "bzr help": skipping Bazaar') return None bzr_info = execute(['bzr', 'info'], ignore_errors=True) if 'ERROR: Not a branch:' in bzr_info: # This is not a branch: repository_info = None else: # This is a branch, let's get its attributes: branch_match = re.search(self.BRANCH_REGEX, bzr_info, re.MULTILINE) path = branch_match.group('branch_path') if path == '.': path = os.getcwd() repository_info = RepositoryInfo( path=path, base_path='/', # Diffs are always relative to the root. supports_parent_diffs=True) return repository_info def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. 'parent_base': (optional) The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return the current HEAD as 'tip', and the upstream branch as 'base', taking into account parent branches explicitly specified via --parent. """ n_revs = len(revisions) result = {} if n_revs == 0: # No revisions were passed in--start with HEAD, and find the # submit branch automatically. result['tip'] = self._get_revno() result['base'] = self._get_revno('ancestor:') elif n_revs == 1 or n_revs == 2: # If there's a single argument, try splitting it on '..' if n_revs == 1: revisions = self.REVISION_SEPARATOR_REGEX.split(revisions[0]) n_revs = len(revisions) if n_revs == 1: # Single revision. Extract the parent of that revision to use # as the base. result['base'] = self._get_revno('before:' + revisions[0]) result['tip'] = self._get_revno(revisions[0]) elif n_revs == 2: # Two revisions. result['base'] = self._get_revno(revisions[0]) result['tip'] = self._get_revno(revisions[1]) else: raise TooManyRevisionsError # XXX: I tried to automatically find the parent diff revision here, # but I really don't understand the difference between submit # branch, parent branch, bound branches, etc. If there's some way # to know what to diff against, we could use # 'bzr missing --mine-only --my-revision=(base) --line' # to see if we need a parent diff. 
else: raise TooManyRevisionsError if self.options.parent_branch: result['parent_base'] = result['base'] result['base'] = self._get_revno( 'ancestor:%s' % self.options.parent_branch) return result def _get_revno(self, revision_spec=None): command = ['bzr', 'revno'] if revision_spec: command += ['-r', revision_spec] result = execute(command).strip().split('\n') if len(result) == 1: return 'revno:' + result[0] elif len(result) == 2 and result[0].startswith(USING_PARENT_PREFIX): branch = result[0][len(USING_PARENT_PREFIX):] return 'revno:%s:%s' % (result[1], branch) def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """Returns the diff for the given revision spec. If the revision spec is empty, this returns the diff of the current branch with respect to its parent. If a single revision is passed in, this returns the diff of the change introduced in that revision. If two revisions are passed in, this will do a diff between those two revisions. The summary and description are set if guessing is enabled. """ exclude_patterns = normalize_patterns(exclude_patterns, self.get_repository_info().path) diff = self._get_range_diff(revisions['base'], revisions['tip'], include_files, exclude_patterns) if 'parent_base' in revisions: parent_diff = self._get_range_diff( revisions['parent_base'], revisions['base'], include_files, exclude_patterns) else: parent_diff = None return { 'diff': diff, 'parent_diff': parent_diff, } def _get_range_diff(self, base, tip, include_files, exclude_patterns=[]): """Return the diff between 'base' and 'tip'.""" diff_cmd = ['bzr', 'diff', '-q', '-r', '%s..%s' % (base, tip)] + include_files diff = execute(diff_cmd, ignore_errors=True, log_output_on_error=False, split_lines=True, results_unicode=False) if diff: if exclude_patterns: diff = filter_diff(diff, self.INDEX_FILE_RE, exclude_patterns, base_dir=self.get_repository_info().path) return b''.join(diff) else: return None def get_raw_commit_message(self, revisions): # The result is content in the form of: # # 2014-01-02 First Name # # line 1 # line 2 # ... # # 2014-01-02 First Name # # ... log_cmd = ['bzr', 'log', '-r', '%s..%s' % (revisions['base'], revisions['tip'])] # Find out how many commits there are, then log limiting to one fewer. # This is because diff treats the range as (r1, r2] while log treats # the lange as [r1, r2]. lines = execute(log_cmd + ['--line'], ignore_errors=True, split_lines=True) n_revs = len(lines) - 1 lines = execute(log_cmd + ['--gnu-changelog', '-l', str(n_revs)], ignore_errors=True, split_lines=True) message = [] for line in lines: # We only care about lines that start with a tab (commit message # lines) or blank lines. 
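# For example (illustrative), a line of '\tFix the frobnicator.' is
# kept as 'Fix the frobnicator.' once the tab is stripped below.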
if line.startswith('\t'): message.append(line[1:]) elif not line.strip(): message.append(line) return ''.join(message).strip() RBTools-0.7.11/rbtools/clients/git.py0000644000232200023220000010621113230242633017753 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import os import re import sys from rbtools.clients import PatchResult, SCMClient, RepositoryInfo from rbtools.clients.errors import (AmendError, MergeError, PushError, InvalidRevisionSpecError, TooManyRevisionsError, SCMError) from rbtools.clients.perforce import PerforceClient from rbtools.clients.svn import SVNClient, SVNRepositoryInfo from rbtools.utils.checks import check_install, is_valid_version from rbtools.utils.console import edit_text from rbtools.utils.diffs import (normalize_patterns, remove_filenames_matching_patterns) from rbtools.utils.process import execute class GitClient(SCMClient): """ A wrapper around git that fetches repository information and generates compatible diffs. This will attempt to generate a diff suitable for the remote repository, whether git, SVN or Perforce. """ name = 'Git' supports_diff_exclude_patterns = True supports_patch_revert = True can_amend_commit = True can_merge = True can_push_upstream = True can_delete_branch = True def __init__(self, **kwargs): super(GitClient, self).__init__(**kwargs) # Store the 'correct' way to invoke git, just plain old 'git' by # default. self.git = 'git' self._original_cwd = None def _supports_git_config_flag(self): """Return if the installed version of git supports the -c flag. This will execute ``git --version`` on the first call and cache the result. """ if not hasattr(self, '_git_version_at_least_180'): self._git_version_at_least_180 = False version_str = execute([self.git, 'version'], ignore_errors=True, none_on_ignored_error=True) if version_str: m = re.search('(\d+)\.(\d+)\.(\d+)', version_str) if m: git_version = (int(m.group(1)), int(m.group(2)), int(m.group(3))) self._git_version_at_least_180 = git_version >= (1, 8, 0) return self._git_version_at_least_180 def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. 'parent_base': (optional) The revision to use as the base of a parent diff. 'commit_id': (optional) The ID of the single commit being posted, if not using a range. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent_base, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return the current HEAD as 'tip', and the upstream branch as 'base', taking into account parent branches explicitly specified via --parent. """ n_revs = len(revisions) result = {} if n_revs == 0: # No revisions were passed in--start with HEAD, and find the # tracking branch automatically.
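# A standalone sketch of the zero-revision case handled below (not
# RBTools code): tip is HEAD, and base is the merge-base of HEAD and the
# tracking branch, so the review diff covers only commits unique to the
# current branch. 'origin/master' is an assumed tracking branch here.
#
#     import subprocess
#
#     def example_zero_rev_spec():
#         head = subprocess.check_output(
#             ['git', 'rev-parse', 'HEAD']).strip()
#         base = subprocess.check_output(
#             ['git', 'merge-base', 'HEAD', 'origin/master']).strip()
#         return {'base': base, 'tip': head, 'commit_id': head}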
parent_branch = self.get_parent_branch() head_ref = self._rev_parse(self.get_head_ref())[0] merge_base = self._rev_parse( self._get_merge_base(head_ref, self.upstream_branch))[0] result = { 'tip': head_ref, 'commit_id': head_ref, } if parent_branch: result['base'] = self._rev_parse(parent_branch)[0] result['parent_base'] = merge_base else: result['base'] = merge_base # Since the user asked us to operate on HEAD, warn them about a # dirty working directory. if (self.has_pending_changes() and not self.config.get('SUPPRESS_CLIENT_WARNINGS', False)): logging.warning('Your working directory is not clean. Any ' 'changes which have not been committed ' 'to a branch will not be included in your ' 'review request.') elif n_revs == 1 or n_revs == 2: # Let `git rev-parse` sort things out. parsed = self._rev_parse(revisions) n_parsed_revs = len(parsed) assert n_parsed_revs <= 3 if n_parsed_revs == 1: # Single revision. Extract the parent of that revision to use # as the base. parent = self._rev_parse('%s^' % parsed[0])[0] result = { 'base': parent, 'tip': parsed[0], 'commit_id': parsed[0], } elif n_parsed_revs == 2: if parsed[1].startswith('^'): # Passed in revisions were probably formatted as # "base..tip". The rev-parse output includes all ancestors # of the first part, and none of the ancestors of the # second. Basically, the second part is the base (after # stripping the ^ prefix) and the first is the tip. result = { 'base': parsed[1][1:], 'tip': parsed[0], } else: # First revision is base, second is tip result = { 'base': parsed[0], 'tip': parsed[1], } elif n_parsed_revs == 3 and parsed[2].startswith('^'): # Revision spec is diff-since-merge. Find the merge-base of the # two revs to use as base. merge_base = execute([self.git, 'merge-base', parsed[0], parsed[1]]).strip() result = { 'base': merge_base, 'tip': parsed[0], } else: raise InvalidRevisionSpecError( 'Unexpected result while parsing revision spec') parent_base = self._get_merge_base(result['base'], self.upstream_branch) if parent_base != result['base']: result['parent_base'] = parent_base else: raise TooManyRevisionsError return result def get_repository_info(self): """Get repository information for the current Git working tree. This function changes the directory to the top level directory of the current working tree. """ if not check_install(['git', '--help']): # CreateProcess (launched via subprocess, used by check_install) # does not automatically append .cmd for things it finds in PATH. # If we're on Windows, and this works, save it for further use. if (sys.platform.startswith('win') and check_install(['git.cmd', '--help'])): self.git = 'git.cmd' else: logging.debug('Unable to execute "git --help" or "git.cmd ' '--help": skipping Git') return None git_dir = execute([self.git, "rev-parse", "--git-dir"], ignore_errors=True).rstrip("\n") if git_dir.startswith("fatal:") or not os.path.isdir(git_dir): return None # Sometimes core.bare is not set, and generates an error, so ignore # errors. Valid values are 'true' or '1'. bare = execute([self.git, 'config', 'core.bare'], ignore_errors=True).strip() self.bare = bare in ('true', '1') # If we are not working in a bare repository, then we will change # directory to the top level of the working tree and lose our original # position. However, we need the original working directory for file # exclusion patterns, so we save it here.
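# Sketch of the `git rev-parse` output forms distinguished above (SHAs
# are hypothetical):
#
#     git rev-parse topic         -> ['f00d...']
#     git rev-parse main..topic   -> ['f00d...', '^dead...']  (tip, ^base)
#     git rev-parse main...topic  -> ['f00d...', 'dead...', '^0ddb...']
#                                    (both endpoints, then ^merge-base)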
if self._original_cwd is None: self._original_cwd = os.getcwd() # Running in directories other than the top level # of a work-tree would result in broken diffs on the server if not self.bare: git_top = execute([self.git, "rev-parse", "--show-toplevel"], ignore_errors=True).rstrip("\n") # Top level might not work on old git versions, so we use git dir # to find it. if (git_top.startswith('fatal:') or not os.path.isdir(git_dir) or git_top.startswith('cygdrive')): git_top = git_dir os.chdir(os.path.abspath(git_top)) self.head_ref = execute([self.git, 'symbolic-ref', '-q', 'HEAD'], ignore_errors=True).strip() # We know we have something we can work with. Let's find out # what it is. We'll try SVN first, but only if there's a .git/svn # directory. Otherwise, it may attempt to create one and scan # revisions, which can be slow. Also skip SVN detection if the git # repository was specified on command line. git_svn_dir = os.path.join(git_dir, 'svn') if (not getattr(self.options, 'repository_url', None) and os.path.isdir(git_svn_dir) and len(os.listdir(git_svn_dir)) > 0): data = execute([self.git, "svn", "info"], ignore_errors=True) m = re.search(r'^Repository Root: (.+)$', data, re.M) if m: path = m.group(1) m = re.search(r'^URL: (.+)$', data, re.M) if m: base_path = m.group(1)[len(path):] or "/" m = re.search(r'^Repository UUID: (.+)$', data, re.M) if m: uuid = m.group(1) self.type = "svn" # Get SVN tracking branch if getattr(self.options, 'tracking', None): self.upstream_branch = self.options.tracking else: data = execute([self.git, "svn", "rebase", "-n"], ignore_errors=True) m = re.search(r'^Remote Branch:\s*(.+)$', data, re.M) if m: self.upstream_branch = m.group(1) else: sys.stderr.write('Failed to determine SVN ' 'tracking branch. Defaulting ' 'to "master"\n') self.upstream_branch = 'master' return SVNRepositoryInfo(path=path, base_path=base_path, uuid=uuid, supports_parent_diffs=True) else: # Versions of git-svn before 1.5.4 don't (appear to) support # 'git svn info'. If we fail because of an older git install, # here, figure out what version of git is installed and give # the user a hint about what to do next. version = execute([self.git, "svn", "--version"], ignore_errors=True) version_parts = re.search('version (\d+)\.(\d+)\.(\d+)', version) svn_remote = execute( [self.git, "config", "--get", "svn-remote.svn.url"], ignore_errors=True) if (version_parts and svn_remote and not is_valid_version((int(version_parts.group(1)), int(version_parts.group(2)), int(version_parts.group(3))), (1, 5, 4))): raise SCMError('Your installation of git-svn must be ' 'upgraded to version 1.5.4 or later.') # Okay, maybe Perforce (git-p4). git_p4_ref = os.path.join(git_dir, 'refs', 'remotes', 'p4', 'master') if os.path.exists(git_p4_ref): data = execute([self.git, 'config', '--get', 'git-p4.port'], ignore_errors=True) m = re.search(r'(.+)', data) if m: port = m.group(1) else: port = os.getenv('P4PORT') if port: self.type = 'perforce' self.upstream_branch = 'remotes/p4/master' return RepositoryInfo(path=port, base_path='', supports_parent_diffs=True) # Nope, it's git then. # Check for a tracking branch and determine merge-base self.upstream_branch = '' if self.head_ref: short_head = self._strip_heads_prefix(self.head_ref) merge = execute([self.git, 'config', '--get', 'branch.%s.merge' % short_head], ignore_errors=True).strip() remote = execute([self.git, 'config', '--get', 'branch.%s.remote' % short_head], ignore_errors=True).strip() merge = self._strip_heads_prefix(merge) if remote and remote != '.'
and merge: self.upstream_branch = '%s/%s' % (remote, merge) url = None if getattr(self.options, 'repository_url', None): url = self.options.repository_url self.upstream_branch = self.get_origin(self.upstream_branch, True)[0] else: self.upstream_branch, origin_url = \ self.get_origin(self.upstream_branch, True) if not origin_url or origin_url.startswith("fatal:"): self.upstream_branch, origin_url = self.get_origin() url = origin_url.rstrip('/') # Central bare repositories don't have origin URLs. # We return git_dir instead and hope for the best. if not url: url = os.path.abspath(git_dir) # There is no remote, so skip this part of upstream_branch. self.upstream_branch = self.upstream_branch.split('/')[-1] if url: self.type = "git" return RepositoryInfo(path=url, base_path='', supports_parent_diffs=True) return None def _strip_heads_prefix(self, ref): """Strips prefix from ref name, if possible.""" return re.sub(r'^refs/heads/', '', ref) def get_origin(self, default_upstream_branch=None, ignore_errors=False): """Get upstream remote origin from options or parameters. Returns a tuple: (upstream_branch, remote_url) """ upstream_branch = (getattr(self.options, 'tracking', None) or default_upstream_branch or 'origin/master') upstream_remote = upstream_branch.split('/')[0] origin_url = execute( [self.git, "config", "--get", "remote.%s.url" % upstream_remote], ignore_errors=True).rstrip("\n") return (upstream_branch, origin_url) def scan_for_server(self, repository_info): # Scan first for dot files, since it's faster and will cover the # user's $HOME/.reviewboardrc server_url = super(GitClient, self).scan_for_server(repository_info) if server_url: return server_url # TODO: Maybe support a server per remote later? Is that useful? url = execute([self.git, "config", "--get", "reviewboard.url"], ignore_errors=True).strip() if url: return url if self.type == "svn": # Try using the reviewboard:url property on the SVN repo, if it # exists. prop = SVNClient().scan_for_server_property(repository_info) if prop: return prop elif self.type == 'perforce': prop = PerforceClient().scan_for_server(repository_info) if prop: return prop return None def get_raw_commit_message(self, revisions): """Extracts the commit message based on the provided revision range.""" return execute( [self.git, 'log', '--reverse', '--pretty=format:%s%n%n%b', '^%s' % revisions['base'], revisions['tip']], ignore_errors=True).strip() def get_parent_branch(self): """Returns the parent branch.""" parent_branch = getattr(self.options, 'parent_branch', None) if self.type == 'perforce': parent_branch = parent_branch or 'p4' return parent_branch def get_head_ref(self): """Returns the HEAD reference.""" head_ref = "HEAD" if self.head_ref: head_ref = self.head_ref return head_ref def _get_merge_base(self, rev1, rev2): """Returns the merge base.""" return execute([self.git, "merge-base", rev1, rev2]).strip() def _rev_parse(self, revisions): """Runs `git rev-parse` and returns a list of revisions.""" if not isinstance(revisions, list): revisions = [revisions] return execute([self.git, 'rev-parse'] + revisions).strip().split('\n') def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """Perform a diff using the given revisions. If no revisions are specified, this will do a diff of the contents of the current branch since the tracking branch (which defaults to 'master'). If one revision is specified, this will get the diff of that specific change. If two revisions are specified, this will do a diff between those two revisions. 
If a parent branch is specified via the command-line options, or would make sense given the requested revisions and the tracking branch, this will also return a parent diff. """ exclude_patterns = normalize_patterns(exclude_patterns, self._get_root_directory(), cwd=self.original_cwd) try: merge_base = revisions['parent_base'] except KeyError: merge_base = revisions['base'] diff_lines = self.make_diff(merge_base, revisions['base'], revisions['tip'], include_files, exclude_patterns) if 'parent_base' in revisions: parent_diff_lines = self.make_diff(merge_base, revisions['parent_base'], revisions['base'], include_files, exclude_patterns) base_commit_id = revisions['parent_base'] else: parent_diff_lines = None base_commit_id = revisions['base'] return { 'diff': diff_lines, 'parent_diff': parent_diff_lines, 'commit_id': revisions.get('commit_id'), 'base_commit_id': base_commit_id, } def make_diff(self, merge_base, base, tip, include_files, exclude_patterns): """Performs a diff on a particular branch range.""" rev_range = "%s..%s" % (base, tip) if include_files: include_files = ['--'] + include_files git_cmd = [self.git] if self._supports_git_config_flag(): git_cmd.extend(['-c', 'core.quotepath=false']) if self.type in ('svn', 'perforce'): diff_cmd_params = ['--no-color', '--no-prefix', '-r', '-u'] elif self.type == 'git': diff_cmd_params = ['--no-color', '--full-index', '--ignore-submodules'] if self._supports_git_config_flag(): git_cmd.extend(['-c', 'diff.noprefix=false']) if (self.capabilities is not None and self.capabilities.has_capability('diffs', 'moved_files')): diff_cmd_params.append('-M') else: diff_cmd_params.append('--no-renames') else: assert False # By default, don't allow using external diff commands. This prevents # things from breaking horribly if someone configures a graphical diff # viewer like p4merge or kaleidoscope. This can be overridden by # setting GIT_USE_EXT_DIFF = True in ~/.reviewboardrc if not self.config.get('GIT_USE_EXT_DIFF', False): diff_cmd_params.append('--no-ext-diff') diff_cmd = git_cmd + ['diff'] + diff_cmd_params if exclude_patterns: # If we have specified files to exclude, we will get a list of all # changed files and run `git diff` on each un-excluded file # individually. changed_files_cmd = git_cmd + ['diff-tree'] + diff_cmd_params if self.type in ('svn', 'perforce'): # We don't want to send -u along to git diff-tree because it # will generate diff information along with the list of # changed files. changed_files_cmd.remove('-u') elif self.type == 'git': changed_files_cmd.append('-r') changed_files = execute( changed_files_cmd + [rev_range] + include_files, split_lines=True, with_errors=False, ignore_errors=True, none_on_ignored_error=True, log_output_on_error=False) # The output of git diff-tree will be a list of entries that have # changed between the two revisions that we give it. The last part # of the line is the name of the file that has changed. changed_files = remove_filenames_matching_patterns( (filename.split()[-1] for filename in changed_files), exclude_patterns, base_dir=self._get_root_directory()) diff_lines = [] for filename in changed_files: lines = execute(diff_cmd + [rev_range, '--', filename], split_lines=True, with_errors=False, ignore_errors=True, none_on_ignored_error=True, log_output_on_error=False, results_unicode=False) if lines is None: logging.error( 'Could not get diff for all files (git-diff failed ' 'for "%s"). Refusing to return a partial diff.' 
% filename) diff_lines = None break diff_lines += lines else: diff_lines = execute(diff_cmd + [rev_range] + include_files, split_lines=True, with_errors=False, ignore_errors=True, none_on_ignored_error=True, log_output_on_error=False, results_unicode=False) if self.type == 'svn': return self.make_svn_diff(merge_base, diff_lines) elif self.type == 'perforce': return self.make_perforce_diff(merge_base, diff_lines) else: return b''.join(diff_lines) def make_svn_diff(self, merge_base, diff_lines): """ Formats the output of git diff such that it's in a form that svn diff would generate. This is needed so the SVNTool in Review Board can properly parse this diff. """ rev = execute([self.git, "svn", "find-rev", merge_base]).strip() if not rev: return None diff_data = b'' original_file = b'' filename = b'' newfile = False for i, line in enumerate(diff_lines): if line.startswith(b'diff '): # Grab the filename and then filter this out. # This will be in the format of: # # diff --git a/path/to/file b/path/to/file info = line.split(b' ') diff_data += b'Index: %s\n' % info[2] diff_data += b'=' * 67 diff_data += b'\n' elif line.startswith(b'index '): # Filter this out. pass elif line.strip() == b'--- /dev/null': # New file newfile = True elif (line.startswith(b'--- ') and i + 1 < len(diff_lines) and diff_lines[i + 1].startswith(b'+++ ')): newfile = False original_file = line[4:].strip() diff_data += b'--- %s\t(revision %s)\n' % (original_file, rev) elif line.startswith(b'+++ '): filename = line[4:].strip() if newfile: diff_data += b'--- %s\t(revision 0)\n' % filename diff_data += b'+++ %s\t(revision 0)\n' % filename else: # We already printed the "--- " line. diff_data += b'+++ %s\t(working copy)\n' % original_file elif (line.startswith(b'new file mode') or line.startswith(b'deleted file mode')): # Filter this out. pass elif line.startswith(b'Binary files '): # Add the following so that we know binary files were # added/changed. diff_data += b'Cannot display: file marked as a binary type.\n' diff_data += b'svn:mime-type = application/octet-stream\n' else: diff_data += line return diff_data def make_perforce_diff(self, merge_base, diff_lines): """Format the output of git diff to look more like perforce's.""" diff_data = b'' filename = b'' p4rev = b'' # Find which depot changelist we're based on log = execute([self.git, 'log', merge_base], ignore_errors=True) for line in log: m = re.search(br'[rd]epo.-paths = "(.+)": change = (\d+).*\]', log, re.M) if m: base_path = m.group(1).strip() p4rev = m.group(2).strip() break else: # We should really raise an error here, base_path is required pass for i, line in enumerate(diff_lines): if line.startswith(b'diff '): # Grab the filename and then filter this out. 
# This will be in the format of: # diff --git a/path/to/file b/path/to/file filename = line.split(b' ')[2].strip() elif (line.startswith(b'index ') or line.startswith(b'new file mode ')): # Filter this out pass elif (line.startswith(b'--- ') and i + 1 < len(diff_lines) and diff_lines[i + 1].startswith(b'+++ ')): data = execute( ['p4', 'files', base_path + filename + '@' + p4rev], ignore_errors=True, results_unicode=False) m = re.search(br'^%s%s#(\d+).*$' % (re.escape(base_path), re.escape(filename)), data, re.M) if m: file_version = m.group(1).strip() else: file_version = 1 diff_data += b'--- %s%s\t%s%s#%s\n' % (base_path, filename, base_path, filename, file_version) elif line.startswith(b'+++ '): # TODO: add a real timestamp diff_data += b'+++ %s%s\t%s\n' % (base_path, filename, b'TIMESTAMP') else: diff_data += line return diff_data def has_pending_changes(self): """Checks if there are changes waiting to be committed. Returns True if the working directory has been modified or if changes have been staged in the index, otherwise returns False. """ status = execute(['git', 'status', '--porcelain', '--untracked-files=no', '--ignore-submodules=dirty']) return status != '' def amend_commit_description(self, message, revisions): """Update a commit message to the given string. Since git can amend only the most recent commit, an AmendError will be raised if revisions points to a commit other than the most recent commit. """ if revisions and revisions['tip']: commit_ids = execute([self.git, 'rev-parse', 'HEAD', revisions['tip']], split_lines=True) head_id = commit_ids[0].strip() revision_id = commit_ids[1].strip() if head_id != revision_id: raise AmendError('Commit "%s" is not the latest commit, ' 'and thus cannot be modified' % revision_id) execute([self.git, 'commit', '--amend', '-m', message]) def apply_patch(self, patch_file, base_path=None, base_dir=None, p=None, revert=False): """Apply the given patch to index. This will take the given patch file and apply it to the index, scheduling all changes for commit. """ cmd = ['git', 'apply', '-3'] if revert: cmd.append('-R') if p: cmd += ['-p', p] cmd.append(patch_file) rc, data = self._execute(cmd, with_errors=True, return_error_code=True) if rc == 0: return PatchResult(applied=True, patch_output=data) elif 'with conflicts' in data: return PatchResult( applied=True, has_conflicts=True, conflicting_files=[ line.split(' ', 1)[1] for line in data.splitlines() if line.startswith('U') ], patch_output=data) else: return PatchResult(applied=False, patch_output=data) def create_commit(self, message, author, run_editor, files=[], all_files=False): """Commits the given modified files. This is expected to be called after applying a patch. This commits the patch using information from the review request, opening the commit message in $EDITOR to allow the user to update it. """ if run_editor: modified_message = edit_text(message) else: modified_message = message if all_files: execute(['git', 'add', '--all', ':/']) elif files: execute(['git', 'add'] + files) execute(['git', 'commit', '-m', modified_message, '--author="%s <%s>"' % (author.fullname, author.email)]) def delete_branch(self, branch_name, merged_only=True): """Deletes the specified branch. If merged_only is False, then the branch will be deleted even if not yet merged into an upstream branch.
""" if merged_only: delete_flag = '-d' else: delete_flag = '-D' execute(['git', 'branch', delete_flag, branch_name]) def merge(self, target, destination, message, author, squash=False, run_editor=False): """Merges the target branch with destination branch.""" rc, output = execute( ['git', 'checkout', destination], ignore_errors=True, return_error_code=True) if rc: raise MergeError("Could not checkout to branch '%s'.\n\n%s" % (destination, output)) if squash: method = '--squash' else: method = '--no-ff' rc, output = execute( ['git', 'merge', target, method, '--no-commit'], ignore_errors=True, return_error_code=True) if rc: raise MergeError("Could not merge branch '%s' into '%s'.\n\n%s" % (target, destination, output)) self.create_commit(message, author, run_editor) def push_upstream(self, remote_branch): """Pushes the current branch to upstream.""" origin_url = self.get_origin()[1] rc, output = execute( ['git', 'pull', '--rebase', origin_url, remote_branch], ignore_errors=True, return_error_code=True) if rc: raise PushError('Could not pull changes from upstream.') rc, output = execute( ['git', 'push', origin_url, remote_branch], ignore_errors=True, return_error_code=True) if rc: raise PushError("Could not push branch '%s' to upstream" % remote_branch) def get_current_branch(self): """Returns the name of the current branch.""" return execute([self.git, "rev-parse", "--abbrev-ref", "HEAD"], ignore_errors=True).strip() def _get_root_directory(self): """Get the root directory of the repository as an absolute path.""" git_dir = execute([self.git, "rev-parse", "--git-dir"], ignore_errors=True).rstrip("\n") if git_dir.startswith("fatal:") or not os.path.isdir(git_dir): logging.error("Could not find git repository path.") return None return os.path.abspath(os.path.join(git_dir, "..")) @property def original_cwd(self): """Get the original current working directory.""" if self._original_cwd is None: # If this is None, then we haven't called get_repository_info and # shouldn't have changed directories. self._original_cwd = os.getcwd() return self._original_cwd RBTools-0.7.11/rbtools/clients/perforce.py0000644000232200023220000016764613230242633021020 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import marshal import os import re import six import socket import stat import string import subprocess import sys from fnmatch import fnmatch from locale import getpreferredencoding from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import (AmendError, EmptyChangeError, InvalidRevisionSpecError, SCMError, TooManyRevisionsError) from rbtools.utils.checks import check_gnu_diff, check_install from rbtools.utils.filesystem import make_empty_files, make_tempfile from rbtools.utils.process import execute class P4Wrapper(object): """A wrapper around p4 commands. All calls out to p4 go through an instance of this class. It keeps a separation between all the standard SCMClient logic and any parsing and handling of p4 invocation and results. 
""" KEYVAL_RE = re.compile('^([^:]+): (.+)$') COUNTERS_RE = re.compile('^([^ ]+) = (.+)$') def __init__(self, options): self.options = options def is_supported(self): return check_install(['p4', 'help']) def counters(self): lines = self.run_p4(['counters'], split_lines=True) return self._parse_keyval_lines(lines, self.COUNTERS_RE) def change(self, changenum, marshalled=True, password=None): return self.run_p4(['change', '-o', str(changenum)], password=password, ignore_errors=True, none_on_ignored_error=True, marshalled=marshalled) def modify_change(self, new_change_spec): """new_change_spec must contain the changelist number.""" return self.run_p4(['change', '-i'], input_string=new_change_spec) def files(self, path): return self.run_p4(['files', path], marshalled=True) def filelog(self, path): return self.run_p4(['filelog', path], marshalled=True) def fstat(self, depot_path, fields=[]): args = ['fstat'] if fields: args += ['-T', ','.join(fields)] args.append(depot_path) lines = self.run_p4(args, split_lines=True) stat_info = {} for line in lines: line = line.strip() if line.startswith('... '): parts = line.split(' ', 2) stat_info[parts[1]] = parts[2] return stat_info def info(self): lines = self.run_p4(['info'], ignore_errors=True, split_lines=True) return self._parse_keyval_lines(lines) def opened(self, changenum): return self.run_p4(['opened', '-c', str(changenum)], marshalled=True) def print_file(self, depot_path, out_file=None): cmd = ['print'] if out_file: cmd += ['-o', out_file] cmd += ['-q', depot_path] return self.run_p4(cmd) def where(self, depot_path): return self.run_p4(['where', depot_path], marshalled=True) def run_p4(self, p4_args, marshalled=False, password=None, ignore_errors=False, input_string=None, *args, **kwargs): """Invoke p4. In the current implementation, the arguments 'marshalled' and 'input_string' cannot be used together, i.e. this command doesn't allow inputting and outputting at the same time. """ cmd = ['p4'] if marshalled: cmd += ['-G'] if getattr(self.options, 'p4_client', None): cmd += ['-c', self.options.p4_client] if getattr(self.options, 'p4_port', None): cmd += ['-p', self.options.p4_port] if getattr(self.options, 'p4_passwd', None): cmd += ['-P', self.options.p4_passwd] cmd += p4_args if password is not None: cmd += ['-P', password] if marshalled: p = subprocess.Popen(cmd, stdout=subprocess.PIPE) result = [] has_error = False while 1: try: data = marshal.load(p.stdout) except EOFError: break else: result.append(data) if data.get('code', None) == 'error': has_error = True rc = p.wait() if not ignore_errors and (rc or has_error): for record in result: if 'data' in record: print(record['data']) raise SCMError('Failed to execute command: %s\n' % cmd) return result elif input_string is not None: p = subprocess.Popen(cmd, stdin=subprocess.PIPE) p.communicate(input_string) # Send input, wait, set returncode if not ignore_errors and p.returncode: raise SCMError('Failed to execute command: %s\n' % cmd) return None else: result = execute(cmd, ignore_errors=ignore_errors, *args, **kwargs) return result def _parse_keyval_lines(self, lines, regex=KEYVAL_RE): keyvals = {} for line in lines: m = regex.match(line) if m: key = m.groups()[0] value = m.groups()[1] keyvals[key] = value.strip() return keyvals class PerforceClient(SCMClient): """ A wrapper around the p4 Perforce tool that fetches repository information and generates compatible diffs. 
""" name = 'Perforce' can_amend_commit = True supports_diff_exclude_patterns = True supports_diff_extra_args = True supports_patch_revert = True DATE_RE = re.compile(br'(\w+)\s+(\w+)\s+(\d+)\s+(\d\d:\d\d:\d\d)\s+' br'(\d\d\d\d)') ENCODED_COUNTER_URL_RE = re.compile('reviewboard.url\.(\S+)') REVISION_CURRENT_SYNC = '--rbtools-current-sync' REVISION_PENDING_CLN_PREFIX = '--rbtools-pending-cln:' REVISION_DEFAULT_CLN = 'default' ADDED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==A== \S+ ====$', re.M) DELETED_FILES_RE = re.compile(r'^==== //depot/(\S+)#\d+ ==D== \S+ ====$', re.M) def __init__(self, p4_class=P4Wrapper, **kwargs): super(PerforceClient, self).__init__(**kwargs) self.p4 = p4_class(self.options) def get_repository_info(self): if not self.p4.is_supported(): logging.debug('Unable to execute "p4 help": skipping Perforce') return None p4_info = self.p4.info() # For the repository path, we first prefer p4 brokers, then the # upstream p4 server. If neither of those are found, just return None. repository_path = (p4_info.get('Broker address') or p4_info.get('Server address')) if repository_path is None: return None client_root = p4_info.get('Client root') if client_root is None: return None # A 'null' client root is a valid configuration on Windows # client, so don't enforce the repository directory check. if (client_root.lower() != 'null' or not sys.platform.startswith('win')): norm_cwd = os.path.normcase(os.path.realpath(os.getcwd()) + os.path.sep) norm_client_root = os.path.normcase(os.path.realpath(client_root) + os.path.sep) # Don't accept the repository if the current directory # is outside the root of the Perforce client. if not norm_cwd.startswith(norm_client_root): return None try: parts = repository_path.split(':') hostname = None if len(parts) == 3 and parts[0] == 'ssl': hostname = parts[1] port = parts[2] elif len(parts) == 2: hostname, port = parts if not hostname: raise SCMError('Path %s is not a valid Perforce P4PORT' % repository_path) info = socket.gethostbyaddr(hostname) # Build the list of repository paths we want to tr to look up. servers = [hostname] if info[0] != hostname: servers.append(info[0]) # If aliases exist for hostname, create a list of alias:port # strings for repository_path. if info[1]: servers += info[1] repository_path = ['%s:%s' % (server, port) for server in servers] # If there's only one repository path found, then we don't # need to do a more expensive lookup of all registered # paths. We can look up just this path directly. if len(repository_path) == 1: repository_path = repository_path[0] except (socket.gaierror, socket.herror): pass server_version = p4_info.get('Server version', None) if not server_version: return None m = re.search(r'[^ ]*/([0-9]+)\.([0-9]+)/[0-9]+ .*$', server_version, re.M) if m: self.p4d_version = int(m.group(1)), int(m.group(2)) else: # Gracefully bail if we don't get a match return None # Now that we know it's Perforce, make sure we have GNU diff # installed, and error out if we don't. check_gnu_diff() return RepositoryInfo(path=repository_path, supports_changesets=True) def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. 
This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip]. If zero revisions are passed in, this will return the 'default' changelist. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. The result may have special internal revisions or prefixes based on whether the changeset is submitted, pending, or shelved. If two revisions are passed in, they need to both be submitted changesets. """ n_revs = len(revisions) if n_revs == 0: return { 'base': self.REVISION_CURRENT_SYNC, 'tip': (self.REVISION_PENDING_CLN_PREFIX + self.REVISION_DEFAULT_CLN) } elif n_revs == 1: # A single specified CLN can be any of submitted, pending, or # shelved. These are stored with special prefixes and/or names # because the way that we get the contents of the files changes # based on which of these is in effect. status = self._get_changelist_status(revisions[0]) # Both pending and shelved changes are treated as "pending", # through the same code path. This is because the documentation for # 'p4 change' tells a filthy lie, saying that shelved changes will # have their status listed as shelved. In fact, when you shelve # changes, it sticks the data up on the server, but leaves your # working copy intact, and the change is still marked as pending. # Even after reverting the working copy, the change won't have its # status as "shelved". That said, there's perhaps a way that it # could (perhaps from other clients?), so it's still handled in # this conditional. # # The diff routine will first look for opened files in the client, # and if that fails, it will then do the diff against the shelved # copy. if status in ('pending', 'shelved'): return { 'base': self.REVISION_CURRENT_SYNC, 'tip': self.REVISION_PENDING_CLN_PREFIX + revisions[0], } elif status == 'submitted': try: cln = int(revisions[0]) return { 'base': str(cln - 1), 'tip': str(cln), } except ValueError: raise InvalidRevisionSpecError( '%s does not appear to be a valid changelist' % revisions[0]) else: raise InvalidRevisionSpecError( '%s does not appear to be a valid changelist' % revisions[0]) elif n_revs == 2: result = {} # The base revision must be a submitted CLN status = self._get_changelist_status(revisions[0]) if status == 'submitted': result['base'] = revisions[0] elif status in ('pending', 'shelved'): raise InvalidRevisionSpecError( '%s cannot be used as the base CLN for a diff because ' 'it is %s.' 
% (revisions[0], status)) else: raise InvalidRevisionSpecError( '%s does not appear to be a valid changelist' % revisions[0]) # Tip revision can be any of submitted, pending, or shelved CLNs status = self._get_changelist_status(revisions[1]) if status == 'submitted': result['tip'] = revisions[1] elif status in ('pending', 'shelved'): raise InvalidRevisionSpecError( '%s cannot be used for a revision range diff because it ' 'is %s' % (revisions[1], status)) else: raise InvalidRevisionSpecError( '%s does not appear to be a valid changelist' % revisions[1]) return result else: raise TooManyRevisionsError def _get_changelist_status(self, changelist): if changelist == self.REVISION_DEFAULT_CLN: return 'pending' else: change = self.p4.change(changelist) if len(change) == 1 and 'Status' in change[0]: return change[0]['Status'] return None def scan_for_server(self, repository_info): # Scan first for dot files, since it's faster and will cover the # user's $HOME/.reviewboardrc server_url = \ super(PerforceClient, self).scan_for_server(repository_info) if server_url: return server_url return self.scan_for_server_counter(repository_info) def scan_for_server_counter(self, repository_info): """ Checks the Perforce counters to see if the Review Board server's url is specified. Since Perforce only started supporting non-numeric counter values in server version 2008.1, we support both a normal counter 'reviewboard.url' with a string value and embedding the url in a counter name like 'reviewboard.url.http:||reviewboard.example.com'. Note that forward slashes aren't allowed in counter names, so pipe ('|') characters should be used. These should be safe because they should not be used unencoded in urls. """ counters = self.p4.counters() # Try for a "reviewboard.url" counter first. url = counters.get('reviewboard.url', None) if url: return url # Next try for a counter of the form: # reviewboard_url.http:||reviewboard.example.com for key, value in six.iteritems(counters): m = self.ENCODED_COUNTER_URL_RE.match(key) if m: return m.group(1).replace('|', '/') return None def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """ Goes through the hard work of generating a diff on Perforce in order to take into account adds/deletes and to provide the necessary revision information. """ exclude_patterns = self.normalize_exclude_patterns(exclude_patterns) if not revisions: # The "path posting" is still interesting enough to keep around. If # the given arguments don't parse as valid changelists, fall back # on that behavior. return self._path_diff(extra_args, exclude_patterns) # Support both //depot/... paths and local filenames. For the moment, # this does *not* support any of perforce's traversal literals like ... depot_include_files = [] local_include_files = [] for filename in include_files: if filename.startswith('//'): depot_include_files.append(filename) else: # The way we determine files to include or not is via # 'p4 where', which gives us absolute paths. 
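# For example (hypothetical paths), 'p4 where //depot/src/foo.c' might
# report '/home/user/ws/src/foo.c' as the local path, so user-supplied
# names are normalized to the same absolute, symlink-resolved form:
#
#     os.path.realpath(os.path.abspath('src/foo.c'))
#     # -> '/home/user/ws/src/foo.c' when run from the client root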
local_include_files.append( os.path.realpath(os.path.abspath(filename))) base = revisions['base'] tip = revisions['tip'] cl_is_pending = tip.startswith(self.REVISION_PENDING_CLN_PREFIX) cl_is_shelved = False if not cl_is_pending: # Submitted changes are handled by a different method logging.info('Generating diff for range of submitted changes: %s ' 'to %s', base, tip) return self._compute_range_changes( base, tip, depot_include_files, local_include_files, exclude_patterns) # Strip off the prefix tip = tip.split(':', 1)[1] # Try to get the files out of the working directory first. If that # doesn't work, look at shelved files. opened_files = self.p4.opened(tip) if not opened_files: opened_files = self.p4.files('//...@=%s' % tip) cl_is_shelved = True if not opened_files: raise EmptyChangeError if cl_is_shelved: logging.info('Generating diff for shelved changeset %s' % tip) else: logging.info('Generating diff for pending changeset %s' % tip) diff_lines = [] action_mapping = { 'edit': 'M', 'integrate': 'M', 'add': 'A', 'branch': 'A', 'import': 'A', 'delete': 'D', } # XXX: Theoretically, shelved files should handle moves just fine--you # can shelve and unshelve changes containing moves. Unfortunately, # there doesn't seem to be any way to match up the added and removed # files when the changeset is shelved, because none of the usual # methods (fstat, filelog) provide the source move information when the # changeset is shelved. if self._supports_moves() and not cl_is_shelved: action_mapping['move/add'] = 'MV-a' action_mapping['move/delete'] = 'MV' else: # The Review Board server doesn't support moved files for # perforce--create a diff that shows moved files as adds and # deletes. action_mapping['move/add'] = 'A' action_mapping['move/delete'] = 'D' for f in opened_files: depot_file = f['depotFile'] local_file = self._depot_to_local(depot_file) new_depot_file = '' try: base_revision = int(f['rev']) except ValueError: # For actions like deletes, there won't be any "current # revision". Just pass through whatever was there before. base_revision = f['rev'] action = f['action'] if ((depot_include_files and depot_file not in depot_include_files) or (local_include_files and local_file not in local_include_files) or self._should_exclude_file(local_file, depot_file, exclude_patterns)): continue old_file = '' new_file = '' logging.debug('Processing %s of %s', action, depot_file) try: changetype_short = action_mapping[action] except KeyError: raise SCMError('Unsupported action type "%s" for %s' % (action, depot_file)) if changetype_short == 'M': try: old_file, new_file = self._extract_edit_files( depot_file, local_file, base_revision, tip, cl_is_shelved, False) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue elif changetype_short == 'A': # Perforce has a charming quirk where the revision listed for # a file is '1' in both the first submitted revision, as well # as before it's added. On the Review Board side, when we parse # the diff, we'll check to see if that revision exists, but # that only works for pending changes. If the change is shelved # or submitted, revision 1 will exist, which causes the # displayed diff to contain revision 1 twice. # # Setting the revision in the diff file to be '0' will avoid # problems with patches that add files. 
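# For illustration (hypothetical file and timestamp), forcing the base
# revision to 0 makes the diff header produced later by _do_diff mark
# the file as new:
#
#     --- new_file.c    //depot/new_file.c#0
#     +++ new_file.c    2014-01-02 03:04:05
#
# whereas '#1' could collide with a real revision 1 of a shelved or
# submitted add.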
base_revision = 0 try: old_file, new_file = self._extract_add_files( depot_file, local_file, tip, cl_is_shelved, cl_is_pending) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue if os.path.islink(new_file): if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping symlink %s', new_file) continue elif changetype_short == 'D': try: old_file, new_file = self._extract_delete_files( depot_file, base_revision) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning( 'Skipping file %s#%s: %s', depot_file, base_revision, e) continue elif changetype_short == 'MV-a': # The server supports move information. We ignore this # particular entry, and handle the moves within the equivalent # 'move/delete' entry. continue elif changetype_short == 'MV': try: old_file, new_file, new_depot_file = \ self._extract_move_files( depot_file, tip, base_revision, cl_is_shelved) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue dl = self._do_diff(old_file, new_file, depot_file, base_revision, new_depot_file, changetype_short, ignore_unmodified=True) diff_lines += dl return { 'diff': b''.join(diff_lines), 'changenum': self.get_changenum(revisions), } def get_changenum(self, revisions): """Return the change number for the given revisions. This is only used when the client is supposed to send a change number to the server (such as with Perforce). Args: revisions (dict): A revisions dictionary as returned by ``parse_revision_spec``. Returns: unicode: The change number to send to the Review Board server. """ # This is used to report the change number to the Review Board server # when posting pending changesets. By reporting the change number, we # extract the changeset description server-side. Ideally we'd change # this to remove the server-side implementation and just implement # --guess-summary and --guess-description, but that would likely # create a lot of unhappy users. if revisions is not None: tip = revisions['tip'] if tip.startswith(self.REVISION_PENDING_CLN_PREFIX): tip = tip[len(self.REVISION_PENDING_CLN_PREFIX):] if tip != self.REVISION_DEFAULT_CLN: return tip return None def _compute_range_changes(self, base, tip, depot_include_files, local_include_files, exclude_patterns): """Compute the changes across files given a revision range. This will look at the history of all changes within the given range and compute the full set of changes contained therein. Just looking at the two trees isn't enough, since files may have moved around and we want to include that information. """ # Start by looking at the filelog to get a history of all the changes # within the changeset range. This processing step is done because in # marshalled mode, the filelog doesn't sort its entries at all, and can # also include duplicate information, especially when files have moved # around. changesets = {} # We expect to generate a diff for (base, tip], but filelog gives us # [base, tip]. Increment the base to avoid this. 
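# Worked example (changelist numbers are hypothetical): for base='100'
# and tip='105', the diff must cover (100, 105], so the filelog query
# becomes '//...@101,105'; the inclusive filelog range [101, 105] then
# matches the changes the diff will contain.
#
#     real_base = str(int('100') + 1)             # '101'
#     query = '//...@%s,%s' % (real_base, '105')  # '//...@101,105'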
real_base = str(int(base) + 1) for file_entry in self.p4.filelog('//...@%s,%s' % (real_base, tip)): cid = 0 while True: change_key = 'change%d' % cid if change_key not in file_entry: break action = file_entry['action%d' % cid] depot_file = file_entry['depotFile'] try: cln = int(file_entry[change_key]) except ValueError: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: unable to parse ' 'change number "%s"', depot_file, file_entry[change_key]) break if action == 'integrate': action = 'edit' elif action == 'branch': action = 'add' if action not in ('edit', 'add', 'delete', 'move/add', 'move/delete'): raise Exception('Unsupported action type "%s" for %s' % (action, depot_file)) try: rev_key = 'rev%d' % cid rev = int(file_entry[rev_key]) except ValueError: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: unable to parse ' 'revision number "%s"', depot_file, file_entry[rev_key]) break change = { 'rev': rev, 'action': action, } if action == 'move/add': change['oldFilename'] = file_entry['file0,%d' % cid] elif action == 'move/delete': change['newFilename'] = file_entry['file1,%d' % cid] cid += 1 changesets.setdefault(cln, {})[depot_file] = change # Now run through the changesets in order and compute a change journal # for each file. files = [] for cln in sorted(changesets.keys()): changeset = changesets[cln] for depot_file, change in six.iteritems(changeset): action = change['action'] # Moves will be handled in the 'move/delete' entry if action == 'move/add': continue file_entry = None for f in files: if f['depotFile'] == depot_file: file_entry = f break if file_entry is None: file_entry = { 'initialDepotFile': depot_file, 'initialRev': change['rev'], 'newFile': action == 'add', 'rev': change['rev'], 'action': 'none', } files.append(file_entry) self._accumulate_range_change(file_entry, change) if not files: raise EmptyChangeError # Now generate the diff supports_moves = self._supports_moves() diff_lines = [] for f in files: action = f['action'] depot_file = f['depotFile'] try: local_file = self._depot_to_local(depot_file) except SCMError: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Could not find local filename for "%s"', depot_file) local_file = None rev = f['rev'] initial_depot_file = f['initialDepotFile'] initial_rev = f['initialRev'] if ((depot_include_files and depot_file not in depot_include_files) or (local_include_files and local_file and local_file not in local_include_files) or self._should_exclude_file(local_file, depot_file, exclude_patterns)): continue if action == 'add': try: old_file, new_file = self._extract_add_files( depot_file, local_file, rev, False, False) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue diff_lines += self._do_diff( old_file, new_file, depot_file, 0, '', 'A', ignore_unmodified=True) elif action == 'delete': try: old_file, new_file = self._extract_delete_files( initial_depot_file, initial_rev) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue diff_lines += self._do_diff( old_file, new_file, initial_depot_file, initial_rev, depot_file, 'D', ignore_unmodified=True) elif action == 'edit': try: old_file, new_file = self._extract_edit_files( depot_file, local_file, initial_rev, rev, False, True) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue diff_lines += self._do_diff( old_file, new_file, initial_depot_file, initial_rev, depot_file, 'M', ignore_unmodified=True) elif action == 'move': try: old_file_a, new_file_a = self._extract_add_files( depot_file, local_file, rev, False, False) old_file_b, new_file_b = self._extract_delete_files( initial_depot_file, initial_rev) except ValueError as e: if not self.config.get('SUPPRESS_CLIENT_WARNINGS', False): logging.warning('Skipping file %s: %s', depot_file, e) continue if supports_moves: # Show the change as a move diff_lines += self._do_diff( old_file_a, new_file_b, initial_depot_file, initial_rev, depot_file, 'MV', ignore_unmodified=True) else: # Show the change as add and delete diff_lines += self._do_diff( old_file_a, new_file_a, depot_file, 0, '', 'A', ignore_unmodified=True) diff_lines += self._do_diff( old_file_b, new_file_b, initial_depot_file, initial_rev, depot_file, 'D', ignore_unmodified=True) elif action == 'skip': continue else: # We should never get here. The results of # self._accumulate_range_change should never be anything other # than add, delete, move, or edit. assert False return { 'diff': b''.join(diff_lines) } def _accumulate_range_change(self, file_entry, change): """Compute the effects of a given change on a given file""" old_action = file_entry['action'] current_action = change['action'] if old_action == 'none': # This is the first entry for this file. new_action = current_action file_entry['depotFile'] = file_entry['initialDepotFile'] # If the first action was an edit or a delete, then the initial # revision (that we'll use to generate the diff) is n-1 if current_action in ('edit', 'delete'): file_entry['initialRev'] -= 1 elif current_action == 'add': # If we're adding a file that existed in the base changeset, it # means it was previously deleted and then added back. We # therefore want the operation to look like an edit. If it # didn't exist, then we added, deleted, and are now adding # again. if old_action == 'skip': new_action = 'add' else: new_action = 'edit' elif current_action == 'edit': # Edits don't affect the previous type of change # (edit+edit=edit, move+edit=move, add+edit=add). new_action = old_action elif current_action == 'delete': # If we're deleting a file which did not exist in the base # changeset, then we want to just skip it entirely (since it # means it's been added and then deleted). Otherwise, it's a # real delete. if file_entry['newFile']: new_action = 'skip' else: new_action = 'delete' elif current_action == 'move/delete': new_action = 'move' file_entry['depotFile'] = change['newFilename'] file_entry['rev'] = change['rev'] file_entry['action'] = new_action def _extract_edit_files(self, depot_file, local_file, rev_a, rev_b, cl_is_shelved, cl_is_submitted): """Extract the 'old' and 'new' files for an edit operation. Returns a tuple of (old filename, new filename). This can raise a ValueError if the extraction fails.
""" # Get the old version out of perforce old_filename = make_tempfile() self._write_file('%s#%s' % (depot_file, rev_a), old_filename) if cl_is_shelved: new_filename = make_tempfile() self._write_file('%s@=%s' % (depot_file, rev_b), new_filename) elif cl_is_submitted: new_filename = make_tempfile() self._write_file('%s#%s' % (depot_file, rev_b), new_filename) else: # Just reference the file within the client view new_filename = local_file return old_filename, new_filename def _extract_add_files(self, depot_file, local_file, revision, cl_is_shelved, cl_is_pending): """Extract the 'old' and 'new' files for an add operation. Returns a tuple of (old filename, new filename). This can raise a ValueError if the extraction fails. """ # Make an empty tempfile for the old file old_filename = make_tempfile() if cl_is_shelved: new_filename = make_tempfile() self._write_file('%s@=%s' % (depot_file, revision), new_filename) elif cl_is_pending: # Just reference the file within the client view new_filename = local_file else: new_filename = make_tempfile() self._write_file('%s#%s' % (depot_file, revision), new_filename) return old_filename, new_filename def _extract_delete_files(self, depot_file, revision): """Extract the 'old' and 'new' files for a delete operation. Returns a tuple of (old filename, new filename). This can raise a ValueError if extraction fails. """ # Get the old version out of perforce old_filename = make_tempfile() self._write_file('%s#%s' % (depot_file, revision), old_filename) # Make an empty tempfile for the new file new_filename = make_tempfile() return old_filename, new_filename def _extract_move_files(self, old_depot_file, tip, base_revision, cl_is_shelved): """Extract the 'old' and 'new' files for a move operation. Returns a tuple of (old filename, new filename, new depot path). This can raise a ValueError if extraction fails. """ # XXX: fstat *ought* to work, but perforce doesn't supply the movedFile # field in fstat (or apparently anywhere else) when a change is # shelved. For now, _diff_pending will avoid calling this method at all # for shelved changes, and instead treat them as deletes and adds. assert not cl_is_shelved # if cl_is_shelved: # fstat_path = '%s@=%s' % (depot_file, tip) # else: fstat_path = old_depot_file stat_info = self.p4.fstat(fstat_path, ['clientFile', 'movedFile']) if 'clientFile' not in stat_info or 'movedFile' not in stat_info: raise ValueError('Unable to get moved file information') old_filename = make_tempfile() self._write_file('%s#%s' % (old_depot_file, base_revision), old_filename) # if cl_is_shelved: # fstat_path = '%s@=%s' % (stat_info['movedFile'], tip) # else: fstat_path = stat_info['movedFile'] stat_info = self.p4.fstat(fstat_path, ['clientFile', 'depotFile']) if 'clientFile' not in stat_info or 'depotFile' not in stat_info: raise ValueError('Unable to get moved file information') # Grab the new depot path (to include in the diff index) new_depot_file = stat_info['depotFile'] # Reference the new file directly in the client view new_filename = stat_info['clientFile'] return old_filename, new_filename, new_depot_file def _path_diff(self, args, exclude_patterns): """ Process a path-style diff. This allows people to post individual files in various ways. Multiple paths may be specified in `args`. The path styles supported are: //path/to/file Upload file as a "new" file. //path/to/dir/... Upload all files as "new" files. //path/to/file[@#]rev Upload file from that rev as a "new" file. //path/to/file[@#]rev,[@#]rev Upload a diff between revs. 
//path/to/dir/...[@#]rev,[@#]rev Upload a diff of all files between revs in that directory. """ r_revision_range = re.compile(r'^(?P<path>//[^@#]+)' + r'(?P<revision1>[#@][^,]+)?' + r'(?P<revision2>,[#@][^,]+)?$') empty_filename = make_tempfile() tmp_diff_from_filename = make_tempfile() tmp_diff_to_filename = make_tempfile() diff_lines = [] for path in args: m = r_revision_range.match(path) if not m: raise SCMError('Path %s does not match a valid Perforce path.' % path) revision1 = m.group('revision1') revision2 = m.group('revision2') first_rev_path = m.group('path') if revision1: first_rev_path += revision1 records = self.p4.files(first_rev_path) # Make a map for convenience. files = {} # Records are: # 'rev': '1' # 'func': '...' # 'time': '1214418871' # 'action': 'edit' # 'type': 'ktext' # 'depotFile': '...' # 'change': '123456' for record in records: if record['action'] not in ('delete', 'move/delete'): if revision2: files[record['depotFile']] = [record, None] else: files[record['depotFile']] = [None, record] if revision2: # [1:] to skip the comma. second_rev_path = m.group('path') + revision2[1:] records = self.p4.files(second_rev_path) for record in records: if record['action'] not in ('delete', 'move/delete'): try: m = files[record['depotFile']] m[1] = record except KeyError: files[record['depotFile']] = [None, record] old_file = new_file = empty_filename changetype_short = None for depot_path, (first_record, second_record) in \ six.iteritems(files): old_file = new_file = empty_filename if first_record is None: new_path = '%s#%s' % (depot_path, second_record['rev']) self._write_file(new_path, tmp_diff_to_filename) new_file = tmp_diff_to_filename changetype_short = 'A' base_revision = 0 elif second_record is None: old_path = '%s#%s' % (depot_path, first_record['rev']) self._write_file(old_path, tmp_diff_from_filename) old_file = tmp_diff_from_filename changetype_short = 'D' base_revision = int(first_record['rev']) elif first_record['rev'] == second_record['rev']: # When we know the revisions are the same, we don't need # to do any diffing. This speeds up large revision-range # diffs quite a bit. continue else: old_path = '%s#%s' % (depot_path, first_record['rev']) new_path = '%s#%s' % (depot_path, second_record['rev']) self._write_file(old_path, tmp_diff_from_filename) self._write_file(new_path, tmp_diff_to_filename) new_file = tmp_diff_to_filename old_file = tmp_diff_from_filename changetype_short = 'M' base_revision = int(first_record['rev']) local_path = self._depot_to_local(depot_path) if self._should_exclude_file(local_path, depot_path, exclude_patterns): continue # TODO: We're passing new_depot_file='' here just to make # things work like they did before the moved file change was # added (58ccae27). This section of code needs to be updated # to properly work with moved files. dl = self._do_diff(old_file, new_file, depot_path, base_revision, '', changetype_short, ignore_unmodified=True) diff_lines += dl os.unlink(empty_filename) os.unlink(tmp_diff_from_filename) os.unlink(tmp_diff_to_filename) return { 'diff': b''.join(diff_lines), } def _do_diff(self, old_file, new_file, depot_file, base_revision, new_depot_file, changetype_short, ignore_unmodified=False): """ Do the work of producing a diff for Perforce. old_file - The absolute path to the "old" file. new_file - The absolute path to the "new" file. depot_file - The depot path in Perforce for this file. base_revision - The base perforce revision number of the old file as an integer. new_depot_file - Location of the new file. Only used for moved files.
changetype_short - The change type as a short string. ignore_unmodified - If True, will return an empty list if the file is not changed. Returns a list of strings of diff lines. """ if hasattr(os, 'uname') and os.uname()[0] == 'SunOS': diff_cmd = ['gdiff', '-urNp', old_file, new_file] else: diff_cmd = ['diff', '-urNp', old_file, new_file] # Diff returns "1" if differences were found. dl = execute(diff_cmd, extra_ignore_errors=(1, 2), log_output_on_error=False, translate_newlines=False, results_unicode=False) # If the input file has ^M characters at end of line, let's ignore them. dl = dl.replace(b'\r\r\n', b'\r\n') dl = dl.splitlines(True) cwd = os.getcwd() if depot_file.startswith(cwd): local_path = depot_file[len(cwd) + 1:] else: local_path = depot_file if changetype_short == 'MV': is_move = True if new_depot_file.startswith(cwd): new_local_path = new_depot_file[len(cwd) + 1:] else: new_local_path = new_depot_file else: is_move = False new_local_path = local_path # Special handling for the output of the diff tool on binary files: # diff outputs "Files a and b differ" # and the code below expects the output to start with # "Binary files " if (len(dl) == 1 and dl[0].startswith(b'Files %s and %s differ' % (old_file, new_file))): dl = [b'Binary files %s and %s differ\n' % (old_file, new_file)] if dl == [] or dl[0].startswith(b'Binary files '): is_empty_and_changed = (self.supports_empty_files() and changetype_short in ('A', 'D')) if dl == [] and (is_move or is_empty_and_changed): line = ('==== %s#%s ==%s== %s ====\n' % (depot_file, base_revision, changetype_short, new_local_path)).encode('utf-8') dl.insert(0, line) dl.append(b'\n') else: if ignore_unmodified: return [] else: print('Warning: %s in your changeset is unmodified' % local_path) elif len(dl) > 1: m = re.search(br'(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)', dl[1]) if m: timestamp = m.group(1).decode('utf-8') else: # Thu Sep 3 11:24:48 2007 m = self.DATE_RE.search(dl[1]) if not m: raise SCMError('Unable to parse diff header: %s' % dl[1]) month_map = { b'Jan': b'01', b'Feb': b'02', b'Mar': b'03', b'Apr': b'04', b'May': b'05', b'Jun': b'06', b'Jul': b'07', b'Aug': b'08', b'Sep': b'09', b'Oct': b'10', b'Nov': b'11', b'Dec': b'12', } month = month_map[m.group(2)] day = m.group(3) timestamp = m.group(4) year = m.group(5) timestamp = '%s-%s-%s %s' % (year, month, day, timestamp) dl[0] = ('--- %s\t%s#%s\n' % (local_path, depot_file, base_revision)).encode('utf-8') dl[1] = ('+++ %s\t%s\n' % (new_local_path, timestamp)).encode('utf-8') if is_move: dl.insert(0, ('Moved to: %s\n' % new_depot_file).encode('utf-8')) dl.insert(0, ('Moved from: %s\n' % depot_file).encode('utf-8')) # Not everybody has files that end in a newline (ugh). This ensures # that the resulting diff file isn't broken. if not dl[-1].endswith(b'\n'): dl.append(b'\n') else: raise SCMError('No valid diffs: %s' % dl[0].decode('utf-8')) return dl def _write_file(self, depot_path, tmpfile): """ Grabs a file from Perforce and writes it to a temp file. p4 print sets the file readonly and that causes a later call to unlink to fail. So we make the file read/write. """ logging.debug('Writing "%s" to "%s"' % (depot_path, tmpfile)) self.p4.print_file(depot_path, out_file=tmpfile) # The output of 'p4 print' will be a symlink if that's what version # control contains. There are a few reasons to skip these files... # # * Relative symlinks will likely be broken, causing an unexpected # OSError. # * The file that's symlinked to isn't necessarily in version control.
# * Users expect that this will only process files under version # control. If I can replace a file they opened with a symlink to # private keys in '~/.ssh', then they'd probably be none too happy # when rbt uses their credentials to publish its contents. if os.path.islink(tmpfile): raise ValueError('"%s" is a symlink' % depot_path) else: os.chmod(tmpfile, stat.S_IREAD | stat.S_IWRITE) def _depot_to_local(self, depot_path): """ Given a path in the depot return the path on the local filesystem to the same file. If there are multiple results, take only the last result from the where command. """ where_output = self.p4.where(depot_path) try: return where_output[-1]['path'] except: # XXX: This breaks on filenames with spaces. return where_output[-1]['data'].split(' ')[2].strip() def get_raw_commit_message(self, revisions): """Extract the commit message based on the provided revision range. Since local changelists in perforce are not ordered with respect to one another, this implementation looks at only the tip revision. """ changelist = revisions['tip'] # The parsed revision spec may include a prefix indicating that it is # pending. This prefix, which is delimited by a colon, must be # stripped in order to run p4 change on the actual changelist number. if ':' in changelist: changelist = changelist.split(':', 1)[1] if changelist == self.REVISION_DEFAULT_CLN: # The default changelist has no description and couldn't be # accessed from p4 change anyway return '' logging.debug('Fetching description for changelist %s', changelist) change = self.p4.change(changelist) if len(change) == 1 and 'Description' in change[0]: return change[0]['Description'].decode(getpreferredencoding()) else: return '' def apply_patch_for_empty_files(self, patch, p_num, revert=False): """Returns True if any empty files in the patch are applied. If there are no empty files in the patch or if an error occurs while applying the patch, we return False. """ patched_empty_files = False if revert: added_files = self.DELETED_FILES_RE.findall(patch) deleted_files = self.ADDED_FILES_RE.findall(patch) else: added_files = self.ADDED_FILES_RE.findall(patch) deleted_files = self.DELETED_FILES_RE.findall(patch) # Prepend the root of the Perforce client to each file name. p4_info = self.p4.info() client_root = p4_info.get('Client root') added_files = ['%s/%s' % (client_root, f) for f in added_files] deleted_files = ['%s/%s' % (client_root, f) for f in deleted_files] if added_files: make_empty_files(added_files) result = execute(['p4', 'add'] + added_files, ignore_errors=True, none_on_ignored_error=True) if result is None: logging.error('Unable to execute "p4 add" on: %s', ', '.join(added_files)) else: patched_empty_files = True if deleted_files: result = execute(['p4', 'delete'] + deleted_files, ignore_errors=True, none_on_ignored_error=True) if result is None: logging.error('Unable to execute "p4 delete" on: %s', ', '.join(deleted_files)) else: patched_empty_files = True return patched_empty_files def _supports_moves(self): return (self.capabilities and self.capabilities.has_capability('scmtools', 'perforce', 'moved_files')) def _supports_empty_files(self): """Checks if the RB server supports added/deleted empty files.""" return (self.capabilities and self.capabilities.has_capability('scmtools', 'perforce', 'empty_files')) def _should_exclude_file(self, local_file, depot_file, exclude_patterns): """Determine if a file should be excluded from a diff. Check if the file identified by (local_file, depot_file) should be excluded from the diff. 
If a pattern begins with '//', then it will be matched against the depot_file. Otherwise, it will be matched against the local file. This function expects `exclude_patterns` to be normalized. """ for pattern in exclude_patterns: if pattern.startswith('//'): if fnmatch(depot_file, pattern): return True elif local_file and fnmatch(local_file, pattern): return True return False def normalize_exclude_patterns(self, patterns): """Normalize the set of patterns so all non-depot paths are absolute. A path with a leading // is interpreted as a depot pattern and remains unchanged. A path with a leading path separator is interpreted as being relative to the Perforce client root. All other paths are interpreted as being relative to the current working directory. Non-depot paths are transformed into absolute paths. """ cwd = os.getcwd() base_dir = self.p4.info().get('Client root') def normalize(p): if p.startswith('//'): # Absolute depot patterns remain unchanged. return p elif p.startswith(os.path.sep): # Patterns beginning with the operating system's path separator # are relative to the Perforce client root. assert base_dir is not None p = os.path.join(base_dir, p[1:]) else: # All other patterns are considered to be relative to the # current working directory. p = os.path.join(cwd, p) return os.path.normpath(p) return [normalize(pattern) for pattern in patterns] def _replace_description_in_changelist_spec(self, old_spec, new_description): """Replace the description in the given changelist spec. old_spec is a formatted p4 changelist spec string (the raw output from p4 change). This method replaces the existing description with new_description, and returns the new changelist spec. """ new_spec = '' whitespace = tuple(string.whitespace) description_key = 'Description:' skipping_old_description = False for line in old_spec.splitlines(True): if not skipping_old_description: if not line.startswith(description_key): new_spec += line else: # Insert the new description. Don't include the first line # of the old one if it happens to be on the same line as # the key. skipping_old_description = True new_spec += description_key for desc_line in new_description.splitlines(): new_spec += '\t%s\n' % desc_line else: # Ignore the description from the original file (all lines # that start with whitespace until the next key is # encountered). if line.startswith(whitespace): continue else: skipping_old_description = False new_spec += '\n%s' % line return new_spec def amend_commit_description(self, message, revisions): """Update a commit message to the given string. Since local changelists on perforce have no ordering with respect to each other, the revisions argument is mandatory. """ # Get the changelist number from the tip revision, removing the prefix # if necessary. Don't allow amending submitted or default changelists. changelist_id = revisions['tip'] logging.debug('Preparing to amend change %s' % changelist_id) if not changelist_id.startswith(self.REVISION_PENDING_CLN_PREFIX): raise AmendError('Cannot modify submitted changelist %s' % changelist_id) changelist_num = changelist_id.split(':', 1)[1] if changelist_num == self.REVISION_DEFAULT_CLN: raise AmendError('Cannot modify the default changelist') elif not changelist_num.isdigit(): raise AmendError('%s is an invalid changelist ID' % changelist_num) # Get the current changelist description and insert the new message. # Since p4 change -i doesn't take in marshalled objects, we get the # description as raw text and manually edit it.
change = self.p4.change(changelist_num, marshalled=False) new_change = self._replace_description_in_changelist_spec( change, message) self.p4.modify_change(new_change) RBTools-0.7.11/rbtools/clients/plastic.py0000644000232200023220000002442613230242633020636 0ustar debalancedebalanceimport logging import os import re from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError, SCMError) from rbtools.utils.checks import check_install from rbtools.utils.filesystem import make_tempfile from rbtools.utils.process import execute class PlasticClient(SCMClient): """ A wrapper around the cm Plastic tool that fetches repository information and generates compatible diffs. """ name = 'Plastic' supports_patch_revert = True REVISION_CHANGESET_PREFIX = 'cs:' def __init__(self, **kwargs): super(PlasticClient, self).__init__(**kwargs) def get_repository_info(self): if not check_install(['cm', 'version']): logging.debug('Unable to execute "cm version": skipping Plastic') return None # Get the workspace directory, so we can strip it from the diff output self.workspacedir = execute(["cm", "gwp", ".", "--format={1}"], split_lines=False, ignore_errors=True).strip() logging.debug("Workspace is %s" % self.workspacedir) # Get the repository that the current directory is from split = execute(["cm", "ls", self.workspacedir, "--format={8}"], split_lines=True, ignore_errors=True) # remove blank lines split = [x for x in split if x] m = re.search(r'^rep:(.+)$', split[0], re.M) if not m: return None path = m.group(1) return RepositoryInfo(path, supports_changesets=True, supports_parent_diffs=False) def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': Always None. 'tip': A revision string representing either a changeset or a branch. These will be used to generate the diffs to upload to Review Board (or print). The Plastic implementation requires that one and only one revision is passed in. The diff for review will include the changes in the given changeset or branch. """ n_revisions = len(revisions) if n_revisions == 0: raise InvalidRevisionSpecError( 'Either a changeset or a branch must be specified') elif n_revisions == 1: return { 'base': None, 'tip': revisions[0], } else: raise TooManyRevisionsError def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """ Performs a diff across all modified files in a Plastic workspace. Parent diffs are not supported.
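For illustration only (the changeset number below is hypothetical, not taken from a real repository), a call might look like::

    client.diff({'base': None, 'tip': 'cs:1234'})
    # => {'diff': '...unified diff text...', 'changenum': '1234'}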
""" # TODO: use 'files' changenum = None tip = revisions['tip'] if tip.startswith(self.REVISION_CHANGESET_PREFIX): logging.debug('Doing a diff against changeset %s', tip) try: changenum = str(int( tip[len(self.REVISION_CHANGESET_PREFIX):])) except ValueError: pass else: logging.debug('Doing a diff against branch %s', tip) if not getattr(self.options, 'branch', None): self.options.branch = tip diff_entries = execute( ['cm', 'diff', tip, '--format={status} {path} rev:revid:{revid} ' 'rev:revid:{parentrevid} src:{srccmpath} ' 'dst:{dstcmpath}{newline}'], split_lines=True) logging.debug('Got files: %s', diff_entries) diff = self._process_diffs(diff_entries) return { 'diff': diff, 'changenum': changenum, } def _process_diffs(self, my_diff_entries): # Diff generation based on perforce client diff_lines = [] empty_filename = make_tempfile() tmp_diff_from_filename = make_tempfile() tmp_diff_to_filename = make_tempfile() for f in my_diff_entries: f = f.strip() if not f: continue m = re.search(r'(?P[ACMD]) (?P.*) ' r'(?Prev:revid:[-\d]+) ' r'(?Prev:revid:[-\d]+) ' r'src:(?P.*) ' r'dst:(?P.*)$', f) if not m: raise SCMError('Could not parse "cm log" response: %s' % f) changetype = m.group("type") filename = m.group("file") if changetype == "M": # Handle moved files as a delete followed by an add. # Clunky, but at least it works oldfilename = m.group("srcpath") oldspec = m.group("revspec") newfilename = m.group("dstpath") newspec = m.group("revspec") self._write_file(oldfilename, oldspec, tmp_diff_from_filename) dl = self._diff_files(tmp_diff_from_filename, empty_filename, oldfilename, "rev:revid:-1", oldspec, changetype) diff_lines += dl self._write_file(newfilename, newspec, tmp_diff_to_filename) dl = self._diff_files(empty_filename, tmp_diff_to_filename, newfilename, newspec, "rev:revid:-1", changetype) diff_lines += dl else: newrevspec = m.group("revspec") parentrevspec = m.group("parentrevspec") logging.debug("Type %s File %s Old %s New %s" % (changetype, filename, parentrevspec, newrevspec)) old_file = new_file = empty_filename if (changetype in ['A'] or (changetype in ['C'] and parentrevspec == "rev:revid:-1")): # There's only one content to show self._write_file(filename, newrevspec, tmp_diff_to_filename) new_file = tmp_diff_to_filename elif changetype in ['C']: self._write_file(filename, parentrevspec, tmp_diff_from_filename) old_file = tmp_diff_from_filename self._write_file(filename, newrevspec, tmp_diff_to_filename) new_file = tmp_diff_to_filename elif changetype in ['D']: self._write_file(filename, parentrevspec, tmp_diff_from_filename) old_file = tmp_diff_from_filename else: raise SCMError("Don't know how to handle change type " "'%s' for %s" % (changetype, filename)) dl = self._diff_files(old_file, new_file, filename, newrevspec, parentrevspec, changetype) diff_lines += dl os.unlink(empty_filename) os.unlink(tmp_diff_from_filename) os.unlink(tmp_diff_to_filename) return ''.join(diff_lines) def _diff_files(self, old_file, new_file, filename, newrevspec, parentrevspec, changetype): """ Do the work of producing a diff for Plastic (based on the Perforce one) old_file - The absolute path to the "old" file. new_file - The absolute path to the "new" file. filename - The file in the Plastic workspace newrevspec - The revid spec of the changed file parentrevspecspec - The revision spec of the "old" file changetype - The change type as a single character string Returns a list of strings of diff lines. 
""" if filename.startswith(self.workspacedir): filename = filename[len(self.workspacedir):] diff_cmd = ["diff", "-urN", old_file, new_file] # Diff returns "1" if differences were found. dl = execute(diff_cmd, extra_ignore_errors=(1, 2), translate_newlines = False) # If the input file has ^M characters at end of line, lets ignore them. dl = dl.replace('\r\r\n', '\r\n') dl = dl.splitlines(True) # Special handling for the output of the diff tool on binary files: # diff outputs "Files a and b differ" # and the code below expects the output to start with # "Binary files " if (len(dl) == 1 and dl[0].startswith('Files %s and %s differ' % (old_file, new_file))): dl = ['Binary files %s and %s differ\n' % (old_file, new_file)] if dl == [] or dl[0].startswith("Binary files "): if dl == []: return [] dl.insert(0, "==== %s (%s) ==%s==\n" % (filename, newrevspec, changetype)) dl.append('\n') else: dl[0] = "--- %s\t%s\n" % (filename, parentrevspec) dl[1] = "+++ %s\t%s\n" % (filename, newrevspec) # Not everybody has files that end in a newline. This ensures # that the resulting diff file isn't broken. if dl[-1][-1] != '\n': dl.append('\n') return dl def _write_file(self, filename, filespec, tmpfile): """ Grabs a file from Plastic and writes it to a temp file """ logging.debug("Writing '%s' (rev %s) to '%s'" % (filename, filespec, tmpfile)) execute(["cm", "cat", filespec, "--file=" + tmpfile]) RBTools-0.7.11/rbtools/clients/tfs.py0000644000232200023220000011326113230242633017767 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import os import re import sys import tempfile import xml.etree.ElementTree as ET from six.moves.urllib.parse import unquote from rbtools.clients import RepositoryInfo, SCMClient from rbtools.clients.errors import (InvalidRevisionSpecError, SCMError, TooManyRevisionsError) from rbtools.utils.appdirs import user_data_dir from rbtools.utils.checks import check_gnu_diff, check_install from rbtools.utils.diffs import filename_match_any_patterns from rbtools.utils.process import execute class TFExeWrapper(object): """Implementation wrapper for using VS2017's tf.exe.""" REVISION_WORKING_COPY = '--rbtools-working-copy' def __init__(self, config=None, options=None): """Initialize the wrapper. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command-line options. """ self.config = config self.options = options def get_repository_info(self): """Determine and return the repository info. Returns: rbtools.clients.RepositoryInfo: The repository info object. If the current working directory does not correspond to a TFS checkout, this returns ``None``. """ workfold = self._run_tf(['vc', 'workfold', os.getcwd()]) m = re.search('^Collection: (.*)$', workfold, re.MULTILINE) if not m: logging.debug('Could not find the collection from "tf vc ' 'workfold"') return None # Now that we know it's TFS, make sure we have GNU diff installed, and # error out if we don't. check_gnu_diff() return RepositoryInfo(unquote(m.group(1))) def parse_revision_spec(self, revisions): """Parse the given revision spec. The ``revisions`` argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of "r1~r2". Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). 
This will return a dictionary with the following keys: ``base``: A revision to use as the base of the resulting diff. ``tip``: A revision to use as the tip of the resulting diff. ``parent_base`` (optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Args: revisions (list of unicode): The revision spec to parse. Returns: dict: A dictionary with ``base`` and ``tip`` keys, each of which is a string describing the revision. These may be special internal values. Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. """ n_revisions = len(revisions) if n_revisions == 1 and '~' in revisions[0]: revisions = revisions[0].split('~') n_revisions = len(revisions) if n_revisions == 0: # Most recent checked-out revision -- working copy return { 'base': self._convert_symbolic_revision('W'), 'tip': self.REVISION_WORKING_COPY, } elif n_revisions == 1: # Either a numeric revision (n-1:n) or a changelist revision = self._convert_symbolic_revision(revisions[0]) return { 'base': revision - 1, 'tip': revision, } elif n_revisions == 2: # Diff between two numeric revisions return { 'base': self._convert_symbolic_revision(revisions[0]), 'tip': self._convert_symbolic_revision(revisions[1]), } else: raise TooManyRevisionsError return { 'base': None, 'tip': None, } def _convert_symbolic_revision(self, revision, path=None): """Convert a symbolic revision into a numeric changeset. Args: revision (unicode): The TFS versionspec to convert. path (unicode, optional): The itemspec that the revision applies to. Returns: int: The changeset number corresponding to the versionspec. """ # The output of 'tf vc history /format:detailed' is plain text; parse # the changeset number out of it with a regex. data = self._run_tf(['vc', 'history', '/stopafter:1', '/recursive', '/format:detailed', '/version:%s' % revision, path or os.getcwd()]) m = re.search(r'^Changeset: (\d+)$', data, re.MULTILINE) if not m: logging.debug('Failed to parse output from "tf vc history":\n%s', data) raise InvalidRevisionSpecError( '"%s" does not appear to be a valid versionspec' % revision) return int(m.group(1)) def diff(self, revisions, include_files, exclude_patterns): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``.
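A sketch of the working-copy case, the only one currently supported (the changeset number is illustrative)::

    wrapper.diff({'base': 1234,
                  'tip': TFExeWrapper.REVISION_WORKING_COPY},
                 [], [])
    # => {'diff': b'...', 'parent_diff': None, 'base_commit_id': '1234'}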
""" base = str(revisions['base']) tip = str(revisions['tip']) if tip == self.REVISION_WORKING_COPY: # TODO: support committed revisions return self._diff_working_copy(base, include_files, exclude_patterns) else: raise SCMError('Posting committed changes is not yet supported ' 'for TFS when using the tf.exe wrapper.') def _diff_working_copy(self, base, include_files, exclude_patterns): """Return a diff of the working copy. Args: base (unicode): The base revision to diff against. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``. """ # We pass results_unicode=False because that uses the filesystem # encoding, but the XML results we get should always be UTF-8, and are # well-formed with the encoding specified. We can therefore let # ElementTree determine how to decode it. status = self._run_tf(['vc', 'status', '/format:xml'], results_unicode=False) root = ET.fromstring(status) diff = [] for pending_change in root.findall( './PendingSet/PendingChanges/PendingChange'): action = pending_change.attrib['chg'].split(' ') old_filename = \ pending_change.attrib.get('srcitem', '').encode('utf-8') new_filename = pending_change.attrib['item'].encode('utf-8') local_filename = pending_change.attrib['local'] old_version = \ pending_change.attrib.get('svrfm', '0').encode('utf-8') file_type = pending_change.attrib['type'] encoding = pending_change.attrib['enc'] new_version = b'(pending)' old_data = b'' new_data = b'' binary = (encoding == '-1') copied = 'Branch' in action if (not file_type or (not os.path.isfile(local_filename) and 'Delete' not in action)): continue if (exclude_patterns and filename_match_any_patterns(local_filename, exclude_patterns, base_dir=None)): continue if 'Add' in action: old_filename = b'/dev/null' if not binary: with open(local_filename, 'rb') as f: new_data = f.read() old_data = b'' elif 'Delete' in action: old_data = self._run_tf( ['vc', 'view', '/version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) new_data = b'' new_version = b'(deleted)' elif 'Edit' in action: if not binary: old_data = self._run_tf( ['vc', 'view', old_filename.decode('utf-8'), '/version:%s' % old_version.decode('utf-8')], results_unicode=False) with open(local_filename, 'rb') as f: new_data = f.read() old_label = b'%s\t%s' % (old_filename, old_version) new_label = b'%s\t%s' % (new_filename, new_version) if copied: diff.append(b'Copied from: %s\n' % old_filename) if binary: if 'Add' in action: old_filename = new_filename diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) diff.append(b'Binary files %s and %s differ\n' % (old_filename, new_filename)) elif old_filename != new_filename and old_data == new_data: # Renamed file with no changes. 
diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) else: old_tmp = tempfile.NamedTemporaryFile(delete=False) old_tmp.write(old_data) old_tmp.close() new_tmp = tempfile.NamedTemporaryFile(delete=False) new_tmp.write(new_data) new_tmp.close() unified_diff = execute( ['diff', '-u', '--label', old_label.decode('utf-8'), '--label', new_label.decode('utf-8'), old_tmp.name, new_tmp.name], extra_ignore_errors=(1,), log_output_on_error=False, results_unicode=False) diff.append(unified_diff) os.unlink(old_tmp.name) os.unlink(new_tmp.name) return { 'diff': b''.join(diff), 'parent_diff': None, 'base_commit_id': base, } def _run_tf(self, args, **kwargs): """Run the "tf" command. Args: args (list): A list of arguments to pass to tf. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: unicode: The output of the command. """ command = ['tf'] + args + ['/noprompt'] if getattr(self.options, 'tfs_login', None): command.append('/login:%s' % self.options.tfs_login) return execute(command, ignore_errors=True, **kwargs) class TEEWrapper(object): """Implementation wrapper for using Team Explorer Everywhere.""" REVISION_WORKING_COPY = '--rbtools-working-copy' def __init__(self, config=None, options=None): """Initialize the wrapper. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command-line options. """ self.config = config self.options = options self.tf = None tf_locations = [] if options and getattr(options, 'tf_cmd', None): tf_locations.append(options.tf_cmd) if sys.platform.startswith('win'): # First check in the system path. If that doesn't work, look in the # two standard install locations. tf_locations.extend([ 'tf.cmd', r'%programfiles(x86)%\Microsoft Visual Studio 12.0\Common7\IDE\tf.cmd', r'%programfiles%\Microsoft Team Foundation Server 12.0\Tools\tf.cmd', ]) else: tf_locations.append('tf') for location in tf_locations: location = os.path.expandvars(location) if check_install([location, 'help']): self.tf = location break def get_repository_info(self): """Determine and return the repository info. Returns: rbtools.clients.RepositoryInfo: The repository info object. If the current working directory does not correspond to a TFS checkout, this returns ``None``. """ if self.tf is None: logging.debug('Unable to execute "tf help": skipping TFS') return None workfold = self._run_tf(['workfold', os.getcwd()]) m = re.search('^Collection: (.*)$', workfold, re.MULTILINE) if not m: logging.debug('Could not find the collection from "tf workfold"') return None # Now that we know it's TFS, make sure we have GNU diff installed, # and error out if we don't. check_gnu_diff() path = unquote(m.group(1)) return RepositoryInfo(path) def parse_revision_spec(self, revisions): """Parse the given revision spec. The ``revisions`` argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of "r1~r2". Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). This will return a dictionary with the following keys: ``base``: A revision to use as the base of the resulting diff. ``tip``: A revision to use as the tip of the resulting diff. ``parent_base`` (optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print).
The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Args: revisions (list of unicode): The revision spec to parse. Returns: dict: A dictionary with ``base`` and ``tip`` keys, each of which is a string describing the revision. These may be special internal values. Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. """ n_revisions = len(revisions) if n_revisions == 1 and '~' in revisions[0]: revisions = revisions[0].split('~') n_revisions = len(revisions) if n_revisions == 0: # Most recent checked-out revision -- working copy return { 'base': self._convert_symbolic_revision('W'), 'tip': self.REVISION_WORKING_COPY, } elif n_revisions == 1: # Either a numeric revision (n-1:n) or a changelist revision = self._convert_symbolic_revision(revisions[0]) return { 'base': revision - 1, 'tip': revision, } elif n_revisions == 2: # Diff between two numeric revisions return { 'base': self._convert_symbolic_revision(revisions[0]), 'tip': self._convert_symbolic_revision(revisions[1]), } else: raise TooManyRevisionsError return { 'base': None, 'tip': None, } def _convert_symbolic_revision(self, revision, path=None): """Convert a symbolic revision into a numeric changeset. Args: revision (unicode): The TFS versionspec to convert. path (unicode, optional): The itemspec that the revision applies to. Returns: int: The changeset number corresponding to the versionspec. """ args = ['history', '-stopafter:1', '-recursive', '-format:xml'] # `tf history -version:W` doesn't seem to work (even though it's # supposed to). Luckily, W is the default when -version isn't passed, # so just elide it. if revision != 'W': args.append('-version:%s' % revision) args.append(path or os.getcwd()) # We pass results_unicode=False because that uses the filesystem # encoding to decode the output, but the XML results we get should # always be UTF-8, and are well-formed with the encoding specified. We # can therefore let ElementTree determine how to decode it. data = self._run_tf(args, results_unicode=False) try: root = ET.fromstring(data) item = root.find('./changeset') if item is not None: return int(item.attrib['id']) else: raise Exception('No changesets found') except Exception as e: logging.debug('Failed to parse output from "tf history": %s\n%s', e, data, exc_info=True) raise InvalidRevisionSpecError( '"%s" does not appear to be a valid versionspec' % revision) def diff(self, revisions, include_files, exclude_patterns): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``.
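Note that only the working-copy case is implemented; a committed range raises ``SCMError``. For example (changeset numbers illustrative)::

    wrapper.diff({'base': 1234, 'tip': 1270}, [], [])  # raises SCMError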
""" base = str(revisions['base']) tip = str(revisions['tip']) if tip == self.REVISION_WORKING_COPY: return self._diff_working_copy(base, include_files, exclude_patterns) else: raise SCMError('Posting committed changes is not yet supported ' 'for TFS when using the Team Explorer Everywhere ' 'wrapper.') def _diff_working_copy(self, base, include_files, exclude_patterns): """Return a diff of the working copy. Args: base (unicode): The base revision to diff against. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``. """ # We pass results_unicode=False because that uses the filesystem # encoding, but the XML results we get should always be UTF-8, and are # well-formed with the encoding specified. We can therefore let # ElementTree determine how to decode it. status = self._run_tf(['status', '-format:xml'], results_unicode=False) root = ET.fromstring(status) diff = [] for pending_change in root.findall('./pending-changes/pending-change'): action = pending_change.attrib['change-type'].split(', ') new_filename = pending_change.attrib['server-item'].encode('utf-8') local_filename = pending_change.attrib['local-item'] old_version = pending_change.attrib['version'].encode('utf-8') file_type = pending_change.attrib.get('file-type') new_version = b'(pending)' old_data = b'' new_data = b'' copied = 'branch' in action if (not file_type or (not os.path.isfile(local_filename) and 'delete' not in action)): continue if (exclude_patterns and filename_match_any_patterns(local_filename, exclude_patterns, base_dir=None)): continue if 'rename' in action: old_filename = \ pending_change.attrib['source-item'].encode('utf-8') else: old_filename = new_filename if copied: old_filename = \ pending_change.attrib['source-item'].encode('utf-8') old_version = ( '%d' % self._convert_symbolic_revision( 'W', old_filename.decode('utf-8'))) if 'add' in action: old_filename = b'/dev/null' if file_type != 'binary': with open(local_filename) as f: new_data = f.read() old_data = b'' elif 'delete' in action: old_data = self._run_tf( ['print', '-version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) new_data = b'' new_version = b'(deleted)' elif 'edit' in action: old_data = self._run_tf( ['print', '-version:%s' % old_version.decode('utf-8'), old_filename.decode('utf-8')], results_unicode=False) with open(local_filename) as f: new_data = f.read() old_label = b'%s\t%s' % (old_filename, old_version) new_label = b'%s\t%s' % (new_filename, new_version) if copied: diff.append(b'Copied from: %s\n' % old_filename) if file_type == 'binary': if 'add' in action: old_filename = new_filename diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) diff.append(b'Binary files %s and %s differ\n' % (old_filename, new_filename)) elif old_filename != new_filename and old_data == new_data: # Renamed file with no changes diff.append(b'--- %s\n' % old_label) diff.append(b'+++ %s\n' % new_label) else: old_tmp = tempfile.NamedTemporaryFile(delete=False) old_tmp.write(old_data) old_tmp.close() new_tmp = tempfile.NamedTemporaryFile(delete=False) new_tmp.write(new_data) new_tmp.close() unified_diff = execute( ['diff', '-u', '--label', old_label.decode('utf-8'), '--label', new_label.decode('utf-8'), old_tmp.name, new_tmp.name], extra_ignore_errors=(1,), 
log_output_on_error=False, results_unicode=False) diff.append(unified_diff) os.unlink(old_tmp.name) os.unlink(new_tmp.name) if len(root.findall('./candidate-pending-changes/pending-change')) > 0: logging.warning('There are added or deleted files which have not ' 'been added to TFS. These will not be included ' 'in your review request.') return { 'diff': b''.join(diff), 'parent_diff': None, 'base_commit_id': base, } def _run_tf(self, args, **kwargs): """Run the "tf" command. Args: args (list): A list of arguments to pass to tf. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: unicode: The output of the command. """ cmdline = [self.tf, '-noprompt'] if getattr(self.options, 'tfs_login', None): cmdline.append('-login:%s' % self.options.tfs_login) cmdline += args # Use / style arguments when running on Windows. if sys.platform.startswith('win'): for i, arg in enumerate(cmdline): if arg.startswith('-'): cmdline[i] = '/' + arg[1:] return execute(cmdline, ignore_errors=True, **kwargs) class TFHelperWrapper(object): """Implementation wrapper using our own helper.""" def __init__(self, helper_path, config=None, options=None): """Initialize the wrapper. Args: helper_path (unicode): The path to the helper binary. config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command-line options. """ self.helper_path = helper_path self.config = config self.options = options def get_repository_info(self): """Determine and return the repository info. Returns: rbtools.clients.RepositoryInfo: The repository info object. If the current working directory does not correspond to a TFS checkout, this returns ``None``. """ rc, path, errors = self._run_helper(['get-collection'], ignore_errors=True) if rc == 0: return RepositoryInfo(path.strip()) else: return None def parse_revision_spec(self, revisions): """Parse the given revision spec. The ``revisions`` argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of "r1~r2". Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). This will return a dictionary with the following keys: ``base``: A revision to use as the base of the resulting diff. ``tip``: A revision to use as the tip of the resulting diff. ``parent_base`` (optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Args: revisions (list of unicode): The revision spec to parse. Returns: dict: A dictionary with ``base`` and ``tip`` keys, each of which is a string describing the revision. These may be special internal values. Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed.
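Unlike the other wrappers, the values here come back from the rb-tfs helper as strings rather than integers, e.g. (values illustrative; the helper computes the actual parent)::

    wrapper.parse_revision_spec(['1270'])
    # => {'base': '1269', 'tip': '1270'}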
""" if len(revisions) > 2: raise TooManyRevisionsError rc, revisions, errors = self._run_helper( ['parse-revision'] + revisions, split_lines=True) if rc == 0: return { 'base': revisions[0].strip(), 'tip': revisions[1].strip() } else: raise InvalidRevisionSpecError('\n'.join(errors)) def diff(self, revisions, include_files, exclude_patterns): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list): A list of file paths to include in the diff. exclude_patterns (list): A list of file paths to exclude from the diff. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``. Raises: rbtools.clients.errors.SCMError: Something failed when creating the diff. """ base = revisions['base'] tip = revisions['tip'] rc, diff, errors = self._run_helper(['diff', '--', base, tip], ignore_errors=True, log_output_on_error=False) if rc in (0, 2): if rc == 2: # Magic return code that means success, but there were # un-tracked files in the working directory. logging.warning('There are added or deleted files which have ' 'not been added to TFS. These will not be ' 'included in your review request.') return { 'diff': diff, 'parent_diff': None, 'base_commit_id': None, } else: raise SCMError(errors.strip()) def _run_helper(self, args, **kwargs): """Run the rb-tfs binary. Args: args (list): A list of arguments to pass to rb-tfs. **kwargs (dict): Additional keyword arguments for the :py:meth:`execute` call. Returns: tuple: A 3-tuple of return code, output, and error output. The output and error output may be lists depending on the contents of ``kwargs``. """ if len(args) == 0: raise ValueError('_run_helper called without any arguments') cmdline = ['java'] cmdline += getattr(self.config, 'JAVA_OPTS', ['-Xmx2048M']) cmdline += ['-jar', self.helper_path] cmdline.append(args[0]) if self.options: if self.options.debug: cmdline.append('--debug') if getattr(self.options, 'tfs_shelveset_owner', None): cmdline += ['--shelveset-owner', self.options.tfs_shelveset_owner] if getattr(self.options, 'tfs_login', None): cmdline += ['--login', self.options.tfs_login] cmdline += args[1:] return execute(cmdline, with_errors=False, results_unicode=False, return_error_code=True, return_errors=True, **kwargs) class TFSClient(SCMClient): """A client for Team Foundation Server.""" name = 'Team Foundation Server' supports_diff_exclude_patterns = True supports_patch_revert = True def __init__(self, config=None, options=None): """Initialize the client. Args: config (dict, optional): The loaded configuration. options (argparse.Namespace, optional): The command-line options. """ super(TFSClient, self).__init__(config, options) # There are three different backends that can be used to access the # underlying TFS repository. We try them in this order: # - VS2017+ tf.exe # - Our custom rb-tfs wrapper, built on the TFS Java SDK # - Team Explorer Everywhere's tf command use_tf_exe = False try: tf_vc_output = execute(['tf', 'vc', 'help'], ignore_errors=True, none_on_ignored_error=True) # VS2015 has a tf.exe but it's not good enough. 
if (tf_vc_output and 'Version Control Tool, Version 15' in tf_vc_output): use_tf_exe = True except OSError: pass helper_path = os.path.join(user_data_dir('rbtools'), 'packages', 'tfs', 'rb-tfs.jar') if use_tf_exe: self.tf_wrapper = TFExeWrapper(config, options) elif os.path.exists(helper_path): self.tf_wrapper = TFHelperWrapper(helper_path, config, options) else: self.tf_wrapper = TEEWrapper(config, options) def get_repository_info(self): """Determine and return the repository info. Returns: rbtools.clients.RepositoryInfo: The repository info object. If the current working directory does not correspond to a TFS checkout, this returns ``None``. """ return self.tf_wrapper.get_repository_info() def parse_revision_spec(self, revisions): """Parse the given revision spec. The ``revisions`` argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use the TFS-native syntax of "r1~r2". Versions passed in can be any versionspec, such as a changeset number, ``L``-prefixed label name, ``W`` (latest workspace version), or ``T`` (latest upstream version). This will return a dictionary with the following keys: ``base``: A revision to use as the base of the resulting diff. ``tip``: A revision to use as the tip of the resulting diff. ``parent_base`` (optional): The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return revisions relevant for the "current change" (changes in the work folder which have not yet been checked in). Args: revisions (list of unicode): The revision spec to parse. Returns: dict: A dictionary with ``base`` and ``tip`` keys, each of which is a string describing the revision. These may be special internal values. Raises: rbtools.clients.errors.TooManyRevisionsError: Too many revisions were specified. rbtools.clients.errors.InvalidRevisionSpecError: The given revision spec could not be parsed. """ return self.tf_wrapper.parse_revision_spec(revisions) def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """Return the generated diff. Args: revisions (dict): A dictionary containing ``base`` and ``tip`` keys. include_files (list, optional): A list of file paths to include in the diff. exclude_patterns (list, optional): A list of file paths to exclude from the diff. extra_args (list, optional): Unused. Returns: dict: A dictionary containing ``diff``, ``parent_diff``, and ``base_commit_id`` keys. In the case of TFS, the parent diff key will always be ``None``.
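This delegates to whichever backend wrapper was selected in ``__init__``. A minimal sketch, assuming the working directory is a TFS checkout::

    client = TFSClient(config=config, options=options)
    revisions = client.parse_revision_spec([])
    result = client.diff(revisions)
    # result['diff'] holds the diff content; result['parent_diff'] is None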
""" return self.tf_wrapper.diff(revisions, include_files, exclude_patterns) RBTools-0.7.11/rbtools/clients/mercurial.py0000644000232200023220000006370513230242633021165 0ustar debalancedebalancefrom __future__ import unicode_literals import logging import os import re import uuid from six.moves.urllib.parse import urlsplit, urlunparse from rbtools.clients import PatchResult, SCMClient, RepositoryInfo from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError, SCMError) from rbtools.clients.svn import SVNClient from rbtools.utils.checks import check_install from rbtools.utils.filesystem import make_empty_files from rbtools.utils.console import edit_text from rbtools.utils.process import execute class MercurialClient(SCMClient): """ A wrapper around the hg Mercurial tool that fetches repository information and generates compatible diffs. """ name = 'Mercurial' PRE_CREATION = '/dev/null' PRE_CREATION_DATE = 'Thu Jan 01 00:00:00 1970 +0000' supports_diff_exclude_patterns = True def __init__(self, **kwargs): super(MercurialClient, self).__init__(**kwargs) self.hgrc = {} self._type = 'hg' self._remote_path = () self._initted = False self._hg_env = { 'HGPLAIN': '1', } self._hgext_path = os.path.normpath(os.path.join( os.path.dirname(__file__), '..', 'helpers', 'hgext.py')) # `self._remote_path_candidates` is an ordered set of hgrc # paths that are checked if `tracking` option is not given # explicitly. The first candidate found to exist will be used, # falling back to `default` (the last member.) self._remote_path_candidates = ['reviewboard', 'origin', 'parent', 'default'] @property def hidden_changesets_supported(self): """Return whether the repository supports hidden changesets. Mercurial 1.9 and above support hidden changesets. These are changesets that have been hidden from regular repository view. They still exist and are accessible, but only if the --hidden command argument is specified. Since we may encounter hidden changesets (e.g. the user specifies hidden changesets as part of the revision spec), we need to be aware of hidden changesets. """ if not hasattr(self, '_hidden_changesets_supported'): # The choice of command is arbitrary. parents for the initial # revision should be fast. result = execute(['hg', 'parents', '--hidden', '-r', '0'], ignore_errors=True, with_errors=False, none_on_ignored_error=True) self._hidden_changesets_supported = result is not None return self._hidden_changesets_supported @property def hg_root(self): """Return the root of the working directory. This will return the root directory of the current repository. If the current working directory is not inside a mercurial repository, this returns None. 
""" if not hasattr(self, '_hg_root'): root = execute(['hg', 'root'], env=self._hg_env, ignore_errors=True) if not root.startswith('abort:'): self._hg_root = root.strip() else: self._hg_root = None return self._hg_root def _init(self): """Initialize the client.""" if self._initted or not self.hg_root: return self._load_hgrc() svn_info = execute(['hg', 'svn', 'info'], ignore_errors=True) if (not svn_info.startswith('abort:') and not svn_info.startswith('hg: unknown command') and not svn_info.lower().startswith('not a child of')): self._type = 'svn' self._svn_info = svn_info else: self._type = 'hg' for candidate in self._remote_path_candidates: rc_key = 'paths.%s' % candidate if rc_key in self.hgrc: self._remote_path = (candidate, self.hgrc[rc_key]) logging.debug('Using candidate path %r: %r' % self._remote_path) break self._initted = True def get_repository_info(self): """Return the repository info object.""" if not check_install(['hg', '--help']): logging.debug('Unable to execute "hg --help": skipping Mercurial') return None self._init() if not self.hg_root: # hg aborted => no mercurial repository here. return None if self._type == 'svn': return self._calculate_hgsubversion_repository_info(self._svn_info) else: path = self.hg_root base_path = '/' if self._remote_path: path = self._remote_path[1] base_path = '' return RepositoryInfo(path=path, base_path=base_path, supports_parent_diffs=True) def parse_revision_spec(self, revisions=[]): """Parse the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. 'parent_base': (optional) The revision to use as the base of a parent diff. 'commit_id': (optional) The ID of the single commit being posted, if not using a range. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. If zero revisions are passed in, this will return the outgoing changes from the parent of the working directory. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. This will result in generating a diff for the changeset specified. If two revisions are passed in, they will be used for the 'base' and 'tip' revisions, respectively. In all cases, a parent base will be calculated automatically from changesets not present on the remote. """ self._init() n_revisions = len(revisions) if n_revisions == 1: # If there's a single revision, try splitting it based on hg's # revision range syntax (either :: or ..). If this splits, then # it's handled as two revisions below. revisions = re.split(r'\.\.|::', revisions[0]) n_revisions = len(revisions) result = {} if n_revisions == 0: # No revisions: Find the outgoing changes. Only consider the # working copy revision and ancestors because that makes sense. # If a user wishes to include other changesets, they can run # `hg up` or specify explicit revisions as command arguments. if self._type == 'svn': result['base'] = self._get_parent_for_hgsubversion() result['tip'] = '.' 
else: # Ideally, generating a diff for outgoing changes would be as # simple as just running `hg outgoing --patch <remote>`, but # there are a couple of problems with this. For one, the # server-side diff parser isn't equipped to filter out diff # headers such as "comparing with..." and # "changeset: <rev>:<node>". Another problem is that the output # of `hg outgoing` potentially includes changesets across # multiple branches. # # In order to provide the most accurate comparison between # one's local clone and a given remote (something akin to git's # diff command syntax `git diff <base>..<tip>`), we have # to do the following: # # - Get the name of the current branch # - Get a list of outgoing changesets, specifying a custom # format # - Filter outgoing changesets by the current branch name # - Get the "top" and "bottom" outgoing changesets # # These changesets are then used as arguments to # `hg diff -r <bottom> -r <top>`. # # Future modifications may need to be made to account for odd # cases like having multiple diverged branches which share # partial history--or we can just punish developers for doing # such nonsense :) outgoing = \ self._get_bottom_and_top_outgoing_revs_for_remote(rev='.') if outgoing[0] is None or outgoing[1] is None: raise InvalidRevisionSpecError( 'There are no outgoing changes') result['base'] = self._identify_revision(outgoing[0]) result['tip'] = self._identify_revision(outgoing[1]) result['commit_id'] = result['tip'] # Since the user asked us to operate on tip, warn them about a # dirty working directory. if (self.has_pending_changes() and not self.config.get('SUPPRESS_CLIENT_WARNINGS', False)): logging.warning('Your working directory is not clean. Any ' 'changes which have not been committed ' 'to a branch will not be included in your ' 'review request.') if self.options.parent_branch: result['parent_base'] = result['base'] result['base'] = self._identify_revision( self.options.parent_branch) elif n_revisions == 1: # One revision: Use the given revision for tip, and find its parent # for base. result['tip'] = self._identify_revision(revisions[0]) result['commit_id'] = result['tip'] result['base'] = self._execute( ['hg', 'parents', '--hidden', '-r', result['tip'], '--template', '{node|short}']).split()[0] if len(result['base']) != 12: raise InvalidRevisionSpecError( "Can't determine parent revision" ) elif n_revisions == 2: # Two revisions: Just use the given revisions result['base'] = self._identify_revision(revisions[0]) result['tip'] = self._identify_revision(revisions[1]) else: raise TooManyRevisionsError if 'base' not in result or 'tip' not in result: raise InvalidRevisionSpecError( '"%s" does not appear to be a valid revision spec' % revisions) if self._type == 'hg' and 'parent_base' not in result: # If there are missing changesets between base and the remote, we # need to generate a parent diff. outgoing = self._get_outgoing_changesets(self._get_remote_branch(), rev=result['base']) logging.debug('%d outgoing changesets between remote and base.', len(outgoing)) if not outgoing: return result parent_base = self._execute( ['hg', 'parents', '--hidden', '-r', outgoing[0][1], '--template', '{node|short}']).split() if len(parent_base) == 0: raise Exception( 'Could not find parent base revision.
Ensure upstream ' 'repository is not empty.') result['parent_base'] = parent_base[0] logging.debug('Identified %s as parent base', result['parent_base']) return result def _identify_revision(self, revision): identify = self._execute( ['hg', 'identify', '-i', '--hidden', '-r', str(revision)], ignore_errors=True, none_on_ignored_error=True) if identify is None: raise InvalidRevisionSpecError( '"%s" does not appear to be a valid revision' % revision) else: return identify.split()[0] def _calculate_hgsubversion_repository_info(self, svn_info): def _info(r): m = re.search(r, svn_info, re.M) if m: return urlsplit(m.group(1)) else: return None self._type = 'svn' root = _info(r'^Repository Root: (.+)$') url = _info(r'^URL: (.+)$') if not (root and url): return None scheme, netloc, path, _, _ = root root = urlunparse([scheme, root.netloc.split("@")[-1], path, "", "", ""]) base_path = url.path[len(path):] return RepositoryInfo(path=root, base_path=base_path, supports_parent_diffs=True) def _load_hgrc(self): for line in execute(['hg', 'showconfig'], split_lines=True): line = line.split('=', 1) if len(line) == 2: key, value = line else: key = line[0] value = '' self.hgrc[key] = value.strip() def get_raw_commit_message(self, revisions): """Return the raw commit message. This extracts all descriptions in the given revision range and concatenates them, most recent ones going first. """ rev1 = revisions['base'] rev2 = revisions['tip'] delim = str(uuid.uuid1()) descs = self._execute( ['hg', 'log', '--hidden', '-r', '%s::%s' % (rev1, rev2), '--template', '{desc}%s' % delim], env=self._hg_env, results_unicode=False) # The initial element is the description of the base changeset, which # we don't care about. The last element is always empty due to the # string ending with the delimiter. descs = descs.split(delim)[1:-1] return b'\n\n'.join([desc.strip() for desc in descs]) def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """Return a diff across all modified files in the given revisions.""" self._init() diff_cmd = ['hg', 'diff', '--hidden', '--nodates'] if self.supports_empty_files(): diff_cmd.append('-g') if self._type == 'svn': diff_cmd.append('--svn') diff_cmd += include_files for pattern in exclude_patterns: diff_cmd.append('-X') diff_cmd.append(pattern) diff = self._execute( diff_cmd + ['-r', revisions['base'], '-r', revisions['tip']], env=self._hg_env, log_output_on_error=False, results_unicode=False) if 'parent_base' in revisions: base_commit_id = revisions['parent_base'] parent_diff = self._execute( diff_cmd + ['-r', base_commit_id, '-r', revisions['base']], env=self._hg_env, results_unicode=False) else: base_commit_id = revisions['base'] parent_diff = None # If Review Board requests a relative revision via hgweb it will fail # since hgweb does not support the relative revision syntax (^1, -1). # Rewrite this relative node id to an absolute node id.
        base_commit_id = self._execute(
            ['hg', 'log', '-r', base_commit_id, '--template', '{node}'],
            env=self._hg_env, results_unicode=False)

        return {
            'diff': diff,
            'parent_diff': parent_diff,
            'commit_id': revisions.get('commit_id'),
            'base_commit_id': base_commit_id,
        }

    def _get_files_in_changeset(self, rev):
        """Return a set of all files in the specified changeset."""
        cmd = ['hg', 'locate', '-r', rev]

        files = execute(cmd, env=self._hg_env, ignore_errors=True,
                        none_on_ignored_error=True)

        if files:
            files = files.replace('\\', '/')  # workaround for issue 3894

            return set(files.splitlines())

        return set()

    def _get_parent_for_hgsubversion(self):
        """Return the parent Subversion branch.

        Returns the parent branch defined in the command options if it
        exists, otherwise returns the parent Subversion branch of the
        current repository.
        """
        return (getattr(self.options, 'tracking', None) or
                execute(['hg', 'parent', '--svn', '--template',
                         '{node}\n']).strip())

    def _get_remote_branch(self):
        """Return the remote branch associated with this repository.

        If the remote branch is not defined, the parent branch of the
        repository is returned.
        """
        remote = getattr(self.options, 'tracking', None)

        if not remote:
            try:
                remote = self._remote_path[0]
            except IndexError:
                remote = None

        if not remote:
            raise SCMError('Could not determine remote branch to use for '
                           'diff creation. Specify --tracking-branch to '
                           'continue.')

        return remote

    def create_commit(self, message, author, run_editor, files=[],
                      all_files=False):
        """Commit the given modified files.

        This is expected to be called after applying a patch. This commits
        the patch using information from the review request, opening the
        commit message in $EDITOR to allow the user to update it.
        """
        if run_editor:
            modified_message = edit_text(message)
        else:
            modified_message = message

        hg_command = ['hg', 'commit', '-m', modified_message,
                      '-u %s <%s>' % (author.fullname, author.email)]

        execute(hg_command + files)

    def _get_current_branch(self):
        """Return the current branch of this repository."""
        return execute(['hg', 'branch'], env=self._hg_env).strip()

    def _get_bottom_and_top_outgoing_revs_for_remote(self, rev=None):
        """Return the bottom and top outgoing revisions.

        Returns the bottom and top outgoing revisions for the changesets
        between the current branch and the remote branch.
        """
        remote = self._get_remote_branch()
        current_branch = self._get_current_branch()

        outgoing = [o for o in self._get_outgoing_changesets(remote, rev=rev)
                    if current_branch == o[2]]

        if outgoing:
            top_rev, bottom_rev = \
                self._get_top_and_bottom_outgoing_revs(outgoing)
        else:
            top_rev = None
            bottom_rev = None

        return bottom_rev, top_rev

    def _get_outgoing_changesets(self, remote, rev=None):
        """Return the outgoing changesets between us and a remote.

        This will return a list of tuples of (rev, node, branch) for each
        outgoing changeset. The list will be sorted in revision order.

        If rev is specified, we will limit the changesets to ancestors of
        the specified revision. Otherwise, all changesets not in the remote
        will be returned.
        """
        outgoing_changesets = []
        args = ['hg', '-q', 'outgoing', '--template',
                "{rev}\\t{node|short}\\t{branch}\\n",
                remote]

        if rev:
            args.extend(['-r', rev])

        # We must handle the special case where there are no outgoing
        # commits, as mercurial has a non-zero return value in this case.
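        # `hg outgoing` exits with status 1 when there is nothing outgoing,
        # so exit code 1 is listed in extra_ignore_errors below instead of
        # being treated as a command failure.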
        raw_outgoing = execute(args,
                               env=self._hg_env,
                               extra_ignore_errors=(1,))

        for line in raw_outgoing.splitlines():
            if not line:
                continue

            # Ignore warning messages that hg might put in, such as
            # "warning: certificate for foo can't be verified
            # (Python too old)"
            if line.startswith('warning: '):
                continue

            rev, node, branch = [f.strip() for f in line.split('\t')]
            branch = branch or 'default'

            if not rev.isdigit():
                raise Exception('Unexpected output from hg: %s' % line)

            logging.debug('Found outgoing changeset %s:%s' % (rev, node))

            outgoing_changesets.append((int(rev), node, branch))

        return outgoing_changesets

    def _get_top_and_bottom_outgoing_revs(self, outgoing_changesets):
        revs = set(t[0] for t in outgoing_changesets)

        top_rev = max(revs)
        bottom_rev = min(revs)

        for rev, node, branch in reversed(outgoing_changesets):
            parents = execute(
                ["hg", "log", "-r", str(rev), "--template", "{parents}"],
                env=self._hg_env)
            parents = re.split(r':[^\s]+\s*', parents)
            parents = [int(p) for p in parents if p != '']

            # Exclude parents that are themselves outgoing. Compare against
            # the set of revision numbers, not the (rev, node, branch)
            # tuples, since the parents are plain integers.
            parents = [p for p in parents if p not in revs]

            if len(parents) > 0:
                bottom_rev = parents[0]
                break
            else:
                bottom_rev = rev - 1

        bottom_rev = max(0, bottom_rev)

        return top_rev, bottom_rev

    def scan_for_server(self, repository_info):
        # Scan first for dot files, since it's faster and will cover the
        # user's $HOME/.reviewboardrc
        server_url = \
            super(MercurialClient, self).scan_for_server(repository_info)

        if not server_url and self.hgrc.get('reviewboard.url'):
            server_url = self.hgrc.get('reviewboard.url').strip()

        if not server_url and self._type == "svn":
            # Try using the reviewboard:url property on the SVN repo, if it
            # exists.
            prop = SVNClient().scan_for_server_property(repository_info)

            if prop:
                return prop

        return server_url

    def _execute(self, cmd, *args, **kwargs):
        if not self.hidden_changesets_supported and '--hidden' in cmd:
            cmd = [p for p in cmd if p != '--hidden']

        # Add our extension which normalizes settings. This is the easiest
        # way to normalize settings since it doesn't require us to chase
        # a tail of diff-related config options.
        cmd.extend([
            '--config',
            'extensions.rbtoolsnormalize=%s' % self._hgext_path
        ])

        return execute(cmd, *args, **kwargs)

    def has_pending_changes(self):
        """Check if there are changes waiting to be committed.

        Returns True if the working directory has been modified, otherwise
        returns False.
        """
        status = execute(['hg', 'status', '--modified', '--added',
                          '--removed', '--deleted'])
        return status != ''

    def apply_patch(self, patch_file, base_path=None, base_dir=None,
                    p=None, revert=False):
        """Import the given patch.

        This will take the given patch file and apply it to the working
        directory.
        """
        cmd = ['hg', 'patch', '--no-commit']

        if p:
            cmd += ['-p', p]

        cmd.append(patch_file)

        rc, data = self._execute(cmd, with_errors=True,
                                 return_error_code=True)

        return PatchResult(applied=(rc == 0), patch_output=data)

    def apply_patch_for_empty_files(self, patch, p_num, revert=False):
        """Return True if any empty files in the patch are applied.

        If there are no empty files in the patch or if an error occurs
        while applying the patch, we return False.
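        Empty added or deleted files produce no hunks for ``hg patch`` to
        apply, so they are detected by matching the diff file headers and
        are then created or removed directly with ``hg add``/``hg remove``.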
""" patched_empty_files = False added_files = re.findall(r'--- %s\t%s\n' r'\+\+\+ b/(\S+)\t[^\r\n\t\f]+\n' r'(?:[^@]|$)' % (self.PRE_CREATION, re.escape(self.PRE_CREATION_DATE)), patch) deleted_files = re.findall(r'--- a/(\S+)\t[^\r\n\t\f]+\n' r'\+\+\+ %s\t%s\n' r'(?:[^@]|$)' % (self.PRE_CREATION, re.escape(self.PRE_CREATION_DATE)), patch) if added_files: added_files = self._strip_p_num_slashes(added_files, int(p_num)) make_empty_files(added_files) result = execute(['hg', 'add'] + added_files, ignore_errors=True, none_on_ignored_error=True) if result is None: logging.error('Unable to execute "hg add" on: %s', ', '.join(added_files)) else: patched_empty_files = True if deleted_files: deleted_files = self._strip_p_num_slashes(deleted_files, int(p_num)) result = execute(['hg', 'remove'] + deleted_files, ignore_errors=True, none_on_ignored_error=True) if result is None: logging.error('Unable to execute "hg remove" on: %s', ', '.join(deleted_files)) else: patched_empty_files = True return patched_empty_files def supports_empty_files(self): """Check if the RB server supports added/deleted empty files.""" return (self.capabilities and self.capabilities.has_capability('scmtools', 'mercurial', 'empty_files')) RBTools-0.7.11/rbtools/clients/tests/0000755000232200023220000000000013230242636017762 5ustar debalancedebalanceRBTools-0.7.11/rbtools/clients/tests/test_p4.py0000644000232200023220000005231313230242633021717 0ustar debalancedebalance"""Unit tests for PerforceClient.""" from __future__ import unicode_literals import os import re import time from hashlib import md5 from rbtools.api.capabilities import Capabilities from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError) from rbtools.clients.perforce import PerforceClient, P4Wrapper from rbtools.clients.tests import SCMClientTests from rbtools.utils.filesystem import make_tempfile from rbtools.utils.testbase import RBTestBase class P4WrapperTests(RBTestBase): """Unit tests for P4Wrapper.""" def is_supported(self): return True def test_counters(self): """Testing P4Wrapper.counters""" class TestWrapper(P4Wrapper): def run_p4(self, cmd, *args, **kwargs): return [ 'a = 1\n', 'b = 2\n', 'c = 3\n', ] p4 = TestWrapper(None) info = p4.counters() self.assertEqual(len(info), 3) self.assertEqual(info['a'], '1') self.assertEqual(info['b'], '2') self.assertEqual(info['c'], '3') def test_info(self): """Testing P4Wrapper.info""" class TestWrapper(P4Wrapper): def run_p4(self, cmd, *args, **kwargs): return [ 'User name: myuser\n', 'Client name: myclient\n', 'Client host: myclient.example.com\n', 'Client root: /path/to/client\n', 'Server uptime: 111:43:38\n', ] p4 = TestWrapper(None) info = p4.info() self.assertEqual(len(info), 5) self.assertEqual(info['User name'], 'myuser') self.assertEqual(info['Client name'], 'myclient') self.assertEqual(info['Client host'], 'myclient.example.com') self.assertEqual(info['Client root'], '/path/to/client') self.assertEqual(info['Server uptime'], '111:43:38') class PerforceClientTests(SCMClientTests): """Unit tests for PerforceClient.""" class P4DiffTestWrapper(P4Wrapper): def __init__(self, options): super( PerforceClientTests.P4DiffTestWrapper, self).__init__(options) self._timestamp = time.mktime(time.gmtime(0)) def fstat(self, depot_path, fields=[]): assert depot_path in self.fstat_files fstat_info = self.fstat_files[depot_path] for field in fields: assert field in fstat_info return fstat_info def opened(self, changenum): return [info for info in self.repo_files if info['change'] == changenum] def 
print_file(self, depot_path, out_file): for info in self.repo_files: if depot_path == '%s#%s' % (info['depotFile'], info['rev']): fp = open(out_file, 'w') fp.write(info['text']) fp.close() return assert False def where(self, depot_path): assert depot_path in self.where_files return [{ 'path': self.where_files[depot_path], }] def change(self, changenum): return [{ 'Change': str(changenum), 'Date': '2013/01/02 22:33:44', 'User': 'joe@example.com', 'Status': 'pending', 'Description': 'This is a test.\n', }] def info(self): return { 'Client root': '/', } def run_p4(self, *args, **kwargs): assert False def test_scan_for_server_counter_with_reviewboard_url(self): """Testing PerforceClient.scan_for_server_counter with reviewboard.url""" RB_URL = 'http://reviewboard.example.com/' class TestWrapper(P4Wrapper): def counters(self): return { 'reviewboard.url': RB_URL, 'foo': 'bar', } client = PerforceClient(TestWrapper) url = client.scan_for_server_counter(None) self.assertEqual(url, RB_URL) def test_repository_info(self): """Testing PerforceClient.get_repository_info""" SERVER_PATH = 'perforce.example.com:1666' class TestWrapper(P4Wrapper): def is_supported(self): return True def info(self): return { 'Client root': os.getcwd(), 'Server address': SERVER_PATH, 'Server version': 'P4D/FREEBSD60X86_64/2012.2/525804 ' '(2012/09/18)', } client = PerforceClient(TestWrapper) info = client.get_repository_info() self.assertNotEqual(info, None) self.assertEqual(info.path, SERVER_PATH) self.assertEqual(client.p4d_version, (2012, 2)) def test_repository_info_outside_client_root(self): """Testing PerforceClient.get_repository_info outside client root""" SERVER_PATH = 'perforce.example.com:1666' class TestWrapper(P4Wrapper): def is_supported(self): return True def info(self): return { 'Client root': '/', 'Server address': SERVER_PATH, 'Server version': 'P4D/FREEBSD60X86_64/2012.2/525804 ' '(2012/09/18)', } client = PerforceClient(TestWrapper) info = client.get_repository_info() self.assertEqual(info, None) def test_scan_for_server_counter_with_reviewboard_url_encoded(self): """Testing PerforceClient.scan_for_server_counter with encoded reviewboard.url.http:||""" URL_KEY = 'reviewboard.url.http:||reviewboard.example.com/' RB_URL = 'http://reviewboard.example.com/' class TestWrapper(P4Wrapper): def counters(self): return { URL_KEY: '1', 'foo': 'bar', } client = PerforceClient(TestWrapper) url = client.scan_for_server_counter(None) self.assertEqual(url, RB_URL) def test_diff_with_pending_changelist(self): """Testing PerforceClient.diff with a pending changelist""" client = self._build_client() client.p4.repo_files = [ { 'depotFile': '//mydepot/test/README', 'rev': '2', 'action': 'edit', 'change': '12345', 'text': 'This is a test.\n', }, { 'depotFile': '//mydepot/test/README', 'rev': '3', 'action': 'edit', 'change': '', 'text': 'This is a mess.\n', }, { 'depotFile': '//mydepot/test/COPYING', 'rev': '1', 'action': 'add', 'change': '12345', 'text': 'Copyright 2013 Joe User.\n', }, { 'depotFile': '//mydepot/test/Makefile', 'rev': '3', 'action': 'delete', 'change': '12345', 'text': 'all: all\n', }, ] readme_file = make_tempfile() copying_file = make_tempfile() makefile_file = make_tempfile() client.p4.print_file('//mydepot/test/README#3', readme_file) client.p4.print_file('//mydepot/test/COPYING#1', copying_file) client.p4.where_files = { '//mydepot/test/README': readme_file, '//mydepot/test/COPYING': copying_file, '//mydepot/test/Makefile': makefile_file, } revisions = client.parse_revision_spec(['12345']) diff = 
client.diff(revisions) self._compare_diff(diff, '07aa18ff67f9aa615fcda7ecddcb354e') def test_diff_for_submitted_changelist(self): """Testing PerforceClient.diff with a submitted changelist""" class TestWrapper(self.P4DiffTestWrapper): def change(self, changelist): return [{ 'Change': '12345', 'Date': '2013/12/19 11:32:45', 'User': 'example', 'Status': 'submitted', 'Description': 'My change description\n', }] def filelog(self, path): return [ { 'change0': '12345', 'action0': 'edit', 'rev0': '3', 'depotFile': '//mydepot/test/README', } ] client = PerforceClient(TestWrapper) client.p4.repo_files = [ { 'depotFile': '//mydepot/test/README', 'rev': '2', 'action': 'edit', 'change': '12345', 'text': 'This is a test.\n', }, { 'depotFile': '//mydepot/test/README', 'rev': '3', 'action': 'edit', 'change': '', 'text': 'This is a mess.\n', }, ] readme_file = make_tempfile() client.p4.print_file('//mydepot/test/README#3', readme_file) client.p4.where_files = { '//mydepot/test/README': readme_file, } client.p4.repo_files = [ { 'depotFile': '//mydepot/test/README', 'rev': '2', 'action': 'edit', 'change': '12345', 'text': 'This is a test.\n', }, { 'depotFile': '//mydepot/test/README', 'rev': '3', 'action': 'edit', 'change': '', 'text': 'This is a mess.\n', }, ] revisions = client.parse_revision_spec(['12345']) diff = client.diff(revisions) self._compare_diff(diff, '8af5576f5192ca87731673030efb5f39', expect_changenum=False) def test_diff_with_moved_files_cap_on(self): """Testing PerforceClient.diff with moved files and capability on""" self._test_diff_with_moved_files( '5926515eaf4cf6d8257a52f7d9f0e530', caps={ 'scmtools': { 'perforce': { 'moved_files': True } } }) def test_diff_with_moved_files_cap_off(self): """Testing PerforceClient.diff with moved files and capability off""" self._test_diff_with_moved_files('20e5ab395e170dce1b062a796e6c2c13') def _test_diff_with_moved_files(self, expected_diff_hash, caps={}): client = self._build_client() client.capabilities = Capabilities(caps) client.p4.repo_files = [ { 'depotFile': '//mydepot/test/README', 'rev': '2', 'action': 'move/delete', 'change': '12345', 'text': 'This is a test.\n', }, { 'depotFile': '//mydepot/test/README-new', 'rev': '1', 'action': 'move/add', 'change': '12345', 'text': 'This is a mess.\n', }, { 'depotFile': '//mydepot/test/COPYING', 'rev': '2', 'action': 'move/delete', 'change': '12345', 'text': 'Copyright 2013 Joe User.\n', }, { 'depotFile': '//mydepot/test/COPYING-new', 'rev': '1', 'action': 'move/add', 'change': '12345', 'text': 'Copyright 2013 Joe User.\n', }, ] readme_file = make_tempfile() copying_file = make_tempfile() readme_file_new = make_tempfile() copying_file_new = make_tempfile() client.p4.print_file('//mydepot/test/README#2', readme_file) client.p4.print_file('//mydepot/test/COPYING#2', copying_file) client.p4.print_file('//mydepot/test/README-new#1', readme_file_new) client.p4.print_file('//mydepot/test/COPYING-new#1', copying_file_new) client.p4.where_files = { '//mydepot/test/README': readme_file, '//mydepot/test/COPYING': copying_file, '//mydepot/test/README-new': readme_file_new, '//mydepot/test/COPYING-new': copying_file_new, } client.p4.fstat_files = { '//mydepot/test/README': { 'clientFile': readme_file, 'movedFile': '//mydepot/test/README-new', }, '//mydepot/test/README-new': { 'clientFile': readme_file_new, 'depotFile': '//mydepot/test/README-new', }, '//mydepot/test/COPYING': { 'clientFile': copying_file, 'movedFile': '//mydepot/test/COPYING-new', }, '//mydepot/test/COPYING-new': { 'clientFile': copying_file_new, 
'depotFile': '//mydepot/test/COPYING-new', }, } revisions = client.parse_revision_spec(['12345']) diff = client.diff(revisions) self._compare_diff(diff, expected_diff_hash) def _build_client(self): self.options.p4_client = 'myclient' self.options.p4_port = 'perforce.example.com:1666' self.options.p4_passwd = '' client = PerforceClient(self.P4DiffTestWrapper, options=self.options) client.p4d_version = (2012, 2) return client def _compare_diff(self, diff_info, expected_diff_hash, expect_changenum=True): self.assertTrue(isinstance(diff_info, dict)) self.assertTrue('diff' in diff_info) if expect_changenum: self.assertTrue('changenum' in diff_info) diff_content = re.sub(br'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', br'1970-01-01 00:00:00', diff_info['diff']) self.assertEqual(md5(diff_content).hexdigest(), expected_diff_hash) def test_parse_revision_spec_no_args(self): """Testing PerforceClient.parse_revision_spec with no specified revisions""" client = self._build_client() revisions = client.parse_revision_spec() self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertEqual( revisions['base'], PerforceClient.REVISION_CURRENT_SYNC) self.assertEqual( revisions['tip'], PerforceClient.REVISION_PENDING_CLN_PREFIX + 'default') def test_parse_revision_spec_pending_cln(self): """Testing PerforceClient.parse_revision_spec with a pending changelist""" class TestWrapper(P4Wrapper): def change(self, changelist): return [{ 'Change': '12345', 'Date': '2013/12/19 11:32:45', 'User': 'example', 'Status': 'pending', 'Description': 'My change description\n', }] client = PerforceClient(TestWrapper) revisions = client.parse_revision_spec(['12345']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual( revisions['base'], PerforceClient.REVISION_CURRENT_SYNC) self.assertEqual( revisions['tip'], PerforceClient.REVISION_PENDING_CLN_PREFIX + '12345') def test_parse_revision_spec_submitted_cln(self): """Testing PerforceClient.parse_revision_spec with a submitted changelist""" class TestWrapper(P4Wrapper): def change(self, changelist): return [{ 'Change': '12345', 'Date': '2013/12/19 11:32:45', 'User': 'example', 'Status': 'submitted', 'Description': 'My change description\n', }] client = PerforceClient(TestWrapper) revisions = client.parse_revision_spec(['12345']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], '12344') self.assertEqual(revisions['tip'], '12345') def test_parse_revision_spec_shelved_cln(self): """Testing PerforceClient.parse_revision_spec with a shelved changelist""" class TestWrapper(P4Wrapper): def change(self, changelist): return [{ 'Change': '12345', 'Date': '2013/12/19 11:32:45', 'User': 'example', 'Status': 'shelved', 'Description': 'My change description\n', }] client = PerforceClient(TestWrapper) revisions = client.parse_revision_spec(['12345']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual( revisions['base'], PerforceClient.REVISION_CURRENT_SYNC) self.assertEqual( revisions['tip'], PerforceClient.REVISION_PENDING_CLN_PREFIX + '12345') def test_parse_revision_spec_two_args(self): """Testing 
PerforceClient.parse_revision_spec with two changelists""" class TestWrapper(P4Wrapper): def change(self, changelist): change = { 'Change': str(changelist), 'Date': '2013/12/19 11:32:45', 'User': 'example', 'Description': 'My change description\n', } if changelist == '99' or changelist == '100': change['Status'] = 'submitted' elif changelist == '101': change['Status'] = 'pending' elif changelist == '102': change['Status'] = 'shelved' else: assert False return [change] client = PerforceClient(TestWrapper) revisions = client.parse_revision_spec(['99', '100']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], '99') self.assertEqual(revisions['tip'], '100') self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['99', '101']) self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['99', '102']) self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['101', '100']) self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['102', '100']) self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['102', '10284']) def test_parse_revision_spec_invalid_spec(self): """Testing PerforceClient.parse_revision_spec with invalid specifications""" class TestWrapper(P4Wrapper): def change(self, changelist): return [] client = PerforceClient(TestWrapper) self.assertRaises(InvalidRevisionSpecError, client.parse_revision_spec, ['aoeu']) self.assertRaises(TooManyRevisionsError, client.parse_revision_spec, ['1', '2', '3']) def test_diff_exclude(self): """Testing PerforceClient.normalize_exclude_patterns""" repo_root = self.chdir_tmp() os.mkdir('subdir') cwd = os.getcwd() class ExcludeWrapper(P4Wrapper): def info(self): return { 'Client root': repo_root, } client = PerforceClient(ExcludeWrapper) patterns = [ "//depot/path", os.path.join(os.path.sep, "foo"), "foo", ] normalized_patterns = [ # Depot paths should remain unchanged. patterns[0], # "Absolute" paths (i.e., ones that begin with a path separator) # should be relative to the repository root. os.path.join(repo_root, patterns[1][1:]), # Relative paths should be relative to the current working # directory. os.path.join(cwd, patterns[2]), ] result = client.normalize_exclude_patterns(patterns) self.assertEqual(result, normalized_patterns) RBTools-0.7.11/rbtools/clients/tests/test_svn.py0000644000232200023220000007740713230242633022215 0ustar debalancedebalance"""Unit tests for SubversionClient.""" from __future__ import unicode_literals import json import os import sys from functools import wraps from hashlib import md5 from kgb import SpyAgency from nose import SkipTest from six.moves.urllib.request import urlopen from six.moves import cStringIO as StringIO from rbtools.api.client import RBClient from rbtools.api.tests import MockResponse from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError) from rbtools.clients.svn import SVNRepositoryInfo, SVNClient from rbtools.clients.tests import FOO1, FOO2, FOO3, SCMClientTests from rbtools.utils.checks import is_valid_version from rbtools.utils.process import execute def svn_version_set_hash(svn16_hash, svn17_hash, svn19_hash): """Pass the appropriate hash to the wrapped function. SVN 1.6, 1.7/1.8, and 1.9+ will generate slightly different output for ``svn diff`` when generating the diff with a working copy. 
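For example, a test run against SVN 1.8 receives the same expected hash as one run against 1.7.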
This works around that by checking the installed SVN version and passing the appropriate hash. """ def decorator(f): @wraps(f) def wrapped(self): self.client.get_repository_info() version = self.client.subversion_client_version if version < (1, 7): return f(self, svn16_hash) elif version < (1, 9): return f(self, svn17_hash) else: return f(self, svn19_hash) return wrapped return decorator class SVNRepositoryInfoTests(SpyAgency, SCMClientTests): """Unit tests for rbtools.clients.svn.SVNRepositoryInfo.""" payloads = { 'http://localhost:8080/api/': { 'mimetype': 'application/vnd.reviewboard.org.root+json', 'rsp': { 'uri_templates': {}, 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, 'repositories': { 'href': 'http://localhost:8080/api/repositories/', 'method': 'GET', }, }, 'stat': 'ok', }, }, 'http://localhost:8080/api/repositories/?tool=Subversion': { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [ { # This one doesn't have a mirror_path, to emulate # Review Board 1.6. 'id': 1, 'name': 'SVN Repo 1', 'path': 'https://svn1.example.com/', 'links': { 'info': { 'href': ('https://localhost:8080/api/' 'repositories/1/info/'), 'method': 'GET', }, }, }, { 'id': 2, 'name': 'SVN Repo 2', 'path': 'https://svn2.example.com/', 'mirror_path': 'svn+ssh://svn2.example.com/', 'links': { 'info': { 'href': ('https://localhost:8080/api/' 'repositories/2/info/'), 'method': 'GET', }, }, }, ], 'links': { 'next': { 'href': ('http://localhost:8080/api/repositories/' '?tool=Subversion&page=2'), 'method': 'GET', }, }, 'total_results': 3, 'stat': 'ok', }, }, 'http://localhost:8080/api/repositories/?tool=Subversion&page=2': { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [ { 'id': 3, 'name': 'SVN Repo 3', 'path': 'https://svn3.example.com/', 'mirror_path': 'svn+ssh://svn3.example.com/', 'links': { 'info': { 'href': ('https://localhost:8080/api/' 'repositories/3/info/'), 'method': 'GET', }, }, }, ], 'total_results': 3, 'stat': 'ok', }, }, 'https://localhost:8080/api/repositories/1/info/': { 'mimetype': 'application/vnd.reviewboard.org.repository-info+json', 'rsp': { 'info': { 'uuid': 'UUID-1', 'url': 'https://svn1.example.com/', 'root_url': 'https://svn1.example.com/', }, 'stat': 'ok', }, }, 'https://localhost:8080/api/repositories/2/info/': { 'mimetype': 'application/vnd.reviewboard.org.repository-info+json', 'rsp': { 'info': { 'uuid': 'UUID-2', 'url': 'https://svn2.example.com/', 'root_url': 'https://svn2.example.com/', }, 'stat': 'ok', }, }, 'https://localhost:8080/api/repositories/3/info/': { 'mimetype': 'application/vnd.reviewboard.org.repository-info+json', 'rsp': { 'info': { 'uuid': 'UUID-3', 'url': 'https://svn3.example.com/', 'root_url': 'https://svn3.example.com/', }, 'stat': 'ok', }, }, } def setUp(self): super(SVNRepositoryInfoTests, self).setUp() self.spy_on(urlopen, call_fake=self._urlopen) self.api_client = RBClient('http://localhost:8080/') self.root_resource = self.api_client.get_root() def test_find_server_repository_info_with_path_match(self): """Testing SVNRepositoryInfo.find_server_repository_info with path matching """ info = SVNRepositoryInfo('https://svn1.example.com/', '/', '') repo_info = info.find_server_repository_info(self.root_resource) self.assertEqual(repo_info, info) self.assertEqual(repo_info.repository_id, 1) def test_find_server_repository_info_with_mirror_path_match(self): """Testing SVNRepositoryInfo.find_server_repository_info with mirror_path matching """ info = 
SVNRepositoryInfo('svn+ssh://svn2.example.com/', '/', '') repo_info = info.find_server_repository_info(self.root_resource) self.assertEqual(repo_info, info) self.assertEqual(repo_info.repository_id, 2) def test_find_server_repository_info_with_uuid_match(self): """Testing SVNRepositoryInfo.find_server_repository_info with UUID matching """ info = SVNRepositoryInfo('svn+ssh://blargle/', '/', 'UUID-3') repo_info = info.find_server_repository_info(self.root_resource) self.assertNotEqual(repo_info, info) self.assertEqual(repo_info.repository_id, 3) def test_relative_paths(self): """Testing SVNRepositoryInfo._get_relative_path""" info = SVNRepositoryInfo('http://svn.example.com/svn/', '/', '') self.assertEqual(info._get_relative_path('/foo', '/bar'), None) self.assertEqual(info._get_relative_path('/', '/trunk/myproject'), None) self.assertEqual(info._get_relative_path('/trunk/myproject', '/'), '/trunk/myproject') self.assertEqual( info._get_relative_path('/trunk/myproject', ''), '/trunk/myproject') self.assertEqual( info._get_relative_path('/trunk/myproject', '/trunk'), '/myproject') self.assertEqual( info._get_relative_path('/trunk/myproject', '/trunk/myproject'), '/') def _urlopen(self, request, **kwargs): url = request.get_full_url() try: payload = self.payloads[url] except KeyError: return MockResponse(404, {}, json.dumps({ 'rsp': { 'stat': 'fail', 'err': { 'code': 100, 'msg': 'Object does not exist', }, }, })) return MockResponse( 200, { 'Content-Type': payload['mimetype'], }, json.dumps(payload['rsp'])) class SVNClientTests(SCMClientTests): def setUp(self): super(SVNClientTests, self).setUp() if not self.is_exe_in_path('svn'): raise SkipTest('svn not found in path') self.svn_dir = os.path.join(self.testdata_dir, 'svn-repo') self.clone_dir = self.chdir_tmp() self.svn_repo_url = 'file://' + self.svn_dir self._run_svn(['co', self.svn_repo_url, 'svn-repo']) os.chdir(os.path.join(self.clone_dir, 'svn-repo')) self.client = SVNClient(options=self.options) self.options.svn_show_copies_as_adds = None def _run_svn(self, command): return execute(['svn'] + command, env=None, split_lines=False, ignore_errors=False, extra_ignore_errors=(), translate_newlines=True) def _svn_add_file(self, filename, data, changelist=None): """Add a file to the test repo.""" is_new = not os.path.exists(filename) f = open(filename, 'w') f.write(data) f.close() if is_new: self._run_svn(['add', filename]) if changelist: self._run_svn(['changelist', changelist, filename]) def _svn_add_dir(self, dirname): """Add a directory to the test repo.""" if not os.path.exists(dirname): os.mkdir(dirname) self._run_svn(['add', dirname]) def test_parse_revision_spec_no_args(self): """Testing SVNClient.parse_revision_spec with no specified revisions""" revisions = self.client.parse_revision_spec() self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 'BASE') self.assertEqual(revisions['tip'], '--rbtools-working-copy') def test_parse_revision_spec_one_revision(self): """Testing SVNClient.parse_revision_spec with one specified numeric revision""" revisions = self.client.parse_revision_spec(['3']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 2) self.assertEqual(revisions['tip'], 3) def test_parse_revision_spec_one_revision_changelist(self): 
"""Testing SVNClient.parse_revision_spec with one specified changelist revision""" self._svn_add_file('foo.txt', FOO3, 'my-change') revisions = self.client.parse_revision_spec(['my-change']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 'BASE') self.assertEqual(revisions['tip'], SVNClient.REVISION_CHANGELIST_PREFIX + 'my-change') def test_parse_revision_spec_one_revision_nonexistant_changelist(self): """Testing SVNClient.parse_revision_spec with one specified invalid changelist revision""" self._svn_add_file('foo.txt', FOO3, 'my-change') self.assertRaises( InvalidRevisionSpecError, lambda: self.client.parse_revision_spec(['not-my-change'])) def test_parse_revision_spec_one_arg_two_revisions(self): """Testing SVNClient.parse_revision_spec with R1:R2 syntax""" revisions = self.client.parse_revision_spec(['1:3']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 1) self.assertEqual(revisions['tip'], 3) def test_parse_revision_spec_two_arguments(self): """Testing SVNClient.parse_revision_spec with two revisions""" revisions = self.client.parse_revision_spec(['1', '3']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 1) self.assertEqual(revisions['tip'], 3) def test_parse_revision_spec_one_revision_url(self): """Testing SVNClient.parse_revision_spec with one revision and a repository URL""" self.options.repository_url = \ 'http://svn.apache.org/repos/asf/subversion/trunk' revisions = self.client.parse_revision_spec(['1549823']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 1549822) self.assertEqual(revisions['tip'], 1549823) def test_parse_revision_spec_two_revisions_url(self): """Testing SVNClient.parse_revision_spec with R1:R2 syntax and a repository URL""" self.options.repository_url = \ 'http://svn.apache.org/repos/asf/subversion/trunk' revisions = self.client.parse_revision_spec(['1549823:1550211']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 1549823) self.assertEqual(revisions['tip'], 1550211) def test_parse_revision_spec_invalid_spec(self): """Testing SVNClient.parse_revision_spec with invalid specifications""" self.assertRaises(InvalidRevisionSpecError, self.client.parse_revision_spec, ['aoeu']) self.assertRaises(InvalidRevisionSpecError, self.client.parse_revision_spec, ['aoeu', '1234']) self.assertRaises(TooManyRevisionsError, self.client.parse_revision_spec, ['1', '2', '3']) def test_parse_revision_spec_non_unicode_log(self): """Testing SVNClient.parse_revision_spec with a non-utf8 log entry""" # Note: the svn log entry for commit r2 contains one non-utf8 character revisions = self.client.parse_revision_spec(['2']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], 1) 
self.assertEqual(revisions['tip'], 2) def test_get_commit_message_working_copy(self): """Testing SVNClient.get_commit_message with a working copy change""" revisions = self.client.parse_revision_spec() message = self.client.get_commit_message(revisions) self.assertIsNone(message) def test_get_commit_message_committed_revision(self): """Testing SVNClient.get_commit_message with a single committed revision """ revisions = self.client.parse_revision_spec(['2']) message = self.client.get_commit_message(revisions) self.assertTrue('summary' in message) self.assertTrue('description' in message) self.assertEqual(message['summary'], 'Commit 2 -- a non-utf8 character: \xe9') self.assertEqual(message['description'], 'Commit 2 -- a non-utf8 character: \xe9\n') def test_get_commit_message_committed_revisions(self): """Testing SVNClient.get_commit_message with multiple committed revisions """ revisions = self.client.parse_revision_spec(['1:3']) message = self.client.get_commit_message(revisions) self.assertTrue('summary' in message) self.assertTrue('description' in message) self.assertEqual(message['summary'], 'Commit 2 -- a non-utf8 character: \xe9') self.assertEqual(message['description'], 'Commit 3') @svn_version_set_hash('6613644d417f7c90f83f3a2d16b1dad5', '7630ea80056a7340d93a556e9af60c63', '6a5339da19e60c7706e44aeebfa4da5f') def test_diff_exclude(self, md5sum): """Testing SVNClient diff with file exclude patterns""" self._svn_add_file('bar.txt', FOO1) self._svn_add_file('exclude.txt', FOO2) revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) def test_diff_exclude_in_subdir(self): """Testing SVNClient diff with exclude patterns in a subdir""" self._svn_add_file('foo.txt', FOO1) self._svn_add_dir('subdir') self._svn_add_file(os.path.join('subdir', 'exclude.txt'), FOO2) os.chdir('subdir') revisions = self.client.parse_revision_spec([]) result = self.client.diff( revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(result['diff'], '') def test_diff_exclude_root_pattern_in_subdir(self): """Testing SVNClient diff with repo exclude patterns in a subdir""" self._svn_add_file('exclude.txt', FOO1) self._svn_add_dir('subdir') os.chdir('subdir') revisions = self.client.parse_revision_spec([]) result = self.client.diff( revisions, exclude_patterns=[os.path.join(os.path.sep, 'exclude.txt'), '.']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(result['diff'], '') @svn_version_set_hash('043befc507b8177a0f010dc2cecc4205', '1b68063237c584d38a9a3ddbdf1f72a2', '466f7c2092e085354f5b24b91d48dd80') def test_same_diff_multiple_methods(self, md5_sum): """Testing SVNClient identical diff generated from root, subdirectory, and via target""" # Test diff generation for a single file, where 'svn diff' is invoked # from three different locations. This should result in an identical # diff for all three cases. Add a new subdirectory and file # (dir1/A.txt) which will be the lone change captured in the diff. # Cases: # 1) Invoke 'svn diff' from checkout root. # 2) Invoke 'svn diff' from dir1/ subdirectory. # 3) Create dir2/ subdirectory parallel to dir1/. Invoke 'svn diff' # from dir2/ where '../dir1/A.txt' is provided as a specific # target. 
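# All three invocations must therefore produce byte-identical diffs; the # assertions below verify this by comparing each against a single md5 digest.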
# # This test is inspired by #3749 which broke cases 2 and 3. self._svn_add_dir('dir1') self._svn_add_file('dir1/A.txt', FOO3) # Case 1: Generate diff from checkout root. revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) # Case 2: Generate diff from dir1 subdirectory. os.chdir('dir1') result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) # Case 3: Generate diff from dir2 subdirectory, but explicitly target # only ../dir1/A.txt. os.chdir('..') self._svn_add_dir('dir2') os.chdir('dir2') result = self.client.diff(revisions, ['../dir1/A.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) @svn_version_set_hash('902d662a110400f7470294b2d9e72d36', '13803373ded9af750384a4601d5173ce', 'f11dfbe58925871c5f64b6ca647a8d3c') def test_diff_non_unicode_characters(self, md5_sum): """Testing SVNClient diff with a non-utf8 file""" self._svn_add_file('A.txt', '\xe2'.encode('iso-8859-1')) self._run_svn(['propset', 'svn:mime-type', 'text/plain', 'A.txt']) revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) @svn_version_set_hash('79cbd5c4974f97d173ee87c50fa9cff2', 'bfa99e54b8c23b97b1dee23d2763c4fd', '7c6a4506828826aa7043adca347ef327') def test_diff_non_unicode_filename(self, md5_sum): """Testing SVNClient diff with a non-utf8 filename""" self.options.svn_show_copies_as_adds = 'y' filename = '\xe2' self._run_svn(['copy', 'foo.txt', filename]) self._run_svn(['propset', 'svn:mime-type', 'text/plain', filename]) # Generate identical diff from checkout root and via changelist. revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) self._run_svn(['changelist', 'cl1', filename]) revisions = self.client.parse_revision_spec(['cl1']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5_sum) @svn_version_set_hash('60c4d21f4d414da947f4e7273e6d1326', '60c4d21f4d414da947f4e7273e6d1326', '571e47c456698bad35bca06523473008') def test_diff_non_unicode_filename_repository_url(self, md5sum): """Testing SVNClient diff with a non-utf8 filename via repository_url option""" self.options.repository_url = self.svn_repo_url # Note: commit r4 adds one file with a non-utf8 character in both its # filename and content. 
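# Parsing the spec ['4'] yields base=3 and tip=4, so the diff below covers # exactly that commit without requiring a working copy.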
revisions = self.client.parse_revision_spec(['4']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) @svn_version_set_hash('ac1835240ec86ee14ddccf1f2236c442', 'ac1835240ec86ee14ddccf1f2236c442', '610f5506e670dc55a2464a6ad9af015c') def test_show_copies_as_adds_enabled(self, md5sum): """Testing SVNClient with --show-copies-as-adds functionality enabled""" self.check_show_copies_as_adds('y', md5sum) @svn_version_set_hash('d41d8cd98f00b204e9800998ecf8427e', 'd41d8cd98f00b204e9800998ecf8427e', 'b656e2f9b70ade256c3fe855c13ee52c') def test_show_copies_as_adds_disabled(self, md5sum): """Testing SVNClient with --show-copies-as-adds functionality disabled""" self.check_show_copies_as_adds('n', md5sum) def check_show_copies_as_adds(self, state, md5sum): """Helper function to evaluate --show-copies-as-adds""" self.client.get_repository_info() # Ensure valid SVN client version. if not is_valid_version(self.client.subversion_client_version, self.client.SHOW_COPIES_AS_ADDS_MIN_VERSION): raise SkipTest('Subversion client is too old to test ' '--show-copies-as-adds.') self.options.svn_show_copies_as_adds = state self._svn_add_dir('dir1') self._svn_add_dir('dir2') self._run_svn(['copy', 'foo.txt', 'dir1']) # Generate identical diff via several methods: # 1) from checkout root # 2) via changelist # 3) from checkout root when all relevant files belong to a changelist # 4) via explicit include target revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) self._run_svn(['changelist', 'cl1', 'dir1/foo.txt']) revisions = self.client.parse_revision_spec(['cl1']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) self._run_svn(['changelist', '--remove', 'dir1/foo.txt']) os.chdir('dir2') revisions = self.client.parse_revision_spec() result = self.client.diff(revisions, ['../dir1']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), md5sum) def test_history_scheduled_with_commit_nominal(self): """Testing SVNClient.history_scheduled_with_commit nominal cases""" self.client.get_repository_info() # Ensure valid SVN client version. if not is_valid_version(self.client.subversion_client_version, self.client.SHOW_COPIES_AS_ADDS_MIN_VERSION): raise SkipTest('Subversion client is too old to test ' 'history_scheduled_with_commit().') self._svn_add_dir('dir1') self._svn_add_dir('dir2') self._run_svn(['copy', 'foo.txt', 'dir1']) # Squash stderr to prevent error message in test output. 
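# The client is expected to write its error to stderr before raising # SystemExit, so an in-memory buffer stands in for sys.stderr here.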
sys.stderr = StringIO() # Ensure SystemExit is raised when attempting to generate diff via # several methods: # 1) from checkout root # 2) via changelist # 3) from checkout root when all relevant files belong to a changelist # 4) via explicit include target revisions = self.client.parse_revision_spec() self.assertRaises(SystemExit, self.client.diff, revisions) self._run_svn(['changelist', 'cl1', 'dir1/foo.txt']) revisions = self.client.parse_revision_spec(['cl1']) self.assertRaises(SystemExit, self.client.diff, revisions) revisions = self.client.parse_revision_spec() self.assertRaises(SystemExit, self.client.diff, revisions) self._run_svn(['changelist', '--remove', 'dir1/foo.txt']) os.chdir('dir2') revisions = self.client.parse_revision_spec() self.assertRaises(SystemExit, self.client.diff, revisions, ['../dir1']) def test_history_scheduled_with_commit_special_case_non_local_mods(self): """Testing SVNClient.history_scheduled_with_commit is bypassed when diff is not for local modifications in a working copy""" self.client.get_repository_info() # Ensure valid SVN client version. if not is_valid_version(self.client.subversion_client_version, self.client.SHOW_COPIES_AS_ADDS_MIN_VERSION): raise SkipTest('Subversion client is too old to test ' 'history_scheduled_with_commit().') # While within a working copy which contains a scheduled commit with # addition-with-history, ensure history_scheduled_with_commit() is not # executed when generating a diff between two revisions either # 1) locally or 2) via the --repository-url option. self._run_svn(['copy', 'foo.txt', 'foo_copy.txt']) revisions = self.client.parse_revision_spec(['1:2']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'ed154720a7459c2649cab4d2fa34fa93') self.options.repository_url = self.svn_repo_url revisions = self.client.parse_revision_spec(['2']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'ed154720a7459c2649cab4d2fa34fa93') def test_history_scheduled_with_commit_special_case_exclude(self): """Testing SVNClient.history_scheduled_with_commit with exclude file""" self.client.get_repository_info() # Ensure valid SVN client version. if not is_valid_version(self.client.subversion_client_version, self.client.SHOW_COPIES_AS_ADDS_MIN_VERSION): raise SkipTest('Subversion client is too old to test ' 'history_scheduled_with_commit().') # A lone file with history is also excluded. In this case there should # be no SystemExit raised and an (empty) diff should be produced. Test # from checkout root and via changelist. 
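# Note: d41d8cd98f00b204e9800998ecf8427e is the md5 of zero bytes, i.e. an # empty diff, which is what the assertions below expect.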
self._run_svn(['copy', 'foo.txt', 'foo_copy.txt']) revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, [], ['foo_copy.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'd41d8cd98f00b204e9800998ecf8427e') self._run_svn(['changelist', 'cl1', 'foo_copy.txt']) revisions = self.client.parse_revision_spec(['cl1']) result = self.client.diff(revisions, [], ['foo_copy.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'd41d8cd98f00b204e9800998ecf8427e') def test_rename_diff_mangling_bug_4546(self): """Test diff with removal of lines that look like headers""" # If a file has lines that look like "-- XX (YY)", and one of those # files gets removed, our rename handling would filter them out. Test # that the bug is fixed. with open('bug-4546.txt', 'w') as f: f.write('-- test line1\n' '-- test line2\n' '-- test line (test2)\n') revisions = self.client.parse_revision_spec() result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertTrue('--- test line (test1)' in result['diff']) RBTools-0.7.11/rbtools/clients/tests/__init__.py0000644000232200023220000000654213230242633022077 0ustar debalancedebalance"""Unit tests for RBTools clients.""" from __future__ import unicode_literals import os from rbtools.tests import OptionsStub from rbtools.utils.testbase import RBTestBase class SCMClientTests(RBTestBase): """Base class for RBTools client unit tests.""" def setUp(self): super(SCMClientTests, self).setUp() self.options = OptionsStub() self.testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata') FOO = b"""\ ARMA virumque cano, Troiae qui primus ab oris Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; multa quoque et bello passus, dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, quidve dolens, regina deum tot volvere casus insignem pietate virum, tot adire labores impulerit. Tantaene animis caelestibus irae? """ FOO1 = b"""\ ARMA virumque cano, Troiae qui primus ab oris Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; multa quoque et bello passus, dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, """ FOO2 = b"""\ ARMA virumque cano, Troiae qui primus ab oris ARMA virumque cano, Troiae qui primus ab oris ARMA virumque cano, Troiae qui primus ab oris Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; multa quoque et bello passus, dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, """ FOO3 = b"""\ ARMA virumque cano, Troiae qui primus ab oris ARMA virumque cano, Troiae qui primus ab oris Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Albanique patres, atque altae moenia Romae. 
Musa, mihi causas memora, quo numine laeso, """ FOO4 = b"""\ Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, """ FOO5 = b"""\ litora, multum ille et terris iactatus et alto Italiam, fato profugus, Laviniaque venit vi superum saevae memorem Iunonis ob iram; dum conderet urbem, Albanique patres, atque altae moenia Romae. Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, inferretque deos Latio, genus unde Latinum, ARMA virumque cano, Troiae qui primus ab oris ARMA virumque cano, Troiae qui primus ab oris """ FOO6 = b"""\ ARMA virumque cano, Troiae qui primus ab oris ARMA virumque cano, Troiae qui primus ab oris Italiam, fato profugus, Laviniaque venit litora, multum ille et terris iactatus et alto vi superum saevae memorem Iunonis ob iram; dum conderet urbem, inferretque deos Latio, genus unde Latinum, Albanique patres, atque altae moenia Romae. Albanique patres, atque altae moenia Romae. Musa, mihi causas memora, quo numine laeso, """ RBTools-0.7.11/rbtools/clients/tests/test_bzr.py0000644000232200023220000004037113230242633022172 0ustar debalancedebalance"""Unit tests for BazaarClient.""" from __future__ import unicode_literals import os from hashlib import md5 from tempfile import mktemp from nose import SkipTest from rbtools.clients import RepositoryInfo from rbtools.clients.bazaar import BazaarClient from rbtools.clients.errors import TooManyRevisionsError from rbtools.clients.tests import FOO, FOO1, FOO2, FOO3, SCMClientTests from rbtools.utils.process import execute class BazaarClientTests(SCMClientTests): """Unit tests for BazaarClient.""" def setUp(self): super(BazaarClientTests, self).setUp() if not self.is_exe_in_path("bzr"): raise SkipTest("bzr not found in path") self.set_user_home( os.path.join(self.testdata_dir, 'homedir')) self.orig_dir = os.getcwd() self.original_branch = self.chdir_tmp() self._run_bzr(["init", "."]) self._bzr_add_file_commit("foo.txt", FOO, "initial commit") self.child_branch = mktemp() self._run_bzr(["branch", self.original_branch, self.child_branch]) self.client = BazaarClient(options=self.options) os.chdir(self.orig_dir) self.options.parent_branch = None def _run_bzr(self, command, *args, **kwargs): return execute(['bzr'] + command, *args, **kwargs) def _bzr_add_file_commit(self, file, data, msg): """Add a file to a Bazaar repository with the content of data and commit with msg.""" with open(file, 'w') as foo: foo.write(data) self._run_bzr(["add", file]) self._run_bzr(["commit", "-m", msg, '--author', 'Test User']) def _compare_diffs(self, filename, full_diff, expected_diff_digest, change_type='modified'): """Testing that the full_diff for ``filename`` matches the ``expected_diff``.""" diff_lines = full_diff.splitlines() self.assertEqual(('=== %s file \'%s\'' % (change_type, filename)).encode('utf-8'), diff_lines[0]) self.assertTrue(diff_lines[1].startswith( ('--- %s\t' % filename).encode('utf-8'))) self.assertTrue(diff_lines[2].startswith( ('+++ %s\t' % filename).encode('utf-8'))) diff_body = b'\n'.join(diff_lines[3:]) self.assertEqual(md5(diff_body).hexdigest(), expected_diff_digest) def _count_files_in_diff(self, diff): return len([ line for line in diff.split(b'\n') if line.startswith(b'===') ]) def test_get_repository_info_original_branch(self): """Testing BazaarClient 
get_repository_info with original branch""" os.chdir(self.original_branch) ri = self.client.get_repository_info() self.assertTrue(isinstance(ri, RepositoryInfo)) self.assertEqual(os.path.realpath(ri.path), os.path.realpath(self.original_branch)) self.assertTrue(ri.supports_parent_diffs) self.assertEqual(ri.base_path, '/') self.assertFalse(ri.supports_changesets) def test_get_repository_info_child_branch(self): """Testing BazaarClient get_repository_info with child branch""" os.chdir(self.child_branch) ri = self.client.get_repository_info() self.assertTrue(isinstance(ri, RepositoryInfo)) self.assertEqual(os.path.realpath(ri.path), os.path.realpath(self.child_branch)) self.assertTrue(ri.supports_parent_diffs) self.assertEqual(ri.base_path, "/") self.assertFalse(ri.supports_changesets) def test_get_repository_info_no_branch(self): """Testing BazaarClient get_repository_info, no branch""" self.chdir_tmp() ri = self.client.get_repository_info() self.assertEqual(ri, None) def test_too_many_revisions(self): """Testing BazaarClient parse_revision_spec with too many revisions""" self.assertRaises(TooManyRevisionsError, self.client.parse_revision_spec, [1, 2, 3]) def test_diff_simple(self): """Testing BazaarClient simple diff case""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "delete and modify stuff") revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs('foo.txt', result['diff'], 'a6326b53933f8b255a4b840485d8e210') def test_diff_exclude(self): """Testing BazaarClient diff with file exclusion.""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") self._bzr_add_file_commit("exclude.txt", FOO2, "commit 2") revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs('foo.txt', result['diff'], 'a6326b53933f8b255a4b840485d8e210') self.assertEqual(self._count_files_in_diff(result['diff']), 1) def test_diff_exclude_in_subdir(self): """Testing BazaarClient diff with file exclusion in a subdirectory.""" os.chdir(self.child_branch) self._bzr_add_file_commit('foo.txt', FOO1, 'commit 1') os.mkdir('subdir') os.chdir('subdir') self._bzr_add_file_commit('exclude.txt', FOO2, 'commit 2') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt', '.']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs('foo.txt', result['diff'], 'a6326b53933f8b255a4b840485d8e210') self.assertEqual(self._count_files_in_diff(result['diff']), 1) def test_diff_exclude_root_pattern_in_subdir(self): """Testing BazaarClient diff with file exclusion in the repo root.""" os.chdir(self.child_branch) self._bzr_add_file_commit('exclude.txt', FOO2, 'commit 1') os.mkdir('subdir') os.chdir('subdir') self._bzr_add_file_commit('foo.txt', FOO1, 'commit 2') revisions = self.client.parse_revision_spec([]) result = self.client.diff( revisions, exclude_patterns=[os.path.sep + 'exclude.txt', os.path.sep + 'subdir']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs(os.path.join('subdir', 'foo.txt'), result['diff'], '4deffcb296180fa166eddff2512bd0e4', change_type='added') def test_diff_specific_files(self): """Testing BazaarClient diff with specific files""" 
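# Only foo.txt is passed as an include file below, so the change to # bar.txt must not appear in the generated diff.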
os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "delete and modify stuff") self._bzr_add_file_commit("bar.txt", "baz", "added bar") revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, ['foo.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs('foo.txt', result['diff'], 'a6326b53933f8b255a4b840485d8e210') def test_diff_simple_multiple(self): """Testing BazaarClient simple diff with multiple commits case""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") self._bzr_add_file_commit("foo.txt", FOO3, "commit 3") revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs('foo.txt', result['diff'], '4109cc082dce22288c2f1baca9b107b6') def test_diff_parent(self): """Testing BazaarClient diff with changes only in the parent branch""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "delete and modify stuff") grand_child_branch = mktemp() self._run_bzr(["branch", self.child_branch, grand_child_branch]) os.chdir(grand_child_branch) revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(result['diff'], None) def test_diff_grand_parent(self): """Testing BazaarClient diff with changes between a 2nd level descendant""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "delete and modify stuff") grand_child_branch = mktemp() self._run_bzr(["branch", self.child_branch, grand_child_branch]) os.chdir(grand_child_branch) # Requesting the diff between the grand child branch and its grand # parent: self.options.parent_branch = self.original_branch revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self._compare_diffs("foo.txt", result['diff'], 'a6326b53933f8b255a4b840485d8e210') def test_guessed_summary_and_description(self): """Testing BazaarClient guessing summary and description""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") self._bzr_add_file_commit("foo.txt", FOO3, "commit 3") self.options.guess_summary = True self.options.guess_description = True revisions = self.client.parse_revision_spec([]) commit_message = self.client.get_commit_message(revisions) self.assertEqual("commit 3", commit_message['summary']) description = commit_message['description'] self.assertTrue("commit 1" in description) self.assertTrue("commit 2" in description) self.assertFalse("commit 3" in description) def test_guessed_summary_and_description_in_grand_parent_branch(self): """Testing BazaarClient guessing summary and description for grand parent branch.""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") self._bzr_add_file_commit("foo.txt", FOO3, "commit 3") self.options.guess_summary = True self.options.guess_description = True grand_child_branch = mktemp() self._run_bzr(["branch", self.child_branch, grand_child_branch]) os.chdir(grand_child_branch) # Requesting the diff between the grand child branch and its grand # parent: self.options.parent_branch = 
self.original_branch revisions = self.client.parse_revision_spec([]) commit_message = self.client.get_commit_message(revisions) self.assertEqual("commit 3", commit_message['summary']) description = commit_message['description'] self.assertTrue("commit 1" in description) self.assertTrue("commit 2" in description) self.assertFalse("commit 3" in description) def test_guessed_summary_and_description_with_revision_range(self): """Testing BazaarClient guessing summary and description with a revision range.""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") self._bzr_add_file_commit("foo.txt", FOO3, "commit 3") self.options.guess_summary = True self.options.guess_description = True revisions = self.client.parse_revision_spec(['2..3']) commit_message = self.client.get_commit_message(revisions) self.assertEqual("commit 2", commit_message['summary']) self.assertEqual("commit 2", commit_message['description']) def test_parse_revision_spec_no_args(self): """Testing BazaarClient.parse_revision_spec with no specified revisions""" os.chdir(self.child_branch) base_commit_id = self.client._get_revno() self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") tip_commit_id = self.client._get_revno() revisions = self.client.parse_revision_spec() self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg(self): """Testing BazaarClient.parse_revision_spec with one specified revision""" os.chdir(self.child_branch) base_commit_id = self.client._get_revno() self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") tip_commit_id = self.client._get_revno() revisions = self.client.parse_revision_spec([tip_commit_id]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg_parent(self): """Testing BazaarClient.parse_revision_spec with one specified revision and a parent diff""" os.chdir(self.original_branch) parent_base_commit_id = self.client._get_revno() grand_child_branch = mktemp() self._run_bzr(["branch", self.child_branch, grand_child_branch]) os.chdir(grand_child_branch) base_commit_id = self.client._get_revno() self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") tip_commit_id = self.client._get_revno() self.options.parent_branch = self.child_branch revisions = self.client.parse_revision_spec([tip_commit_id]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('parent_base' in revisions) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertEqual(revisions['parent_base'], parent_base_commit_id) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg_split(self): """Testing BazaarClient.parse_revision_spec with R1..R2 syntax""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") base_commit_id = self.client._get_revno() self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") tip_commit_id = self.client._get_revno() revisions = self.client.parse_revision_spec( ['%s..%s' % (base_commit_id, 
tip_commit_id)]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('parent_base' not in revisions) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_two_args(self): """Testing BazaarClient.parse_revision_spec with two revisions""" os.chdir(self.child_branch) self._bzr_add_file_commit("foo.txt", FOO1, "commit 1") base_commit_id = self.client._get_revno() self._bzr_add_file_commit("foo.txt", FOO2, "commit 2") tip_commit_id = self.client._get_revno() revisions = self.client.parse_revision_spec( [base_commit_id, tip_commit_id]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('parent_base' not in revisions) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) RBTools-0.7.11/rbtools/clients/tests/test_git.py0000644000232200023220000007510613230242633022164 0ustar debalancedebalance"""Unit tests for GitClient.""" from __future__ import unicode_literals import os from hashlib import md5 import six from kgb import SpyAgency from nose import SkipTest from rbtools.clients import RepositoryInfo from rbtools.clients.errors import (MergeError, PushError, TooManyRevisionsError) from rbtools.clients.git import GitClient from rbtools.clients.tests import FOO1, FOO2, FOO3, SCMClientTests from rbtools.utils.console import edit_text from rbtools.utils.filesystem import load_config from rbtools.utils.process import execute class GitClientTests(SpyAgency, SCMClientTests): """Unit tests for GitClient.""" TESTSERVER = "http://127.0.0.1:8080" AUTHOR = type( b'Author', (object,), { 'fullname': 'name', 'email': 'email' }) def _run_git(self, command): return execute(['git'] + command, env=None, split_lines=False, ignore_errors=False, extra_ignore_errors=(), translate_newlines=True) def _git_add_file_commit(self, filename, data, msg): """Add a file to a git repository. Args: filename (unicode): The filename to write to. data (unicode): The content of the file to write. msg (unicode): The commit message to use. 
""" foo = open(filename, 'w') foo.write(data) foo.close() self._run_git(['add', filename]) self._run_git(['commit', '-m', msg]) def _git_get_head(self): return self._run_git(['rev-parse', 'HEAD']).strip() def setUp(self): super(GitClientTests, self).setUp() if not self.is_exe_in_path('git'): raise SkipTest('git not found in path') self.set_user_home( os.path.join(self.testdata_dir, 'homedir')) self.git_dir = os.path.join(self.testdata_dir, 'git-repo') self.clone_dir = self.chdir_tmp() self._run_git(['clone', self.git_dir, self.clone_dir]) self.client = GitClient(options=self.options) self.options.parent_branch = None def test_get_repository_info_simple(self): """Testing GitClient get_repository_info, simple case""" ri = self.client.get_repository_info() self.assertTrue(isinstance(ri, RepositoryInfo)) self.assertEqual(ri.base_path, '') self.assertEqual(ri.path.rstrip("/.git"), self.git_dir) self.assertTrue(ri.supports_parent_diffs) self.assertFalse(ri.supports_changesets) def test_scan_for_server_simple(self): """Testing GitClient scan_for_server, simple case""" ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertTrue(server is None) def test_scan_for_server_reviewboardrc(self): """Testing GitClient scan_for_server, .reviewboardrc case""" rc = open(os.path.join(self.clone_dir, '.reviewboardrc'), 'w') rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER) rc.close() self.client.config = load_config() ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertEqual(server, self.TESTSERVER) def test_scan_for_server_property(self): """Testing GitClient scan_for_server using repo property""" self._run_git(['config', 'reviewboard.url', self.TESTSERVER]) ri = self.client.get_repository_info() self.assertEqual(self.client.scan_for_server(ri), self.TESTSERVER) def test_diff_simple(self): """Testing GitClient simple diff case""" self.client.get_repository_info() base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'delete and modify stuff') commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_too_many_revisions(self): """Testing GitClient parse_revision_spec with too many revisions""" self.assertRaises(TooManyRevisionsError, self.client.parse_revision_spec, [1, 2, 3]) def test_diff_simple_multiple(self): """Testing GitClient simple diff with multiple commits case""" self.client.get_repository_info() base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'commit 1') self._git_add_file_commit('foo.txt', FOO2, 'commit 1') self._git_add_file_commit('foo.txt', FOO3, 'commit 1') commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) 
self.assertEqual(md5(result['diff']).hexdigest(), 'c9a31264f773406edff57a8ed10d9acc') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_exclude(self): """Testing GitClient simple diff with file exclusion.""" self.client.get_repository_info() base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'commit 1') self._git_add_file_commit('exclude.txt', FOO2, 'commit 2') commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_exclude_in_subdir(self): """Testing GitClient simple diff with file exclusion in a subdir""" base_commit_id = self._git_get_head() os.mkdir('subdir') self._git_add_file_commit('foo.txt', FOO1, 'commit 1') os.chdir('subdir') self._git_add_file_commit('exclude.txt', FOO2, 'commit 2') self.client.get_repository_info() commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_exclude_root_pattern_in_subdir(self): """Testing GitClient diff with file exclusion in the repo root.""" base_commit_id = self._git_get_head() os.mkdir('subdir') self._git_add_file_commit('foo.txt', FOO1, 'commit 1') self._git_add_file_commit('exclude.txt', FOO2, 'commit 2') os.chdir('subdir') self.client.get_repository_info() commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff( revisions, exclude_patterns=[os.path.sep + 'exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_branch_diverge(self): """Testing GitClient diff with divergent branches""" self._git_add_file_commit('foo.txt', FOO1, 'commit 1') self._run_git(['checkout', '-b', 'mybranch', '--track', 'origin/master']) base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO2, 'commit 2') commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) 
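# NOTE: a hedged sketch, using plain git rather than rbtools internals, of
# the merge-base lookup behind the divergent-branch expectations above: the
# diff base is the common ancestor of the local branch and its tracking
# branch, which is what base_commit_id is asserted against.
import subprocess

def merge_base(rev_a, rev_b):
    """Return the common ancestor commit of two revisions."""
    return subprocess.check_output(
        ['git', 'merge-base', rev_a, rev_b]).strip().decode('utf-8')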
self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'cfb79a46f7a35b07e21765608a7852f7') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) self._run_git(['checkout', 'master']) self.client.get_repository_info() commit_id = self._git_get_head() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_tracking_no_origin(self): """Testing GitClient diff with a tracking branch, but no origin remote""" self._run_git(['remote', 'add', 'quux', self.git_dir]) self._run_git(['fetch', 'quux']) self._run_git(['checkout', '-b', 'mybranch', '--track', 'quux/master']) base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'delete and modify stuff') commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_local_tracking(self): """Testing GitClient diff with a local tracking branch""" base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'commit 1') self._run_git(['checkout', '-b', 'mybranch', '--track', 'master']) self._git_add_file_commit('foo.txt', FOO2, 'commit 2') commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'cfb79a46f7a35b07e21765608a7852f7') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_tracking_override(self): """Testing GitClient diff with option override for tracking branch""" self.options.tracking = 'origin/master' self._run_git(['remote', 'add', 'bad', self.git_dir]) self._run_git(['fetch', 'bad']) self._run_git(['checkout', '-b', 'mybranch', '--track', 'bad/master']) base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO1, 'commit 1') commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, 
dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), '69d4616cf985f6b10571036db744e2d8') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_diff_slash_tracking(self): """Testing GitClient diff with tracking branch that has slash in its name""" self._run_git(['fetch', 'origin']) self._run_git(['checkout', '-b', 'my/branch', '--track', 'origin/not-master']) base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO2, 'commit 2') commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertEqual(len(result), 4) self.assertTrue('diff' in result) self.assertTrue('parent_diff' in result) self.assertTrue('base_commit_id' in result) self.assertTrue('commit_id' in result) self.assertEqual(md5(result['diff']).hexdigest(), 'd2015ff5fd0297fd7f1210612f87b6b3') self.assertEqual(result['parent_diff'], None) self.assertEqual(result['base_commit_id'], base_commit_id) self.assertEqual(result['commit_id'], commit_id) def test_parse_revision_spec_no_args(self): """Testing GitClient.parse_revision_spec with no specified revisions""" base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec() self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_no_args_parent(self): """Testing GitClient.parse_revision_spec with no specified revisions and a parent diff""" parent_base_commit_id = self._git_get_head() self._run_git(['fetch', 'origin']) self._run_git(['checkout', '-b', 'parent-branch', '--track', 'origin/not-master']) base_commit_id = self._git_get_head() self._run_git(['checkout', '-b', 'topic-branch']) self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.options.parent_branch = 'parent-branch' self.client.get_repository_info() revisions = self.client.parse_revision_spec() self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' in revisions) self.assertEqual(revisions['parent_base'], parent_base_commit_id) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg(self): """Testing GitClient.parse_revision_spec with one specified revision""" base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([tip_commit_id]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def 
test_parse_revision_spec_one_arg_parent(self): """Testing GitClient.parse_revision_spec with one specified revision and a parent diff""" parent_base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') base_commit_id = self._git_get_head() self._git_add_file_commit('foo.txt', FOO3, 'Commit 3') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec([tip_commit_id]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' in revisions) self.assertEqual(revisions['parent_base'], parent_base_commit_id) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_two_args(self): """Testing GitClient.parse_revision_spec with two specified revisions""" base_commit_id = self._git_get_head() self._run_git(['checkout', '-b', 'topic-branch']) self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec(['master', 'topic-branch']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg_two_revs(self): """Testing GitClient.parse_revision_spec with diff-since syntax""" base_commit_id = self._git_get_head() self._run_git(['checkout', '-b', 'topic-branch']) self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec(['master..topic-branch']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_parse_revision_spec_one_arg_since_merge(self): """Testing GitClient.parse_revision_spec with diff-since-merge syntax""" base_commit_id = self._git_get_head() self._run_git(['checkout', '-b', 'topic-branch']) self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') tip_commit_id = self._git_get_head() self.client.get_repository_info() revisions = self.client.parse_revision_spec(['master...topic-branch']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base_commit_id) self.assertEqual(revisions['tip'], tip_commit_id) def test_get_raw_commit_message(self): """Testing GitClient.get_raw_commit_message""" self._git_add_file_commit('foo.txt', FOO2, 'Commit 2') self.client.get_repository_info() revisions = self.client.parse_revision_spec() self.assertEqual(self.client.get_raw_commit_message(revisions), 'Commit 2') def test_push_upstream_pull_exception(self): """Testing GitClient.push_upstream with an invalid remote branch. It must raise a PushError exception because the 'git pull' from an invalid upstream branch will fail. 
""" try: self.client.push_upstream('non-existent-branch') except PushError as e: self.assertEqual(six.text_type(e), 'Could not pull changes from upstream.') else: self.fail('Expected PushError') def test_push_upstream_no_push_exception(self): """Testing GitClient.push_upstream with 'git push' disabled. We set the push url to be an invalid one, which should normally cause the 'git push' to fail. However, push_upstream() must not fail (must not raise a PushError) because it gets its origin_url from the Git config, which still contains a valid fetch url. """ self._run_git(['remote', 'set-url', '--push', 'origin', 'bad-url']) # This line should not raise an exception. self.client.push_upstream('master') def test_merge_invalid_destination(self): """Testing GitClient.merge with an invalid destination branch. It must raise a MergeError exception because 'git checkout' to the invalid destination branch will fail. """ try: self.client.merge('master', 'non-existent-branch', 'commit message', self.AUTHOR) except MergeError as e: self.assertTrue(six.text_type(e).startswith( "Could not checkout to branch 'non-existent-branch'")) else: self.fail('Expected MergeError') def test_merge_invalid_target(self): """Testing GitClient.merge with an invalid target branch. It must raise a MergeError exception because 'git merge' from an invalid target branch will fail. """ try: self.client.merge('non-existent-branch', 'master', 'commit message', self.AUTHOR) except MergeError as e: self.assertTrue(six.text_type(e).startswith( "Could not merge branch 'non-existent-branch'")) else: self.fail('Expected MergeError') def test_merge_with_squash(self): """Testing GitClient.merge with squash set to True. We use a KGB function spy to check if execute is called with the right arguments i.e. with the '--squash' flag (and not with the '--no-ff' flag. """ self.spy_on(execute) self.client.get_repository_info() # Since pushing data upstream to the test repo corrupts its state, # we clone the clone and use one clone as the remote for the other. # We need to push data upstrem for the merge to work. self.git_dir = os.getcwd() self.clone_dir = self.chdir_tmp() self._run_git(['clone', self.git_dir, self.clone_dir]) self.client.get_repository_info() self._run_git(['checkout', '-b', 'new-branch']) self._git_add_file_commit('foo1.txt', FOO1, 'on new-branch') self._run_git(['push', 'origin', 'new-branch']) self.client.merge('new-branch', 'master', 'message', self.AUTHOR, True) self.assertEqual(execute.spy.calls[-2].args[0], ['git', 'merge', 'new-branch', '--squash', '--no-commit']) def test_merge_without_squash(self): """Testing GitClient.merge with squash set to False. We use a KGB function spy to check if execute is called with the right arguments i.e. with the '--no-ff' flag (and not with the '--squash' flag). """ self.spy_on(execute) self.client.get_repository_info() # Since pushing data upstream to the test repo corrupts its state, # we clone the clone and use one clone as the remote for the other. # We need to push data upstrem for the merge to work. 
# Since pushing data upstream to the test repo corrupts its state, # we clone the clone and use one clone as the remote for the other. # We need to push data upstream for the merge to work. self.git_dir = os.getcwd() self.clone_dir = self.chdir_tmp() self._run_git(['clone', self.git_dir, self.clone_dir]) self.client.get_repository_info() self._run_git(['checkout', '-b', 'new-branch']) self._git_add_file_commit('foo1.txt', FOO1, 'on new-branch') self._run_git(['push', 'origin', 'new-branch']) self.client.merge('new-branch', 'master', 'message', self.AUTHOR, False) self.assertEqual(execute.spy.calls[-2].args[0], ['git', 'merge', 'new-branch', '--no-ff', '--no-commit']) def test_create_commit_run_editor(self): """Testing GitClient.create_commit with run_editor set to True. We use a KGB function spy to check if edit_text is called, and then we intercept the call returning a custom commit message. We then ensure that execute is called with that custom commit message. """ self.spy_on(edit_text, call_fake=self.return_new_message) self.spy_on(execute) foo = open('foo.txt', 'w') foo.write('change') foo.close() self.client.create_commit('old_message', self.AUTHOR, True, ['foo.txt']) self.assertTrue(edit_text.spy.called) self.assertEqual(execute.spy.last_call.args[0], ['git', 'commit', '-m', 'new_message', '--author="name <email>"']) def test_create_commit_without_run_editor(self): """Testing GitClient.create_commit with run_editor set to False. We use a KGB function spy to check if edit_text is not called. We set it up so that if edit_text was called, we intercept the call returning a custom commit message. However, since we are expecting edit_text to not be called, we ensure that execute is called with the old commit message (and not the custom new one). """ self.spy_on(edit_text, call_fake=self.return_new_message) self.spy_on(execute) foo = open('foo.txt', 'w') foo.write('change') foo.close() self.client.create_commit('old_message', self.AUTHOR, False, ['foo.txt']) self.assertFalse(edit_text.spy.called) self.assertEqual(execute.spy.last_call.args[0], ['git', 'commit', '-m', 'old_message', '--author="name <email>"']) def test_create_commit_all_files(self): """Testing GitClient.create_commit with all_files set to True. We use a KGB function spy to check if execute is called with the right arguments i.e. with 'git add --all :/' (and not with 'git add <file>'). """ self.spy_on(execute) foo = open('foo.txt', 'w') foo.write('change') foo.close() self.client.create_commit('message', self.AUTHOR, False, [], True) self.assertEqual(execute.spy.calls[0].args[0], ['git', 'add', '--all', ':/']) def test_create_commit_without_all_files(self): """Testing GitClient.create_commit with all_files set to False. We use a KGB function spy to check if execute is called with the right arguments i.e. with 'git add <file>' (and not with 'git add --all :/'). """ self.spy_on(execute) foo = open('foo.txt', 'w') foo.write('change') foo.close() self.client.create_commit('message', self.AUTHOR, False, ['foo.txt'], False) self.assertEqual(execute.spy.calls[0].args[0], ['git', 'add', 'foo.txt']) def test_delete_branch_with_merged_only(self): """Testing GitClient.delete_branch with merged_only set to True. We use a KGB function spy to check if execute is called with the right arguments i.e. with the -d flag (and not the -D flag). """ self.spy_on(execute) self._run_git(['branch', 'new-branch']) self.client.delete_branch('new-branch', True) self.assertTrue(execute.spy.called) self.assertEqual(execute.spy.last_call.args[0], ['git', 'branch', '-d', 'new-branch']) def test_delete_branch_without_merged_only(self): """Testing GitClient.delete_branch with merged_only set to False.
We use a KGB function spy to check if execute is called with the right arguments i.e. with the -D flag (and not the -d flag). """ self.spy_on(execute) self._run_git(['branch', 'new-branch']) self.client.delete_branch('new-branch', False) self.assertTrue(execute.spy.called) self.assertEqual(execute.spy.last_call.args[0], ['git', 'branch', '-D', 'new-branch']) def return_new_message(self, message): return 'new_message' RBTools-0.7.11/rbtools/clients/tests/test_mercurial.py0000644000232200023220000006404213230242633023361 0ustar debalancedebalance"""Unit tests for MercurialClient.""" from __future__ import unicode_literals import os import re import sys import time from hashlib import md5 from random import randint from textwrap import dedent from nose import SkipTest from rbtools.clients import RepositoryInfo from rbtools.clients.mercurial import MercurialClient from rbtools.clients.tests import (FOO, FOO1, FOO2, FOO3, FOO4, FOO5, FOO6, SCMClientTests) from rbtools.utils.filesystem import load_config from rbtools.utils.process import execute class MercurialTestBase(SCMClientTests): """Base class for all Mercurial unit tests.""" def setUp(self): super(MercurialTestBase, self).setUp() self._hg_env = {} def _run_hg(self, command, ignore_errors=False, extra_ignore_errors=()): # We're *not* doing `env = env or {}` here because # we want the caller to be able to *enable* reading # of user and system-level hgrc configuration. env = self._hg_env.copy() if not env: env = { 'HGRCPATH': os.devnull, 'HGPLAIN': '1', } return execute(['hg'] + command, env, split_lines=False, ignore_errors=ignore_errors, extra_ignore_errors=extra_ignore_errors, translate_newlines=True) def _hg_add_file_commit(self, filename, data, msg, branch=None): outfile = open(filename, 'w') outfile.write(data) outfile.close() if branch: self._run_hg(['branch', branch]) self._run_hg(['add', filename]) self._run_hg(['commit', '-m', msg]) class MercurialClientTests(MercurialTestBase): """Unit tests for MercurialClient.""" TESTSERVER = 'http://127.0.0.1:8080' CLONE_HGRC = dedent(""" [paths] default = %(hg_dir)s cloned = %(clone_dir)s [reviewboard] url = %(test_server)s [diff] git = true """).rstrip() def setUp(self): super(MercurialClientTests, self).setUp() if not self.is_exe_in_path('hg'): raise SkipTest('hg not found in path') self.hg_dir = os.path.join(self.testdata_dir, 'hg-repo') self.clone_dir = self.chdir_tmp() self._run_hg(['clone', self.hg_dir, self.clone_dir]) self.client = MercurialClient(options=self.options) clone_hgrc = open(self.clone_hgrc_path, 'wb') clone_hgrc.write(self.CLONE_HGRC % { 'hg_dir': self.hg_dir, 'clone_dir': self.clone_dir, 'test_server': self.TESTSERVER, }) clone_hgrc.close() self.options.parent_branch = None def _hg_get_tip(self): return self._run_hg(['identify']).split()[0] @property def clone_hgrc_path(self): return os.path.join(self.clone_dir, '.hg', 'hgrc') def test_get_repository_info_simple(self): """Testing MercurialClient get_repository_info, simple case""" ri = self.client.get_repository_info() self.assertTrue(isinstance(ri, RepositoryInfo)) self.assertEqual('', ri.base_path) hgpath = ri.path if os.path.basename(hgpath) == '.hg': hgpath = os.path.dirname(hgpath) self.assertEqual(self.hg_dir, hgpath) self.assertTrue(ri.supports_parent_diffs) self.assertFalse(ri.supports_changesets) def test_scan_for_server_simple(self): """Testing MercurialClient scan_for_server, simple case""" os.rename(self.clone_hgrc_path, os.path.join(self.clone_dir, '._disabled_hgrc')) self.client.hgrc = {} 
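# NOTE: a hedged sketch of the isolation _run_hg() applies above. Pointing
# HGRCPATH at os.devnull and setting HGPLAIN=1 keeps user/system hgrc files
# and localized or styled output from leaking into test results. This uses
# plain subprocess instead of rbtools' execute(); assumes `hg` is installed.
import os
import subprocess

def run_hg_isolated(args):
    env = dict(os.environ, HGRCPATH=os.devnull, HGPLAIN='1')
    return subprocess.check_output(['hg'] + list(args), env=env)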
self.client._load_hgrc() ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertTrue(server is None) def test_scan_for_server_when_present_in_hgrc(self): """Testing MercurialClient scan_for_server when present in hgrc""" ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertEqual(self.TESTSERVER, server) def test_scan_for_server_reviewboardrc(self): """Testing MercurialClient scan_for_server when in .reviewboardrc""" rc = open(os.path.join(self.clone_dir, '.reviewboardrc'), 'w') rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER) rc.close() self.client.config = load_config() ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertEqual(self.TESTSERVER, server) def test_diff_simple(self): """Testing MercurialClient diff, simple case""" self._hg_add_file_commit('foo.txt', FOO1, 'delete and modify stuff') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '68c2bdccf52a4f0baddd0ac9f2ecb7d2') def test_diff_simple_multiple(self): """Testing MercurialClient diff with multiple commits""" self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') self._hg_add_file_commit('foo.txt', FOO3, 'commit 3') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '9c8796936646be5c7349973b0fceacbd') def test_diff_exclude(self): """Testing MercurialClient diff with file exclusion""" self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('exclude.txt', FOO2, 'commit 2') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['exclude.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '68c2bdccf52a4f0baddd0ac9f2ecb7d2') def test_diff_exclude_empty(self): """Testing MercurialClient diff with empty file exclusion""" self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('empty.txt', '', 'commit 2') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions, exclude_patterns=['empty.txt']) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '68c2bdccf52a4f0baddd0ac9f2ecb7d2') def test_diff_branch_diverge(self): """Testing MercurialClient diff with diverged branch""" self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._run_hg(['branch', 'diverged']) self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '6b12723baab97f346aa938005bc4da4d') self._run_hg(['update', '-C', 'default']) revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '68c2bdccf52a4f0baddd0ac9f2ecb7d2') def test_diff_parent_diff_simple(self): """Testing MercurialClient parent diffs with a simple case""" self._hg_add_file_commit('foo.txt', FOO1,
'commit 1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') self._hg_add_file_commit('foo.txt', FOO3, 'commit 3') revisions = self.client.parse_revision_spec(['2', '3']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('parent_diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '7a897f68a9dc034fc1e42fe7a33bb808') self.assertEqual(md5(result['parent_diff']).hexdigest(), '5cacbd79800a9145f982dcc0908b6068') def test_diff_parent_diff_branch_diverge(self): """Testing MercurialClient parent diffs with a diverged branch""" # This test is very similar to test_diff_parent_diff_simple except # we throw a branch into the mix. self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._run_hg(['branch', 'diverged']) self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') self._hg_add_file_commit('foo.txt', FOO3, 'commit 3') revisions = self.client.parse_revision_spec(['2', '3']) result = self.client.diff(revisions) self.assertTrue('parent_diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '7a897f68a9dc034fc1e42fe7a33bb808') self.assertEqual(md5(result['parent_diff']).hexdigest(), '5cacbd79800a9145f982dcc0908b6068') def test_diff_parent_diff_simple_with_arg(self): """Testing MercurialClient parent diffs with a diverged branch and --parent option""" # This test is very similar to test_diff_parent_diff_simple except # we use the --parent option to post without explicit revisions self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') self._hg_add_file_commit('foo.txt', FOO3, 'commit 3') self.options.parent_branch = '2' revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('parent_diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '7a897f68a9dc034fc1e42fe7a33bb808') self.assertEqual(md5(result['parent_diff']).hexdigest(), '5cacbd79800a9145f982dcc0908b6068') def test_parse_revision_spec_no_args(self): """Testing MercurialClient.parse_revision_spec with no arguments""" base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') tip = self._hg_get_tip() revisions = self.client.parse_revision_spec([]) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base) self.assertEqual(revisions['tip'], tip) def test_parse_revision_spec_one_arg_periods(self): """Testing MercurialClient.parse_revision_spec with r1..r2 syntax""" base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') tip = self._hg_get_tip() revisions = self.client.parse_revision_spec(['0..1']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base) self.assertEqual(revisions['tip'], tip) def test_parse_revision_spec_one_arg_colons(self): """Testing MercurialClient.parse_revision_spec with r1::r2 syntax""" base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') tip = self._hg_get_tip() revisions = self.client.parse_revision_spec(['0::1']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions)
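# NOTE: an illustrative summary (assumed helper, not rbtools API) of the
# parse_revision_spec() contract asserted throughout these tests: 'base'
# and 'tip' are always present, and 'parent_base' appears only when a
# parent diff must accompany the review diff.
def describe_revisions(revisions):
    if 'parent_base' in revisions:
        return ('parent diff covers (%(parent_base)s, %(base)s], '
                'review diff covers (%(base)s, %(tip)s]' % revisions)
    return 'review diff covers (%(base)s, %(tip)s]' % revisions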
self.assertEqual(revisions['base'], base) self.assertEqual(revisions['tip'], tip) def test_parse_revision_spec_one_arg(self): """Testing MercurialClient.parse_revision_spec with one revision""" base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') tip = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') revisions = self.client.parse_revision_spec(['1']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base) self.assertEqual(revisions['tip'], tip) def test_parse_revision_spec_two_args(self): """Testing MercurialClient.parse_revision_spec with two revisions""" base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') tip = self._hg_get_tip() revisions = self.client.parse_revision_spec(['0', '2']) self.assertTrue(isinstance(revisions, dict)) self.assertTrue('base' in revisions) self.assertTrue('tip' in revisions) self.assertTrue('parent_base' not in revisions) self.assertEqual(revisions['base'], base) self.assertEqual(revisions['tip'], tip) def test_parse_revision_spec_parent_base(self): """Testing MercurialClient.parse_revision_spec with parent base""" start_base = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') commit1 = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO2, 'commit 2') commit2 = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO3, 'commit 3') commit3 = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO4, 'commit 4') commit4 = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO5, 'commit 5') self.assertEqual( self.client.parse_revision_spec(['1', '2']), dict(base=commit1, tip=commit2, parent_base=start_base)) self.assertEqual( self.client.parse_revision_spec(['4']), dict(base=commit3, tip=commit4, parent_base=start_base, commit_id=commit4)) self.assertEqual( self.client.parse_revision_spec(['2', '4']), dict(base=commit2, tip=commit4, parent_base=start_base)) def test_guess_summary_description_one(self): """Testing MercurialClient guess summary & description 1 commit""" self.options.guess_summary = True self.options.guess_description = True self._hg_add_file_commit('foo.txt', FOO1, 'commit 1') revisions = self.client.parse_revision_spec([]) commit_message = self.client.get_commit_message(revisions) self.assertEqual(commit_message['summary'], 'commit 1') def test_guess_summary_description_two(self): """Testing MercurialClient guess summary & description 2 commits""" self.options.guess_summary = True self.options.guess_description = True self._hg_add_file_commit('foo.txt', FOO1, 'summary 1\n\nbody 1') self._hg_add_file_commit('foo.txt', FOO2, 'summary 2\n\nbody 2') revisions = self.client.parse_revision_spec([]) commit_message = self.client.get_commit_message(revisions) self.assertEqual(commit_message['summary'], 'summary 1') self.assertEqual(commit_message['description'], 'body 1\n\nsummary 2\n\nbody 2') def test_guess_summary_description_three(self): """Testing MercurialClient guess summary & description 3 commits""" self.options.guess_summary = True self.options.guess_description = True self._hg_add_file_commit('foo.txt', FOO1, 'commit 1\n\ndesc1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2\n\ndesc2') self._hg_add_file_commit('foo.txt', FOO3, 'commit 3\n\ndesc3') revisions = self.client.parse_revision_spec([]) commit_message = 
self.client.get_commit_message(revisions) self.assertEqual(commit_message['summary'], 'commit 1') self.assertEqual(commit_message['description'], 'desc1\n\ncommit 2\n\ndesc2\n\ncommit 3\n\ndesc3') def test_guess_summary_description_one_middle(self): """Testing MercurialClient guess summary & description middle commit""" self.options.guess_summary = True self.options.guess_description = True self._hg_add_file_commit('foo.txt', FOO1, 'commit 1\n\ndesc1') self._hg_add_file_commit('foo.txt', FOO2, 'commit 2\n\ndesc2') tip = self._hg_get_tip() self._hg_add_file_commit('foo.txt', FOO3, 'commit 3\n\ndesc3') revisions = self.client.parse_revision_spec([tip]) commit_message = self.client.get_commit_message(revisions) self.assertEqual(commit_message['summary'], 'commit 2') self.assertEqual(commit_message['description'], 'desc2') class MercurialSubversionClientTests(MercurialTestBase): """Unit tests for hgsubversion.""" TESTSERVER = "http://127.0.0.1:8080" def __init__(self, *args, **kwargs): self._tmpbase = '' self.clone_dir = '' self.svn_repo = '' self.svn_checkout = '' self.client = None self._svnserve_pid = 0 self._max_svnserve_pid_tries = 12 self._svnserve_port = os.environ.get('SVNSERVE_PORT') self._required_exes = ('svnadmin', 'svnserve', 'svn') MercurialTestBase.__init__(self, *args, **kwargs) def setUp(self): super(MercurialSubversionClientTests, self).setUp() self._hg_env = {'FOO': 'BAR'} # Make sure hgsubversion is enabled. # # This will modify the .hgrc in the temp home directory created # for these tests. # # The "hgsubversion =" tells Mercurial to check for hgsubversion # in the default PYTHONPATH. fp = open('%s/.hgrc' % os.environ['HOME'], 'w') fp.write('[extensions]\n') fp.write('hgsubversion =\n') fp.close() for exe in self._required_exes: if not self.is_exe_in_path(exe): raise SkipTest('missing svn stuff! giving up!') if not self._has_hgsubversion(): raise SkipTest('unable to use `hgsubversion` extension! ' 'giving up!') if not self._tmpbase: self._tmpbase = self.create_tmp_dir() self._create_svn_repo() self._fire_up_svnserve() self._fill_in_svn_repo() try: self._get_testing_clone() except (OSError, IOError): msg = 'could not clone from svn repo! skipping...' 
raise SkipTest(msg).with_traceback(sys.exc_info()[2]) self._spin_up_client() self._stub_in_config_and_options() def _has_hgsubversion(self): try: output = self._run_hg(['svn', '--help'], ignore_errors=True, extra_ignore_errors=(255,)) except OSError: return False return not re.search("unknown command ['\"]svn['\"]", output, re.I) def tearDown(self): super(MercurialSubversionClientTests, self).tearDown() os.kill(self._svnserve_pid, 9) def _svn_add_file_commit(self, filename, data, msg, add_file=True): outfile = open(filename, 'w') outfile.write(data) outfile.close() if add_file: execute(['svn', 'add', filename], ignore_errors=True) execute(['svn', 'commit', '-m', msg]) def _create_svn_repo(self): self.svn_repo = os.path.join(self._tmpbase, 'svnrepo') execute(['svnadmin', 'create', self.svn_repo]) def _fire_up_svnserve(self): if not self._svnserve_port: self._svnserve_port = str(randint(30000, 40000)) pid_file = os.path.join(self._tmpbase, 'svnserve.pid') execute(['svnserve', '--pid-file', pid_file, '-d', '--listen-port', self._svnserve_port, '-r', self._tmpbase]) for i in range(0, self._max_svnserve_pid_tries): try: self._svnserve_pid = int(open(pid_file).read().strip()) return except (IOError, OSError): time.sleep(0.25) # This will re-raise the last exception, which will be either # IOError or OSError if the above fails and this branch is reached raise def _fill_in_svn_repo(self): self.svn_checkout = os.path.join(self._tmpbase, 'checkout.svn') execute(['svn', 'checkout', 'file://%s' % self.svn_repo, self.svn_checkout]) os.chdir(self.svn_checkout) for subtree in ('trunk', 'branches', 'tags'): execute(['svn', 'mkdir', subtree]) execute(['svn', 'commit', '-m', 'filling in T/b/t']) os.chdir(os.path.join(self.svn_checkout, 'trunk')) for i, data in enumerate([FOO, FOO1, FOO2]): self._svn_add_file_commit('foo.txt', data, 'foo commit %s' % i, add_file=(i == 0)) def _get_testing_clone(self): self.clone_dir = os.path.join(self._tmpbase, 'checkout.hg') self._run_hg([ 'clone', 'svn://127.0.0.1:%s/svnrepo' % self._svnserve_port, self.clone_dir, ]) def _spin_up_client(self): os.chdir(self.clone_dir) self.client = MercurialClient(options=self.options) def _stub_in_config_and_options(self): self.options.parent_branch = None def testGetRepositoryInfoSimple(self): """Testing MercurialClient (+svn) get_repository_info, simple case""" ri = self.client.get_repository_info() self.assertEqual('svn', self.client._type) self.assertEqual('/trunk', ri.base_path) self.assertEqual('svn://127.0.0.1:%s/svnrepo' % self._svnserve_port, ri.path) def testCalculateRepositoryInfo(self): """Testing MercurialClient (+svn) _calculate_hgsubversion_repository_info properly determines repository and base paths""" info = ( "URL: svn+ssh://testuser@svn.example.net/repo/trunk\n" "Repository Root: svn+ssh://testuser@svn.example.net/repo\n" "Repository UUID: bfddb570-5023-0410-9bc8-bc1659bf7c01\n" "Revision: 9999\n" "Node Kind: directory\n" "Last Changed Author: user\n" "Last Changed Rev: 9999\n" "Last Changed Date: 2012-09-05 18:04:28 +0000 (Wed, 05 Sep 2012)") repo_info = self.client._calculate_hgsubversion_repository_info(info) self.assertEqual(repo_info.path, "svn+ssh://svn.example.net/repo") self.assertEqual(repo_info.base_path, "/trunk") def testScanForServerSimple(self): """Testing MercurialClient (+svn) scan_for_server, simple case""" ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertTrue(server is None) def testScanForServerReviewboardrc(self): """Testing MercurialClient (+svn)
scan_for_server in .reviewboardrc""" rc_filename = os.path.join(self.clone_dir, '.reviewboardrc') rc = open(rc_filename, 'w') rc.write('REVIEWBOARD_URL = "%s"' % self.TESTSERVER) rc.close() self.client.config = load_config() ri = self.client.get_repository_info() server = self.client.scan_for_server(ri) self.assertEqual(self.TESTSERVER, server) def testScanForServerProperty(self): """Testing MercurialClient (+svn) scan_for_server in svn property""" os.chdir(self.svn_checkout) execute(['svn', 'update']) execute(['svn', 'propset', 'reviewboard:url', self.TESTSERVER, self.svn_checkout]) execute(['svn', 'commit', '-m', 'adding reviewboard:url property']) os.chdir(self.clone_dir) self._run_hg(['pull']) self._run_hg(['update', '-C']) ri = self.client.get_repository_info() self.assertEqual(self.TESTSERVER, self.client.scan_for_server(ri)) def testDiffSimple(self): """Testing MercurialClient (+svn) diff, simple case""" self.client.get_repository_info() self._hg_add_file_commit('foo.txt', FOO4, 'edit 4') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '2eb0a5f2149232c43a1745d90949fcd5') self.assertEqual(result['parent_diff'], None) def testDiffSimpleMultiple(self): """Testing MercurialClient (+svn) diff with multiple commits""" self.client.get_repository_info() self._hg_add_file_commit('foo.txt', FOO4, 'edit 4') self._hg_add_file_commit('foo.txt', FOO5, 'edit 5') self._hg_add_file_commit('foo.txt', FOO6, 'edit 6') revisions = self.client.parse_revision_spec([]) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '3d007394de3831d61e477cbcfe60ece8') self.assertEqual(result['parent_diff'], None) def testDiffOfRevision(self): """Testing MercurialClient (+svn) diff specifying a revision""" self.client.get_repository_info() self._hg_add_file_commit('foo.txt', FOO4, 'edit 4', branch='b') self._hg_add_file_commit('foo.txt', FOO5, 'edit 5', branch='b') self._hg_add_file_commit('foo.txt', FOO6, 'edit 6', branch='b') self._hg_add_file_commit('foo.txt', FOO4, 'edit 7', branch='b') revisions = self.client.parse_revision_spec(['3']) result = self.client.diff(revisions) self.assertTrue(isinstance(result, dict)) self.assertTrue('diff' in result) self.assertEqual(md5(result['diff']).hexdigest(), '2eb0a5f2149232c43a1745d90949fcd5') self.assertEqual(result['parent_diff'], None) RBTools-0.7.11/rbtools/clients/cvs.py0000644000232200023220000001266413230242633017773 0ustar debalancedebalanceimport logging import os import re import socket from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError) from rbtools.utils.checks import check_install from rbtools.utils.diffs import filter_diff, normalize_patterns from rbtools.utils.process import execute class CVSClient(SCMClient): """ A wrapper around the cvs tool that fetches repository information and generates compatible diffs. 
""" name = 'CVS' supports_diff_exclude_patterns = True supports_patch_revert = True INDEX_FILE_RE = re.compile(b'^Index: (.+)\n$') REVISION_WORKING_COPY = '--rbtools-working-copy' def __init__(self, **kwargs): super(CVSClient, self).__init__(**kwargs) def get_repository_info(self): if not check_install(['cvs']): logging.debug('Unable to execute "cvs": skipping CVS') return None cvsroot_path = os.path.join('CVS', 'Root') if not os.path.exists(cvsroot_path): return None with open(cvsroot_path, 'r') as fp: repository_path = fp.read().strip() i = repository_path.find('@') if i != -1: repository_path = repository_path[i + 1:] i = repository_path.rfind(':') if i != -1: host = repository_path[:i] try: canon = socket.getfqdn(host) repository_path = repository_path.replace('%s:' % host, '%s:' % canon) except socket.error as msg: logging.error('failed to get fqdn for %s, msg=%s', host, msg) return RepositoryInfo(path=repository_path) def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip]. If a single revision is passed in, this will raise an exception, because CVS doesn't have a repository-wide concept of "revision", so selecting an individual "revision" doesn't make sense. With two revisions, this will treat those revisions as tags and do a diff between those tags. If zero revisions are passed in, this will return revisions relevant for the "current change". The exact definition of what "current" means is specific to each SCMTool backend, and documented in the implementation classes. The CVS SCMClient never fills in the 'parent_base' key. Users who are using other patch-stack tools who want to use parent diffs with CVS will have to generate their diffs by hand. Because `cvs diff` uses multiple arguments to define multiple tags, there's no single-argument/multiple-revision syntax available. """ n_revs = len(revisions) if n_revs == 0: return { 'base': 'BASE', 'tip': self.REVISION_WORKING_COPY, } elif n_revs == 1: raise InvalidRevisionSpecError( 'CVS does not support passing in a single revision.') elif n_revs == 2: return { 'base': revisions[0], 'tip': revisions[1], } else: raise TooManyRevisionsError return { 'base': None, 'tip': None, } def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """Get the diff for the given revisions. If revision_spec is empty, this will return the diff for the modified files in the working directory. If it's not empty and contains two revisions, this will do a diff between those revisions. """ # CVS paths are always relative to the current working directory. cwd = os.getcwd() exclude_patterns = normalize_patterns(exclude_patterns, cwd, cwd) include_files = include_files or [] # Diff returns "1" if differences were found. 
diff_cmd = ['cvs', 'diff', '-uN'] base = revisions['base'] tip = revisions['tip'] if (not (base == 'BASE' and tip == self.REVISION_WORKING_COPY)): diff_cmd.extend(['-r', base, '-r', tip]) diff = execute(diff_cmd + include_files, extra_ignore_errors=(1,), log_output_on_error=False, split_lines=True) if exclude_patterns: # CVS diffs are relative to the current working directory, so the # base_dir parameter to filter_diff is unnecessary. diff = filter_diff(diff, self.INDEX_FILE_RE, exclude_patterns, base_dir=cwd) return { 'diff': b''.join(diff) } RBTools-0.7.11/rbtools/clients/__init__.py0000644000232200023220000004271713230242633020741 0ustar debalancedebalancefrom __future__ import print_function, unicode_literals import logging import pkg_resources import re import six import sys from rbtools.clients.errors import SCMError from rbtools.utils.process import execute # The clients are lazy loaded via load_scmclients() SCMCLIENTS = None class PatchResult(object): """The result of a patch operation. This stores state on whether the patch could be applied (fully or partially), whether there are conflicts that can be resolved (as in conflict markers, not reject files), which files conflicted, and the patch output. """ def __init__(self, applied, has_conflicts=False, conflicting_files=[], patch_output=None): self.applied = applied self.has_conflicts = has_conflicts self.conflicting_files = conflicting_files self.patch_output = patch_output class SCMClient(object): """A base representation of an SCM tool. These are used for fetching repository information and generating diffs. """ name = None supports_diff_extra_args = False supports_diff_exclude_patterns = False supports_patch_revert = False can_amend_commit = False can_merge = False can_push_upstream = False can_delete_branch = False def __init__(self, config=None, options=None): self.config = config or {} self.options = options self.capabilities = None def get_repository_info(self): return None def check_options(self): pass def get_changenum(self, revisions): """Return the change number for the given revisions. This is only used when the client is supposed to send a change number to the server (such as with Perforce). Args: revisions (dict): A revisions dictionary as returned by ``parse_revision_spec``. Returns: unicode: The change number to send to the Review Board server. """ return None def scan_for_server(self, repository_info): """Find the server path. This will search for the server name in the .reviewboardrc config files. These are loaded with the current directory first, and searching through each parent directory, and finally $HOME/.reviewboardrc last. """ return self._get_server_from_config(self.config, repository_info) def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. 'parent_base': (optional) The revision to use as the base of a parent diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip], and the parent diff (if necessary) will include (parent, base]. 
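For example (an illustrative sketch only; the base class itself returns no revisions), a DVCS backend given ['my-branch'] might produce {'base': 'abc123', 'tip': 'def456', 'parent_base': '789abc'}, where abc123 is the parent commit of the branch head def456 and 789abc is the upstream commit that a parent diff would start from.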
If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return revisions relevant for the "current change". The exact definition of what "current" means is specific to each SCMTool backend, and documented in the implementation classes. """ return { 'base': None, 'tip': None, } def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """ Returns the generated diff and optional parent diff for this repository. The return value must be a dictionary, and must have, at a minimum, a 'diff' field. A 'parent_diff' can also be provided. It may also return 'base_commit_id', representing the revision/ID of the commit that the diff or parent diff is based on. This exists because in some diff formats, this may different from what's provided in the diff. """ return { 'diff': None, 'parent_diff': None, 'base_commit_id': None, } def _get_server_from_config(self, config, repository_info): if 'REVIEWBOARD_URL' in config: return config['REVIEWBOARD_URL'] elif 'TREES' in config: trees = config['TREES'] if not isinstance(trees, dict): raise ValueError('"TREES" in config file is not a dict!') # If repository_info is a list, check if any one entry is in trees. path = None if isinstance(repository_info.path, list): for path in repository_info.path: if path in trees: break else: path = None elif repository_info.path in trees: path = repository_info.path if path and 'REVIEWBOARD_URL' in trees[path]: return trees[path]['REVIEWBOARD_URL'] return None def _get_p_number(self, base_path, base_dir): """Return the appropriate value for the -p argument to patch. This function returns an integer. If the integer is -1, then the -p option should not be provided to patch. Otherwise, the return value is the argument to patch -p. """ if base_path and base_dir.startswith(base_path): return base_path.count('/') + 1 else: return -1 def _strip_p_num_slashes(self, files, p_num): """Strips the smallest prefix containing p_num slashes from file names. To match the behavior of the patch -pX option, adjacent slashes are counted as a single slash. """ if p_num > 0: regex = re.compile(r'[^/]*/+') return [regex.sub('', f, p_num) for f in files] else: return files def _execute(self, cmd, *args, **kwargs): """ Prints the results of the executed command and returns the data result from execute. """ return execute(cmd, ignore_errors=True, *args, **kwargs) def has_pending_changes(self): """Checks if there are changes waiting to be committed. Derived classes should override this method if they wish to support checking for pending changes. """ raise NotImplementedError def apply_patch(self, patch_file, base_path, base_dir, p=None, revert=False): """Apply the patch and return a PatchResult indicating its success.""" # Figure out the -p argument for patch. We override the calculated # value if it is supplied via a commandline option. p_num = p or self._get_p_number(base_path, base_dir) cmd = ['patch'] if revert: cmd.append('-R') if p_num >= 0: cmd.append('-p%d' % p_num) cmd.extend(['-i', six.text_type(patch_file)]) # Ignore return code 2 in case the patch file consists of only empty # files, which 'patch' can't handle. Other 'patch' errors also give # return code 2, so we must check the command output. 
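# A hypothetical example of the command built above: with # base_path='/trunk' and base_dir='/trunk/src', _get_p_number() returns # 2, so cmd is ['patch', '-p2', '-i', 'change.diff'], or # ['patch', '-R', '-p2', '-i', 'change.diff'] when reverting.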
rc, patch_output = execute(cmd, extra_ignore_errors=(2,), return_error_code=True) only_garbage_in_patch = ('patch: **** Only garbage was found in the ' 'patch input.\n') if (patch_output and patch_output.startswith('patch: **** ') and patch_output != only_garbage_in_patch): raise SCMError('Failed to execute command: %s\n%s' % (cmd, patch_output)) # Check the patch for any added/deleted empty files to handle. if self.supports_empty_files(): try: with open(patch_file, 'rb') as f: patch = f.read() except IOError as e: logging.error('Unable to read file %s: %s', patch_file, e) return patched_empty_files = self.apply_patch_for_empty_files( patch, p_num, revert=revert) # If there are no empty files in a "garbage-only" patch, the patch # is probably malformed. if (patch_output == only_garbage_in_patch and not patched_empty_files): raise SCMError('Failed to execute command: %s\n%s' % (cmd, patch_output)) # TODO: Should this take into account apply_patch_for_empty_files ? # The return value of that function is False both when it fails # and when there are no empty files. return PatchResult(applied=(rc == 0), patch_output=patch_output) def create_commit(self, message, author, run_editor, files=[], all_files=False): """Creates a commit based on the provided message and author. Derived classes should override this method if they wish to support committing changes to their repositories. """ raise NotImplementedError def get_commit_message(self, revisions): """Returns the commit message from the commits in the given revisions. This pulls out the first line from the commit messages of the given revisions. That is then used as the summary. """ commit_message = self.get_raw_commit_message(revisions) lines = commit_message.splitlines() if not lines: return None result = { 'summary': lines[0], } # Try to pull the body of the commit out of the full commit # description, so that we can skip the summary. if len(lines) >= 3 and lines[0] and not lines[1]: result['description'] = '\n'.join(lines[2:]).strip() else: result['description'] = commit_message return result def delete_branch(self, branch_name, merged_only=True): """Deletes the specified branch. If merged_only is False, then the branch will be deleted even if not yet merged into an upstream branch. """ raise NotImplementedError def merge(self, target, destination, message, author, squash=False, run_editor=False): """Merges the target branch with destination branch.""" raise NotImplementedError def push_upstream(self, remote_branch): """Pushes the current branch to upstream.""" raise NotImplementedError def get_raw_commit_message(self, revisions): """Extracts the commit messages on the commits in the given revisions. Derived classes should override this method in order to allow callers to fetch commit messages. This is needed for description guessing. If a derived class is unable to fetch the description, ``None`` should be returned. Callers that need to differentiate the summary from the description should instead use get_commit_message(). """ raise NotImplementedError def get_current_branch(self): """Returns the repository branch name of the current directory. Derived classes should override this method if they are able to determine the current branch of the working directory. If a derived class is unable to unable to determine the branch, ``None`` should be returned. """ raise NotImplementedError def supports_empty_files(self): """Check if the RB server supports added/deleted empty files. This method returns False. 
To change this behaviour, override it in a subclass. """ return False def apply_patch_for_empty_files(self, patch, p_num, revert=False): """Return True if any empty files in the patch are applied. If there are no empty files in the patch or if an error occurs while applying the patch, we return False. """ raise NotImplementedError def amend_commit_description(self, message, revisions=None): """Update a commit message to the given string. The optional revisions argument exists to provide compatibility with SCMs that allow modification of multiple changesets at any given time. It takes a parsed revision spec, and will amend the change referenced by the tip revision therein. """ raise NotImplementedError class RepositoryInfo(object): """ A representation of a source code repository. """ def __init__(self, path=None, base_path=None, supports_changesets=False, supports_parent_diffs=False): self.path = path self.base_path = base_path self.supports_changesets = supports_changesets self.supports_parent_diffs = supports_parent_diffs logging.debug('repository info: %s' % self) def __str__(self): return 'Path: %s, Base path: %s, Supports changesets: %s' % \ (self.path, self.base_path, self.supports_changesets) def set_base_path(self, base_path): if not base_path.startswith('/'): base_path = '/' + base_path logging.debug('changing repository info base_path from %s to %s', (self.base_path, base_path)) self.base_path = base_path def find_server_repository_info(self, server): """ Try to find the repository from the list of repositories on the server. For Subversion, this could be a repository with a different URL. For all other clients, this is a noop. """ return self def load_scmclients(config, options): global SCMCLIENTS SCMCLIENTS = {} for ep in pkg_resources.iter_entry_points(group='rbtools_scm_clients'): try: client = ep.load()(config=config, options=options) client.entrypoint_name = ep.name SCMCLIENTS[ep.name] = client except Exception: logging.exception('Could not load SCM Client "%s"', ep.name) def scan_usable_client(config, options, client_name=None): from rbtools.clients.perforce import PerforceClient repository_info = None tool = None # TODO: We should only load all of the scm clients if the # client_name isn't provided. if SCMCLIENTS is None: load_scmclients(config, options) if client_name: if client_name not in SCMCLIENTS: logging.error('The provided repository type "%s" is invalid.' % client_name) sys.exit(1) else: scmclients = { client_name: SCMCLIENTS[client_name] } else: scmclients = SCMCLIENTS for name, tool in six.iteritems(scmclients): logging.debug('Checking for a %s repository...' % tool.name) repository_info = tool.get_repository_info() if repository_info: break if not repository_info: if client_name: logging.error('The provided repository type was not detected ' 'in the current directory.') elif getattr(options, 'repository_url', None): logging.error('No supported repository could be accessed at ' 'the supplied url.') else: logging.error('The current directory does not contain a checkout ' 'from a supported source code repository.') sys.exit(1) # Verify that options specific to an SCM Client have not been mis-used. 
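# For example (illustrative): 'rbt post --p4-client=my_client' inside a # Git checkout would otherwise be silently ignored, so the checks below # reject such mismatched combinations explicitly.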
if (getattr(options, 'change_only', False) and not repository_info.supports_changesets): sys.stderr.write('The --change-only option is not valid for the ' 'current SCM client.\n') sys.exit(1) if (getattr(options, 'parent_branch', None) and not repository_info.supports_parent_diffs): sys.stderr.write('The --parent option is not valid for the ' 'current SCM client.\n') sys.exit(1) if (not isinstance(tool, PerforceClient) and (getattr(options, 'p4_client', None) or getattr(options, 'p4_port', None))): sys.stderr.write('The --p4-client and --p4-port options are not valid ' 'for the current SCM client.\n') sys.exit(1) return (repository_info, tool) def print_clients(config, options): """Print the supported detected SCM clients. Each SCM client, including those provided by third party packages, will be printed. Additionally, SCM clients which are detected in the current directory will be highlighted. """ print('The following repository types are supported by this installation') print('of RBTools. Each "<type>" may be used as a value for the') print('"--repository-type=<type>" command line argument. Repository types') print('which are detected in the current directory are marked with a "*"') print('[*] "<type>": <Name>') if SCMCLIENTS is None: load_scmclients(config, options) for name, tool in six.iteritems(SCMCLIENTS): repository_info = tool.get_repository_info() if repository_info: print(' * "%s": %s' % (name, tool.name)) else: print(' "%s": %s' % (name, tool.name)) RBTools-0.7.11/rbtools/clients/clearcase.py0000644000232200023220000011631413230242633021117 0ustar debalancedebalanceimport datetime import itertools import logging import os import six import sys import threading from collections import deque from pkg_resources import parse_version from rbtools.api.errors import APIError from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import InvalidRevisionSpecError, SCMError from rbtools.utils.checks import check_gnu_diff, check_install from rbtools.utils.filesystem import make_tempfile from rbtools.utils.process import execute # This specific import is necessary to handle the paths for # cygwin-enabled machines. if (sys.platform.startswith('win') or sys.platform.startswith('cygwin')): import ntpath as cpath else: import posixpath as cpath class get_elements_from_label_thread(threading.Thread): def __init__(self, threadID, dir_name, label, elements): self.threadID = threadID self.dir_name = dir_name self.elements = elements # Remove any trailing vobstag not supported by cleartool find. try: label, vobstag = label.rsplit('@', 1) except: pass self.label = label if sys.platform.startswith('win'): self.cc_xpn = '%CLEARCASE_XPN%' else: self.cc_xpn = '$CLEARCASE_XPN' threading.Thread.__init__(self) def run(self): """Returns a dictionary of ClearCase elements (oid + version) belonging to a label and identified by path. """ output = execute( ['cleartool', 'find', self.dir_name, '-version', 'lbtype(%s)' % self.label, '-exec', r'cleartool describe -fmt "%On\t%En\t%Vn\n" ' + self.cc_xpn], extra_ignore_errors=(1,), with_errors=False) for line in output.split('\n'): # Do not process empty lines. if not line: continue oid, path, version = line.split('\t', 2) self.elements[path] = { 'oid': oid, 'version': version, } class ClearCaseClient(SCMClient): """ A wrapper around the ClearCase tool that fetches repository information and generates compatible diffs. This client assumes that Cygwin is installed on Windows.
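Illustrative revision specs accepted by this client: 'rbt post' (the checked-out changeset), 'rbt post activity:my_activity', 'rbt post brtype:my_branch', 'rbt post lbtype:LABEL_1 lbtype:LABEL_2' (two labels require a dynamic view), and 'rbt post file.c@@/main/2:file.c@@/main/4' pairs.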
""" name = 'ClearCase' viewtype = None supports_patch_revert = True REVISION_ACTIVITY_BASE = '--rbtools-activity-base' REVISION_ACTIVITY_PREFIX = 'activity:' REVISION_BRANCH_BASE = '--rbtools-branch-base' REVISION_BRANCH_PREFIX = 'brtype:' REVISION_CHECKEDOUT_BASE = '--rbtools-checkedout-base' REVISION_CHECKEDOUT_CHANGESET = '--rbtools-checkedout-changeset' REVISION_FILES = '--rbtools-files' REVISION_LABEL_BASE = '--rbtools-label-base' REVISION_LABEL_PREFIX = 'lbtype:' def __init__(self, **kwargs): super(ClearCaseClient, self).__init__(**kwargs) def get_repository_info(self): """Returns information on the Clear Case repository. This will first check if the cleartool command is installed and in the path, and that the current working directory is inside of the view. """ if not check_install(['cleartool', 'help']): logging.debug('Unable to execute "cleartool help": skipping ' 'ClearCase') return None viewname = execute(["cleartool", "pwv", "-short"]).strip() if viewname.startswith('** NONE'): return None # Now that we know it's ClearCase, make sure we have GNU diff # installed, and error out if we don't. check_gnu_diff() property_lines = execute( ["cleartool", "lsview", "-full", "-properties", "-cview"], split_lines=True) for line in property_lines: properties = line.split(' ') if properties[0] == 'Properties:': # Determine the view type and check if it's supported. # # Specifically check if webview was listed in properties # because webview types also list the 'snapshot' # entry in properties. if 'webview' in properties: raise SCMError('Webviews are not supported. You can use ' 'rbt commands only in dynamic or snapshot ' 'views.') if 'dynamic' in properties: self.viewtype = 'dynamic' else: self.viewtype = 'snapshot' break # Find current VOB's tag vobstag = execute(["cleartool", "describe", "-short", "vob:."], ignore_errors=True).strip() if "Error: " in vobstag: raise SCMError("Failed to generate diff run rbt inside vob.") root_path = execute(["cleartool", "pwv", "-root"], ignore_errors=True).strip() if "Error: " in root_path: raise SCMError("Failed to generate diff run rbt inside view.") # From current working directory cut path to VOB. On Windows # and under cygwin, the VOB tag contains the VOB's path including # name, e.g. `\new_proj` for a VOB `new_proj` mounted at the root # of a drive. On Unix, the VOB tag is similar, but with a different # path separator, e.g. `/vobs/new_proj` for our new_proj VOB mounted # at `/vobs`. cwd = os.getcwd() base_path = cwd[:len(root_path) + len(vobstag)] return ClearCaseRepositoryInfo(path=base_path, base_path=base_path, vobstag=vobstag, supports_parent_diffs=False) def _determine_branch_path(self, version_path): """Determine branch path of revision. """ branch_path, number = cpath.split(version_path) return branch_path def _list_checkedout(self, path): """List all checked out elements in current view below path. Run cleartool command twice because: -recurse finds checked out elements under path except path whereas -directory detect only if path directory is checked out. """ checkedout_elements = [] for option in ['-recurse', '-directory']: # We ignore return code 1 in order to omit files that ClearCase # cannot read. 
output = execute(['cleartool', 'lscheckout', option, '-cview', '-fmt', r'%En@@%Vn\n', path], split_lines=True, extra_ignore_errors=(1,), with_errors=False) if output: checkedout_elements.extend(output) logging.debug(output) return checkedout_elements def _is_a_label(self, label, vobstag=None): """Return True when label is a valid ClearCase lbtype. Raise an error when the expected vobstag does not match. """ label_vobstag = None # Try to find any vobstag. try: label, label_vobstag = label.rsplit('@', 1) except: pass # Make sure the label is prefixed by lbtype, as required by cleartool describe. if not label.startswith(self.REVISION_LABEL_PREFIX): label = '%s%s' % (self.REVISION_LABEL_PREFIX, label) # If a vobstag is defined, check that it matches the one extracted from # the label; otherwise, raise an exception. if vobstag and label_vobstag and label_vobstag != vobstag: raise Exception('label vobstag %s does not match expected vobstag ' '%s' % (label_vobstag, vobstag)) # Finally, check whether the label exists in the database, otherwise quit. # Return code 1 is ignored; it means the label does not exist. output = execute(['cleartool', 'describe', '-short', label], extra_ignore_errors=(1,), with_errors=False) return bool(output) def _get_tmp_label(self): """Generate a string that will be used to set a ClearCase label.""" now = datetime.datetime.now() temporary_label = 'Current_%d_%d_%d_%d_%d_%d_%d' % ( now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond) return temporary_label def _set_label(self, label, path): """Set a ClearCase label on elements seen under path.""" checkedout_elements = self._list_checkedout(path) if checkedout_elements: raise Exception( 'ClearCase backend cannot set label when some elements are ' 'checked out:\n%s' % ''.join(checkedout_elements)) # First create the label in the VOB database. execute(['cleartool', 'mklbtype', '-c', 'label created for rbtools', label], with_errors=True) # We ignore return code 1 in order to omit files that ClearCase cannot # read. recursive_option = '' if cpath.isdir(path): recursive_option = '-recurse' # Apply the label to the path. execute(['cleartool', 'mklabel', '-nc', recursive_option, label, path], extra_ignore_errors=(1,), with_errors=False) def _remove_label(self, label): """Remove a ClearCase label from the VOB database. This removes all references to the label on elements. """ # Make sure the label is prefixed by lbtype. if not label.startswith(self.REVISION_LABEL_PREFIX): label = '%s%s' % (self.REVISION_LABEL_PREFIX, label) # The label exists, so remove it. execute(['cleartool', 'rmtype', '-rmall', '-force', label], with_errors=True) def _determine_version(self, version_path): """Determine the numeric version of a revision. CHECKEDOUT is mapped to infinity so that it is always treated as the highest possible version of a file. CHECKEDOUT, in ClearCase, is something like HEAD. """ branch, number = cpath.split(version_path) if number == 'CHECKEDOUT': return float('inf') return int(number) def _construct_extended_path(self, path, version): """Combine an extended path from a path and version. CHECKEDOUT must be removed because this version doesn't exist in MVFS (the ClearCase dynamic view file system).
Only way to get content of checked out file is to use filename only.""" if not version or version.endswith('CHECKEDOUT'): return path return "%s@@%s" % (path, version) def _construct_revision(self, branch_path, version_number): """Combine revision from branch_path and version_number.""" return cpath.join(branch_path, version_number) def parse_revision_spec(self, revisions): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. These will be used to generate the diffs to upload to Review Board (or print). There are many different ways to generate diffs for clearcase, because there are so many different workflows. This method serves more as a way to validate the passed-in arguments than actually parsing them in the way that other clients do. """ n_revs = len(revisions) if n_revs == 0: return { 'base': self.REVISION_CHECKEDOUT_BASE, 'tip': self.REVISION_CHECKEDOUT_CHANGESET, } elif n_revs == 1: if revisions[0].startswith(self.REVISION_ACTIVITY_PREFIX): return { 'base': self.REVISION_ACTIVITY_BASE, 'tip': revisions[0][len(self.REVISION_ACTIVITY_PREFIX):], } if revisions[0].startswith(self.REVISION_BRANCH_PREFIX): return { 'base': self.REVISION_BRANCH_BASE, 'tip': revisions[0][len(self.REVISION_BRANCH_PREFIX):], } if revisions[0].startswith(self.REVISION_LABEL_PREFIX): return { 'base': self.REVISION_LABEL_BASE, 'tip': [revisions[0][len(self.REVISION_BRANCH_PREFIX):]], } # TODO: # stream:streamname[@pvob] => review changes in this UCM stream # (UCM "branch") # baseline:baseline[@pvob] => review changes between this baseline # and the working directory elif n_revs == 2: if self.viewtype != 'dynamic': raise SCMError('To generate a diff using multiple revisions, ' 'you must use a dynamic view.') if (revisions[0].startswith(self.REVISION_LABEL_PREFIX) and revisions[1].startswith(self.REVISION_LABEL_PREFIX)): return { 'base': self.REVISION_LABEL_BASE, 'tip': [x[len(self.REVISION_BRANCH_PREFIX):] for x in revisions], } # TODO: # baseline:baseline1[@pvob] baseline:baseline2[@pvob] # => review changes between these two # baselines pass pairs = [] for r in revisions: p = r.split(':') if len(p) != 2: raise InvalidRevisionSpecError( '"%s" is not a valid file@revision pair' % r) pairs.append(p) return { 'base': self.REVISION_FILES, 'tip': pairs, } def _sanitize_activity_changeset(self, changeset): """Return changeset containing non-binary, branched file versions. A UCM activity changeset contains all file revisions created/touched during this activity. File revisions are ordered earlier versions first in the format: changelist = [ @@/, ..., @@/ ] is relative path to file is clearcase specific branch path to file revision is the version number of the file in . A UCM activity changeset can contain changes from different vobs, however reviewboard supports only changes from a single repo at the same time, so changes made outside of the current vobstag will be ignored. """ changelist = {} # Maybe we should be able to access repository_info without calling # cleartool again. 
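# Each entry in the changeset is an extended path; a hypothetical # example: /vobs/proj/src/foo.c@@/main/my_activity/2, which splits on # '@@' into the element path and its branch path plus numeric version.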
repository_info = self.get_repository_info() for change in changeset: path, current = change.split('@@') # If a file isn't in the correct VOB, then ignore it. if path.find("%s/" % (repository_info.vobstag,)) == -1: logging.debug("Vobstag does not match, so ignoring changes on %s" % path) continue version_number = self._determine_version(current) if path not in changelist: changelist[path] = { 'highest': version_number, 'lowest': version_number, 'current': current, } if version_number == 0: raise SCMError('Unexpected version_number=0 in activity ' 'changeset') elif version_number > changelist[path]['highest']: changelist[path]['highest'] = version_number changelist[path]['current'] = current elif version_number < changelist[path]['lowest']: changelist[path]['lowest'] = version_number # Convert to a list. changeranges = [] for path, version in six.iteritems(changelist): # The previous version is the predecessor of the lowest one, i.e. its # version number decreased by 1. branch_path = self._determine_branch_path(version['current']) prev_version_number = str(int(version['lowest']) - 1) version['previous'] = self._construct_revision(branch_path, prev_version_number) changeranges.append( (self._construct_extended_path(path, version['previous']), self._construct_extended_path(path, version['current'])) ) return changeranges def _sanitize_branch_changeset(self, changeset): """Return changeset containing non-binary, branched file versions. The changeset contains only the first and last version of each file made on the branch. """ changelist = {} for path, previous, current in changeset: version_number = self._determine_version(current) if path not in changelist: changelist[path] = { 'highest': version_number, 'current': current, 'previous': previous } if version_number == 0: # The previous version of a version 0 on a branch is its base. changelist[path]['previous'] = previous elif version_number > changelist[path]['highest']: changelist[path]['highest'] = version_number changelist[path]['current'] = current # Convert to a list. changeranges = [] for path, version in six.iteritems(changelist): changeranges.append( (self._construct_extended_path(path, version['previous']), self._construct_extended_path(path, version['current'])) ) return changeranges def _sanitize_checkedout_changeset(self, changeset): """Return changeset containing non-binary, checked-out file versions.""" changeranges = [] for path, previous, current in changeset: changeranges.append( (self._construct_extended_path(path, previous), self._construct_extended_path(path, current)) ) return changeranges def _sanitize_version_0_file(self, file_revision): """Replace the file version with its predecessor when the version is 0, except for /main/0.""" # There is no predecessor for @@/main/0, so keep the current revision. if file_revision.endswith("@@/main/0"): return file_revision if file_revision.endswith("/0"): logging.debug("Found file %s with version 0", file_revision) file_revision = execute(["cleartool", "describe", "-fmt", "%En@@%PSn", file_revision]) logging.debug("Sanitized with predecessor, new file: %s", file_revision) return file_revision def _sanitize_version_0_changeset(self, changeset): """Return the changeset sanitized of its /0 versions. The predecessor of a /0 version should already be available from a previous VOB synchronization in a multi-site context. """ sanitized_changeset = [] for old_file, new_file in changeset: # This should not happen for a new file, but it is safer to sanitize # both file revisions.
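# A hypothetical example: foo.c@@/main/dev_branch/0 is replaced by its # predecessor as reported by 'cleartool describe -fmt %En@@%PSn', e.g. # foo.c@@/main/4, while foo.c@@/main/0 is kept as-is.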
sanitized_changeset.append( (self._sanitize_version_0_file(old_file), self._sanitize_version_0_file(new_file))) return sanitized_changeset def _directory_content(self, path): """Return directory content ready for saving to a tempfile.""" # Get the absolute path of each element located in path, but only # ClearCase elements => -vob_only output = execute(["cleartool", "ls", "-short", "-nxname", "-vob_only", path]) lines = output.splitlines(True) content = [] # The previous command returns absolute file paths, but only file names # are required. for absolute_path in lines: short_path = os.path.basename(absolute_path.strip()) content.append(short_path) return ''.join([ '%s\n' % s for s in sorted(content)]) def _construct_changeset(self, output): return [ info.split('\t') for info in output.strip().split('\n') ] def _get_checkedout_changeset(self): """Return information about the checked-out changeset. This function returns: the kind of element, the path to the file, and the previous and current file versions. """ changeset = [] # We ignore return code 1 in order to omit files that ClearCase can't # read. output = execute(['cleartool', 'lscheckout', '-all', '-cview', '-me', '-fmt', r'%En\t%PVn\t%Vn\n'], extra_ignore_errors=(1,), with_errors=False) if output: changeset = self._construct_changeset(output) return self._sanitize_checkedout_changeset(changeset) def _get_activity_changeset(self, activity): """Return information about the versions changed in an activity. This takes into account the changes attached to this activity (including rebase changes) in all VOBs of the current view. """ changeset = [] # Get the list of revisions and get the diff of each one. Return code 1 is # ignored in order to omit files that ClearCase can't read. output = execute(['cleartool', 'lsactivity', '-fmt', '%[versions]p', activity], extra_ignore_errors=(1,), with_errors=False) if output: # A UCM activity changeset is split by spaces, not by EOL, so we # cannot reuse self._construct_changeset here. changeset = output.split() return self._sanitize_activity_changeset(changeset) def _get_branch_changeset(self, branch): """Return information about the versions changed on a branch. This takes into account the changes on the branch owned by the current user in all VOBs of the current view. """ changeset = [] # We ignore return code 1 in order to omit files that ClearCase can't # read. if sys.platform.startswith('win'): CLEARCASE_XPN = '%CLEARCASE_XPN%' else: CLEARCASE_XPN = '$CLEARCASE_XPN' output = execute( [ "cleartool", "find", "-all", "-version", "brtype(%s)" % branch, "-exec", 'cleartool descr -fmt "%%En\t%%PVn\t%%Vn\n" %s' % CLEARCASE_XPN ], extra_ignore_errors=(1,), with_errors=False) if output: changeset = self._construct_changeset(output) return self._sanitize_branch_changeset(changeset) def _get_label_changeset(self, labels): """Return information about the versions changed between labels. This takes into account the changes done between labels and restricts the analysis to the current working directory. A ClearCase label belongs to a unique VOB. """ changeset = [] tmp_labels = [] # Initialize comparison_path to the current working directory. # TODO: support another argument to manage a different comparison path. comparison_path = os.getcwd() error_message = None try: # Unless the user has provided 2 labels, set a temporary label on the # currently seen version of the comparison_path directory. It will be # used to process the changeset, since ClearCase can easily identify # each file and its associated version belonging to a label.
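# A hypothetical example: 'rbt post lbtype:RELEASE_1' sets a temporary # label such as Current_2018_1_15_10_30_0_0 on the working directory # and then compares RELEASE_1 against that temporary label.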
if len(labels) == 1: tmp_lb = self._get_tmp_label() tmp_labels.append(tmp_lb) self._set_label(tmp_lb, comparison_path) labels.append(tmp_lb) label_count = len(labels) if label_count != 2: raise Exception( 'ClearCase label comparison does not support %d labels' % label_count) # Now we get 2 labels for comparison, check if they are both valid. repository_info = self.get_repository_info() for label in labels: if not self._is_a_label(label, repository_info.vobstag): raise Exception( 'ClearCase label %s is not a valid label' % label) previous_label, current_label = labels logging.debug('Comparison between labels %s and %s on %s' % (previous_label, current_label, comparison_path)) # List ClearCase element path and version belonging to previous and # current labels, element path is the key of each dict. previous_elements = {} current_elements = {} previous_label_elements_thread = get_elements_from_label_thread( 1, comparison_path, previous_label, previous_elements) previous_label_elements_thread.start() current_label_elements_thread = get_elements_from_label_thread( 2, comparison_path, current_label, current_elements) current_label_elements_thread.start() previous_label_elements_thread.join() current_label_elements_thread.join() seen = [] changelist = {} # Iterate on each ClearCase path in order to find respective # previous and current version. for path in itertools.chain(previous_elements.keys(), current_elements.keys()): if path in seen: continue seen.append(path) # Initialize previous and current version to "/main/0" changelist[path] = { 'previous': '/main/0', 'current': '/main/0', } if path in current_elements: changelist[path]['current'] = \ current_elements[path]['version'] if path in previous_elements: changelist[path]['previous'] = \ previous_elements[path]['version'] logging.debug('path: %s\nprevious: %s\ncurrent: %s\n' % (path, changelist[path]['previous'], changelist[path]['current'])) # Prevent adding identical version to comparison. if changelist[path]['current'] == changelist[path]['previous']: continue changeset.append( (self._construct_extended_path( path, changelist[path]['previous']), self._construct_extended_path( path, changelist[path]['current']))) except Exception as e: error_message = str(e) finally: # Delete all temporary labels. for lb in tmp_labels: if self._is_a_label(lb): self._remove_label(lb) if error_message: raise SCMError('Label comparison failed:\n%s' % error_message) return changeset def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): if include_files: raise Exception( 'The ClearCase backend does not currently support the ' '-I/--include parameter. To diff for specific files, pass in ' 'file@revision1:file@revision2 pairs as arguments') if revisions['tip'] == self.REVISION_CHECKEDOUT_CHANGESET: changeset = self._get_checkedout_changeset() return self._do_diff(changeset) elif revisions['base'] == self.REVISION_ACTIVITY_BASE: changeset = self._get_activity_changeset(revisions['tip']) return self._do_diff(changeset) elif revisions['base'] == self.REVISION_BRANCH_BASE: changeset = self._get_branch_changeset(revisions['tip']) return self._do_diff(changeset) elif revisions['base'] == self.REVISION_LABEL_BASE: changeset = self._get_label_changeset(revisions['tip']) return self._do_diff(changeset) elif revisions['base'] == self.REVISION_FILES: include_files = revisions['tip'] return self._do_diff(include_files) else: assert False def _diff_files(self, old_file, new_file): """Return unified diff for file. 
The most effective and reliable way is to use GNU diff. """ # In a snapshot view, diff can't access historical ClearCase file # versions, so copy the ClearCase files to temporary files with # 'cleartool get -to dest-pname pname' and diff against the new # temporary copies. if self.viewtype == 'snapshot': # Create the temporary files first. tmp_old_file = make_tempfile() tmp_new_file = make_tempfile() # Delete them so cleartool can write to them. try: os.remove(tmp_old_file) except OSError: pass try: os.remove(tmp_new_file) except OSError: pass execute(["cleartool", "get", "-to", tmp_old_file, old_file]) execute(["cleartool", "get", "-to", tmp_new_file, new_file]) diff_cmd = ["diff", "-uN", tmp_old_file, tmp_new_file] else: diff_cmd = ["diff", "-uN", old_file, new_file] dl = execute(diff_cmd, extra_ignore_errors=(1, 2), translate_newlines=False) # Replace the temporary file names in the diff with the ones in the # snapshot view. if self.viewtype == "snapshot": dl = dl.replace(tmp_old_file, old_file) dl = dl.replace(tmp_new_file, new_file) # If the input file has ^M characters at the ends of lines, let's # ignore them. dl = dl.replace('\r\r\n', '\r\n') dl = dl.splitlines(True) # Special handling for the output of the diff tool on binary files: # diff outputs "Files a and b differ", # but the code below expects the output to start with # "Binary files " if (len(dl) == 1 and dl[0].startswith('Files %s and %s differ' % (old_file, new_file))): dl = ['Binary files %s and %s differ\n' % (old_file, new_file)] # We need the OIDs of the files to translate them to paths in the # Review Board repository. old_oid = execute(["cleartool", "describe", "-fmt", "%On", old_file]) new_oid = execute(["cleartool", "describe", "-fmt", "%On", new_file]) if dl == [] or dl[0].startswith("Binary files "): if dl == []: dl = ["File %s in your changeset is unmodified\n" % new_file] dl.insert(0, "==== %s %s ====\n" % (old_oid, new_oid)) dl.append('\n') else: dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid)) return dl def _diff_directories(self, old_dir, new_dir): """Return a unified diff between two directories' contents. This function saves the two versions' directory contents to temp files and treats them as a regular diff between two files. """ old_content = self._directory_content(old_dir) new_content = self._directory_content(new_dir) old_tmp = make_tempfile(content=old_content) new_tmp = make_tempfile(content=new_content) diff_cmd = ["diff", "-uN", old_tmp, new_tmp] dl = execute(diff_cmd, extra_ignore_errors=(1, 2), translate_newlines=False, split_lines=True) # Replace the temporary file names with the real directory names and # add the OIDs. if dl: dl[0] = dl[0].replace(old_tmp, old_dir) dl[1] = dl[1].replace(new_tmp, new_dir) old_oid = execute(["cleartool", "describe", "-fmt", "%On", old_dir]) new_oid = execute(["cleartool", "describe", "-fmt", "%On", new_dir]) dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid)) return dl def _do_diff(self, changeset): """Generate a unified diff for all files in the changeset.""" # Sanitize all changesets of version 0 before processing. changeset = self._sanitize_version_0_changeset(changeset) diff = [] for old_file, new_file in changeset: dl = [] # cpath.isdir does not work for snapshot views, but this # information can be found using `cleartool describe`.
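# For example (illustrative): 'cleartool describe -fmt %m foo.c@@' # prints 'file element' for a file, while a directory element prints # 'directory element'.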
if self.viewtype == 'snapshot': # ClearCase object path is file path + @@ object_path = new_file.split('@@')[0] + '@@' output = execute(["cleartool", "describe", "-fmt", "%m", object_path]) object_kind = output.strip() isdir = object_kind == 'directory element' else: isdir = cpath.isdir(new_file) if isdir: dl = self._diff_directories(old_file, new_file) elif cpath.exists(new_file) or self.viewtype == 'snapshot': dl = self._diff_files(old_file, new_file) else: logging.error("File %s does not exist or access is denied." % new_file) continue if dl: diff.append(''.join(dl)) return { 'diff': ''.join(diff), } class ClearCaseRepositoryInfo(RepositoryInfo): """ A representation of a ClearCase source code repository. This version knows how to find a matching repository on the server even if the URLs differ. """ def __init__(self, path, base_path, vobstag, supports_parent_diffs=False): RepositoryInfo.__init__(self, path, base_path, supports_parent_diffs=supports_parent_diffs) self.vobstag = vobstag def find_server_repository_info(self, server): """ The point of this function is to find a repository on the server that matches self, even if the paths aren't the same. (For example, if self uses an 'http' path, but the server uses a 'file' path for the same repository.) It does this by comparing the VOB's name and uuid. If the repositories use the same path, you'll get back self, otherwise you'll get a different ClearCaseRepositoryInfo object (with a different path). """ # Find VOB's family uuid based on VOB's tag uuid = self._get_vobs_uuid(self.vobstag) logging.debug("Repository's %s uuid is %r" % (self.vobstag, uuid)) # To reduce HTTP requests (_get_repository_info calls), we build an # ordered list of ClearCase repositories starting with the ones that # have a similar vobstag. repository_scan_order = deque() # Because the VOB tag is platform-specific, we split and search # for the remote name in any sub-part so this HTTP request # optimization can work for users on both Windows and Unix-like # platforms. vob_tag_parts = self.vobstag.split(cpath.sep) # Reduce list of repositories to only ClearCase ones and sort them by # repo name matching vobstag (or some part of the vobstag) first. for repository in server.get_repositories(tool='ClearCase').all_items: # Ignore non-ClearCase repositories. if repository['tool'] != 'ClearCase': continue repo_name = repository['name'] # Repositories with a similar VOB tag get put at the beginning and # the others at the end. if repo_name == self.vobstag or repo_name in vob_tag_parts: repository_scan_order.appendleft(repository) else: repository_scan_order.append(repository) # Now try to find a matching uuid for repository in repository_scan_order: repo_name = repository['name'] try: info = repository.get_info() except APIError as e: # If the current repository is not publicly accessible and the # current user has no explicit access to it, the server will # return error_code 101 and http_status 403. if not (e.error_code == 101 and e.http_status == 403): # We can safely ignore this repository unless the VOB tag # matches. 
if repo_name == self.vobstag: raise SCMError('You do not have permission to access ' 'this repository.') continue else: # Bubble up any other errors raise e if not info or uuid != info['uuid']: continue path = info['repopath'] logging.debug('Matching repository uuid:%s with path:%s', uuid, path) return ClearCaseRepositoryInfo(path=path, base_path=path, vobstag=self.vobstag) # We didn't found uuid but if version is >= 1.5.3 # we can try to use VOB's name hoping it is better # than current VOB's path. if parse_version(server.rb_version) >= parse_version('1.5.3'): self.path = cpath.split(self.vobstag)[1] # We didn't find a matching repository on the server. # We'll just return self and hope for the best. return self def _get_vobs_uuid(self, vobstag): """Return family uuid of VOB.""" property_lines = execute(["cleartool", "lsvob", "-long", vobstag], split_lines=True) for line in property_lines: if line.startswith('Vob family uuid:'): return line.split(' ')[-1].rstrip() def _get_repository_info(self, server, repository): try: return server.get_repository_info(repository['id']) except APIError as e: # If the server couldn't fetch the repository info, it will return # code 210. Ignore those. # Other more serious errors should still be raised, though. if e.error_code == 210: return None raise e RBTools-0.7.11/rbtools/clients/errors.py0000644000232200023220000000236313230242633020507 0ustar debalancedebalancefrom __future__ import unicode_literals class SCMError(Exception): """A generic error from an SCM.""" class AuthenticationError(Exception): """An error for when authentication fails.""" class MergeError(Exception): """An error for when merging two branches fails.""" class PushError(Exception): """An error for when pushing a branch to upstream fails.""" class AmendError(Exception): """An error for when amending a commit fails.""" class OptionsCheckError(Exception): """An error for when command-line options are used incorrectly.""" class InvalidRevisionSpecError(Exception): """An error for when the specified revisions are invalid.""" class MinimumVersionError(Exception): """An error for when software doesn't meet version requirements.""" class TooManyRevisionsError(InvalidRevisionSpecError): """An error for when too many revisions were specified.""" def __init__(self): super(TooManyRevisionsError, self).__init__( 'Too many revisions specified') class EmptyChangeError(Exception): """An error for when there are no changed files.""" def __init__(self): super(EmptyChangeError, self).__init__( "Couldn't find any affected files for this change.") RBTools-0.7.11/rbtools/clients/svn.py0000644000232200023220000012764113230242633020010 0ustar debalancedebalancefrom __future__ import unicode_literals import getpass import logging import os import posixpath import re import sys from xml.etree import ElementTree import six from six.moves.urllib.parse import unquote from rbtools.api.errors import APIError from rbtools.clients import PatchResult, RepositoryInfo, SCMClient from rbtools.clients.errors import (AuthenticationError, InvalidRevisionSpecError, MinimumVersionError, OptionsCheckError, SCMError, TooManyRevisionsError) from rbtools.utils.checks import (check_gnu_diff, check_install, is_valid_version) from rbtools.utils.diffs import (filename_match_any_patterns, filter_diff, normalize_patterns) from rbtools.utils.filesystem import (make_empty_files, make_tempfile, walk_parents) from rbtools.utils.process import execute class SVNClient(SCMClient): """ A wrapper around the svn Subversion tool that fetches 
repository information and generates compatible diffs. """ name = 'Subversion' INDEX_SEP = b'=' * 67 INDEX_FILE_RE = re.compile(b'^Index: (.+?)(?:\t\((added|deleted)\))?\n$') supports_diff_exclude_patterns = True supports_patch_revert = True # Match the diff control lines generated by 'svn diff'. DIFF_ORIG_FILE_LINE_RE = re.compile(br'^---\s+.*\s+\(.*\)') DIFF_NEW_FILE_LINE_RE = re.compile(br'^\+\+\+\s+.*\s+\(.*\)') DIFF_COMPLETE_REMOVAL_RE = re.compile(br'^@@ -1,\d+ \+0,0 @@$') ADDED_FILES_RE = re.compile(br'^Index:\s+(\S+)\t\(added\)$', re.M) DELETED_FILES_RE = re.compile(br'^Index:\s+(\S+)\t\(deleted\)$', re.M) REVISION_WORKING_COPY = '--rbtools-working-copy' REVISION_CHANGELIST_PREFIX = '--rbtools-changelist:' VERSION_NUMBER_RE = re.compile(br'(\d+)\.(\d+)\.(\d+)') SHOW_COPIES_AS_ADDS_MIN_VERSION = (1, 7, 0) PATCH_MIN_VERSION = (1, 7, 0) def __init__(self, **kwargs): super(SVNClient, self).__init__(**kwargs) def get_repository_info(self): if not check_install(['svn', 'help']): logging.debug('Unable to execute "svn help": skipping SVN') return None # Get the SVN repository path (either via a working copy or # a supplied URI) svn_info_params = ["info"] if getattr(self.options, 'repository_url', None): svn_info_params.append(self.options.repository_url) data = self._run_svn(svn_info_params, ignore_errors=True, results_unicode=False, log_output_on_error=False) m = re.search(b'^Repository Root: (.+)$', data, re.M) if not m: return None path = m.group(1) m = re.search(b'^URL: (.+)$', data, re.M) if not m: return None base_path = m.group(1)[len(path):] or b'/' m = re.search(b'^Repository UUID: (.+)$', data, re.M) if not m: return None uuid = m.group(1) # Now that we know it's SVN, make sure we have GNU diff installed, # and error out if we don't. check_gnu_diff() # Grab version of SVN client and store as a tuple in the form: # (major_version, minor_version, micro_version) ver_string = self._run_svn(['--version', '-q'], ignore_errors=True) m = self.VERSION_NUMBER_RE.match(ver_string) if not m: logging.warn('Unable to parse SVN client version triple from ' '"%s". Assuming version 0.0.0.' % ver_string.strip()) self.subversion_client_version = (0, 0, 0) else: self.subversion_client_version = tuple(map(int, m.groups())) return SVNRepositoryInfo(path, base_path, uuid) def parse_revision_spec(self, revisions=[]): """Parses the given revision spec. The 'revisions' argument is a list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2". SCMTool-specific overrides of this method are expected to deal with such syntaxes. This will return a dictionary with the following keys: 'base': A revision to use as the base of the resulting diff. 'tip': A revision to use as the tip of the resulting diff. These will be used to generate the diffs to upload to Review Board (or print). The diff for review will include the changes in (base, tip]. If a single revision is passed in, this will return the parent of that revision for 'base' and the passed-in revision for 'tip'. If zero revisions are passed in, this will return the most recently checked-out revision for 'base' and a special string indicating the working copy for 'tip'. The SVN SCMClient never fills in the 'parent_base' key. Users who are using other patch-stack tools who want to use parent diffs with SVN will have to generate their diffs by hand. 
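For example (illustrative): [] yields {'base': 'BASE', 'tip': '--rbtools-working-copy'}, ['42'] yields {'base': 41, 'tip': 42}, and ['40:45'] or ['40', '45'] yields {'base': 40, 'tip': 45}.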
""" n_revisions = len(revisions) if n_revisions == 1 and ':' in revisions[0]: revisions = revisions[0].split(':') n_revisions = len(revisions) if n_revisions == 0: # Most recent checked-out revision -- working copy # TODO: this should warn about mixed-revision working copies that # affect the list of files changed (see bug 2392). return { 'base': 'BASE', 'tip': self.REVISION_WORKING_COPY, } elif n_revisions == 1: # Either a numeric revision (n-1:n) or a changelist revision = revisions[0] try: revision = self._convert_symbolic_revision(revision) return { 'base': revision - 1, 'tip': revision, } except ValueError: # It's not a revision--let's try a changelist. This only makes # sense if we have a working copy. if not self.options.repository_url: status = self._run_svn( ['status', '--cl', six.text_type(revision), '--ignore-externals', '--xml'], results_unicode=False) cl = ElementTree.fromstring(status).find('changelist') if cl is not None: # TODO: this should warn about mixed-revision working # copies that affect the list of files changed (see # bug 2392). return { 'base': 'BASE', 'tip': self.REVISION_CHANGELIST_PREFIX + revision } raise InvalidRevisionSpecError( '"%s" does not appear to be a valid revision or ' 'changelist name' % revision) elif n_revisions == 2: # Diff between two numeric revisions try: return { 'base': self._convert_symbolic_revision(revisions[0]), 'tip': self._convert_symbolic_revision(revisions[1]), } except ValueError: raise InvalidRevisionSpecError( 'Could not parse specified revisions: %s' % revisions) else: raise TooManyRevisionsError def _convert_symbolic_revision(self, revision): command = ['-r', six.text_type(revision), '-l', '1'] if getattr(self.options, 'repository_url', None): command.append(self.options.repository_url) log = self.svn_log_xml(command) if log is not None: try: root = ElementTree.fromstring(log) except ValueError as e: # _convert_symbolic_revision() nominally raises a ValueError to # indicate any failure to determine the revision number from # the log entry. Here, we explicitly catch a ValueError from # ElementTree and raise a generic SCMError so that this # specific failure to parse the XML log output is # differentiated from the nominal case. raise SCMError('Failed to parse svn log - %s.' % e) logentry = root.find('logentry') if logentry is not None: return int(logentry.attrib['revision']) raise ValueError def scan_for_server(self, repository_info): # Scan first for dot files, since it's faster and will cover the # user's $HOME/.reviewboardrc server_url = super(SVNClient, self).scan_for_server(repository_info) if server_url: return server_url return self.scan_for_server_property(repository_info) def scan_for_server_property(self, repository_info): def get_url_prop(path): url = self._run_svn(["propget", "reviewboard:url", path], with_errors=False, extra_ignore_errors=(1,)).strip() return url or None for path in walk_parents(os.getcwd()): if not os.path.exists(os.path.join(path, ".svn")): break prop = get_url_prop(path) if prop: return prop return get_url_prop(repository_info.path) def get_raw_commit_message(self, revisions): """Return the raw commit message(s) for the given revisions. Args: revisions (dict): Revisions to get the commit messages for. This will contain ``tip`` and ``base`` keys. Returns: unicode: The commit messages for all the requested revisions. 
""" base = six.text_type(revisions['base']) tip = six.text_type(revisions['tip']) if (tip == SVNClient.REVISION_WORKING_COPY or tip.startswith(SVNClient.REVISION_CHANGELIST_PREFIX)): return '' command = ['-r', '%s:%s' % (base, tip)] if getattr(self.options, 'repository_url', None): command.append(self.options.repository_url) log = self.svn_log_xml(command) try: root = ElementTree.fromstring(log) except ValueError as e: raise SCMError('Failed to parse svn log: %s' % e) # We skip the first commit message, because we want commit messages # corresponding to the changes that will be included in the diff. messages = root.findall('.//msg')[1:] return '\n\n'.join(message.text for message in messages) def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[]): """ Performs a diff in a Subversion repository. If the given revision spec is empty, this will do a diff of the modified files in the working directory. If the spec is a changelist, it will do a diff of the modified files in that changelist. If the spec is a single revision, it will show the changes in that revision. If the spec is two revisions, this will do a diff between the two revisions. SVN repositories do not support branches of branches in a way that makes parent diffs possible, so we never return a parent diff. """ repository_info = self.get_repository_info() # SVN paths are always relative to the root of the repository, so we # compute the current path we are checked out at and use that as the # current working directory. We use / for the base_dir because we do # not normalize the paths to be filesystem paths, but instead use SVN # paths. exclude_patterns = normalize_patterns(exclude_patterns, '/', repository_info.base_path) # Keep track of information needed for handling empty files later. empty_files_revisions = { 'base': None, 'tip': None, } base = six.text_type(revisions['base']) tip = six.text_type(revisions['tip']) diff_cmd = ['diff', '--diff-cmd=diff', '--notice-ancestry'] changelist = None if tip == self.REVISION_WORKING_COPY: # Posting the working copy diff_cmd.extend(['-r', base]) elif tip.startswith(self.REVISION_CHANGELIST_PREFIX): # Posting a changelist changelist = tip[len(self.REVISION_CHANGELIST_PREFIX):] diff_cmd.extend(['--changelist', changelist]) else: # Diff between two separate revisions. Behavior depends on whether # or not there's a working copy if self.options.repository_url: # No working copy--create 'old' and 'new' URLs if len(include_files) == 1: # If there's a single file or directory passed in, we use # that as part of the URL instead of as a separate # filename. repository_info.set_base_path(include_files[0]) include_files = [] new_url = (repository_info.path + repository_info.base_path + '@' + tip) # When the source revision is '0', assume the user wants to # upload a diff containing all the files in 'base_path' as # new files. If the base path within the repository is added to # both the old and new URLs, `svn diff` will error out, since # the base_path didn't exist at revision 0. To avoid that # error, use the repository's root URL as the source for the # diff. 
                if base == '0':
                    old_url = repository_info.path + '@' + base
                else:
                    old_url = (repository_info.path +
                               repository_info.base_path + '@' + base)

                diff_cmd.extend([old_url, new_url])

                empty_files_revisions['base'] = '(revision %s)' % base
                empty_files_revisions['tip'] = '(revision %s)' % tip
            else:
                # Working copy--do a normal range diff
                diff_cmd.extend(['-r', '%s:%s' % (base, tip)])

                empty_files_revisions['base'] = '(revision %s)' % base
                empty_files_revisions['tip'] = '(revision %s)' % tip

        diff_cmd.extend(include_files)

        # Check for and validate the --svn-show-copies-as-adds option, or
        # evaluate the working copy to determine if the scheduled commit
        # will contain an addition-with-history commit. When this case
        # occurs, --svn-show-copies-as-adds must be specified. Note: this
        # only pertains to local modifications in a working copy, and not
        # to diffs between specific numeric revisions.
        if (((tip == self.REVISION_WORKING_COPY) or changelist) and
            is_valid_version(self.subversion_client_version,
                             self.SHOW_COPIES_AS_ADDS_MIN_VERSION)):
            svn_show_copies_as_adds = getattr(
                self.options, 'svn_show_copies_as_adds', None)

            if svn_show_copies_as_adds is None:
                if self.history_scheduled_with_commit(changelist,
                                                      include_files,
                                                      exclude_patterns):
                    sys.stderr.write("One or more files in your changeset "
                                     "have history scheduled with commit. "
                                     "Please try again with "
                                     "'--svn-show-copies-as-adds=y/n'.\n")
                    sys.exit(1)
            else:
                if svn_show_copies_as_adds in 'Yy':
                    diff_cmd.append("--show-copies-as-adds")

        diff = self._run_svn(diff_cmd, split_lines=True,
                             results_unicode=False,
                             log_output_on_error=False)
        diff = self.handle_renames(diff)

        if self.supports_empty_files():
            diff = self._handle_empty_files(diff, diff_cmd,
                                            empty_files_revisions)

        diff = self.convert_to_absolute_paths(diff, repository_info)

        if exclude_patterns:
            diff = filter_diff(diff, self.INDEX_FILE_RE, exclude_patterns)

        return {
            'diff': b''.join(diff),
        }

    def history_scheduled_with_commit(self, changelist, include_files,
                                      exclude_patterns):
        """Return whether the scheduled commit contains history.

        This checks whether any file's ``svn status`` entry has a '+' in
        the fourth column, which marks an addition-with-history (a file
        scheduled for commit via svn copy or move).
        """
        status_cmd = ['status', '-q', '--ignore-externals']

        if changelist:
            status_cmd.extend(['--changelist', changelist])

        if include_files:
            status_cmd.extend(include_files)

        for p in self._run_svn(status_cmd, split_lines=True,
                               results_unicode=False):
            try:
                if p[3] == b'+':
                    if exclude_patterns:
                        # We found a file with history, but first we must
                        # make sure that it is not being excluded.
                        filename = p[8:].rstrip()

                        should_exclude = filename_match_any_patterns(
                            filename,
                            exclude_patterns,
                            self.get_repository_info().base_path)

                        if not should_exclude:
                            return True
                    else:
                        return True
            except IndexError:
                # This may be some other output, or just doesn't have the
                # data we're looking for. Move along.
                pass

        return False

    def find_copyfrom(self, path):
        """A helper function for handle_renames.

        The output of 'svn info' reports the "Copied From" header when
        invoked on the exact path that was copied. If the current file was
        copied as a part of a parent or any further ancestor directory,
        'svn info' will not report the origin. Thus we need to ascend from
        the path until either a copied path is found or there are no more
        path components to try.
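        For example (paths here are hypothetical): if 'dir-a' was copied
        to 'dir-b', asking about 'dir-b/file.txt' yields no "Copied From"
        header, so we retry with 'dir-b' (remembering 'file.txt'), find
        the copy there, and join the pieces to return 'dir-a/file.txt'.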
""" def smart_join(p1, p2): if p2: return os.path.join(p1, p2) return p1 path1 = path path2 = None while path1: info = self.svn_info(path1, ignore_errors=True) or {} url = info.get('Copied From URL', None) if url: root = info['Repository Root'] from_path1 = unquote(url[len(root):]).encode('utf-8') return smart_join(from_path1, path2) if info.get('Schedule', None) != 'normal': # Not added as a part of the parent directory, bail out return None # Strip one component from path1 to path2 path1, tmp = os.path.split(path1) if path1 == b'' or path1 == b'/': path1 = None else: path2 = smart_join(tmp, path2) return None def handle_renames(self, diff_content): """ The output of svn diff is incorrect when the file in question came into being via svn mv/cp. Although the patch for these files are relative to its parent, the diff header doesn't reflect this. This function fixes the relevant section headers of the patch to portray this relationship. """ # svn diff against a repository URL on two revisions appears to # handle moved files properly, so only adjust the diff file names # if they were created using a working copy. if self.options.repository_url: return diff_content result = [] num_lines = len(diff_content) i = 0 while i < num_lines: if (i + 4 < num_lines and self.INDEX_FILE_RE.match(diff_content[i]) and diff_content[i + 1][:-1] == self.INDEX_SEP and self.DIFF_ORIG_FILE_LINE_RE.match(diff_content[i + 2]) and self.DIFF_NEW_FILE_LINE_RE.match(diff_content[i + 3])): from_line = diff_content[i + 2] to_line = diff_content[i + 3] # If the file is marked completely removed, bail out with the # original diff. The reason for this is that # ``svn diff --notice-ancestry`` generates two diffs for a # replaced file: one as a complete deletion, and one as a new # addition. If it was replaced with history, though, we need to # preserve the file name in the "deletion" part, or the patch # won't apply. if self.DIFF_COMPLETE_REMOVAL_RE.match(diff_content[i + 4]): result.extend(diff_content[i:i + 5]) else: to_file, _ = self.parse_filename_header(to_line[4:]) copied_from = self.find_copyfrom(to_file) result.append(diff_content[i]) result.append(diff_content[i + 1]) if copied_from is not None: result.append(from_line.replace(to_file, copied_from)) else: result.append(from_line) result.append(to_line) result.append(diff_content[i + 4]) i += 5 else: result.append(diff_content[i]) i += 1 return result def _handle_empty_files(self, diff_content, diff_cmd, revisions): """Handles added and deleted 0-length files in the diff output. Since the diff output from svn diff does not give enough context for 0-length files, we add extra information to the patch. For example, the original diff output of an added 0-length file is: Index: foo\n ===================================================================\n The modified diff of an added 0-length file will be: Index: foo\t(added)\n ===================================================================\n --- foo\t()\n +++ foo\t()\n """ # Get a list of all deleted files in this diff so we can differentiate # between added empty files and deleted empty files. 
diff_cmd.append('--no-diff-deleted') diff_with_deleted = self._run_svn(diff_cmd, ignore_errors=True, none_on_ignored_error=True, results_unicode=False) if not diff_with_deleted: return diff_content deleted_files = re.findall(br'^Index:\s+(\S+)\s+\(deleted\)$', diff_with_deleted, re.M) result = [] i = 0 num_lines = len(diff_content) while i < num_lines: line = diff_content[i] if (line.startswith(b'Index: ') and (i + 2 == num_lines or (i + 2 < num_lines and diff_content[i + 2].startswith(b'Index: ')))): # An empty file. Get and add the extra diff information. index_line = line.strip() filename = index_line.split(b' ', 1)[1].strip() if filename in deleted_files: # Deleted empty file. result.append(b'%s\t(deleted)\n' % index_line) if not revisions['base'] and not revisions['tip']: tip = b'(working copy)' info = self.svn_info(filename, ignore_errors=True) if info and 'Revision' in info: base = '(revision %s)' % info['Revision'] else: continue else: base = revisions['base'] tip = revisions['tip'] else: # Added empty file. result.append(b'%s\t(added)\n' % index_line) if not revisions['base'] and not revisions['tip']: base = tip = b'(revision 0)' else: base = revisions['base'] tip = revisions['tip'] if isinstance(base, six.text_type): base = base.encode('utf-8') if isinstance(tip, six.text_type): tip = tip.encode('utf-8') result.append(b'%s\n' % self.INDEX_SEP) result.append(b'--- %s\t%s\n' % (filename, base)) result.append(b'+++ %s\t%s\n' % (filename, tip)) # Skip the next line (the index separator) since we've already # copied it. i += 2 else: result.append(line) i += 1 return result def convert_to_absolute_paths(self, diff_content, repository_info): """ Converts relative paths in a diff output to absolute paths. This handles paths that have been svn switched to other parts of the repository. """ result = [] for line in diff_content: front = None orig_line = line if (self.DIFF_NEW_FILE_LINE_RE.match(line) or self.DIFF_ORIG_FILE_LINE_RE.match(line) or line.startswith(b'Index: ')): front, line = line.split(b' ', 1) if front: if line.startswith(b'/'): # Already absolute line = front + b' ' + line else: # Filename and rest of line (usually the revision # component) file, rest = self.parse_filename_header(line) # If working with a diff generated outside of a working # copy, then file paths are already absolute, so just # add initial slash. if self.options.repository_url: path = unquote( posixpath.join(repository_info.base_path, file)) else: info = self.svn_info(file, True) if info is None: result.append(orig_line) continue url = info["URL"] root = info["Repository Root"] path = unquote(url[len(root):]) line = b'%s %s%s' % (front, path, rest) result.append(line) return result def svn_info(self, path, ignore_errors=False): """Return a dict which is the result of 'svn info' at a given path.""" svninfo = {} # SVN's internal path recognizers think that any file path that # includes an '@' character will be path@rev, and skips everything that # comes after the '@'. This makes it hard to do operations on files # which include '@' in the name (such as image@2x.png). 
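        # For example, asking about 'image@2x.png' would be parsed as the
        # file 'image' at peg revision '2x.png'. Appending a trailing '@'
        # ('image@2x.png@') disables that parsing; paths that already end
        # in '@' are left alone below.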
if b'@' in path and not path[-1] == b'@': path += b'@' result = self._run_svn([b"info", path], split_lines=True, ignore_errors=ignore_errors, none_on_ignored_error=True, results_unicode=False) if result is None: return None for info in result: parts = info.strip().split(b': ', 1) if len(parts) == 2: key, value = parts svninfo[key] = value return svninfo # Adapted from server code parser.py def parse_filename_header(self, s): parts = None if b'\t' in s: # There's a \t separating the filename and info. This is the # best case scenario, since it allows for filenames with spaces # without much work. The info can also contain tabs after the # initial one; ignore those when splitting the string. parts = s.split(b'\t', 1) # There's spaces being used to separate the filename and info. # This is technically wrong, so all we can do is assume that # 1) the filename won't have multiple consecutive spaces, and # 2) there's at least 2 spaces separating the filename and info. if b' ' in s: parts = re.split(b' +', s) if parts: parts[1] = b'\t' + parts[1] return parts # strip off ending newline, and return it as the second component return [s.split(b'\n')[0], b'\n'] def _get_p_number(self, base_path, base_dir): """Return the argument for --strip in svn patch. This determines the number of path components to remove from file paths in the diff to be applied. """ if base_path == '/': # We always need to strip off the leading forward slash. return 1 else: # We strip all leading directories from base_path. The last # directory will not be suffixed with a slash. return base_path.count('/') + 1 def _exclude_files_not_in_tree(self, patch_file, base_path): """Process a diff and remove entries not in the current directory. The file at the location patch_file will be overwritten by the new patch. This function returns a tuple of two booleans. The first boolean indicates if any files have been excluded. The second boolean indicates if the resulting diff patch file is empty. """ excluded_files = False empty_patch = True # If our base path does not have a trailing slash (which it won't # unless we are at a checkout root), we append a slash so that we can # determine if files are under the base_path. We do this so that files # like /trunkish (which begins with /trunk) do not mistakenly get # placed in /trunk if that is the base_path. if not base_path.endswith('/'): base_path += '/' filtered_patch_name = make_tempfile() with open(filtered_patch_name, 'w') as filtered_patch: with open(patch_file, 'r') as original_patch: include_file = True for line in original_patch.readlines(): m = self.INDEX_FILE_RE.match(line) if m: filename = m.group(1).decode('utf-8') include_file = filename.startswith(base_path) if not include_file: excluded_files = True else: empty_patch = False if include_file: filtered_patch.write(line) os.rename(filtered_patch_name, patch_file) return (excluded_files, empty_patch) def apply_patch(self, patch_file, base_path, base_dir, p=None, revert=False): """Apply the patch and return a PatchResult indicating its success.""" if not is_valid_version(self.subversion_client_version, self.PATCH_MIN_VERSION): raise MinimumVersionError( 'Using "rbt patch" with the SVN backend requires at least ' 'svn 1.7.0') if base_dir and not base_dir.startswith(base_path): # The patch was created in either a higher level directory or a # directory not under this one. We should exclude files from the # patch that are not under this directory. 
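            # For example (paths here are hypothetical): a patch generated
            # at the checkout root ('/') but applied from within '/trunk'
            # must drop every file outside '/trunk' before it can apply
            # cleanly.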
            excluded, empty = self._exclude_files_not_in_tree(patch_file,
                                                              base_path)

            if excluded:
                logging.warn('This patch was generated in a different '
                             'directory. To prevent conflicts, all files '
                             'not under the current directory have been '
                             'excluded. To apply all files in this '
                             'patch, apply this patch from the %s '
                             'directory.'
                             % base_dir)

                if empty:
                    logging.warn('All files were excluded from the patch.')

        cmd = ['patch']
        p_num = p or self._get_p_number(base_path, base_dir)

        if p_num >= 0:
            cmd.append('--strip=%s' % p_num)

        if revert:
            cmd.append('--reverse-diff')

        cmd.append(six.text_type(patch_file))

        rc, patch_output = self._run_svn(cmd, return_error_code=True)

        if self.supports_empty_files():
            try:
                with open(patch_file, 'rb') as f:
                    patch = f.read()
            except IOError as e:
                logging.error('Unable to read file %s: %s', patch_file, e)
                return

            self.apply_patch_for_empty_files(patch, p_num, revert=revert)

        # TODO: What is svn's equivalent of a garbage patch message?
        return PatchResult(applied=(rc == 0), patch_output=patch_output)

    def apply_patch_for_empty_files(self, patch, p_num, revert=False):
        """Returns True if any empty files in the patch are applied.

        If there are no empty files in the patch, or if an error occurs
        while applying the patch, we return False.
        """
        patched_empty_files = False

        if revert:
            added_files = self.DELETED_FILES_RE.findall(patch)
            deleted_files = self.ADDED_FILES_RE.findall(patch)
        else:
            added_files = self.ADDED_FILES_RE.findall(patch)
            deleted_files = self.DELETED_FILES_RE.findall(patch)

        if added_files:
            added_files = self._strip_p_num_slashes(added_files,
                                                    int(p_num))
            make_empty_files(added_files)

            # We require --force here because svn will complain if we run
            # `svn add` on a file that has already been added or deleted.
            result = self._run_svn(['add', '--force'] + added_files,
                                   ignore_errors=True,
                                   none_on_ignored_error=True)

            if result is None:
                logging.error('Unable to execute "svn add" on: %s',
                              ', '.join(added_files))
            else:
                patched_empty_files = True

        if deleted_files:
            deleted_files = self._strip_p_num_slashes(deleted_files,
                                                      int(p_num))

            # We require --force here because svn will complain if we run
            # `svn delete` on a file that has already been added or
            # deleted.
            result = self._run_svn(['delete', '--force'] + deleted_files,
                                   ignore_errors=True,
                                   none_on_ignored_error=True)

            if result is None:
                logging.error('Unable to execute "svn delete" on: %s',
                              ', '.join(deleted_files))
            else:
                patched_empty_files = True

        return patched_empty_files

    def supports_empty_files(self):
        """Checks if the RB server supports added/deleted empty files."""
        return (self.capabilities and
                self.capabilities.has_capability('scmtools', 'svn',
                                                 'empty_files'))

    def _run_svn(self, svn_args, *args, **kwargs):
        cmdline = [b'svn', b'--non-interactive'] + svn_args

        if getattr(self.options, 'svn_username', None):
            cmdline += [b'--username', self.options.svn_username]

        if getattr(self.options, 'svn_prompt_password', None):
            self.options.svn_prompt_password = False
            self.options.svn_password = getpass.getpass(b'SVN Password:')

        if getattr(self.options, 'svn_password', None):
            cmdline += [b'--password', self.options.svn_password]

        return execute(cmdline, *args, **kwargs)

    def svn_log_xml(self, svn_args, *args, **kwargs):
        """Run SVN log non-interactively and retrieve XML output.

        We cannot run SVN log interactively and retrieve XML output
        because the authentication prompts will be intermixed with the XML
        output and cause XML parsing to fail.

        This function returns None (as if none_on_ignored_error were True)
        if an error occurs that is not an authentication error.
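        For example, ``svn_log_xml(['-r', '5', '-l', '1'])`` (with a
        hypothetical revision number) ends up executing
        ``svn --non-interactive log --xml -r 5 -l 1`` and returns the raw
        XML output as bytes.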
""" command = ['log', '--xml'] + svn_args rc, result, errors = self._run_svn(command, *args, return_error_code=True, with_errors=False, return_errors=True, ignore_errors=True, results_unicode=False, **kwargs) if rc: # SVN Error E215004: --non-interactive was passed but the remote # repository requires authentication. if errors.startswith(b'svn: E215004'): raise AuthenticationError( 'Could not authenticate against remote SVN repository. ' 'Please provide the --svn-username and either the ' '--svn-password or --svn-prompt-password command-line ' 'options.') return None return result def check_options(self): if getattr(self.options, 'svn_show_copies_as_adds', None): if (len(self.options.svn_show_copies_as_adds) > 1 or self.options.svn_show_copies_as_adds not in 'YyNn'): raise OptionsCheckError( 'Invalid value \'%s\' for --svn-show-copies-as-adds ' 'option. Valid values are \'y\' or \'n\'.' % self.options.svn_show_copies_as_adds) class SVNRepositoryInfo(RepositoryInfo): """Information on a Subversion repository. This stores information on the path and, optionally, UUID of a Subversion repository. It can match a local repository against those on a Review Board server. Attributes: repository_id (int): ID of the repository in the API. This is used primarily for testing purposes, and is not guaranteed to be set. uuid (unicode): UUID of the Subversion repository. """ def __init__(self, path, base_path, uuid, supports_parent_diffs=False, repository_id=None): """Initialize the repository information. Args: path (unicode): Subversion checkout path. base_path (unicode): Root of the Subversion repository. uuid (unicode): UUID of the Subversion repository. supports_parent_diffs (bool, optional): Whether or not the repository supports parent diffs. repository_id (int, optional): ID of the repository in the API. This is used primarily for testing purposes, and is not guaranteed to be set. """ super(SVNRepositoryInfo, self).__init__( path, base_path, supports_parent_diffs=supports_parent_diffs) self.uuid = uuid self.repository_id = repository_id def find_server_repository_info(self, server): """Return server-side information on the current Subversion repository. The point of this function is to find a repository on the server that matches self, even if the paths aren't the same. (For example, if self uses an 'http' path, but the server uses a 'file' path for the same repository.) It does this by comparing repository UUIDs. If the repositories use the same path, you'll get back self, otherwise you'll get a different SVNRepositoryInfo object (with a different path). Args: server (rbtools.api.resource.RootResource): The root resource for the Review Board server. Returns: SVNRepositoryInfo: The server-side information for this repository. """ # Since all_items is a generator, and we need to process the list of # repositories twice, we're going to keep a cached list of repositories # that we'll add to as we iterate through the first time. That way, # we can iterate through a second time, without performing another # call to the server. # # Hopefully we'll match a repository in the first (less expensive) loop # and won't need it. # # Note also that we're not fetching all pages up-front, as that could # lead to a lot of unnecessary API requests if the repository in # question is found before the last page of results in the first for # loop. repositories = server.get_repositories(tool='Subversion').all_items cached_repos = [] # Do two paths. The first will be to try to find a matching entry # by path/mirror path. 
If we don't find anything, then the second will # be to find a matching UUID. for repository in repositories: if (self.path == repository['path'] or ('mirror_path' in repository and self.path == repository['mirror_path'])): self.repository_id = repository.id return self cached_repos.append(repository) # We didn't find our locally matched repository, so scan based on UUID. for repository in cached_repos: try: info = repository.get_info() if not info or self.uuid != info['uuid']: continue except APIError: continue repos_base_path = info['url'][len(info['root_url']):] relpath = self._get_relative_path(self.base_path, repos_base_path) if relpath: return SVNRepositoryInfo(info['url'], relpath, self.uuid, repository_id=repository.id) # We didn't find a matching repository on the server. We'll just return # self and hope for the best. In reality, we'll likely fail, but we # did all we could really do. return self def _get_repository_info(self, server, repository): try: return server.get_repository_info(repository['id']) except APIError as e: # If the server couldn't fetch the repository info, it will return # code 210. Ignore those. # Other more serious errors should still be raised, though. if e.error_code == 210: return None raise e def _get_relative_path(self, path, root): pathdirs = self._split_on_slash(path) rootdirs = self._split_on_slash(root) # root is empty, so anything relative to that is itself if len(rootdirs) == 0: return path # If one of the directories doesn't match, then path is not relative # to root. if rootdirs != pathdirs[:len(rootdirs)]: return None # All the directories matched, so the relative path is whatever # directories are left over. The base_path can't be empty, though, so # if the paths are the same, return '/' if len(pathdirs) == len(rootdirs): return '/' else: return '/' + '/'.join(pathdirs[len(rootdirs):]) def _split_on_slash(self, path): # Split on slashes, but ignore multiple slashes and throw away any # trailing slashes. split = re.split('/*', path) if split[-1] == '': split = split[0:-1] return split RBTools-0.7.11/rbtools/tests.py0000644000232200023220000000063213230242633016671 0ustar debalancedebalancefrom __future__ import unicode_literals class OptionsStub(object): def __init__(self): self.debug = True self.guess_summary = False self.guess_description = False self.tracking = None self.username = None self.password = None self.repository_url = None self.disable_proxy = False self.summary = None self.description = None RBTools-0.7.11/rbtools/helpers/0000755000232200023220000000000013230242636016621 5ustar debalancedebalanceRBTools-0.7.11/rbtools/helpers/hgext.py0000644000232200023220000000112713230242633020310 0ustar debalancedebalancefrom __future__ import unicode_literals # This file provides a Mercurial extension that resets certain # config options to provide consistent output. # We use reposetup because the config is re-read for each repo, after # uisetup() is called. ALLOWED_PARAMS = ['git', 'svn'] def reposetup(ui, repo): for section in ['diff']: for k, v in ui.configitems(section): # Setting value to None is effectively unsetting the value since # None is the stand-in value for "not set." 
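            # For example, a user's '[diff]' settings such as 'nodates' or
            # 'showfunc' would change the generated diff output, so every
            # option other than those in ALLOWED_PARAMS is reset here.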
if k not in ALLOWED_PARAMS: ui.setconfig(section, k, None) RBTools-0.7.11/rbtools/helpers/__init__.py0000644000232200023220000000000013230242633020715 0ustar debalancedebalanceRBTools-0.7.11/MANIFEST.in0000644000232200023220000000022013230242633015220 0ustar debalancedebalancerecursive-include contrib *.py *.txt README* include ez_setup.py include AUTHORS include COPYING include INSTALL include NEWS include README.md RBTools-0.7.11/NEWS0000644000232200023220000000033313230242633014166 0ustar debalancedebalanceRelease Notes ============= Release notes for RBTools can be found in the reviewboard tree under docs/releasenotes/rbtools/. These can also be read online at http://www.reviewboard.org/docs/releasenotes/dev/rbtools/. RBTools-0.7.11/AUTHORS0000644000232200023220000000364513230242633014550 0ustar debalancedebalanceLead Developers: * Christian Hammond * David Trowbridge * Steven MacLeod Contributors: * Adam Collard * Akis Kalligeros * Aleksandrs Zdancuks * Alexander Goncahrov * Alexander Kouznetsov * Alexander Solovets * Alexey Neyman * Ali Kamali * Alois Mahdal * Amber Yust * Amey Prabhu Gaonkar * André Klitzing * Andrew Bettison * Andrew Brandon * Andrew Grigorev * Andrew Stitcher * Anselina Chia * Anthony Cruz * Anthony Hache * Azad Salahli * Barret Rennie * Bartosz Gołek * Ben (full name unknown) * Ben Asher * Ben Hollis * Bogdana Popa * Bradley Baetz * Brandon Martin * Bruce Cran * Bryan Halter * Chris Clark * Chris Eagan * Chris Hunt * Chris Liu * Craig Silverstein * Damian * Dana Lacoste * Daniel Cestari * Daniel LaMotte * Dan Savilonis * Dave Druska * David Gardner * Dick Porter * Edward Lee * Eric Huss * Erik Lattimore * Flavio Castelli * Frank Murphy * Garrett Cooper * Georgy Dyuldin * Gregory Szorc * Griffin Myers * Gyula Faller * H W Tovetjärn * Halvor Lund * Holden Karau * Ian Monroe * Jan Koprowski * Jason Felice * Jeremy Bettis * John Larmie * John Sintal * Joshua Olson * Justin Maillet * Karsten Verelst * Laurent Nicolas * Lepton Wu * Luis Rodriguez * Luke Lu * Luke Robison * Maciej Borzecki * Manu Cupcic * Markku Linnoskivi * Martin Donlon * Matthew Maclean * Matthew Woehlke * Mike Crute * Nils Philippsen * Nathan Dimmock * Nathan Heijermans * Nicolas Dély * Nicolas Morey * Nicolas Morey-Chaisemartin * Noah Kantrowitz * Paul Scott * Peter Ward * Petr Novák * Raghu Kaippully * Ravi Kondamuru * Ryan Oblak * Ryan Shelley * Ryan Swanson * Severin Gehwolf * Shaurya Sengar * Simon Zhang * Stacey Sheldon * Stefan Ring * Stephen Gallagher * Stephen Kiernan * Steven Ihde * Steven Russell * Theo Belaire * Thilo-Alexander Ginkel * Tien Vu * Tom Saeger * Tomasz Moń * Vaclav Slavik * Vadim Zeitlin * Xuanyi Lin RBTools-0.7.11/setup.cfg0000644000232200023220000000045413230242636015317 0ustar debalancedebalance[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [aliases] snapshot = egg_info -Dr nightly = egg_info -d alpha2 = egg_info -Db alpha2 alpha1 = egg_info -Db alpha1 beta2 = egg_info -Db beta2 beta1 = egg_info -Db beta1 rc1 = egg_info -Db rc1 rc2 = egg_info -Db rc2 release = egg_info -Db '' RBTools-0.7.11/PKG-INFO0000644000232200023220000000116713230242636014575 0ustar debalancedebalanceMetadata-Version: 1.1 Name: RBTools Version: 0.7.11 Summary: Command line tools for use with Review Board Home-page: http://www.reviewboard.org/ Author: Christian Hammond Author-email: chipx86@chipx86.com License: MIT Download-URL: http://downloads.reviewboard.org/releases/RBTools/0.7/ Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: 
Console Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Software Development RBTools-0.7.11/INSTALL0000644000232200023220000000033313230242633014520 0ustar debalancedebalanceInstallation ============ To install rbtools, simply run the following as root: $ python setup.py install Or to automatically download and install the latest version, you can run: $ easy_install -U RBTools RBTools-0.7.11/setup.py0000755000232200023220000001156513230242633015215 0ustar debalancedebalance#!/usr/bin/env python # # setup.py -- Installation for rbtools. # # Copyright (C) 2009 Christian Hammond # Copyright (C) 2009 David Trowbridge # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys # Attempt to use currently-installed setuptools first try: from setuptools import setup, find_packages except ImportError: # setuptools was unavailable. Install it then try again from ez_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages from rbtools import get_package_version, is_release, VERSION PACKAGE_NAME = 'RBTools' if is_release(): download_url = "http://downloads.reviewboard.org/releases/%s/%s.%s/" % \ (PACKAGE_NAME, VERSION[0], VERSION[1]) else: download_url = "http://downloads.reviewboard.org/nightlies/" install_requires = [ 'six>=1.8.0', 'tqdm', ] # Make sure this is a version of Python we are compatible with. This should # prevent people on older versions from unintentionally trying to install # the source tarball, and failing. 
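# sys.hexversion packs the interpreter version into a single integer
# (roughly 0xMMmmPP00 for major, minor, micro), so 0x02050000 and
# 0x02060000 below are the Python 2.5 and 2.6 thresholds.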
if sys.hexversion < 0x02050000: sys.stderr.write( 'RBTools %s is incompatible with your version of Python.\n' 'Please install RBTools 0.5.x or upgrade Python to at least ' '2.6.x (preferably 2.7).\n' % get_package_version()) sys.exit(1) elif sys.hexversion < 0x02060000: sys.stderr.write( 'RBTools %s is incompatible with your version of Python.\n' 'Please install RBTools 0.6.x or upgrade Python to at least ' '2.6.x (preferably 2.7).\n' % get_package_version()) sys.exit(1) elif sys.hexversion < 0x02070000: install_requires.append('argparse') rb_commands = [ 'api-get = rbtools.commands.api_get:APIGet', 'attach = rbtools.commands.attach:Attach', 'clear-cache = rbtools.commands.clearcache:ClearCache', 'close = rbtools.commands.close:Close', 'diff = rbtools.commands.diff:Diff', 'install = rbtools.commands.install:Install', 'land = rbtools.commands.land:Land', 'list-repo-types = rbtools.commands.list_repo_types:ListRepoTypes', 'login = rbtools.commands.login:Login', 'logout = rbtools.commands.logout:Logout', 'patch = rbtools.commands.patch:Patch', 'post = rbtools.commands.post:Post', 'publish = rbtools.commands.publish:Publish', 'setup-repo = rbtools.commands.setup_repo:SetupRepo', 'stamp = rbtools.commands.stamp:Stamp', 'status = rbtools.commands.status:Status', ] scm_clients = [ 'bazaar = rbtools.clients.bazaar:BazaarClient', 'clearcase = rbtools.clients.clearcase:ClearCaseClient', 'cvs = rbtools.clients.cvs:CVSClient', 'git = rbtools.clients.git:GitClient', 'mercurial = rbtools.clients.mercurial:MercurialClient', 'perforce = rbtools.clients.perforce:PerforceClient', 'plastic = rbtools.clients.plastic:PlasticClient', 'svn = rbtools.clients.svn:SVNClient', 'tfs = rbtools.clients.tfs:TFSClient', ] setup(name=PACKAGE_NAME, version=get_package_version(), license="MIT", description="Command line tools for use with Review Board", entry_points={ 'console_scripts': [ 'rbt = rbtools.commands.main:main', ], 'rbtools_commands': rb_commands, 'rbtools_scm_clients': scm_clients, }, install_requires=install_requires, dependency_links=[ download_url, ], packages=find_packages(), include_package_data=True, maintainer="Christian Hammond", maintainer_email="chipx86@chipx86.com", url="http://www.reviewboard.org/", download_url=download_url, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Software Development", ]) RBTools-0.7.11/RBTools.egg-info/0000755000232200023220000000000013230242636016511 5ustar debalancedebalanceRBTools-0.7.11/RBTools.egg-info/SOURCES.txt0000644000232200023220000000453313230242636020402 0ustar debalancedebalanceAUTHORS COPYING INSTALL MANIFEST.in NEWS README.md ez_setup.py setup.cfg setup.py RBTools.egg-info/PKG-INFO RBTools.egg-info/SOURCES.txt RBTools.egg-info/dependency_links.txt RBTools.egg-info/entry_points.txt RBTools.egg-info/requires.txt RBTools.egg-info/top_level.txt contrib/P4Tool.txt contrib/README.P4Tool contrib/installers/windows/scripts/get-version.py contrib/internal/release.py rbtools/__init__.py rbtools/tests.py rbtools/api/__init__.py rbtools/api/cache.py rbtools/api/capabilities.py rbtools/api/client.py rbtools/api/decode.py rbtools/api/decorators.py rbtools/api/errors.py rbtools/api/factory.py rbtools/api/request.py rbtools/api/resource.py rbtools/api/tests.py rbtools/api/utils.py rbtools/api/transport/__init__.py rbtools/api/transport/sync.py rbtools/clients/__init__.py 
rbtools/clients/bazaar.py rbtools/clients/clearcase.py rbtools/clients/cvs.py rbtools/clients/errors.py rbtools/clients/git.py rbtools/clients/mercurial.py rbtools/clients/perforce.py rbtools/clients/plastic.py rbtools/clients/svn.py rbtools/clients/tfs.py rbtools/clients/tests/__init__.py rbtools/clients/tests/test_bzr.py rbtools/clients/tests/test_git.py rbtools/clients/tests/test_mercurial.py rbtools/clients/tests/test_p4.py rbtools/clients/tests/test_svn.py rbtools/commands/__init__.py rbtools/commands/api_get.py rbtools/commands/attach.py rbtools/commands/clearcache.py rbtools/commands/close.py rbtools/commands/diff.py rbtools/commands/install.py rbtools/commands/land.py rbtools/commands/list_repo_types.py rbtools/commands/login.py rbtools/commands/logout.py rbtools/commands/main.py rbtools/commands/patch.py rbtools/commands/post.py rbtools/commands/publish.py rbtools/commands/setup_repo.py rbtools/commands/stamp.py rbtools/commands/status.py rbtools/commands/tests/__init__.py rbtools/commands/tests/test_post.py rbtools/helpers/__init__.py rbtools/helpers/hgext.py rbtools/hooks/__init__.py rbtools/hooks/common.py rbtools/hooks/git.py rbtools/testing/__init__.py rbtools/testing/testcase.py rbtools/utils/__init__.py rbtools/utils/aliases.py rbtools/utils/appdirs.py rbtools/utils/checks.py rbtools/utils/commands.py rbtools/utils/console.py rbtools/utils/diffs.py rbtools/utils/filesystem.py rbtools/utils/match_score.py rbtools/utils/process.py rbtools/utils/repository.py rbtools/utils/review_request.py rbtools/utils/testbase.py rbtools/utils/tests.py rbtools/utils/users.pyRBTools-0.7.11/RBTools.egg-info/dependency_links.txt0000644000232200023220000000006713230242636022573 0ustar debalancedebalancehttp://downloads.reviewboard.org/releases/RBTools/0.7/ RBTools-0.7.11/RBTools.egg-info/entry_points.txt0000644000232200023220000000220613230242636022007 0ustar debalancedebalance[console_scripts] rbt = rbtools.commands.main:main [rbtools_commands] api-get = rbtools.commands.api_get:APIGet attach = rbtools.commands.attach:Attach clear-cache = rbtools.commands.clearcache:ClearCache close = rbtools.commands.close:Close diff = rbtools.commands.diff:Diff install = rbtools.commands.install:Install land = rbtools.commands.land:Land list-repo-types = rbtools.commands.list_repo_types:ListRepoTypes login = rbtools.commands.login:Login logout = rbtools.commands.logout:Logout patch = rbtools.commands.patch:Patch post = rbtools.commands.post:Post publish = rbtools.commands.publish:Publish setup-repo = rbtools.commands.setup_repo:SetupRepo stamp = rbtools.commands.stamp:Stamp status = rbtools.commands.status:Status [rbtools_scm_clients] bazaar = rbtools.clients.bazaar:BazaarClient clearcase = rbtools.clients.clearcase:ClearCaseClient cvs = rbtools.clients.cvs:CVSClient git = rbtools.clients.git:GitClient mercurial = rbtools.clients.mercurial:MercurialClient perforce = rbtools.clients.perforce:PerforceClient plastic = rbtools.clients.plastic:PlasticClient svn = rbtools.clients.svn:SVNClient tfs = rbtools.clients.tfs:TFSClient RBTools-0.7.11/RBTools.egg-info/requires.txt0000644000232200023220000000002013230242636021101 0ustar debalancedebalancesix>=1.8.0 tqdm RBTools-0.7.11/RBTools.egg-info/PKG-INFO0000644000232200023220000000116713230242636017613 0ustar debalancedebalanceMetadata-Version: 1.1 Name: RBTools Version: 0.7.11 Summary: Command line tools for use with Review Board Home-page: http://www.reviewboard.org/ Author: Christian Hammond Author-email: chipx86@chipx86.com License: MIT Download-URL: 
http://downloads.reviewboard.org/releases/RBTools/0.7/ Description: UNKNOWN Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Topic :: Software Development RBTools-0.7.11/RBTools.egg-info/top_level.txt0000644000232200023220000000001013230242636021232 0ustar debalancedebalancerbtools