whatthepatch-1.0.7/.envrc
use nix
whatthepatch-1.0.7/.github/workflows/build.yml
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: Build
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }} for ${{ matrix.os }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Display Python version
run: python -c "import sys; print(sys.version)"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
pip install flake8
pip install pytest
- name: Build package
run: |
python -m build
- name: Lint
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- uses: ConorMacBride/install-package@v1
with:
brew: gpatch
apt: patch
choco: patch
- name: Test
run: |
pytest --doctest-modules --doctest-glob='*.rst' --junitxml=junit/test-results-${{ matrix.python-version }}-${{ matrix.os }}.xml
if: ${{ matrix.os != 'macos-latest' }}
- name: Test macos-latest
run: |
export PATH="/opt/homebrew/opt/gpatch/libexec/gnubin:$PATH"
pytest --doctest-modules --doctest-glob='*.rst' --junitxml=junit/test-results-${{ matrix.python-version }}-${{ matrix.os }}.xml
if: ${{ matrix.os == 'macos-latest' }}
- name: Upload pytest test results
uses: actions/upload-artifact@v4
with:
name: pytest-results-${{ matrix.python-version }}-${{ matrix.os }}
path: junit/test-results-${{ matrix.python-version }}-${{ matrix.os }}.xml
# Use always() to always run this step to publish test results when there are test failures
if: ${{ always() }}
whatthepatch-1.0.7/.github/workflows/publish.yml
# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-pypi
name: Upload Python Package
on:
release:
types: [published]
permissions:
contents: read
jobs:
release-build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.x"
- name: Build release distributions
run: |
python -m pip install build
python -m build
- name: Upload distributions
uses: actions/upload-artifact@v4
with:
name: release-dists
path: dist/
pypi-publish:
runs-on: ubuntu-latest
needs:
- release-build
permissions:
# IMPORTANT: this permission is mandatory for trusted publishing
id-token: write
# Dedicated environments with protections for publishing are strongly recommended.
environment:
name: pypi
# OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
url: https://pypi.org/p/whatthepatch
steps:
- name: Retrieve release distributions
uses: actions/download-artifact@v4
with:
name: release-dists
path: dist/
- name: Publish release distributions to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
whatthepatch-1.0.7/.gitignore
.direnv
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
junit/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
whatthepatch-1.0.7/.vscode/settings.json
{
"nixEnvSelector.nixFile": "${workspaceRoot}/shell.nix",
"python.testing.pytestArgs": ["tests"],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.formatting.provider": "black"
}
whatthepatch-1.0.7/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[INSERT CONTACT METHOD].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
whatthepatch-1.0.7/HISTORY.md
# next
Nothing yet :)
# 1.0.7
- PR #62 fix: incorrect regular expression matching diffcmd (Thanks, @jingfelix)
- Add support for Python 3.13
- Drop support for Python 3.8 and earlier
# 1.0.6
- PR #60 Improve huge_patch test (Thanks, @arkamar)
- Add support for Python 3.12
- Drop support for Python 3.7 and earlier
# 1.0.5
- PR #57 bugfix: min line in binary diff (Thanks, @babenek and @abbradar)
# 1.0.4
- PR #53 git binary patch support (Thanks, @babenek)
- PR #51 Remove redundant wheel dep from pyproject.toml (Thanks, @mgorny)
- Add basic nix release support
# 1.0.3
- PR #46 Code optimization for unified diff parsing (Thanks, @babenek)
- Package using build module and pyproject.toml
- Add support for Python 3.11
- Drop support for Python 3.6 and earlier
# 1.0.2
- Add support for Python 3.9
- PR #42 Fix unified diff parse error (Thanks, @kkpattern)
# 1.0.1
- PR #37 Replace nose with pytest (Thanks, @MeggyCal)
- PR #39 Fix bug where context diffs would not parse (Thanks, @FallenSky2077)
# 1.0.0
- Issue #26 fix where hardcoded "/tmp" reference was being used
- Support up to Python 3.8
- Drop support for Python 2, 3.4
Dev-only:
- Bump Code of Conduct to 2.0
- Setup Github Actions for package publishing
- Setup Github Actions for build and testing
- Move off Travis and Tox in favor of Github Actions
# 0.0.6
- PR #13 Support for reverse patching (Thanks, @graingert)
- This is a breaking change that converted the parsed tuples into namedtuples
and added the hunk number to that tuple
- PR #20 Support up to Python 3.7, drop support for 3.3 (Thanks, @graingert)
- Issue #18 fix for empty file adds in git
# 0.0.5
- PR #6 Added better support for binary files. (Thanks, @ramusus)
- PR #3 Added support for git index revision ids that have more than 7
characters (Thanks, @jopereria)
# 0.0.4
- PR #2 Bug fix for one-liner diffs (Thanks, @thoward)
- Issue #1 fix where some old real test cases were left failing
- Added a Code of Conduct
- Added support for Python 3.5
# 0.0.3
- Better matching for almost all patch headers
- Support patches that have entire hunks removed
- Support git patches that are missing index header file modes
- Moved to MIT license
- Officially adopt Python 3
# 0.0.2
- Initial support to apply parsed patches
- Support diffs that do not have headers
# 0.0.1
- The very first release that included parsing support for patches in unified
diff format, context diff format, ed diff format, git, bazaar, subversion, and
cvs.
whatthepatch-1.0.7/LICENSE
The MIT License (MIT)
Copyright (c) 2012 -- 2020 Christopher S. Corley
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
whatthepatch-1.0.7/MANIFEST.in
include README.rst LICENSE
recursive-include tests *.py
recursive-include tests/casefiles *
whatthepatch-1.0.7/Pipfile
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
pytest = "*"
flake8 = "*"
black = "*"
build = "*"
[pipenv]
allow_prereleases = true
whatthepatch-1.0.7/Pipfile.lock
{
"_meta": {
"hash": {
"sha256": "52e1786b721fabd51a0e37861f0f6b6f2631e305b8eea2158f0cb52a8fa84d41"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
}
]
},
"default": {},
"develop": {
"attrs": {
"hashes": [
"sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6",
"sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"
],
"markers": "python_version >= '3.5'",
"version": "==22.1.0"
},
"black": {
"hashes": [
"sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f",
"sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93",
"sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11",
"sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0",
"sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9",
"sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5",
"sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213",
"sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d",
"sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7",
"sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837",
"sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f",
"sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395",
"sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995",
"sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f",
"sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597",
"sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959",
"sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5",
"sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb",
"sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4",
"sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7",
"sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd",
"sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"
],
"index": "pypi",
"markers": "python_version >= '3.8'",
"version": "==24.3.0"
},
"build": {
"hashes": [
"sha256:1a07724e891cbd898923145eb7752ee7653674c511378eb9c7691aab1612bc3c",
"sha256:38a7a2b7a0bdc61a42a0a67509d88c71ecfc37b393baba770fae34e20929ff69"
],
"index": "pypi",
"version": "==0.9.0"
},
"click": {
"hashes": [
"sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
"sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
],
"markers": "python_version >= '3.7'",
"version": "==8.1.7"
},
"exceptiongroup": {
"hashes": [
"sha256:4d6c0aa6dd825810941c792f53d7b8d71da26f5e5f84f20f9508e8f2d33b140a",
"sha256:73866f7f842ede6cb1daa42c4af078e2035e5f7607f0e2c762cc51bb31bbe7b2"
],
"markers": "python_version < '3.11'",
"version": "==1.0.1"
},
"flake8": {
"hashes": [
"sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db",
"sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"
],
"index": "pypi",
"version": "==5.0.4"
},
"iniconfig": {
"hashes": [
"sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
"sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
],
"version": "==1.1.1"
},
"mccabe": {
"hashes": [
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
"sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
],
"markers": "python_version >= '3.6'",
"version": "==0.7.0"
},
"mypy-extensions": {
"hashes": [
"sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
"sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.0"
},
"packaging": {
"hashes": [
"sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
"sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"
],
"markers": "python_version >= '3.7'",
"version": "==24.0"
},
"pathspec": {
"hashes": [
"sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
"sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
],
"markers": "python_version >= '3.8'",
"version": "==0.12.1"
},
"pep517": {
"hashes": [
"sha256:4ba4446d80aed5b5eac6509ade100bff3e7943a8489de249654a5ae9b33ee35b",
"sha256:ae69927c5c172be1add9203726d4b84cf3ebad1edcd5f71fcdc746e66e829f59"
],
"markers": "python_version >= '3.6'",
"version": "==0.13.0"
},
"platformdirs": {
"hashes": [
"sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
"sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
],
"markers": "python_version >= '3.8'",
"version": "==4.2.0"
},
"pluggy": {
"hashes": [
"sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
"sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
],
"markers": "python_version >= '3.6'",
"version": "==1.0.0"
},
"pycodestyle": {
"hashes": [
"sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785",
"sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"
],
"markers": "python_version >= '3.6'",
"version": "==2.9.1"
},
"pyflakes": {
"hashes": [
"sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2",
"sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"
],
"markers": "python_version >= '3.6'",
"version": "==2.5.0"
},
"pyparsing": {
"hashes": [
"sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb",
"sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"
],
"markers": "python_full_version >= '3.6.8'",
"version": "==3.0.9"
},
"pytest": {
"hashes": [
"sha256:892f933d339f068883b6fd5a459f03d85bfcb355e4981e146d2c7616c21fef71",
"sha256:c4014eb40e10f11f355ad4e3c2fb2c6c6d1919c73f3b5a433de4708202cade59"
],
"index": "pypi",
"version": "==7.2.0"
},
"tomli": {
"hashes": [
"sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
"sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
],
"markers": "python_full_version < '3.11.0a7'",
"version": "==2.0.1"
}
}
}
whatthepatch-1.0.7/README.rst
What The Patch!?
================
What The Patch!? is a library for both parsing and applying patch files.
Status
------
.. image:: https://github.com/cscorley/whatthepatch/workflows/Build/badge.svg
This has been released as 1.0, but has never had much active development. The
functions are stable and have been reliable for several years, even if they
are not ideally implemented. Pull requests will always be considered, merged,
and released; however, issues may not ever be fixed by the maintainer.
Contribute
^^^^^^^^^^
#. Fork this repository
#. Create a new branch to work on
#. Commit your tests and/or changes
#. Push and create a pull request here!
Features
--------
- Parsing of almost all ``diff`` formats (except forward ed):
- normal (default, --normal)
- copied context (-c, --context)
- unified context (-u, --unified)
- ed script (-e, --ed)
- rcs ed script (-n, --rcs)
- Parsing of several SCM patches:
- CVS
- SVN
- Git
Installation
------------
This library is available on `PyPI <https://pypi.org/project/whatthepatch/>`_
and can be installed via pip:
.. code-block:: bash
$ pip install whatthepatch
Usage
=====
Let us say we have a patch file containing some changes, aptly named
'somechanges.patch':
.. code-block:: diff
--- lao 2012-12-26 23:16:54.000000000 -0600
+++ tzu 2012-12-26 23:16:50.000000000 -0600
@@ -1,7 +1,6 @@
-The Way that can be told of is not the eternal Way;
-The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
-The Named is the mother of all things.
+The named is the mother of all things.
+
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
@@ -9,3 +8,6 @@
The two are the same,
But after they are produced,
they have different names.
+They both may be called deep and profound.
+Deeper and more profound,
+The door of all subtleties!
Parsing
-------
Here is how we would use What The Patch!? in Python to get the changeset for
each diff in the patch:
.. code-block:: python
>>> import whatthepatch
>>> import pprint
>>> with open('tests/casefiles/diff-unified.diff') as f:
... text = f.read()
...
>>> for diff in whatthepatch.parse_patch(text):
... print(diff) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
...
diff(header=header(index_path=None,
old_path='lao',
old_version='2013-01-05 16:56:19.000000000 -0600',
new_path='tzu',
new_version='2013-01-05 16:56:35.000000000 -0600'),
changes=[Change(old=1, new=None, line='The Way that can be told of is not the eternal Way;', hunk=1),
Change(old=2, new=None, line='The name that can be named is not the eternal name.', hunk=1),
Change(old=3, new=1, line='The Nameless is the origin of Heaven and Earth;', hunk=1),
Change(old=4, new=None, line='The Named is the mother of all things.', hunk=1),
Change(old=None, new=2, line='The named is the mother of all things.', hunk=1),
Change(old=None, new=3, line='', hunk=1),
Change(old=5, new=4, line='Therefore let there always be non-being,', hunk=1),
Change(old=6, new=5, line=' so we may see their subtlety,', hunk=1),
Change(old=7, new=6, line='And let there always be being,', hunk=1),
Change(old=9, new=8, line='The two are the same,', hunk=2),
Change(old=10, new=9, line='But after they are produced,', hunk=2),
Change(old=11, new=10, line=' they have different names.', hunk=2),
Change(old=None, new=11, line='They both may be called deep and profound.', hunk=2),
Change(old=None, new=12, line='Deeper and more profound,', hunk=2),
Change(old=None, new=13, line='The door of all subtleties!', hunk=2)],
text='...')
The changes are listed in the order they appear in the patch, but instead of
the +/- syntax of the patch, each change is a namedtuple of two line numbers,
the text of the line, and the hunk it belongs to.
What the two numbers indicate is as follows:
#. ``( old=1, new=None, ... )`` indicates line 1 of the file lao was **removed**.
#. ``( old=None, new=2, ... )`` indicates line 2 of the file tzu was **inserted**.
#. ``( old=5, new=4, ... )`` indicates that line 5 of lao and line 4 of tzu are **equal**.
Please note that not all patch formats provide the actual lines modified, so some
results will have the text portion of the tuple set to ``None``.
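As a quick illustration, here is a small helper (a sketch, not part of the
whatthepatch API) that classifies each change using these two numbers:

.. code-block:: python

    import whatthepatch

    def describe(change):
        # old is None -> the line exists only in the new file (an insertion)
        if change.old is None:
            return "inserted as new line {}".format(change.new)
        # new is None -> the line exists only in the old file (a removal)
        if change.new is None:
            return "removed from old line {}".format(change.old)
        # both set -> a context line present in both files
        return "kept (old line {}, new line {})".format(change.old, change.new)

    with open('tests/casefiles/diff-unified.diff') as f:
        for diff in whatthepatch.parse_patch(f.read()):
            for change in diff.changes or []:
                print(describe(change), repr(change.line))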
Applying
--------
To apply a diff to some lines of text, first read the patch and parse it.
.. code-block:: python
>>> import whatthepatch
>>> with open('tests/casefiles/diff-default.diff') as f:
... text = f.read()
...
>>> with open('tests/casefiles/lao') as f:
... lao = f.read()
...
>>> diff = [x for x in whatthepatch.parse_patch(text)]
>>> diff = diff[0]
>>> tzu = whatthepatch.apply_diff(diff, lao)
>>> tzu # doctest: +NORMALIZE_WHITESPACE
['The Nameless is the origin of Heaven and Earth;',
'The named is the mother of all things.',
'',
'Therefore let there always be non-being,',
' so we may see their subtlety,',
'And let there always be being,',
' so we may see their outcome.',
'The two are the same,',
'But after they are produced,',
' they have different names.',
'They both may be called deep and profound.',
'Deeper and more profound,',
'The door of all subtleties!']
If ``apply_diff`` does not satisfy your needs and you are on a system that has
``patch`` in ``PATH``, you can also call ``apply_diff(diff, lao,
use_patch=True)``. The default is ``False``, and the ``patch`` program is not
required to apply diffs to text.
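As a sketch of that path (assuming the ``patch`` utility is actually
installed), the application above can be repeated through the external
program. Note that in this mode ``apply_diff`` returns a tuple of the patched
lines and any rejected hunk lines:

.. code-block:: python

    import whatthepatch

    with open('tests/casefiles/diff-default.diff') as f:
        diff = next(whatthepatch.parse_patch(f.read()))

    with open('tests/casefiles/lao') as f:
        lao = f.read()

    # Requires the external `patch` program; returns (new_lines, rejected_lines),
    # where rejected_lines is None if every hunk applied cleanly.
    new_lines, rejected = whatthepatch.apply_diff(diff, lao, use_patch=True)
    print(new_lines)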
whatthepatch-1.0.7/pyproject.toml
[project]
name = "whatthepatch"
version = "1.0.7"
maintainers = [{ name = "Christopher S. Corley", email = "cscorley@gmail.com" }]
requires-python = ">=3.9"
readme = "README.rst"
description = "A patch parsing and application library."
keywords = ["patch", "diff", "parser"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Version Control",
"Topic :: Software Development",
"Topic :: Text Processing",
]
[project.urls]
"Homepage" = "https://github.com/cscorley/whatthepatch"
"Bug Tracker" = "https://github.com/cscorley/whatthepatch/issues"
[build-system]
requires = ["setuptools>=65.0.0"]
build-backend = "setuptools.build_meta"
whatthepatch-1.0.7/release.nix
{ lib, python3Packages, setuptools }:
with python3Packages;
buildPythonPackage {
pname = "whatthepatch";
version = "1.0.7";
format = "pyproject";
src = ./.;
checkInputs = [ pytestCheckHook ];
pythonImportsCheck = [ "whatthepatch" ];
nativeBuildInputs = [ setuptools ];
meta = with lib; {
description = "Python library for both parsing and applying patch files";
homepage = "https://github.com/cscorley/whatthepatch";
license = licenses.mit;
maintainers = with maintainers; [ cscorley ];
};
}
whatthepatch-1.0.7/shell.nix
{ pkgs ? import <nixpkgs> { } }:
let
whatthepatch = p: p.callPackage ./release.nix { };
pythonEnv = pkgs.python3.withPackages
(p: [ p.pytest p.flake8 p.black p.build p.docutils (whatthepatch p) ]);
in pkgs.mkShell { packages = [ pythonEnv ]; }
whatthepatch-1.0.7/src/whatthepatch/__init__.py
# -*- coding: utf-8 -*-
from .patch import parse_patch
from .apply import apply_diff
__all__ = ["parse_patch", "apply_diff"]
whatthepatch-1.0.7/src/whatthepatch/apply.py
# -*- coding: utf-8 -*-
import os.path
import subprocess
import tempfile
from . import patch
from .exceptions import HunkApplyException, SubprocessException
from .snippets import remove, which
def apply_patch(diffs):
"""Not ready for use yet"""
pass
if isinstance(diffs, patch.diff):
diffs = [diffs]
for diff in diffs:
if diff.header.old_path == "/dev/null":
text = []
else:
with open(diff.header.old_path) as f:
text = f.read()
new_text = apply_diff(diff, text)
with open(diff.header.new_path, "w") as f:
f.write(new_text)
def _apply_diff_with_subprocess(diff, lines, reverse=False):
# call out to patch program
patchexec = which("patch")
if not patchexec:
raise SubprocessException("cannot find patch program", code=-1)
tempdir = tempfile.gettempdir()
filepath = os.path.join(tempdir, "wtp-" + str(hash(diff.header)))
oldfilepath = filepath + ".old"
newfilepath = filepath + ".new"
rejfilepath = filepath + ".rej"
patchfilepath = filepath + ".patch"
with open(oldfilepath, "w") as f:
f.write("\n".join(lines) + "\n")
with open(patchfilepath, "w") as f:
f.write(diff.text)
args = [
patchexec,
"--reverse" if reverse else "--forward",
"--quiet",
"-o",
newfilepath,
"-i",
patchfilepath,
"-r",
rejfilepath,
oldfilepath,
]
ret = subprocess.call(args)
with open(newfilepath) as f:
lines = f.read().splitlines()
try:
with open(rejfilepath) as f:
rejlines = f.read().splitlines()
except IOError:
rejlines = None
remove(oldfilepath)
remove(newfilepath)
remove(rejfilepath)
remove(patchfilepath)
# do this last to ensure files get cleaned up
if ret != 0:
raise SubprocessException("patch program failed", code=ret)
return lines, rejlines
def _reverse(changes):
def _reverse_change(c):
return c._replace(old=c.new, new=c.old)
return [_reverse_change(c) for c in changes]
def apply_diff(diff, text, reverse=False, use_patch=False):
try:
lines = text.splitlines()
except AttributeError:
lines = list(text)
if use_patch:
return _apply_diff_with_subprocess(diff, lines, reverse)
n_lines = len(lines)
changes = _reverse(diff.changes) if reverse else diff.changes
# check that the source text matches the context of the diff
for old, new, line, hunk in changes:
# might have to check for line is None here for ed scripts
if old is not None and line is not None:
if old > n_lines:
raise HunkApplyException(
'context line {n}, "{line}" does not exist in source'.format(
n=old, line=line
),
hunk=hunk,
)
if lines[old - 1] != line:
raise HunkApplyException(
'context line {n}, "{line}" does not match "{sl}"'.format(
n=old, line=line, sl=lines[old - 1]
),
hunk=hunk,
)
# for calculating the old line
r = 0
i = 0
for old, new, line, hunk in changes:
if old is not None and new is None:
del lines[old - 1 - r + i]
r += 1
elif old is None and new is not None:
lines.insert(new - 1, line)
i += 1
elif old is not None and new is not None:
# Sometimes, people remove hunks from patches, making these
# numbers completely unreliable. Because they're jerks.
pass
return lines
whatthepatch-1.0.7/src/whatthepatch/exceptions.py
class WhatThePatchException(Exception):
pass
class HunkException(WhatThePatchException):
def __init__(self, msg, hunk=None):
self.hunk = hunk
if hunk is not None:
super(HunkException, self).__init__(
"{msg}, in hunk #{n}".format(msg=msg, n=hunk)
)
else:
super(HunkException, self).__init__(msg)
class ApplyException(WhatThePatchException):
pass
class SubprocessException(ApplyException):
def __init__(self, msg, code):
super(SubprocessException, self).__init__(msg)
self.code = code
class HunkApplyException(HunkException, ApplyException, ValueError):
pass
class ParseException(HunkException, ValueError):
pass
whatthepatch-1.0.7/src/whatthepatch/patch.py
# -*- coding: utf-8 -*-
import base64
import re
import zlib
from collections import namedtuple
from . import exceptions
from .snippets import findall_regex, split_by_regex
header = namedtuple(
"header",
"index_path old_path old_version new_path new_version",
)
diffobj = namedtuple("diff", "header changes text")
Change = namedtuple("Change", "old new line hunk")
file_timestamp_str = "(.+?)(?:\t|:| +)(.*)"
# .+? was previously [^:\t\n\r\f\v]+
# general diff regex
diffcmd_header = re.compile("^diff(?: .+)? (.+) (.+)$")
unified_header_index = re.compile("^Index: (.+)$")
unified_header_old_line = re.compile(r"^--- " + file_timestamp_str + "$")
unified_header_new_line = re.compile(r"^\+\+\+ " + file_timestamp_str + "$")
unified_hunk_start = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$")
unified_change = re.compile("^([-+ ])(.*)$")
context_header_old_line = re.compile(r"^\*\*\* " + file_timestamp_str + "$")
context_header_new_line = re.compile("^--- " + file_timestamp_str + "$")
context_hunk_start = re.compile(r"^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$")
context_hunk_old = re.compile(r"^\*\*\* (\d+),?(\d*) \*\*\*\*$")
context_hunk_new = re.compile(r"^--- (\d+),?(\d*) ----$")
context_change = re.compile("^([-+ !]) (.*)$")
ed_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])$")
ed_hunk_end = re.compile("^.$")
# much like forward ed, but no 'c' type
rcs_ed_hunk_start = re.compile(r"^([ad])(\d+) ?(\d*)$")
default_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])(\d+),?(\d*)$")
default_hunk_mid = re.compile("^---$")
default_change = re.compile("^([><]) (.*)$")
# Headers
# git has a special index header and no end part
git_diffcmd_header = re.compile("^diff --git a/(.+) b/(.+)$")
git_header_index = re.compile(r"^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$")
git_header_old_line = re.compile("^--- (.+)$")
git_header_new_line = re.compile(r"^\+\+\+ (.+)$")
git_header_file_mode = re.compile(r"^(new|deleted) file mode \d{6}$")
git_header_binary_file = re.compile("^Binary files (.+) and (.+) differ")
git_binary_patch_start = re.compile(r"^GIT binary patch$")
git_binary_literal_start = re.compile(r"^literal (\d+)$")
git_binary_delta_start = re.compile(r"^delta (\d+)$")
base85string = re.compile(r"^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$")
bzr_header_index = re.compile("=== (.+)")
bzr_header_old_line = unified_header_old_line
bzr_header_new_line = unified_header_new_line
svn_header_index = unified_header_index
svn_header_timestamp_version = re.compile(r"\((?:working copy|revision (\d+))\)")
svn_header_timestamp = re.compile(r".*(\(.*\))$")
cvs_header_index = unified_header_index
cvs_header_rcs = re.compile(r"^RCS file: (.+)(?:,\w{1}$|$)")
cvs_header_timestamp = re.compile(r"(.+)\t([\d.]+)")
cvs_header_timestamp_colon = re.compile(r":([\d.]+)\t(.+)")
old_cvs_diffcmd_header = re.compile("^diff(?: .+)? (.+):(.*) (.+):(.*)$")
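# Entry point for parsing: split the input into per-file chunks using the first
# header regex that yields more than one chunk, then yield a diff namedtuple
# (header, changes, text) for every chunk that produced a header or changes.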
def parse_patch(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
# maybe use this to nuke all of those line endings?
# lines = [x.splitlines()[0] for x in lines]
lines = [x if len(x) == 0 else x.splitlines()[0] for x in lines]
check = [
unified_header_index,
diffcmd_header,
cvs_header_rcs,
git_header_index,
context_header_old_line,
unified_header_old_line,
]
diffs = []
for c in check:
diffs = split_by_regex(lines, c)
if len(diffs) > 1:
break
for diff in diffs:
difftext = "\n".join(diff) + "\n"
h = parse_header(diff)
d = parse_diff(diff)
if h or d:
yield diffobj(header=h, changes=d, text=difftext)
def parse_header(text):
h = parse_scm_header(text)
if h is None:
h = parse_diff_header(text)
return h
def parse_scm_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
check = [
(git_header_index, parse_git_header),
(old_cvs_diffcmd_header, parse_cvs_header),
(cvs_header_rcs, parse_cvs_header),
(svn_header_index, parse_svn_header),
]
for regex, parser in check:
diffs = findall_regex(lines, regex)
if len(diffs) > 0:
git_opt = findall_regex(lines, git_diffcmd_header)
if len(git_opt) > 0:
res = parser(lines)
if res:
old_path = res.old_path
new_path = res.new_path
if old_path.startswith("a/"):
old_path = old_path[2:]
if new_path.startswith("b/"):
new_path = new_path[2:]
return header(
index_path=res.index_path,
old_path=old_path,
old_version=res.old_version,
new_path=new_path,
new_version=res.new_version,
)
else:
res = parser(lines)
return res
return None
def parse_diff_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
check = [
(unified_header_new_line, parse_unified_header),
(context_header_old_line, parse_context_header),
(diffcmd_header, parse_diffcmd_header),
# TODO:
# git_header can handle version-less unified headers, but
# will trim a/ and b/ in the paths if they exist...
(git_header_new_line, parse_git_header),
]
for regex, parser in check:
diffs = findall_regex(lines, regex)
if len(diffs) > 0:
return parser(lines)
return None # no header?
def parse_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
check = [
(unified_hunk_start, parse_unified_diff),
(context_hunk_start, parse_context_diff),
(default_hunk_start, parse_default_diff),
(ed_hunk_start, parse_ed_diff),
(rcs_ed_hunk_start, parse_rcs_ed_diff),
(git_binary_patch_start, parse_git_binary_diff),
]
for hunk, parser in check:
diffs = findall_regex(lines, hunk)
if len(diffs) > 0:
return parser(lines)
return None
def parse_git_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
old_version = None
new_version = None
old_path = None
new_path = None
cmd_old_path = None
cmd_new_path = None
for line in lines:
hm = git_diffcmd_header.match(line)
if hm:
cmd_old_path = hm.group(1)
cmd_new_path = hm.group(2)
continue
g = git_header_index.match(line)
if g:
old_version = g.group(1)
new_version = g.group(2)
continue
# git always has it's own special headers
o = git_header_old_line.match(line)
if o:
old_path = o.group(1)
n = git_header_new_line.match(line)
if n:
new_path = n.group(1)
binary = git_header_binary_file.match(line)
if binary:
old_path = binary.group(1)
new_path = binary.group(2)
if old_path and new_path:
if old_path.startswith("a/"):
old_path = old_path[2:]
if new_path.startswith("b/"):
new_path = new_path[2:]
return header(
index_path=None,
old_path=old_path,
old_version=old_version,
new_path=new_path,
new_version=new_version,
)
# if we go through all of the text without finding our normal info,
# use the cmd if available
if cmd_old_path and cmd_new_path and old_version and new_version:
if cmd_old_path.startswith("a/"):
cmd_old_path = cmd_old_path[2:]
if cmd_new_path.startswith("b/"):
cmd_new_path = cmd_new_path[2:]
return header(
index_path=None,
# wow, I kind of hate this:
# assume /dev/null if the versions are zeroed out
old_path="/dev/null" if old_version == "0000000" else cmd_old_path,
old_version=old_version,
new_path="/dev/null" if new_version == "0000000" else cmd_new_path,
new_version=new_version,
)
return None
def parse_svn_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
headers = findall_regex(lines, svn_header_index)
if len(headers) == 0:
return None
while len(lines) > 0:
i = svn_header_index.match(lines[0])
del lines[0]
if not i:
continue
diff_header = parse_diff_header(lines)
if not diff_header:
return header(
index_path=i.group(1),
old_path=i.group(1),
old_version=None,
new_path=i.group(1),
new_version=None,
)
opath = diff_header.old_path
over = diff_header.old_version
if over:
oend = svn_header_timestamp_version.match(over)
if oend and oend.group(1):
over = int(oend.group(1))
elif opath:
ts = svn_header_timestamp.match(opath)
if ts:
opath = opath[: -len(ts.group(1))]
oend = svn_header_timestamp_version.match(ts.group(1))
if oend and oend.group(1):
over = int(oend.group(1))
npath = diff_header.new_path
nver = diff_header.new_version
if nver:
nend = svn_header_timestamp_version.match(diff_header.new_version)
if nend and nend.group(1):
nver = int(nend.group(1))
elif npath:
ts = svn_header_timestamp.match(npath)
if ts:
npath = npath[: -len(ts.group(1))]
nend = svn_header_timestamp_version.match(ts.group(1))
if nend and nend.group(1):
nver = int(nend.group(1))
if type(over) != int:
over = None
if type(nver) != int:
nver = None
return header(
index_path=i.group(1),
old_path=opath,
old_version=over,
new_path=npath,
new_version=nver,
)
return None
def parse_cvs_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
headers = findall_regex(lines, cvs_header_rcs)
headers_old = findall_regex(lines, old_cvs_diffcmd_header)
if headers:
# parse rcs style headers
while len(lines) > 0:
i = cvs_header_index.match(lines[0])
del lines[0]
if not i:
continue
diff_header = parse_diff_header(lines)
if diff_header:
over = diff_header.old_version
if over:
oend = cvs_header_timestamp.match(over)
oend_c = cvs_header_timestamp_colon.match(over)
if oend:
over = oend.group(2)
elif oend_c:
over = oend_c.group(1)
nver = diff_header.new_version
if nver:
nend = cvs_header_timestamp.match(nver)
nend_c = cvs_header_timestamp_colon.match(nver)
if nend:
nver = nend.group(2)
elif nend_c:
nver = nend_c.group(1)
return header(
index_path=i.group(1),
old_path=diff_header.old_path,
old_version=over,
new_path=diff_header.new_path,
new_version=nver,
)
return header(
index_path=i.group(1),
old_path=i.group(1),
old_version=None,
new_path=i.group(1),
new_version=None,
)
elif headers_old:
# parse old style headers
while len(lines) > 0:
i = cvs_header_index.match(lines[0])
del lines[0]
if not i:
continue
d = old_cvs_diffcmd_header.match(lines[0])
if not d:
return header(
index_path=i.group(1),
old_path=i.group(1),
old_version=None,
new_path=i.group(1),
new_version=None,
)
# will get rid of the useless stuff for us
parse_diff_header(lines)
over = d.group(2) if d.group(2) else None
nver = d.group(4) if d.group(4) else None
return header(
index_path=i.group(1),
old_path=d.group(1),
old_version=over,
new_path=d.group(3),
new_version=nver,
)
return None
def parse_diffcmd_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
headers = findall_regex(lines, diffcmd_header)
if len(headers) == 0:
return None
while len(lines) > 0:
d = diffcmd_header.match(lines[0])
del lines[0]
if d:
return header(
index_path=None,
old_path=d.group(1),
old_version=None,
new_path=d.group(2),
new_version=None,
)
return None
def parse_unified_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
headers = findall_regex(lines, unified_header_new_line)
if len(headers) == 0:
return None
while len(lines) > 1:
o = unified_header_old_line.match(lines[0])
del lines[0]
if o:
n = unified_header_new_line.match(lines[0])
del lines[0]
if n:
over = o.group(2)
if len(over) == 0:
over = None
nver = n.group(2)
if len(nver) == 0:
nver = None
return header(
index_path=None,
old_path=o.group(1),
old_version=over,
new_path=n.group(1),
new_version=nver,
)
return None
def parse_context_header(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
headers = findall_regex(lines, context_header_old_line)
if len(headers) == 0:
return None
while len(lines) > 1:
o = context_header_old_line.match(lines[0])
del lines[0]
if o:
n = context_header_new_line.match(lines[0])
del lines[0]
if n:
over = o.group(2)
if len(over) == 0:
over = None
nver = n.group(2)
if len(nver) == 0:
nver = None
return header(
index_path=None,
old_path=o.group(1),
old_version=over,
new_path=n.group(1),
new_version=nver,
)
return None
def parse_default_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
old = 0
new = 0
old_len = 0
new_len = 0
r = 0
i = 0
changes = list()
hunks = split_by_regex(lines, default_hunk_start)
for hunk_n, hunk in enumerate(hunks):
if not len(hunk):
continue
r = 0
i = 0
while len(hunk) > 0:
h = default_hunk_start.match(hunk[0])
c = default_change.match(hunk[0])
del hunk[0]
if h:
old = int(h.group(1))
if len(h.group(2)) > 0:
old_len = int(h.group(2)) - old + 1
else:
old_len = 0
new = int(h.group(4))
if len(h.group(5)) > 0:
new_len = int(h.group(5)) - new + 1
else:
new_len = 0
elif c:
kind = c.group(1)
line = c.group(2)
if kind == "<" and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
elif kind == ">" and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
if len(changes) > 0:
return changes
return None
def parse_unified_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
old = 0
new = 0
r = 0
i = 0
old_len = 0
new_len = 0
changes = list()
hunks = split_by_regex(lines, unified_hunk_start)
for hunk_n, hunk in enumerate(hunks):
# reset counters
r = 0
i = 0
while len(hunk) > 0:
h = unified_hunk_start.match(hunk[0])
del hunk[0]
if h:
old = int(h.group(1))
if len(h.group(2)) > 0:
old_len = int(h.group(2))
else:
old_len = 0
new = int(h.group(3))
if len(h.group(4)) > 0:
new_len = int(h.group(4))
else:
new_len = 0
h = None
break
for n in hunk:
c = unified_change.match(n)
if c:
kind = c.group(1)
line = c.group(2)
if kind == "-" and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
elif kind == "+" and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
elif kind == " ":
if r != old_len and i != new_len:
changes.append(Change(old + r, new + i, line, hunk_n))
r += 1
i += 1
if len(changes) > 0:
return changes
return None
def parse_context_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
old = 0
new = 0
j = 0
k = 0
changes = list()
hunks = split_by_regex(lines, context_hunk_start)
for hunk_n, hunk in enumerate(hunks):
if not len(hunk):
continue
j = 0
k = 0
parts = split_by_regex(hunk, context_hunk_new)
if len(parts) != 2:
raise exceptions.ParseException("Context diff invalid", hunk_n)
old_hunk = parts[0]
new_hunk = parts[1]
while len(old_hunk) > 0:
o = context_hunk_old.match(old_hunk[0])
del old_hunk[0]
if not o:
continue
old = int(o.group(1))
old_len = int(o.group(2)) + 1 - old
while len(new_hunk) > 0:
n = context_hunk_new.match(new_hunk[0])
del new_hunk[0]
if not n:
continue
new = int(n.group(1))
new_len = int(n.group(2)) + 1 - new
break
break
# now have old and new set, can start processing?
if len(old_hunk) > 0 and len(new_hunk) == 0:
msg = "Got unexpected change in removal hunk: "
# only removes left?
while len(old_hunk) > 0:
c = context_change.match(old_hunk[0])
del old_hunk[0]
if not c:
continue
kind = c.group(1)
line = c.group(2)
if kind == "-" and (j != old_len or j == 0):
changes.append(Change(old + j, None, line, hunk_n))
j += 1
elif kind == " " and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
elif kind == "+" or kind == "!":
raise exceptions.ParseException(msg + kind, hunk_n)
continue
if len(old_hunk) == 0 and len(new_hunk) > 0:
msg = "Got unexpected change in removal hunk: "
# only insertions left?
while len(new_hunk) > 0:
c = context_change.match(new_hunk[0])
del new_hunk[0]
if not c:
continue
kind = c.group(1)
line = c.group(2)
if kind == "+" and (k != new_len or k == 0):
changes.append(Change(None, new + k, line, hunk_n))
k += 1
elif kind == " " and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
elif kind == "-" or kind == "!":
raise exceptions.ParseException(msg + kind, hunk_n)
continue
# both
while len(old_hunk) > 0 and len(new_hunk) > 0:
oc = context_change.match(old_hunk[0])
nc = context_change.match(new_hunk[0])
okind = None
nkind = None
if oc:
okind = oc.group(1)
oline = oc.group(2)
if nc:
nkind = nc.group(1)
nline = nc.group(2)
if not (oc or nc):
del old_hunk[0]
del new_hunk[0]
elif okind == " " and nkind == " " and oline == nline:
changes.append(Change(old + j, new + k, oline, hunk_n))
j += 1
k += 1
del old_hunk[0]
del new_hunk[0]
elif okind == "-" or okind == "!" and (j != old_len or j == 0):
changes.append(Change(old + j, None, oline, hunk_n))
j += 1
del old_hunk[0]
elif nkind == "+" or nkind == "!" and (k != new_len or k == 0):
changes.append(Change(None, new + k, nline, hunk_n))
k += 1
del new_hunk[0]
else:
return None
if len(changes) > 0:
return changes
return None
def parse_ed_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
old = 0
j = 0
k = 0
r = 0
i = 0
changes = list()
hunks = split_by_regex(lines, ed_hunk_start)
hunks.reverse()
for hunk_n, hunk in enumerate(hunks):
if not len(hunk):
continue
j = 0
k = 0
while len(hunk) > 0:
o = ed_hunk_start.match(hunk[0])
del hunk[0]
if not o:
continue
old = int(o.group(1))
old_end = int(o.group(2)) if len(o.group(2)) else old
hunk_kind = o.group(3)
if hunk_kind == "d":
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
r += 1
k += 1
old_end -= 1
continue
while len(hunk) > 0:
e = ed_hunk_end.match(hunk[0])
if not e and hunk_kind == "c":
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
r += 1
k += 1
old_end -= 1
# I basically have no idea why this works
# for these tests.
changes.append(
Change(
None,
old - r + i + k + j,
hunk[0],
hunk_n,
)
)
i += 1
j += 1
if not e and hunk_kind == "a":
changes.append(
Change(
None,
old - r + i + 1,
hunk[0],
hunk_n,
)
)
i += 1
del hunk[0]
if len(changes) > 0:
return changes
return None
def parse_rcs_ed_diff(text):
# much like forward ed, but no 'c' type
try:
lines = text.splitlines()
except AttributeError:
lines = text
old = 0
j = 0
size = 0
total_change_size = 0
changes = list()
hunks = split_by_regex(lines, rcs_ed_hunk_start)
for hunk_n, hunk in enumerate(hunks):
if len(hunk):
j = 0
while len(hunk) > 0:
o = rcs_ed_hunk_start.match(hunk[0])
del hunk[0]
if not o:
continue
hunk_kind = o.group(1)
old = int(o.group(2))
size = int(o.group(3))
if hunk_kind == "a":
old += total_change_size + 1
total_change_size += size
while size > 0 and len(hunk) > 0:
changes.append(Change(None, old + j, hunk[0], hunk_n))
j += 1
size -= 1
del hunk[0]
elif hunk_kind == "d":
total_change_size -= size
while size > 0:
changes.append(Change(old + j, None, None, hunk_n))
j += 1
size -= 1
if len(changes) > 0:
return changes
return None
def parse_git_binary_diff(text):
try:
lines = text.splitlines()
except AttributeError:
lines = text
changes = list()
old_version = None
new_version = None
cmd_old_path = None
cmd_new_path = None
# the sizes are used as latch-up
old_size = None
new_size = None
old_encoded = ""
new_encoded = ""
for line in lines:
if cmd_old_path is None and cmd_new_path is None:
hm = git_diffcmd_header.match(line)
if hm:
cmd_old_path = hm.group(1)
cmd_new_path = hm.group(2)
continue
if old_version is None and new_version is None:
g = git_header_index.match(line)
if g:
old_version = g.group(1)
new_version = g.group(2)
continue
# the first is added file
if new_size is None:
literal = git_binary_literal_start.match(line)
if literal:
new_size = int(literal.group(1))
continue
delta = git_binary_delta_start.match(line)
if delta:
# not supported
new_size = 0
continue
elif new_size > 0:
if base85string.match(line):
assert len(line) >= 6 and ((len(line) - 1) % 5) == 0
new_encoded += line[1:]
elif 0 == len(line):
decoded = base64.b85decode(new_encoded)
added_data = zlib.decompress(decoded)
assert new_size == len(added_data)
change = Change(None, 0, added_data, None)
changes.append(change)
new_size = 0
else:
break
# the second is removed file
if old_size is None:
literal = git_binary_literal_start.match(line)
if literal:
old_size = int(literal.group(1))
delta = git_binary_delta_start.match(line)
if delta:
# not supported
old_size = 0
continue
elif old_size > 0:
if base85string.match(line):
assert len(line) >= 6 and ((len(line) - 1) % 5) == 0
old_encoded += line[1:]
elif 0 == len(line):
decoded = base64.b85decode(old_encoded)
removed_data = zlib.decompress(decoded)
assert old_size == len(removed_data)
change = Change(0, None, None, removed_data)
changes.append(change)
old_size = 0
else:
break
return changes
whatthepatch-1.0.7/src/whatthepatch/snippets.py
# -*- coding: utf-8 -*-
import os
from shutil import rmtree
def remove(path):
if os.path.exists(path):
if os.path.isdir(path):
rmtree(path)
else:
os.remove(path)
# find all indices of a list of strings that match a regex
def findall_regex(items, regex):
found = list()
for i in range(0, len(items)):
k = regex.match(items[i])
if k:
found.append(i)
k = None
return found
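# Split a list of lines into sublists, starting a new sublist at each line that
# matches the regex; anything before the first match becomes the first sublist.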
def split_by_regex(items, regex):
splits = list()
indices = findall_regex(items, regex)
k = None
for i in indices:
if k is None:
splits.append(items[0:i])
k = i
else:
splits.append(items[k:i])
k = i
splits.append(items[k:])
return splits
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file

    return None
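

# Illustrative usage (not part of the original module):
#
#     which('patch')    # -> e.g. '/usr/bin/patch', or None if not on PATH
#     which('/bin/sh')  # a path with a directory part is returned as-is
#                       #    when it points at an executable file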
whatthepatch-1.0.7/tests/ 0000775 0000000 0000000 00000000000 14716151714 0015354 5 ustar 00root root 0000000 0000000 whatthepatch-1.0.7/tests/__init__.py 0000664 0000000 0000000 00000000000 14716151714 0017453 0 ustar 00root root 0000000 0000000 whatthepatch-1.0.7/tests/casefiles/ 0000775 0000000 0000000 00000000000 14716151714 0017312 5 ustar 00root root 0000000 0000000 whatthepatch-1.0.7/tests/casefiles/abc 0000664 0000000 0000000 00000000060 14716151714 0017756 0 ustar 00root root 0000000 0000000 The Nameless is the origin of Heaven and Earth;
whatthepatch-1.0.7/tests/casefiles/apache-attachment-2241.diff 0000664 0000000 0000000 00000001542 14716151714 0024103 0 ustar 00root root 0000000 0000000 *** src\main\org\apache\tools\ant\taskdefs\optional\pvcs\Pvcs.orig Sat Jun 22 16:11:58 2002
--- src\main\org\apache\tools\ant\taskdefs\optional\pvcs\Pvcs.java Fri Jun 28 10:55:50 2002
***************
*** 91,97 ****
*
* @author Thomas Christensen
* @author Don Jeffery
! * @author Steven E. Newton
*/
public class Pvcs extends org.apache.tools.ant.Task {
private String pvcsbin;
--- 91,97 ----
*
* @author Thomas Christensen
* @author Don Jeffery
! * @author Steven E. Newton
*/
public class Pvcs extends org.apache.tools.ant.Task {
private String pvcsbin;
whatthepatch-1.0.7/tests/casefiles/apache-attachment-28223.diff 0000664 0000000 0000000 00000001240 14716151714 0024166 0 ustar 00root root 0000000 0000000 diff --git a/src/script/.ant.swp b/src/script/.ant.swp
index 7962473..b214d30 100644
Binary files a/src/script/.ant.swp and b/src/script/.ant.swp differ
diff --git a/src/script/ant b/src/script/ant
index 0dc84e0..11c1b59 100644
--- a/src/script/ant
+++ b/src/script/ant
@@ -176,10 +176,7 @@ if $rpm_mode && [ -x /usr/bin/build-classpath ] ; then
*.rpmnew) ;;
*)
for dep in `cat "$file"`; do
- case "$OPT_JAR_LIST" in
- *"$dep"*) ;;
- *) OPT_JAR_LIST="$OPT_JAR_LIST${OPT_JAR_LIST:+ }$dep"
- esac
+ OPT_JAR_LIST="$OPT_JAR_LIST${OPT_JAR_LIST:+ }$dep"
done
esac
fi
whatthepatch-1.0.7/tests/casefiles/context-header.diff 0000664 0000000 0000000 00000000156 14716151714 0023060 0 ustar 00root root 0000000 0000000 *** /tmp/o 2012-12-22 06:43:35.000000000 -0600
--- /tmp/n 2012-12-23 20:40:50.000000000 -0600
***************
whatthepatch-1.0.7/tests/casefiles/cvs-header.diff 0000664 0000000 0000000 00000001207 14716151714 0022165 0 ustar 00root root 0000000 0000000 Index: org.eclipse.core.resources/src/org/eclipse/core/internal/localstore/SafeChunkyInputStream.java
===================================================================
RCS file: /cvsroot/eclipse/org.eclipse.core.resources/src/org/eclipse/core/internal/localstore/SafeChunkyInputStream.java,v
retrieving revision 1.6.4.1
retrieving revision 1.8
diff -u -r1.6.4.1 -r1.8
--- org.eclipse.core.resources/src/org/eclipse/core/internal/localstore/SafeChunkyInputStream.java 23 Jul 2001 17:51:45 -0000 1.6.4.1
+++ org.eclipse.core.resources/src/org/eclipse/core/internal/localstore/SafeChunkyInputStream.java 17 May 2002 20:27:56 -0000 1.8
@@ -1 +1 @@
whatthepatch-1.0.7/tests/casefiles/diff-context-blah.diff 0000664 0000000 0000000 00000001654 14716151714 0023450 0 ustar 00root root 0000000 0000000 *** lao 2013-01-05 16:56:19.000000000 -0600
--- tzu 2013-01-05 16:56:35.000000000 -0600
***************
*** 1,7 ****
- The Way that can be told of is not the eternal Way;
- The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
! The Named is the mother of all things.
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
--- 1,6 ----
The Nameless is the origin of Heaven and Earth;
! The named is the mother of all things.
!
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
***************
*** 9,11 ****
--- 8,13 ----
The two are the same,
But after they are produced,
they have different names.
+ They both may be called deep and profound.
+ Deeper and more profound,
+ The door of all subtleties!
+ blah
+ blah
+ bleh
None of these last 4 lines should parse.
whatthepatch-1.0.7/tests/casefiles/diff-context.diff 0000664 0000000 0000000 00000001554 14716151714 0022543 0 ustar 00root root 0000000 0000000 *** lao 2013-01-05 16:56:19.000000000 -0600
--- tzu 2013-01-05 16:56:35.000000000 -0600
***************
*** 1,7 ****
- The Way that can be told of is not the eternal Way;
- The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
! The Named is the mother of all things.
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
--- 1,6 ----
The Nameless is the origin of Heaven and Earth;
! The named is the mother of all things.
!
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
***************
*** 9,11 ****
--- 8,13 ----
The two are the same,
But after they are produced,
they have different names.
+ They both may be called deep and profound.
+ Deeper and more profound,
+ The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-default-blah.diff 0000664 0000000 0000000 00000000601 14716151714 0023377 0 ustar 00root root 0000000 0000000 1,2d0
< The Way that can be told of is not the eternal Way;
< The name that can be named is not the eternal name.
4c2,3
< The Named is the mother of all things.
---
> The named is the mother of all things.
>
11a11,13
> They both may be called deep and profound.
> Deeper and more profound,
> The door of all subtleties!
> blah
> blah
> bleh
None of these last 4 lines should parse.
whatthepatch-1.0.7/tests/casefiles/diff-default.diff 0000664 0000000 0000000 00000000501 14716151714 0022472 0 ustar 00root root 0000000 0000000 1,2d0
< The Way that can be told of is not the eternal Way;
< The name that can be named is not the eternal name.
4c2,3
< The Named is the mother of all things.
---
> The named is the mother of all things.
>
11a11,13
> They both may be called deep and profound.
> Deeper and more profound,
> The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-ed.diff 0000664 0000000 0000000 00000000231 14716151714 0021436 0 ustar 00root root 0000000 0000000 11a
They both may be called deep and profound.
Deeper and more profound,
The door of all subtleties!
.
4c
The named is the mother of all things.
.
1,2d
whatthepatch-1.0.7/tests/casefiles/diff-rcs.diff 0000664 0000000 0000000 00000000236 14716151714 0021642 0 ustar 00root root 0000000 0000000 d1 2
d4 1
a4 2
The named is the mother of all things.
a11 3
They both may be called deep and profound.
Deeper and more profound,
The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-unified-bad.diff 0000664 0000000 0000000 00000001206 14716151714 0023220 0 ustar 00root root 0000000 0000000 --- lao 2013-01-05 16:56:19.000000000 -0600
+++ tzu 2013-01-05 16:56:35.000000000 -0600
@@ -1,7 +1,6 @@
-The Way that can be told of is not the eternal Way;
-The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
-The Named is the mother of all tings.
+The named is the mother of all things.
+
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
@@ -9,3 +8,6 @@
The two are the same,
But after they are produced,
they have different names.
+They both may be called deep and profound.
+Deeper and more profound,
+The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-unified-bad2.diff 0000664 0000000 0000000 00000001206 14716151714 0023302 0 ustar 00root root 0000000 0000000 --- lao 2013-01-05 16:56:19.000000000 -0600
+++ tzu 2013-01-05 16:56:35.000000000 -0600
@@ -1,7 +1,6 @@
-The Way that can be told of is not the eternal Way;
-The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
-The Named is the mother of all things.
+The named is the mother of all things.
+
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
@@ -9,3 +8,6 @@
The two are te same,
But after they are produced,
they have different names.
+They both may be called deep and profound.
+Deeper and more profound,
+The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-unified-blah.diff 0000664 0000000 0000000 00000001303 14716151714 0023376 0 ustar 00root root 0000000 0000000 --- lao 2013-01-05 16:56:19.000000000 -0600
+++ tzu 2013-01-05 16:56:35.000000000 -0600
@@ -1,7 +1,6 @@
-The Way that can be told of is not the eternal Way;
-The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
-The Named is the mother of all things.
+The named is the mother of all things.
+
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
@@ -9,3 +8,6 @@
The two are the same,
But after they are produced,
they have different names.
+They both may be called deep and profound.
+Deeper and more profound,
+The door of all subtleties!
-blah
-blah
+bleh
None of these last 4 lines should parse.
whatthepatch-1.0.7/tests/casefiles/diff-unified.diff 0000664 0000000 0000000 00000001207 14716151714 0022475 0 ustar 00root root 0000000 0000000 --- lao 2013-01-05 16:56:19.000000000 -0600
+++ tzu 2013-01-05 16:56:35.000000000 -0600
@@ -1,7 +1,6 @@
-The Way that can be told of is not the eternal Way;
-The name that can be named is not the eternal name.
The Nameless is the origin of Heaven and Earth;
-The Named is the mother of all things.
+The named is the mother of all things.
+
Therefore let there always be non-being,
so we may see their subtlety,
And let there always be being,
@@ -9,3 +8,6 @@
The two are the same,
But after they are produced,
they have different names.
+They both may be called deep and profound.
+Deeper and more profound,
+The door of all subtleties!
whatthepatch-1.0.7/tests/casefiles/diff-unified2.diff 0000664 0000000 0000000 00000000277 14716151714 0022565 0 ustar 00root root 0000000 0000000 --- abc 2013-01-05 16:56:19.000000000 -0600
+++ efg 2013-01-05 16:56:35.000000000 -0600
@@ -1 +1,2 @@
The Nameless is the origin of Heaven and Earth;
+The named is the mother of all things.
whatthepatch-1.0.7/tests/casefiles/eclipse-attachment-126343.header 0000664 0000000 0000000 00000000740 14716151714 0024777 0 ustar 00root root 0000000 0000000 Index: test plugin/org/eclipse/jdt/debug/testplugin/ResumeBreakpointListener.java
===================================================================
RCS file: test plugin/org/eclipse/jdt/debug/testplugin/ResumeBreakpointListener.java
diff -N test plugin/org/eclipse/jdt/debug/testplugin/ResumeBreakpointListener.java
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ test plugin/org/eclipse/jdt/debug/testplugin/ResumeBreakpointListener.java 1 Jan 1970 00:00:00 -0000
@@ -0,0 +1,53 @@
whatthepatch-1.0.7/tests/casefiles/eclipse-attachment-1701.patch 0000664 0000000 0000000 00000002355 14716151714 0024500 0 ustar 00root root 0000000 0000000 ? cvsdiff
? model/org/eclipse/jdt/internal/debug/core/BreakMouseGrab.java
Index: model/org/eclipse/jdt/internal/debug/core/JDIDebugPlugin.java
===================================================================
RCS file: /home/eclipse/org.eclipse.jdt.debug/model/org/eclipse/jdt/internal/debug/core/JDIDebugPlugin.java,v
retrieving revision 1.34
diff -u -p -r1.34 JDIDebugPlugin.java
--- model/org/eclipse/jdt/internal/debug/core/JDIDebugPlugin.java 6 Jun 2002 20:33:46 -0000 1.34
+++ model/org/eclipse/jdt/internal/debug/core/JDIDebugPlugin.java 16 Jul 2002 20:37:56 -0000
@@ -61,6 +61,13 @@ public class JDIDebugPlugin extends Plug
private boolean fTrace = false;
/**
+ * On SWT/GTK applications, we need to drop
+ * any mouse grabs when a breakpoint is hit
+ * or the mouse is not usable.
+ */
+ private BreakMouseGrab breakMouseGrab;
+
+ /**
* Returns whether the debug UI plug-in is in trace
* mode.
*
@@ -117,6 +124,7 @@ public class JDIDebugPlugin extends Plug
fBreakpointListeners = new ListenerList(5);
getPluginPreferences().setDefault(JDIDebugModel.PREF_REQUEST_TIMEOUT, JDIDebugModel.DEF_REQUEST_TIMEOUT);
getPluginPreferences().addPropertyChangeListener(this);
+ breakMouseGrab = new BreakMouseGrab();
}
/**
whatthepatch-1.0.7/tests/casefiles/efg 0000664 0000000 0000000 00000000127 14716151714 0017776 0 ustar 00root root 0000000 0000000 The Nameless is the origin of Heaven and Earth;
The named is the mother of all things.
whatthepatch-1.0.7/tests/casefiles/embedded-diff.comment 0000664 0000000 0000000 00000001500 14716151714 0023331 0 ustar 00root root 0000000 0000000 In order to pass the initial test case of the bug reporter, a simple patch for "IRFactory#decompile(AstNode)" is necessary:
---
--- a/src/org/mozilla/javascript/IRFactory.java
+++ b/src/org/mozilla/javascript/IRFactory.java
@@ -2182,6 +2182,9 @@ public final class IRFactory extends Parser
case Token.GETELEM:
decompileElementGet((ElementGet) node);
break;
+ case Token.THIS:
+ decompiler.addToken(node.getType());
+ break;
default:
Kit.codeBug("unexpected token: "
+ Token.typeToName(node.getType()));
---
But that change won't be sufficient to cover the other tests of the JUnit test case.
PS: The bug subject should be changed to mention "Destructuring Assignment" instead of "Array comprehension"
whatthepatch-1.0.7/tests/casefiles/git-bin.patch 0000664 0000000 0000000 00000004156 14716151714 0021672 0 ustar 00root root 0000000 0000000 ---
fox.bin | Bin 0 -> 44 bytes
fox.txt | 2 +-
lorem.bin | Bin 0 -> 446 bytes
lorem.zip | Bin 431 -> 432 bytes
4 files changed, 1 insertion(+), 1 deletion(-)
create mode 100644 fox.bin
create mode 100644 lorem.bin
diff --git a/fox.bin b/fox.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e7683ad05fd121a9ca86cab5a827d471d29b4d4f
GIT binary patch
literal 44
ycmWH^NL45-%}mZ#NGi%N&r?XtuTaP;%`GTa$S+GRQYZmR=Ok8DDx~D6GXMZ
literal 0
HcmV?d00001
diff --git a/fox.txt b/fox.txt
index ff3bb63..8fe2a4b 100644
--- a/fox.txt
+++ b/fox.txt
@@ -1 +1 @@
-The quick brown fox jumps over the lazy dog
\ No newline at end of file
+The quick brown fox jumps over the lazy dog.
\ No newline at end of file
diff --git a/lorem.bin b/lorem.bin
new file mode 100644
index 0000000000000000000000000000000000000000..aef2724fd9ff72caf4eb1ac8333f0b5b322d82fb
GIT binary patch
literal 446
zcmXw#&2d992!vD07T|eRB)42s0Fkh>Gy1ax9+w~Fm)wMaW%v8+Q!6-@SL9y$#G*l}
z+6Ae%rODKMLNW(eV!J^Lqq#K40+haL&oHecme~?Bvp0hqihPGW)J|zdm0J@?;oarH
zmq8nAXrppJ9#KlY;O<;#ecAL3ed0DLGNbT$
z;NzKem+$vrMRcVJ
literal 0
HcmV?d00001
diff --git a/lorem.zip b/lorem.zip
index 0f6beb70488e2b29fcaadf724b6f48ef0ab5bc4e..3c8a65bf1a97bb4180c83a0e31352b4edb4c245e 100644
GIT binary patch
delta 275
zcmZ3_yn#6)z?+#xgn@y9gP}7+C2a4}O*1$c85s5gF(-ozLr#8CYOY>MMM-D~Cj+xl
z?ABymATF)oW?*D_!OXw_CQK(BEOa*HaEZRjWV65P$+T=Pg^u?VBeY_ymt>hupAyrM
zu_ldGFZao&vTKV}cPYzE-4`VHX!@1~7xxOUc%}T}z;fqZ+pf4>{#%je`L@mdh12$@
z##~QSCtp2z)oM{#R?hTKa+j9=zO;TxpG;eTRQ}78>li9e@lU*`!E