2006-08-12 02:50:16 +04:00
|
|
|
# patch.py - patch file parsing routines
|
|
|
|
#
|
2006-08-12 23:47:18 +04:00
|
|
|
# Copyright 2006 Brendan Cully <brendan@kublai.com>
|
2007-07-17 20:39:30 +04:00
|
|
|
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
|
2006-08-12 23:47:18 +04:00
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2017-09-03 08:56:31 +03:00
|
|
|
from __future__ import absolute_import, print_function
|
2010-04-17 22:13:57 +04:00
|
|
|
|
2015-12-22 08:33:52 +03:00
|
|
|
import collections
|
|
|
|
import copy
|
|
|
|
import email
|
|
|
|
import errno
|
2016-06-10 07:12:33 +03:00
|
|
|
import hashlib
|
2015-12-22 08:33:52 +03:00
|
|
|
import os
|
|
|
|
import posixpath
|
|
|
|
import re
|
|
|
|
import shutil
|
|
|
|
import tempfile
|
|
|
|
import zlib
|
|
|
|
|
|
|
|
from . import (
|
|
|
|
copies,
|
|
|
|
encoding,
|
|
|
|
error,
|
2016-03-03 20:34:19 +03:00
|
|
|
mail,
|
2015-12-22 08:33:52 +03:00
|
|
|
mdiff,
|
|
|
|
pathutil,
|
2016-08-13 06:15:49 +03:00
|
|
|
policy,
|
2017-02-13 17:36:38 +03:00
|
|
|
pycompat,
|
2015-12-22 08:33:52 +03:00
|
|
|
scmutil,
|
2017-01-09 22:24:18 +03:00
|
|
|
similar,
|
2015-12-22 08:33:52 +03:00
|
|
|
util,
|
2017-03-02 15:29:59 +03:00
|
|
|
vfs as vfsmod,
|
2015-12-22 08:33:52 +03:00
|
|
|
)
|
2018-07-06 03:45:27 +03:00
|
|
|
from .i18n import _
|
|
|
|
from .node import hex, short
|
2016-08-13 06:15:49 +03:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
# diff helper routines, loaded through the policy loader (which picks the
# implementation variant -- TODO confirm whether a C module is involved)
diffhelpers = policy.importmod(r"diffhelpers")
# convenient local alias for util's StringIO/BytesIO factory
stringio = util.stringio

# matches the header line of a git-style diff, capturing the old (a/) and
# new (b/) paths
gitre = re.compile(br"diff --git a/(.*) b/(.*)")
# splits a line into alternating runs of tabs and non-tab text
tabsplitter = re.compile(br"(\t+|[^\t]+)")
# splits a line into tab runs, space runs, "word" characters (bytes >= 0x80
# included so multi-byte encoded characters stay grouped) and individual
# punctuation characters -- presumably for word-level diffing; confirm
# against callers
wordsplitter = re.compile(br"(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])")

# re-exported so callers can keep importing PatchError from this module
PatchError = error.PatchError
|
2006-08-17 06:46:18 +04:00
|
|
|
|
|
|
|
# public functions
|
|
|
|
|
flake8: enable F821 check
Summary:
This check is useful and detects real errors (ex. fbconduit). Unfortunately
`arc lint` will run it with both py2 and py3 so a lot of py2 builtins will
still be warned.
I didn't find a clean way to disable py3 check. So this diff tries to fix them.
For `xrange`, the change was done by a script:
```
import sys
import redbaron
headertypes = {'comment', 'endl', 'from_import', 'import', 'string',
'assignment', 'atomtrailers'}
xrangefix = '''try:
xrange(0)
except NameError:
xrange = range
'''
def isxrange(x):
try:
return x[0].value == 'xrange'
except Exception:
return False
def main(argv):
for i, path in enumerate(argv):
print('(%d/%d) scanning %s' % (i + 1, len(argv), path))
content = open(path).read()
try:
red = redbaron.RedBaron(content)
except Exception:
print(' warning: failed to parse')
continue
hasxrange = red.find('atomtrailersnode', value=isxrange)
hasxrangefix = 'xrange = range' in content
if hasxrangefix or not hasxrange:
print(' no need to change')
continue
# find a place to insert the compatibility statement
changed = False
for node in red:
if node.type in headertypes:
continue
# node.insert_before is an easier API, but it has bugs changing
# other "finally" and "except" positions. So do the insert
# manually.
# # node.insert_before(xrangefix)
line = node.absolute_bounding_box.top_left.line - 1
lines = content.splitlines(1)
content = ''.join(lines[:line]) + xrangefix + ''.join(lines[line:])
changed = True
break
if changed:
# "content" is faster than "red.dumps()"
open(path, 'w').write(content)
print(' updated')
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
For other py2 builtins that do not have a py3 equivalent, some `# noqa`
were added as a workaround for now.
Reviewed By: DurhamG
Differential Revision: D6934535
fbshipit-source-id: 546b62830af144bc8b46788d2e0fd00496838939
2018-02-10 04:31:44 +03:00
|
|
|
# Python 3 removed the xrange builtin; alias it to range so the rest of
# this module can keep using xrange on both Python 2 and Python 3.
try:
    xrange(0)
except NameError:
    xrange = range
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2010-02-07 20:06:52 +03:00
|
|
|
def split(stream):
    """return an iterator of individual patches from a stream

    The input may be hg export output, an mbox of patch mails, a MIME
    message, header-prefixed patches, or a single plain patch; the
    splitting strategy is chosen by sniffing the first lines.
    """

    def isheader(line, inheader):
        """Heuristically decide whether 'line' looks like an RFC822-style
        "Key: value" header line (or a continuation when already inside a
        header block)."""
        if inheader and line[0] in (" ", "\t"):
            # continuation
            return True
        if line[0] in (" ", "-", "+"):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(": ", 1)
        return len(l) == 2 and " " not in l[0]

    def chunk(lines):
        # wrap the accumulated lines in a file-like object
        return stringio("".join(lines))

    def hgsplit(stream, cur):
        """Split hg export output on '# HG changeset patch' markers."""
        inheader = True

        for line in stream:
            if not line.strip():
                # blank line ends the current patch header
                inheader = False
            if not inheader and line.startswith("# HG changeset patch"):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        """Split an mbox on 'From ' separators, recursively splitting each
        message body (the separator line itself is dropped via cur[1:])."""
        for line in stream:
            if line.startswith("From "):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        """Parse the whole input as a MIME message and yield each
        patch-bearing text part as its own file-like object."""

        def msgfp(m):
            # serialize message (or part) 'm' back into a file-like object
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            # only these content types can carry a patch
            ok_types = ("text/plain", "text/x-diff", "text/x-patch")
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        """Split by hand: a new patch starts at each new header block."""
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no recognizable structure: the whole input is a single patch
        yield chunk(cur)

    class fiter(object):
        """Adapt an object that only supports readline() to the iterator
        protocol."""

        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        # Python 3 iterator protocol
        __next__ = next

    inheader = False
    cur = []

    # headers whose presence means the email parser should take over
    mimeheaders = ["content-type"]

    if not util.safehasattr(stream, "next"):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the input line by line until a splitting strategy can be chosen;
    # lines read so far are passed along in 'cur'
    for line in stream:
        cur.append(line)
        if line.startswith("# HG changeset patch"):
            return hgsplit(stream, cur)
        elif line.startswith("From "):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(":", 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith("--- ") and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-10-07 11:20:49 +03:00
|
|
|
## Some facility for extensible patch parsing:

# list of pairs ("header to match", "data key"): a "# <header> <value>"
# line in an hg patch header is stored by extract() in its result
# dictionary under the corresponding key
patchheadermap = [("Date", "date"), ("Branch", "branch"), ("Node ID", "nodeid")]
|
|
|
|
|
2015-10-07 11:20:49 +03:00
|
|
|
|
2006-08-13 00:16:48 +04:00
|
|
|
def extract(ui, fileobj):
    """extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done."""

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br"^(?:Index:[ \t]|diff[ \t]-|RCS file: |"
        br"retrieving revision [0-9]+(\.[0-9]+)*$|"
        br"---[ \t].*?^\+\+\+[ \t]|"
        br"\*\*\*[ \t].*?^---[ \t])",
        re.MULTILINE | re.DOTALL,
    )

    data = {}
    # the diff content itself is accumulated in this temp file; on success
    # its name is returned as data['filename'] for the caller to consume
    fd, tmpname = tempfile.mkstemp(prefix="hg-patch-")
    tmpfp = os.fdopen(fd, pycompat.sysstr("w"))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg["Subject"] and mail.headdecode(msg["Subject"])
        data["user"] = msg["From"] and mail.headdecode(msg["From"])
        if not subject and not data["user"]:
            # Not an email, restore parsed headers if any
            subject = "\n".join(": ".join(h) for h in msg.items()) + "\n"

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith("[PATCH"):
                # strip a leading "[PATCH n/m]"-style tag
                pend = subject.find("]")
                if pend >= 0:
                    subject = subject[pend + 1 :].lstrip()
            # unfold multi-line (continued) subject headers
            subject = re.sub(br"\n[ \t]+", " ", subject)
            ui.debug("Subject: %s\n" % subject)
        if data["user"]:
            ui.debug("From: %s\n" % data["user"])
        diffs_seen = 0
        # only these content types can carry a patch
        ok_types = ("text/plain", "text/x-diff", "text/x-patch")
        message = ""
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug("Content-Type: %s\n" % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # text before the diff start is (candidate) commit message
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug("found patch at byte %d\n" % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[: m.start(0)].splitlines():
                    if line.startswith("# HG changeset patch") and not hgpatch:
                        ui.debug("patch generated by hg export\n")
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        # the hg header supersedes the email subject
                        subject = None
                    elif hgpatchheader:
                        if line.startswith("# User "):
                            data["user"] = line[7:]
                            ui.debug("From: %s\n" % data["user"])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible "# <Header> <value>" lines
                            for header, key in patchheadermap:
                                prefix = "# %s " % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix) :]
                        else:
                            # first non-"#" line ends the hg patch header
                            hgpatchheader = False
                    elif line == "---":
                        # conventional end-of-message marker before diffstat
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write("\n")
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith("\n"):
                        tmpfp.write("\n")
            elif not diffs_seen and message and content_type == "text/plain":
                # plain-text parts before any diff extend the message
                message += "\n" + payload
    except:  # re-raises
        # don't leak the temp file on failure
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = "%s\n%s" % (subject, message)
    data["message"] = message
    tmpfp.close()
    if parents:
        data["p1"] = parents.pop(0)
    if parents:
        data["p2"] = parents.pop(0)

    if diffs_seen:
        data["filename"] = tmpname
    else:
        # no patch found: the temp file is useless, remove it now
        os.unlink(tmpname)
    return data
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2009-06-10 17:10:21 +04:00
|
|
|
class patchmeta(object):
    """Metadata describing one file touched by a (git-style) patch.

    Attributes:
      op      -- one of 'ADD', 'DELETE', 'RENAME', 'COPY' or 'MODIFY'
                 (the default)
      path    -- path of the patched file
      oldpath -- origin path when op is 'COPY' or 'RENAME', None otherwise
      mode    -- None when the file mode is untouched, otherwise an
                 (islink, isexec) pair of flag values
      binary  -- True when the entry carries a git binary patch
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = "MODIFY"
        self.binary = False

    def setmode(self, mode):
        # extract the symlink and executable bits from a full st_mode value
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        """Return a duplicate of this metadata object."""
        other = patchmeta(self.path)
        for name in ("oldpath", "mode", "op", "binary"):
            setattr(other, name, getattr(self, name))
        return other

    def _ispatchinga(self, afile):
        # does 'afile' (the "---" side of a hunk) refer to this file?
        if afile != "/dev/null":
            return afile == "a/" + (self.oldpath or self.path)
        # a null source is only consistent with a file being added
        return self.op == "ADD"

    def _ispatchingb(self, bfile):
        # does 'bfile' (the "+++" side of a hunk) refer to this file?
        if bfile != "/dev/null":
            return bfile == "b/" + self.path
        # a null target is only consistent with a file being deleted
        return self.op == "DELETE"

    def ispatching(self, afile, bfile):
        """True if a hunk with the given a/ and b/ files patches this file."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan the stream for "diff --git" extended headers and build one
    # patchmeta per file; a "--- " line marks the start of hunk data and
    # finalizes the entry being assembled.
    gitpatches = []
    current = None
    for line in lr:
        line = line.rstrip(" \r\n")
        if line.startswith("diff --git a/"):
            m = gitre.match(line)
            if m:
                if current:
                    gitpatches.append(current)
                # track the destination (b/) path
                current = patchmeta(m.group(2))
        elif current:
            if line.startswith("--- "):
                # hunk data follows; this entry is complete
                gitpatches.append(current)
                current = None
                continue
            if line.startswith("rename from "):
                current.op = "RENAME"
                current.oldpath = line[12:]
            elif line.startswith("rename to "):
                current.path = line[10:]
            elif line.startswith("copy from "):
                current.op = "COPY"
                current.oldpath = line[10:]
            elif line.startswith("copy to "):
                current.path = line[8:]
            elif line.startswith("deleted file"):
                current.op = "DELETE"
            elif line.startswith("new file mode "):
                current.op = "ADD"
                current.setmode(int(line[-6:], 8))
            elif line.startswith("new mode "):
                current.setmode(int(line[-6:], 8))
            elif line.startswith("GIT binary patch"):
                current.binary = True
    if current:
        gitpatches.append(current)

    return gitpatches
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2009-06-22 14:05:11 +04:00
|
|
|
class linereader(object):
    """File-like wrapper that lets callers push lines back onto the input."""

    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, returned in FIFO order before reading fp again
        self.buf = []

    def push(self, line):
        # a None line is ignored so callers can push an optional lookahead
        # unconditionally
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns an empty string (EOF)
        return iter(self.readline, "")
|
|
|
|
|
2009-06-15 02:03:26 +04:00
|
|
|
|
2011-05-18 01:46:15 +04:00
|
|
|
class abstractbackend(object):
    """Abstract destination for applying a patch.

    Concrete subclasses in this module write results to a filesystem
    tree (fsbackend), the working directory plus dirstate
    (workingbackend), or an in-memory store (repobackend).
    """

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for
        this file.

        The default implementation discards rejects; subclasses that can
        persist them (e.g. fsbackend) override it.
        """

    def exists(self, fname):
        """Return True if the target file exists (even as a symlink)."""
        raise NotImplementedError

    def close(self):
        """End the patching session; the return value is backend-specific."""
        raise NotImplementedError
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-18 01:46:15 +04:00
|
|
|
class fsbackend(abstractbackend):
    """Patch backend writing directly to a filesystem tree under basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        # all paths are resolved relative to basedir through this vfs
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # for symlinks, the link target is the "data"
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file just leaves isexec False; anything else is
            # a real error
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing/deleted file
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # mode-only change: just update the flags on the existing file
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # removing an already-absent file is not an error here
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target file as <fname>.rej
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, fname)
        )
        fp = self.opener(fname, "w")
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        # lexists: a dangling symlink still counts as existing
        return self.opener.lexists(fname)
|
2011-05-18 01:46:38 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-19 01:48:17 +04:00
|
|
|
class workingbackend(fsbackend):
    """fsbackend that additionally keeps the repository dirstate in sync.

    Records which files were changed, removed, or copied while patching
    so close() can propagate the outcome to the dirstate.
    """

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched in close()
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch a file that exists on disk but is unknown ('?')
        # to the dirstate
        if self.repo.dirstate[fname] == "?" and self.exists(fname):
            raise PatchError(_("cannot patch %s: file is not tracked") % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # propagate everything recorded during the session to the dirstate
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-27 23:50:10 +04:00
|
|
|
class filestore(object):
    """Store patched file contents, spilling large data to a temp directory.

    Contents are kept in memory while the running total stays within
    'maxsize' bytes (a negative maxsize means never spill); beyond that,
    contents go to a temporary directory which close() removes.
    """

    def __init__(self, maxsize=None):
        self.opener = None  # vfs over the spill directory, created lazily
        self.files = {}  # fname -> (spill file name, mode, copied)
        self.created = 0  # count of spill files written so far
        # default in-memory budget: 4 MiB
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0  # bytes currently held in memory
        self.data = {}  # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        inmemory = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if inmemory:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = tempfile.mkdtemp(prefix="hg-patch-")
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = str(self.created)
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied); (None, None, None) when unknown."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-06-15 01:26:35 +04:00
|
|
|
class repobackend(abstractbackend):
    """Patch backend that targets files from an existing changectx.

    Patched content is written into a filestore rather than the working
    directory; close() reports which files were touched.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        # bookkeeping returned from close()
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        """Raise PatchError if fname is not tracked in the base context."""
        if fname not in self.ctx:
            raise PatchError(_("cannot patch %s: file is not tracked") % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None)."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        islink = "l" in flags
        isexec = "x" in flags
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: reuse the base context's content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of files modified or removed by this backend."""
        return self.changed | self.removed
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-18 01:46:15 +04:00
|
|
|
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: '\d', '\+' and '\*' are regex escapes, not string escapes;
# the non-raw originals trigger invalid-escape-sequence warnings on
# Python 3.6+ while matching identically.
unidesc = re.compile(r"@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")
# context-diff hunk half header: '--- start,len ---' or '*** start,len ***'
contextdesc = re.compile(r"(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)")
# supported line-ending normalization modes for patched files
eolmodes = ["strict", "crlf", "lf", "auto"]
|
|
|
|
|
2011-05-18 01:46:15 +04:00
|
|
|
|
|
|
|
class patchfile(object):
    """A file being patched: holds its lines, EOL policy and hunk state.

    Content is read through a backend (or a filestore when the file is a
    copy source), hunks are applied in memory via apply(), and close()
    flushes the result back through the backend and writes any rejects.
    """

    def __init__(self, ui, gp, backend, store, eolmode="strict"):
        # gp is a gitpatch-style object describing the operation; presumably
        # gp.op is one of ADD/COPY/RENAME/DELETE/MODIFY -- TODO confirm
        # against the parser that builds it.
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ("ADD", "COPY", "RENAME")
        self.remove = gp.op == "DELETE"
        # copies read their base content from the store, not the backend
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            # a copy destination "exists" only if the backend has it too
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                # detect the file's EOL style from its first line only
                if self.lines[0].endswith("\r\n"):
                    self.eol = "\r\n"
                elif self.lines[0].endswith("\n"):
                    self.eol = "\n"
                if eolmode != "strict":
                    # work on LF internally; writelines() restores the EOL
                    nlines = []
                    for l in self.lines:
                        if l.endswith("\r\n"):
                            l = l[:-2] + "\n"
                        nlines.append(l)
                    self.lines = nlines
        else:
            # no base content: only OK when the patch creates the file
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(
                _(
                    "(use '--prefix' to apply patch relative to the "
                    "current directory)\n"
                )
            )

        # per-file hunk application state
        self.hash = {}  # line content -> [line numbers], built lazily
        self.dirty = 0
        self.offset = 0  # cumulative line-count drift from applied hunks
        self.skew = 0  # drift between expected and actual hunk position
        self.rej = []  # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines through the backend, restoring the EOL policy."""
        if self.eolmode == "auto":
            eol = self.eol
        elif self.eolmode == "crlf":
            eol = "\r\n"
        else:
            eol = "\n"

        if self.eolmode != "strict" and eol and eol != "\n":
            rawlines = []
            for l in lines:
                if l and l[-1] == "\n":
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, "".join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit 'patching file X' once, as warning or note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != "\n":
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h to this file.

        Returns 0 on a clean apply, a positive fuzz amount when fuzz was
        needed, and -1 when the hunk was rejected (added to self.rej).
        """
        if not h.complete():
            raise PatchError(
                _("bad hunk #%d %s (%d %d %d %d)")
                % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
            )

        self.hunks += 1

        # base file could not be located: everything is rejected
        if self.missing:
            self.rej.append(h)
            return -1

        # refuse to re-create a file that is already there
        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(
                    _("cannot create %s: destination already " "exists\n") % self.fname
                )
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # binary hunks replace the whole content, no fuzzing possible
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if self.eolmode in ("crlf", "lf") or self.eolmode == "auto" and self.eol:
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, oldstart) == 0:
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # try increasing fuzz; for each fuzz level try trimming context
        # from the top only first, then from both ends
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                "Hunk #%d succeeded at %d "
                                "with fuzz %d "
                                "(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(msg % (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d " "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-03-09 23:04:50 +03:00
|
|
|
class header(object):
    """patch header
    """

    diffgit_re = re.compile("diff --git a/(.*) b/(.*)$")
    diff_re = re.compile("diff -r .* (.*)$")
    allhunks_re = re.compile("(?:index|deleted file) ")
    pretty_re = re.compile("(?:new file|deleted file) ")
    special_re = re.compile("(?:index|deleted|copy|rename) ")
    newfile_re = re.compile("(?:new file)")

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when any header line marks binary content ('index ')."""
        for line in self.header:
            if line.startswith("index "):
                return True
        return False

    def pretty(self, fp):
        """Write a short human-oriented rendering of the header to fp."""
        for h in self.header:
            if h.startswith("index "):
                fp.write(_("this modifies a binary file (all or nothing)\n"))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_("this is a binary file\n"))
                break
            if h.startswith("---"):
                hunkcount = len(self.hunks)
                linecount = sum(max(hk.added, hk.removed) for hk in self.hunks)
                fp.write(_("%d hunks, %d lines changed\n") % (hunkcount, linecount))
                break
            fp.write(h)

    def write(self, fp):
        fp.write("".join(self.header))

    def allhunks(self):
        """True when the patch must be taken all-or-nothing."""
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        """Return the file name(s) named by the first header line."""
        firstline = self.header[0]
        m = self.diffgit_re.match(firstline)
        if m is None:
            return self.diff_re.match(firstline).groups()
        fromfile, tofile = m.groups()
        return [fromfile] if fromfile == tofile else [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return "<header %s>" % (" ".join(map(repr, self.files())))

    def isnewfile(self):
        """True when the header introduces a new file."""
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level, not per hunk:
        # e.g. a deleted file must be taken or skipped as a whole; the user
        # cannot take just part of the operation.
        # Newly added files are special only while empty; once they have
        # content we want the user to be able to edit it.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        if emptynewfile:
            return True
        return any(self.special_re.match(line) for line in self.header)
|
|
|
|
|
2015-03-09 23:04:50 +03:00
|
|
|
|
2015-03-09 23:09:15 +03:00
|
|
|
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self, header, fromline, toline, proc, before, hunk, after, maxcontext=None
    ):
        def trimcontext(lines, reverse=False):
            # Cap a context-line list at maxcontext entries, returning
            # (number of lines dropped, trimmed list). Leading lines are
            # dropped when reverse is True, trailing lines otherwise.
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        # dropping leading context shifts both start lines forward
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        return hash(
            (tuple(self.hunk), tuple(self.header.files()), self.fromline, self.proc)
        )

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith("+"))
        rem = sum(1 for line in hunk if line.startswith("-"))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {"+": "-", "-": "+", "\\": "\\"}
        swapped = ["%s%s" % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            swapped,
            self.after,
        )

    def write(self, fp):
        """Emit this hunk in unified-diff form to fp."""
        delta = len(self.before) + len(self.after)
        # the no-newline marker is not a real context line
        if self.after and self.after[-1] == "\\ No newline at end of file\n":
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write(
            "@@ -%d,%d +%d,%d @@%s\n"
            % (
                self.fromline,
                fromlen,
                self.toline,
                tolen,
                self.proc and (" " + self.proc),
            )
        )
        fp.write("".join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return "<hunk %r@%d>" % (self.filename(), self.fromline)
2015-03-09 23:09:15 +03:00
|
|
|
|
2017-10-05 23:38:48 +03:00
|
|
|
def getmessages():
    """Return the interactive-filter prompt strings, keyed by context.

    The outer keys are "multiple" (several hunks in play), "single"
    (exactly one hunk) and "help" (promptchoice response strings); the
    inner keys name the operation: "apply", "discard" or "record".
    """
    multiple = {
        "apply": _("apply change %d/%d to '%s'?"),
        "discard": _("discard change %d/%d to '%s'?"),
        "record": _("record change %d/%d to '%s'?"),
    }
    single = {
        "apply": _("apply this change to '%s'?"),
        "discard": _("discard this change to '%s'?"),
        "record": _("record this change to '%s'?"),
    }
    helptexts = {
        "apply": _(
            "[Ynesfdaq?]"
            "$$ &Yes, apply this change"
            "$$ &No, skip this change"
            "$$ &Edit this change manually"
            "$$ &Skip remaining changes to this file"
            "$$ Apply remaining changes to this &file"
            "$$ &Done, skip remaining changes and files"
            "$$ Apply &all changes to all remaining files"
            "$$ &Quit, applying no changes"
            "$$ &? (display help)"
        ),
        "discard": _(
            "[Ynesfdaq?]"
            "$$ &Yes, discard this change"
            "$$ &No, skip this change"
            "$$ &Edit this change manually"
            "$$ &Skip remaining changes to this file"
            "$$ Discard remaining changes to this &file"
            "$$ &Done, skip remaining changes and files"
            "$$ Discard &all changes to all remaining files"
            "$$ &Quit, discarding no changes"
            "$$ &? (display help)"
        ),
        "record": _(
            "[Ynesfdaq?]"
            "$$ &Yes, record this change"
            "$$ &No, skip this change"
            "$$ &Edit this change manually"
            "$$ &Skip remaining changes to this file"
            "$$ Record remaining changes to this &file"
            "$$ &Done, skip remaining changes and files"
            "$$ Record &all changes to all remaining files"
            "$$ &Quit, recording no changes"
            "$$ &? (display help)"
        ),
    }
    return {
        "multiple": multiple,
        "single": single,
        "help": helptexts,
    }
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-05-28 01:49:24 +03:00
|
|
|
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of header objects (each carrying its hunks); the
    user is prompted per file and per hunk.  Returns a pair of
    (selected chunks, {}) -- the second element appears to be reserved;
    NOTE(review): it is always an empty dict here.
    """
    messages = getmessages()

    if operation is None:
        operation = "record"

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous "all"/"done" or per-file answer short-circuits the prompt
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages["help"][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8:  # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write("%s - %s\n" % (c, encoding.lower(t)))
                continue
            elif r == 0:  # yes
                ret = True
            elif r == 1:  # no
                ret = False
            elif r == 2:  # Edit patch
                if chunk is None:
                    ui.write(_("cannot edit patch for whole file"))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_("cannot edit patch for binary file"))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = "---" + _(
                    """
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = tempfile.mkstemp(
                    prefix="hg-editor-", suffix=".diff", text=True
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write("\n".join(["# " + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        '%s "%s"' % (editor, patchfn),
                        environ={"HGUSER": ui.username()},
                        blockedtag="filterpatch",
                    )
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith("#"):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3:  # Skip
                ret = skipfile = False
            elif r == 4:  # file (Record remaining)
                ret = skipfile = True
            elif r == 5:  # done, skip remaining
                ret = skipall = False
            elif r == 6:  # all
                ret = skipall = True
            elif r == 7:  # quit
                raise error.Abort(_("user quit"))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        # skip duplicate headers (same file appearing twice in the patch)
        hdr = "".join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = _("examine changes to %s?") % _(" and ").join(
                "'%s'" % f for f in h.files()
            )
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # all-or-nothing patch: take every hunk without prompting
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages["single"][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages["multiple"][operation] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile, skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target line numbers
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: substitute the edited hunks
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # skipped hunk: subsequent hunks must be shifted accordingly
                fixoffset += chunk.removed - chunk.added
    # keep only files where something beyond the bare header was selected
    # (or whose header is "special", e.g. deletions)
    return (
        sum([h for h in applied.itervalues() if h[0].special() or len(h) > 1], []),
        {},
    )
|
|
|
|
|
|
|
|
|
2009-06-10 17:10:21 +04:00
|
|
|
class hunk(object):
    """A single hunk of a text patch, in unified or context format.

    Parses the hunk body from the linereader ``lr`` and keeps three
    parallel views of it: ``self.hunk`` (raw lines, starting with the
    ``@@``/context descriptor), ``self.a`` (old-side lines, prefixed
    '-' or ' ') and ``self.b`` (new-side lines, unprefixed for context
    format).  Context-format hunks are normalized to unified form.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk descriptor line ("@@ ..." or "*** ...")
        # num: 1-based hunk index, used only in error messages
        # lr: linereader to consume the hunk body from; None builds an
        #     empty shell (see getnormalized)
        # context: truthy if this is a context-format hunk
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith("\r\n"):
                    line = line[:-2] + "\n"
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        # NOTE: nh.hunk intentionally shares the raw line list; only the
        # a/b views are normalized.
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk body ("@@ -s,l +s,l @@" form)."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # lengths are optional in the descriptor and default to 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk body and rewrite it in unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side ("*** a,b ****") block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith("---"):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith("- ") or l.startswith("! "):
                u = "-" + s
            elif l.startswith(" "):
                u = " " + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") % (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith("\ "):
            # "\ No newline at end of file": strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # read the new-side ("--- a,b ----") block, merging '+' lines into
        # self.hunk at the right position (hunki tracks the insertion point)
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith("\ "):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith("+ ") or l.startswith("! "):
                u = "+" + s
            elif l.startswith(" "):
                u = " " + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") % (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith("-"):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith("-") or x.startswith(" "):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith("+") or x.startswith(" "):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline...' marker, if present."""
        l = lr.readline()
        if l.startswith("\ "):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True once both sides hold exactly the advertised number of lines
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == " ":
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == " ":
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to ``fuzz`` context
        lines trimmed from the hunk ends (top only when ``toponly``)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-06-29 17:00:00 +04:00
|
|
|
class binhunk(object):
    """A binary patch file.

    Parses a git "GIT binary patch" section: reads the 'literal <size>'
    or 'delta <size>' header, base85-decodes the payload lines, and
    zlib-decompresses them into ``self.text``.
    """

    def __init__(self, lr, fname):
        # decoded payload; stays None if parsing fails partway
        self.text = None
        # True for 'delta' sections, which must be applied against the
        # previous file contents (see new())
        self.delta = False
        self.hunk = ["GIT binary patch\n"]
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a one-element list of bytes.

        For delta hunks, ``lines`` is the old content the delta is
        applied to; for literal hunks it is ignored.
        """
        if self.delta:
            return [applybindelta(self.text, "".join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip("\r\n")

        size = 0
        # scan forward for the 'literal'/'delta' size header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data') % self._fname)
            if line.startswith("literal "):
                size = int(line[8:].rstrip())
                break
            if line.startswith("delta "):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded length of this line:
            # 'A'-'Z' -> 1-26, 'a'-'z' -> 27-52 (git base85 framing)
            l = line[0]
            if l <= "Z" and l >= "A":
                l = ord(l) - ord("A") + 1
            else:
                l = ord(l) - ord("a") + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(
                    _('could not decode "%s" binary patch: %s') % (self._fname, str(e))
                )
            line = getline(lr, self.hunk)
        text = zlib.decompress("".join(dec))
        if len(text) != size:
            raise PatchError(
                _('"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
def parsefilename(str):
    """Extract the file name from a '---'/'+++' diff header line.

    The input looks like ``--- filename<TAB or space>trailing junk``;
    everything after the first tab (preferred) or space is dropped.
    """
    name = str[4:].rstrip("\r\n")
    # a tab separator wins over a space; fall back to the whole string
    for sep in ("\t", " "):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-05-29 23:11:52 +03:00
|
|
|
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        # objects that know how to reverse themselves (e.g. crecord
        # uihunks) are asked to; headers and others pass through as-is
        if util.safehasattr(c, "reversehunk"):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2017-07-05 02:41:28 +03:00
|
|
|
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """

    class parser(object):
        """patch parsing state machine"""

        def __init__(self):
            # current line cursors into the old (from) and new (to) file
            self.fromline = 0
            self.toline = 0
            self.proc = ""
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            """Record an @@ range line; resets the line cursors."""
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            """Flush any pending hunk, using ``context`` as its trailing
            context lines, then remember ``context`` for the next hunk."""
            if self.hunk:
                h = recordhunk(
                    self.header,
                    self.fromline,
                    self.toline,
                    self.proc,
                    self.before,
                    self.hunk,
                    context,
                    maxcontext,
                )
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen so far becomes the leading context of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush the previous file's pending hunk before starting anew
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; a missing entry means the event is
        # invalid in that state (raises PatchError below)
        transitions = {
            "file": {
                "context": addcontext,
                "file": newfile,
                "hunk": addhunk,
                "range": addrange,
            },
            "context": {
                "file": newfile,
                "hunk": addhunk,
                "range": addrange,
                "other": addother,
            },
            "hunk": {"context": addcontext, "file": newfile, "range": addrange},
            "range": {"context": addcontext, "hunk": addhunk},
            "other": {"other": addother},
        }

    p = parser()
    fp = stringio()
    fp.write("".join(originalchunks))
    fp.seek(0)

    state = "context"
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError("unhandled transition: %s -> %s" % (state, newstate))
        state = newstate
    del fp
    return p.finished()
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-03-07 09:17:24 +03:00
|
|
|
def pathtransform(path, strip, prefix):
    """turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    """
    end = len(path)
    pos = 0
    if strip == 0:
        return "", prefix + path.rstrip()
    # advance past one '/'-terminated component per strip level
    for remaining in range(strip, 0, -1):
        pos = path.find("/", pos)
        if pos == -1:
            raise PatchError(
                _("unable to strip away %d of %d dirs from %s")
                % (remaining, strip, path)
            )
        pos += 1
        # consume '//' in the path
        while pos < end - 1 and path[pos : pos + 1] == "/":
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-03-07 09:19:26 +03:00
|
|
|
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta describing which file a plain (non-git) hunk
    targets, and whether it creates or deletes that file.

    ``afile_orig``/``bfile_orig`` are the raw '---'/'+++' paths;
    ``strip``/``prefix`` are applied via pathtransform; ``backend`` is
    queried for file existence to disambiguate the target.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length old (new) range at line 0 signals creation (removal)
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[: afile.rfind("/") + 1]
    bbasedir = bfile[: bfile.rfind("/") + 1]
    if (
        missing
        and abasedir == bbasedir
        and afile.startswith(bfile)
        and hunk.starta == 0
        and hunk.lena == 0
    ):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = abase == bbase and bfile.startswith(afile)
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch paths when existence checks were
        # inconclusive
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = "ADD"
    elif remove:
        gp.op = "DELETE"
    return gp
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2015-03-09 23:14:31 +03:00
|
|
|
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br"@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)")
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ""):
            if p(line):
                lines.append(line)
            else:
                # first non-matching line belongs to the next event
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ""):
        if line.startswith("diff --git a/") or line.startswith("diff -r "):

            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ("---", "diff")

            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith("---"):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield "file", header
        elif line[0:1] == " ":
            # '\' covers "\ No newline at end of file" markers
            yield "context", scanwhile(line, lambda l: l[0] in " \\")
        elif line[0] in "-+":
            yield "hunk", scanwhile(line, lambda l: l[0] in "-+\\")
        else:
            m = lines_re.match(line)
            if m:
                yield "range", m.groups()
            else:
                yield "other", line
2015-03-09 23:14:31 +03:00
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    # Remember the current position so the caller's stream can be
    # rewound after the scan; fall back to buffering the remaining
    # input in memory when the underlying file is not seekable.
    start = 0
    try:
        start = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = stringio(lr.fp.read())
    metareader = linereader(fp)
    metareader.push(firstline)
    gitpatches = readgitpatch(metareader)
    fp.seek(start)
    return gitpatches
|
2008-10-19 01:45:46 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-06 19:45:12 +04:00
|
|
|
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # populated (and then reversed) once the first git header is seen
    gitpatches = None

    # our states
    BFILE = 1
    # None until the diff flavor is known; True for context diffs,
    # False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ""):
        if state == BFILE and (
            (not context and x[0] == "@")
            or (context is not False and x.startswith("***************"))
            or x.startswith("GIT binary patch")
        ):
            # a hunk begins: pop the git metadata matching this file, if any
            gp = None
            if gitpatches and gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
            if x.startswith("GIT binary patch"):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith("***************"):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new file: announce the file before it
                emitfile = False
                yield "file", (afile, bfile, h, gp and gp.copy() or None)
            yield "hunk", h
        elif x.startswith("diff --git a/"):
            m = gitre.match(x.rstrip(" \r\n"))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield "git", [
                    g.copy() for g in gitpatches if g.op in ("COPY", "RENAME")
                ]
                gitpatches.reverse()
            afile = "a/" + m.group(1)
            bfile = "b/" + m.group(2)
            # flush hunkless (metadata-only) entries queued before the
            # one matching this header
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield "file", ("a/" + gp.path, "b/" + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(
                    _('failed to synchronize metadata for "%s"') % afile[2:]
                )
            # peek (do not pop) the metadata for the selected file
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith("---"):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith("+++"):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith("***"):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith("---"):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any trailing metadata-only git patches that had no hunks
    while gitpatches:
        gp = gitpatches.pop()
        yield "file", ("a/" + gp.path, "b/" + gp.path, None, gp.copy())
|
|
|
|
|
2011-05-20 00:44:01 +04:00
|
|
|
|
2013-11-27 21:39:00 +04:00
|
|
|
def applybindelta(binchunk, data):
    """Apply a binary delta hunk

    The algorithm used is the algorithm from git's patch-delta.c:
    'binchunk' starts with two variable-length size headers, followed
    by a stream of opcodes that either copy a byte range out of 'data'
    or insert literal bytes carried in the delta itself. Returns the
    reconstructed target content.

    Raises PatchError on opcode 0, which is reserved.
    """

    def deltahead(binchunk):
        # Return the number of bytes taken by one size header: bytes
        # with the high bit set continue the value, the first byte with
        # the high bit clear terminates it.
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i

    # Accumulate fragments and join once at the end; repeated string
    # concatenation in the loop would be quadratic on large deltas.
    fragments = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip source size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip target size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if cmd & 0x80:
            # copy opcode: bits 0-3 select offset bytes present, bits
            # 4-6 select size bytes present; missing bytes are zero
            offset = 0
            size = 0
            if cmd & 0x01:
                offset = ord(binchunk[i])
                i += 1
            if cmd & 0x02:
                offset |= ord(binchunk[i]) << 8
                i += 1
            if cmd & 0x04:
                offset |= ord(binchunk[i]) << 16
                i += 1
            if cmd & 0x08:
                offset |= ord(binchunk[i]) << 24
                i += 1
            if cmd & 0x10:
                size = ord(binchunk[i])
                i += 1
            if cmd & 0x20:
                size |= ord(binchunk[i]) << 8
                i += 1
            if cmd & 0x40:
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, an encoded size of 0 means 0x10000
                size = 0x10000
            offset_end = offset + size
            fragments.append(data[offset:offset_end])
        elif cmd != 0:
            # insert opcode: cmd is the count of literal bytes following
            offset_end = i + cmd
            fragments.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_("unexpected delta opcode 0"))
    return "".join(fragments)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def applydiff(ui, fp, backend, store, strip=1, prefix="", eolmode="strict"):
    """Read a patch from fp and try to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.

    This is a thin wrapper that delegates to _applydiff with the
    regular 'patchfile' hunk applier.
    """
    return _applydiff(
        ui,
        fp,
        patchfile,
        backend,
        store,
        strip=strip,
        prefix=prefix,
        eolmode=eolmode,
    )
|
|
|
|
|
2010-04-17 22:23:24 +04:00
|
|
|
|
2017-11-14 21:26:36 +03:00
|
|
|
def _canonprefix(repo, prefix):
|
2015-03-07 09:22:14 +03:00
|
|
|
if prefix:
|
2017-11-14 21:26:36 +03:00
|
|
|
prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
|
2018-07-06 03:45:27 +03:00
|
|
|
if prefix != "":
|
|
|
|
prefix += "/"
|
2017-11-14 21:26:36 +03:00
|
|
|
return prefix
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix="", eolmode="strict"):
    # Drive iterhunks(fp) and apply each event: hunks go through a
    # 'patcher' instance per file, writes go through 'backend', and
    # copy/rename sources are staged in 'store'.
    prefix = _canonprefix(backend.repo, prefix)

    def pstrip(p):
        # strip leading path components, then prepend the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == "hunk":
            # no current file means its setup failed earlier; skip hunks
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == "file":
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # git patch: normalize the recorded paths
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: synthesize metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, prefix)
            if gp.op == "RENAME":
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (no hunks): perform it directly
                if gp.op == "DELETE":
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ("RENAME", "COPY"):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(
                            _("source file '%s' does not exist") % gp.oldpath
                        )
                if gp.mode:
                    mode = gp.mode
                    if gp.op == "ADD":
                        # Added files without content have no hunk and
                        # must be created
                        data = ""
                if data or mode:
                    if gp.op in ("ADD", "RENAME", "COPY") and backend.exists(gp.path):
                        raise PatchError(
                            _("cannot create %s: destination " "already exists")
                            % gp.path
                        )
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
            except PatchError as inst:
                # record the reject and keep going with the next file
                ui.warn(str(inst) + "\n")
                current_file = None
                rejects += 1
                continue
        elif state == "git":
            # stage copy/rename sources in the store before any hunk
            # touches them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_("unsupported parser state: %s") % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
|
2006-08-13 03:13:27 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        # make the external tool run relative to the repo root
        args.append("-d %s" % util.shellquote(cwd))
    fp = util.popen(
        "%s %s -p%d < %s" % (patcher, " ".join(args), strip, util.shellquote(patchname))
    )
    try:
        # parse the tool's textual output to collect touched files and
        # detect fuzz / failure conditions
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + "\n")
            if line.startswith("patching file "):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find("with fuzz") >= 0:
                # NOTE(review): 'pf' and 'printed_file' are only bound once
                # a "patching file" line has been seen; output that starts
                # with a fuzz/FAILED line would raise UnboundLocalError
                # here — confirm whether that ordering can occur.
                fuzz = True
                if not printed_file:
                    ui.warn(pf + "\n")
                    printed_file = True
                ui.warn(line + "\n")
            elif line.find("saving rejects to file") >= 0:
                ui.warn(line + "\n")
            elif line.find("FAILED") >= 0:
                if not printed_file:
                    ui.warn(pf + "\n")
                    printed_file = True
                ui.warn(line + "\n")
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") % util.explainexit(code)[0])
    return fuzz
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def patchbackend(ui, backend, patchobj, strip, prefix, files=None, eolmode="strict"):
    """Apply 'patchobj' (path or file-like object) through 'backend'.

    Touched file names are accumulated into 'files'. Returns True when
    the patch applied with fuzz; raises PatchError when it failed to
    apply at all.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config("patch", "eol")
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_("unsupported line endings type: %s") % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, "rb")
    except TypeError:
        # not a path: treat patchobj as an already-open file object
        fp = patchobj
    try:
        ret = applydiff(
            ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
        )
    finally:
        # only close streams we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_("patch failed to apply"))
    return ret > 0
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def internalpatch(
    ui, repo, patchobj, strip, prefix="", files=None, eolmode="strict", similarity=0
):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, prefix, files, eolmode)
|
2011-06-15 01:26:35 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def patchrepo(
    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode="strict"
):
    """Apply <patchobj> through a repobackend built from ctx and store."""
    rbackend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rbackend, patchobj, strip, prefix, files, eolmode)
|
2011-06-15 01:26:35 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def patch(
    ui, repo, patchname, strip=1, prefix="", files=None, eolmode="strict", similarity=0
):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured external patch tool takes precedence
    external = ui.config("ui", "patch")
    if external:
        return _externalpatch(ui, repo, external, patchname, strip, files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode, similarity)
|
|
|
|
|
2008-10-19 01:45:45 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
def changedfiles(ui, repo, patchpath, strip=1, prefix=""):
    # Return the set of file paths touched by the patch at 'patchpath'
    # (rename sources included), without applying it.
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, "rb") as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == "file":
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: normalize the recorded paths
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, prefix)[1]
                else:
                    # plain patch: synthesize metadata from the file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, prefix)
                changed.add(gp.path)
                if gp.op == "RENAME":
                    # a rename also touches its source
                    changed.add(gp.oldpath)
            elif state not in ("hunk", "git"):
                raise error.Abort(_("unsupported parser state: %s") % state)
    return changed
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
class GitDiffRequired(Exception):
    """Raised when a diff requires the git extended diff format."""
|
2008-10-22 11:29:26 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def diffallopts(ui, opts=None, untrusted=False, section="diff"):
    """Return diffopts with every feature group enabled and parsed.

    Convenience wrapper around difffeatureopts with git, whitespace and
    formatchanging options all turned on.
    """
    return difffeatureopts(
        ui,
        opts=opts,
        untrusted=untrusted,
        section=section,
        git=True,
        whitespace=True,
        formatchanging=True,
    )
|
|
|
|
|
2014-11-19 03:53:22 +03:00
|
|
|
|
2014-11-19 08:43:38 +03:00
|
|
|
# backward-compatible alias: existing callers of the old 'diffopts' name
# get the all-features variant
diffopts = diffallopts
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def difffeatureopts(
    ui,
    opts=None,
    untrusted=False,
    section="diff",
    git=False,
    whitespace=False,
    formatchanging=False,
):
    """return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers

    Option precedence is: explicitly-set command line value (any truthy
    value, or an explicit boolean), then the forceplain override when
    ui.plain() is active, then the config entry from ``section``.
    """

    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Resolve one option, preferring a command-line value from
        # `opts` over the config file entry named `name` (or `key`).
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        "nodates": get("nodates"),
        "showfunc": get("show_function", "showfunc"),
        # context size is a string (may be empty), so use ui.config
        "context": get("unified", getter=ui.config),
    }
    # inline word-diff only makes sense when color output is active
    buildopts["worddiff"] = (
        ui.configbool("experimental", "worddiff")
        and not ui.plain()
        and ui._colormode is not None
    )

    if git:
        buildopts["git"] = get("git")

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts["showsimilarity"] = ui.configbool(
            "experimental", "extendedheader.similarity"
        )

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config("experimental", "extendedheader.index")
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == "short" or hconf == "":
                    hlen = 12
                elif hconf == "full":
                    hlen = 40
                elif hconf != "none":
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the result (possibly None for bad values)
                buildopts["index"] = hlen

    if whitespace:
        buildopts["ignorews"] = get("ignore_all_space", "ignorews")
        buildopts["ignorewsamount"] = get("ignore_space_change", "ignorewsamount")
        buildopts["ignoreblanklines"] = get("ignore_blank_lines", "ignoreblanklines")
        buildopts["ignorewseol"] = get("ignore_space_at_eol", "ignorewseol")
    if formatchanging:
        buildopts["text"] = opts and opts.get("text")
        # --binary on the command line inverts into nobinary
        binary = None if opts is None else opts.get("binary")
        buildopts["nobinary"] = (
            not binary if binary is not None else get("nobinary", forceplain=False)
        )
        buildopts["noprefix"] = get("noprefix", forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
|
2010-03-09 21:04:18 +03:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def diff(
    repo,
    node1=None,
    node2=None,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    prefix="",
    relroot="",
    copy=None,
    hunksfilterfn=None,
):
    """yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    """
    hunkiter = diffhunks(
        repo,
        node1=node1,
        node2=node2,
        match=match,
        changes=changes,
        opts=opts,
        losedatafn=losedatafn,
        prefix=prefix,
        relroot=relroot,
        copy=copy,
    )
    for fctx1, fctx2, hdr, hunks in hunkiter:
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # cmdutil.getloglinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, "fctx2 unexpectly None in diff hunks filtering"
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten all hunk lines of this file into one text chunk
        pieces = []
        for hrange, hlines in hunks:
            pieces.extend(hlines)
        text = "".join(pieces)
        # a lone header line with no hunk text means "no change"; skip it
        if hdr and (text or len(hdr) > 1):
            yield "\n".join(hdr) + "\n"
        if text:
            yield text
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def diffhunks(
    repo,
    node1=None,
    node2=None,
    match=None,
    changes=None,
    opts=None,
    losedatafn=None,
    prefix="",
    relroot="",
    copy=None,
):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # default to diffing against the first working-directory parent
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # Build a filectx getter backed by a small LRU cache of filelogs
        # (capped at ~20 entries) to avoid re-opening the same filelog.
        cache = {}
        order = collections.deque()

        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least-recently-used filelog
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx

        return getfilectx

    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != "" and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default="path")
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    # only the modified/added/removed components are needed here
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # node labels for the diff header: full hashes in debug mode
    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        # copy tracking is only rendered by git-style diffs (or when we
        # may upgrade to one)
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]

            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(
            (
                (dst, src)
                for (dst, src) in copy.iteritems()
                if dst.startswith(relroot) and src.startswith(relroot)
            )
        )

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # NOTE(review): deleting from `copy` while iterating .items() is safe on
    # Python 2 (list snapshot) but would raise on Python 3 — confirm before
    # any py3 migration.
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        # Run the actual diff with the given opts; `losedata` is invoked
        # whenever information cannot be represented in plain format.
        return trydiff(
            repo,
            revs,
            ctx1,
            ctx2,
            modified,
            added,
            removed,
            copy,
            getfilectx,
            opts,
            losedata,
            prefix,
            relroot,
        )

    if opts.upgrade and not opts.git:
        try:

            def losedata(fn):
                # ask the caller whether losing data on `fn` is acceptable;
                # otherwise escalate to git format via GitDiffRequired
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired

            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # retry from scratch with git-style diffs enabled
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2018-03-19 14:28:30 +03:00
|
|
|
def diffsinglehunk(hunklines):
    """yield (token, label) pairs for the lines of a single hunk

    Deleted lines are labelled "diff.deleted" and inserted lines
    "diff.inserted"; tab runs and trailing whitespace are emitted as
    separate tokens with their own labels so they can be highlighted.
    """
    labels = {"-": "diff.deleted", "+": "diff.inserted"}
    for line in hunklines:
        # peel off the newline, then trailing whitespace, keeping each
        # intermediate form so the stripped tails can be re-emitted below
        chompline = line.rstrip("\n")
        stripline = chompline.rstrip()
        try:
            label = labels[line[0]]
        except KeyError:
            raise error.ProgrammingError("unexpected hunk line: %s" % line)
        for token in tabsplitter.findall(stripline):
            if token[0] == "\t":
                yield (token, "diff.tab")
            else:
                yield (token, label)
        if stripline != chompline:
            yield (chompline[len(stripline) :], "diff.trailingwhitespace")
        if chompline != line:
            yield (line[len(chompline) :], "")
|
|
|
|
|
2018-03-19 14:28:30 +03:00
|
|
|
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code is
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
def diffsinglehunkinline(hunklines):
|
|
|
|
"""yield tokens for a list of lines in a single hunk, with inline colors"""
|
|
|
|
# prepare deleted, and inserted content
|
2018-07-06 03:45:27 +03:00
|
|
|
a = ""
|
|
|
|
b = ""
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal compared with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking of it in a "highlight" way is actually tricky; consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
needs highlighting. How about purely inserted or deleted hunks, then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 make
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will be less likely to miss
important matches (rather than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code is
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
for line in hunklines:
|
2018-07-06 03:45:27 +03:00
|
|
|
if line[0] == "-":
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
a += line[1:]
|
2018-07-06 03:45:27 +03:00
|
|
|
elif line[0] == "+":
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
b += line[1:]
|
|
|
|
else:
|
2018-07-06 03:45:27 +03:00
|
|
|
raise error.ProgrammingError("unexpected hunk line: %s" % line)
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
# fast path: if either side is empty, use diffsinglehunk
|
|
|
|
if not a or not b:
|
|
|
|
for t in diffsinglehunk(hunklines):
|
|
|
|
yield t
|
|
|
|
return
|
|
|
|
# re-split the content into words
|
|
|
|
al = wordsplitter.findall(a)
|
|
|
|
bl = wordsplitter.findall(b)
|
|
|
|
# re-arrange the words to lines since the diff algorithm is line-based
|
2018-07-06 03:45:27 +03:00
|
|
|
aln = [s if s == "\n" else s + "\n" for s in al]
|
|
|
|
bln = [s if s == "\n" else s + "\n" for s in bl]
|
|
|
|
an = "".join(aln)
|
|
|
|
bn = "".join(bln)
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
# run the diff algorithm, prepare atokens and btokens
|
|
|
|
atokens = []
|
|
|
|
btokens = []
|
|
|
|
blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
|
|
|
|
for (a1, a2, b1, b2), btype in blocks:
|
2018-07-06 03:45:27 +03:00
|
|
|
changed = btype == "!"
|
|
|
|
for token in mdiff.splitnewlines("".join(al[a1:a2])):
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
atokens.append((changed, token))
|
2018-07-06 03:45:27 +03:00
|
|
|
for token in mdiff.splitnewlines("".join(bl[b1:b2])):
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
btokens.append((changed, token))
|
|
|
|
|
|
|
|
# yield deleted tokens, then inserted ones
|
2018-07-06 03:45:27 +03:00
|
|
|
for prefix, label, tokens in [
|
|
|
|
("-", "diff.deleted", atokens),
|
|
|
|
("+", "diff.inserted", btokens),
|
|
|
|
]:
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
nextisnewline = True
|
|
|
|
for changed, token in tokens:
|
|
|
|
if nextisnewline:
|
|
|
|
yield (prefix, label)
|
|
|
|
nextisnewline = False
|
|
|
|
# special handling line end
|
2018-07-06 03:45:27 +03:00
|
|
|
isendofline = token.endswith("\n")
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complaints about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
if isendofline:
|
2018-07-06 03:45:27 +03:00
|
|
|
chomp = token[:-1] # chomp
|
|
|
|
token = chomp.rstrip() # detect spaces at the end
|
|
|
|
endspaces = chomp[len(token) :]
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complains about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
# scan tabs
|
|
|
|
for maybetab in tabsplitter.findall(token):
|
2018-07-06 03:45:27 +03:00
|
|
|
if "\t" == maybetab[0]:
|
|
|
|
currentlabel = "diff.tab"
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complains about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
else:
|
|
|
|
if changed:
|
2018-07-06 03:45:27 +03:00
|
|
|
currentlabel = label + ".changed"
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complains about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
else:
|
2018-07-06 03:45:27 +03:00
|
|
|
currentlabel = label + ".unchanged"
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complains about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
yield (maybetab, currentlabel)
|
|
|
|
if isendofline:
|
|
|
|
if endspaces:
|
2018-07-06 03:45:27 +03:00
|
|
|
yield (endspaces, "diff.trailingwhitespace")
|
|
|
|
yield ("\n", "")
|
patch: rewrite the worddiff algorithm
Summary:
There were recent complains about both quality [1] [2] and performance [3]
of the current word diff algorithm.
The current algorithm is actually bad in various ways:
- Lines could be matched across hunks, which is confusing (report [1]).
- For short lines, they can fail "similarity" check, which means they
won't be highlighted when they are expected to be (report [2]).
- Various performance issues:
- Using difflib implemented by pure Python, which is both slow and
suboptimal comparing with xdiff.
- Searching for matched lines across hunks could be O(N^2) if there are
no match found.
Thinking it in a "highlight" way is actually tricky, consider the following
change:
```
# before
foo = 10
# after
if True:
foo = 21 + 3
```
It's obvious that "10" and "21 + 3" need highlighting because they are
different. But what about "if True:"? In theory it's also "different" and
need highlighting. How about purely inserted or deleted hunks then?
Highlighting all of them would be too noisy.
This diff rewrites the word diff algorithm. It differs in multiple ways:
1. Get rid of "matching lines by similarity" step.
2. Only diff words within a same hunk.
3. Dim unchanged words. Instead of highlighting changed words.
4. Treat pure insertion or deletion hunks differently - do not dim or
highlight words in them.
5. Use xdiff instead.
6. Use a better regexp to split words. This reduces the number of tokens sent
to the diff algorithm.
1, 2, 5, 6 help performance. 1, 2, 3, 4 make the result more predictable and
trustworthy. 3 avoids the nasty question about what to highlight. 3 and 4 makes
it more flexible for people to tweak colors. 6 makes the result better since it
merges multiple space tokens into one so xdiff will less likely miss important
matches (than meaningless matches like spaces).
"bold" and "underline" were removed so the changed words will have regular
red/green colors. The output won't be too "noisy" even in cases where code are
changed in a way that inline word matching is meaningless. For people who want
more contrast, they can set:
[color]
diff.inserted.changed = green bold
diff.deleted.changed = red bold
Practically, when diffing D7319718, the old code spends 4 seconds on finding
matched lines preparing for worddiff:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (17467 times) patch.py:2471
....
> 3927 \ _findmatches (22 times) patch.py:2537
348 \ __init__ (8158 times) difflib.py:154
340 | set_seqs (8158 times) difflib.py:223
328 | set_seq2 (8158 times) difflib.py:261
322 | __chain_b (8158 times) difflib.py:306
1818 \ ratio (8158 times) difflib.py:636
1777 | get_matching_blocks (8158 times) difflib.py:460
1605 \ find_longest_match (51966 times) difflib.py:350
38 | __new__ (51966 times) <string>:8
29 \ _make (36035 times) <string>:12
143 \ write (17466 times) ui.py:883
```
The new code takes 0.14 seconds:
```
| diffordiffstat cmdutil.py:1522
\ difflabel (23401 times) patch.py:2562
....
> 140 \ consumehunkbuffer (23346 times) patch.py:2585
130 | diffsinglehunkinline (23240 times) patch.py:2496
215 \ write (23400 times) ui.py:883
118 \ flush cmdutil.py:1606
118 | write ui.py:883
```
[1]: https://fburl.com/lkb9rc9m
[2]: https://fburl.com/0r9bqf0e
[3]: https://fburl.com/pxqznw31
Reviewed By: ryanmce
Differential Revision: D7314726
fbshipit-source-id: becd979cb9ac3fd3f4adae11cb10804d535f58df
2018-03-19 14:28:32 +03:00
|
|
|
nextisnewline = True
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2010-04-03 00:22:06 +04:00
|
|
|
def difflabel(func, *args, **kw):
    """yields 2-tuples of (output, label) based on the output of func()"""
    # Select the per-hunk coloring routine: word-level inline highlighting
    # when the caller passed diff opts with worddiff enabled, plain
    # whole-line labeling otherwise.
    if kw.get(r"opts") and kw[r"opts"].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    # (prefix, label) pairs for lines in the header section of a file diff
    headprefixes = [
        ("diff", "diff.diffline"),
        ("copy", "diff.extended"),
        ("rename", "diff.extended"),
        ("old", "diff.extended"),
        ("new", "diff.extended"),
        ("deleted", "diff.extended"),
        ("index", "diff.extended"),
        ("similarity", "diff.extended"),
        ("---", "diff.file_a"),
        ("+++", "diff.file_b"),
    ]
    # (prefix, label) pairs for lines in the body of a file diff
    textprefixes = [
        ("@", "diff.hunk"),
        # - and + are handled by diffsinglehunk
    ]
    # True while we are inside the header section of a file diff
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []

    def consumehunkbuffer():
        # Flush the buffered hunk through dodiffhunk, then empty the buffer
        # in place (hunkbuffer is shared with the enclosing scope, so it
        # must be cleared via slice assignment, not rebound).
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        # chunks are not line-aligned; split and track whether the last
        # element is a complete line (every element but the last is)
        lines = chunk.split("\n")
        linecount = len(lines)
        for i, line in enumerate(lines):
            # header-state machine: a hunk marker ("@") leaves the header;
            # any line not starting with a diff-body character enters it
            if head:
                if line.startswith("@"):
                    head = False
            else:
                if line and line[0] not in " +-@\\":
                    head = True
            diffline = False
            if not head and line and line[0] in "+-":
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        # trailing whitespace gets its own label so it can
                        # be rendered visibly
                        if line != stripline:
                            yield (line[len(stripline) :], "diff.trailingwhitespace")
                        break
                else:
                    # no prefix matched: emit the line unlabeled
                    yield (line, "")
                if i + 1 < linecount:
                    yield ("\n", "")
    # emit any hunk still buffered once the input is exhausted
    for token in consumehunkbuffer():
        yield token
|
2017-10-25 18:13:38 +03:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2010-04-03 00:22:06 +04:00
|
|
|
def diffui(*args, **kw):
    """Variant of diff() that yields (output, label) 2-tuples for ui.write()."""
    labeled = difflabel(diff, *args, **kw)
    return labeled
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2016-01-14 21:02:34 +03:00
|
|
|
def _filepairs(modified, added, removed, copy, opts):
|
2018-07-06 03:45:27 +03:00
|
|
|
"""generates tuples (f1, f2, copyop), where f1 is the name of the file
|
2015-01-23 10:29:00 +03:00
|
|
|
before and f2 is the the name after. For added files, f1 will be None,
|
|
|
|
and for removed files, f2 will be None. copyop may be set to None, 'copy'
|
2018-07-06 03:45:27 +03:00
|
|
|
or 'rename' (the latter two only if opts.git is set)."""
|
2009-05-17 05:28:49 +04:00
|
|
|
gone = set()
|
2008-06-26 22:46:34 +04:00
|
|
|
|
2010-02-11 16:22:57 +03:00
|
|
|
copyto = dict([(v, k) for k, v in copy.items()])
|
|
|
|
|
2015-01-30 08:12:35 +03:00
|
|
|
addedset, removedset = set(added), set(removed)
|
2015-01-23 10:29:00 +03:00
|
|
|
|
2009-04-27 01:50:44 +04:00
|
|
|
for f in sorted(modified + added + removed):
|
2015-01-17 02:09:21 +03:00
|
|
|
copyop = None
|
trydiff: make variable names more consistent
Use '1' and '2' as suffix for names just like in the parameters
'ctx[12]':
to,tn -> content1,content2
a,b -> f1, f2
omode,mode -> mode1,mode2
omode,nmode -> mode1,mode2
onode,nnode -> node1,node2
oflag,nflag -> flag1,flag2
oindex,nindex -> index1,index2
2015-01-16 21:57:13 +03:00
|
|
|
f1, f2 = f, f
|
2015-01-23 10:13:48 +03:00
|
|
|
if f in addedset:
|
|
|
|
f1 = None
|
|
|
|
if f in copy:
|
2010-01-01 22:54:05 +03:00
|
|
|
if opts.git:
|
2015-01-23 10:13:48 +03:00
|
|
|
f1 = copy[f]
|
|
|
|
if f1 in removedset and f1 not in gone:
|
2018-07-06 03:45:27 +03:00
|
|
|
copyop = "rename"
|
2015-01-23 10:13:48 +03:00
|
|
|
gone.add(f1)
|
|
|
|
else:
|
2018-07-06 03:45:27 +03:00
|
|
|
copyop = "copy"
|
2015-01-23 10:13:48 +03:00
|
|
|
elif f in removedset:
|
|
|
|
f2 = None
|
|
|
|
if opts.git:
|
|
|
|
# have we already reported a copy above?
|
2018-07-06 03:45:27 +03:00
|
|
|
if f in copyto and copyto[f] in addedset and copy[copyto[f]] == f:
|
2015-01-23 10:13:48 +03:00
|
|
|
continue
|
2015-01-23 10:29:00 +03:00
|
|
|
yield f1, f2, copyop
|
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
|
|
|
def trydiff(
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    opts,
    losedatafn,
    prefix,
    relroot,
):
    """given input data, generate a diff and yield it in blocks

    Yields (fctx1, fctx2, header, hunks) tuples, one per file pair
    produced by _filepairs(); header is a list of header lines and
    hunks is an iterable of (hunkrange, hunklines) pairs.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it."""

    def gitindex(text):
        # hash the content the way git hashes a blob object:
        # sha1("blob <len>\0" + data)
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1("blob %d\0" % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ""
    else:
        aprefix = "a/"
        bprefix = "b/"

    def diffline(f, revs):
        # plain (non-git) header line, e.g. "diff -r REV1 -r REV2 path"
        revinfo = " ".join(["-r %s" % rev for rev in revs])
        return "diff %s %s" % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # mercurial flag -> git file mode
    gitmode = {"l": "120000", "x": "100755", "": "100644"}

    # developer sanity check: with a relroot, every incoming path must
    # already be prefixed by it (the caller is responsible for filtering)
    if relroot != "" and (
        repo.ui.configbool("devel", "all-warnings")
        or repo.ui.configbool("devel", "check-relroot")
    ):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot)
                )

    # f1 is None for added files, f2 is None for removed files
    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            # --text forces a text diff but still wants binary detection
            # downstream (check_binary) for the "Binary file ..." notice
            check_binary = True
            binary = False
        else:
            check_binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
            binary = check_binary

        # a plain (non-git) diff cannot represent these cases; give the
        # caller a chance to notice the data loss
        if losedatafn and not opts.git:
            if (
                binary
                or
                # copy/rename
                f2 in copy
                or
                # empty file creation
                (not f1 and isempty(fctx2))
                or
                # empty file deletion
                (isempty(fctx1) and not f2)
                or
                # create with flags
                (not f1 and flag2)
                or
                # change flags
                (f1 and f2 and flag1 != flag2)
            ):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        # strip relroot, add the output prefix
        path1 = posixpath.join(prefix, path1[len(relroot) :])
        path2 = posixpath.join(prefix, path2[len(relroot) :])
        header = []
        if opts.git:
            header.append("diff --git %s%s %s%s" % (aprefix, path1, bprefix, path2))
            if not f1:  # added
                header.append("new file mode %s" % gitmode[flag2])
            elif not f2:  # removed
                header.append("deleted file mode %s" % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append("old mode %s" % mode1)
                    header.append("new mode %s" % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append("similarity index %d%%" % sim)
                    header.append("%s from %s" % (copyop, path1))
                    header.append("%s to %s" % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b"\0"
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b"\0"  # not different
                else:
                    content2 = b"\0\0"
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # git binary diff: base85-encoded delta plus a blob index line
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append("index %s..%s" % (gitindex(content1), gitindex(content2)))
            hunks = ((None, [text]),)
        else:
            if opts.git and opts.index > 0:
                # abbreviated index line; for added/removed files only one
                # flag is known, so fall back to the other side's flag
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append(
                    "index %s..%s %s"
                    % (
                        gitindex(content1)[0 : opts.index],
                        gitindex(content2)[0 : opts.index],
                        gitmode[flag],
                    )
                )

            uheaders, hunks = mdiff.unidiff(
                content1,
                date1,
                content2,
                date2,
                path1,
                path2,
                opts=opts,
                check_binary=check_binary,
            )
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
|
2006-08-13 03:13:27 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2011-05-22 00:06:36 +04:00
|
|
|
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples into whole-diff totals.

    *stats* yields (filename, adds, removes, isbinary) tuples as produced
    by diffstatdata(). Returns (maxfile, maxtotal, addtotal, removetotal,
    binary): the widest filename in display columns, the largest per-file
    change count, the summed additions and removals, and whether any file
    was binary.
    """
    widest = 0
    biggest = 0
    added = 0
    removed = 0
    sawbinary = False
    for name, plus, minus, isbin in stats:
        widest = max(widest, encoding.colwidth(name))
        biggest = max(biggest, plus + minus)
        added += plus
        removed += minus
        sawbinary = sawbinary or isbin

    return widest, biggest, added, removed, sawbinary
|
2011-05-22 00:06:36 +04:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2008-12-25 11:48:24 +03:00
|
|
|
def diffstatdata(lines):
    """Parse diff text into per-file statistics.

    Returns a list of (filename, adds, removes, isbinary) tuples, one per
    file header ("diff ..." line) seen in *lines*.
    """
    # fix: use a raw string -- '\s' in a non-raw literal is an invalid
    # escape sequence (DeprecationWarning on modern Pythons); the compiled
    # pattern is unchanged.
    diffre = re.compile(r"^diff .*-r [a-z0-9]+\s(.*)$")

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters for the file currently being parsed, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith("diff"):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith("diff --git a/"):
                filename = gitre.search(line).group(2)
            elif line.startswith("diff -r"):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith("@@"):
            # first hunk marker: header lines ("--- a/...", "+++ b/...")
            # are over, +/- lines now count as changes
            inheader = False
        elif line.startswith("+") and not inheader:
            adds += 1
        elif line.startswith("-") and not inheader:
            removes += 1
        elif line.startswith("GIT binary patch") or line.startswith("Binary file"):
            isbinary = True
    addresult()
    return results
|
2008-12-25 11:48:24 +03:00
|
|
|
|
2018-07-06 03:45:27 +03:00
|
|
|
|
2018-05-25 10:38:13 +03:00
|
|
|
def diffstat(lines, width=80, status=None):
    """If status is not None, it's a tuple (modified, added, removed) and
    "changed", "added", "removed" will be shown before file names.
    """
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = "Bin"
        else:
            count = "%d" % (adds + removes)
        pluses = "+" * scale(adds)
        minuses = "-" * scale(removes)
        # fix: prefix was only assigned inside the 'if status:' branch,
        # so the default status=None path raised NameError below; default
        # to no prefix when no status information is given.
        prefix = ""
        if status:
            if filename in status[0]:
                prefix = "changed "
            elif filename in status[1]:
                prefix = "added "
            elif filename in status[2]:
                prefix = "removed "
            else:
                prefix = " "
        output.append(
            "%s%s%s | %*s %s%s\n"
            % (
                prefix,
                filename,
                " " * (maxname - encoding.colwidth(filename)),
                countwidth,
                count,
                pluses,
                minuses,
            )
        )

    if stats:
        output.append(
            _(" %d files changed, %d insertions(+), " "%d deletions(-)\n")
            % (len(stats), totaladds, totalremoves)
        )

    return "".join(output)
|
2008-12-25 11:48:24 +03:00
|
|
|
|
2010-04-03 00:22:06 +04:00
|
|
|
|
|
|
|
def diffstatui(*args, **kw):
    """like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()

    The +/- graph at the end of each histogram row is split off and
    tagged with the 'diffstat.inserted' / 'diffstat.deleted' labels;
    all other text is yielded with an empty label.
    """

    for row in diffstat(*args, **kw).splitlines():
        # rows ending in the +/- graph get colorized; everything else
        # (summary line, binary entries) passes straight through
        if not row or row[-1] not in "+-":
            yield (row, "")
            continue
        text, bar = row.rsplit(" ", 1)
        yield (text + " ", "")
        plus = re.search(br"\++", bar)
        if plus:
            yield (plus.group(0), "diffstat.inserted")
        minus = re.search(br"-+", bar)
        if minus:
            yield (minus.group(0), "diffstat.deleted")
    yield ("\n", "")
|