2006-08-12 02:50:16 +04:00
|
|
|
# patch.py - patch file parsing routines
|
|
|
|
#
|
2006-08-12 23:47:18 +04:00
|
|
|
# Copyright 2006 Brendan Cully <brendan@kublai.com>
|
2007-07-17 20:39:30 +04:00
|
|
|
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
|
2006-08-12 23:47:18 +04:00
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2010-12-10 17:14:05 +03:00
|
|
|
import cStringIO, email.Parser, os, errno, re
|
2010-04-17 22:13:57 +04:00
|
|
|
import tempfile, zlib
|
|
|
|
|
2006-12-15 05:25:19 +03:00
|
|
|
from i18n import _
|
2008-03-07 00:23:26 +03:00
|
|
|
from node import hex, nullid, short
|
2011-05-08 19:48:30 +04:00
|
|
|
import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, wdutil
|
2006-08-13 00:16:48 +04:00
|
|
|
|
2008-10-22 14:56:28 +04:00
|
|
|
# matches a git extended diff header, capturing the 'a/' and 'b/' paths
gitre = re.compile('diff --git a/(.*) b/(.*)')
|
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
|
|
|
|
|
2006-08-17 06:46:18 +04:00
|
|
|
# helper functions
|
|
|
|
|
2008-12-09 16:27:47 +03:00
|
|
|
def copyfile(src, dst, basedir):
    """Copy src to dst, both taken relative to basedir.

    Paths are audited through scmutil.canonpath. Raises util.Abort if
    the destination already exists (lexists: a dangling symlink also
    blocks the copy) or if the destination directory cannot be created.
    """
    abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
                      for x in [src, dst]]
    if os.path.lexists(absdst):
        raise util.Abort(_("cannot create %s: destination already exists") %
                         dst)

    dstdir = os.path.dirname(absdst)
    if dstdir and not os.path.isdir(dstdir):
        try:
            os.makedirs(dstdir)
        except (IOError, OSError):
            # os.makedirs raises OSError, not IOError; the original
            # 'except IOError' let e.g. permission errors escape as a
            # raw traceback instead of being reported as an Abort.
            raise util.Abort(
                _("cannot create %s: unable to create destination directory")
                % dst)

    util.copyfile(abssrc, absdst)
|
2006-08-17 06:46:18 +04:00
|
|
|
|
|
|
|
# public functions
|
|
|
|
|
2010-02-07 20:06:52 +03:00
|
|
|
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic: does 'line' look like an RFC822-style header line?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # wrap a list of lines back into a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers that follow a blank line
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email package split a (possibly multipart) MIME message
        def msgfp(m):
            # flatten a message back into a seekable file object
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split by hand on runs of header-looking lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no split points found: everything read so far is one patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline()-only object into an iterator of lines
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not hasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the head of the stream to pick a splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
|
|
|
|
|
2006-08-13 00:16:48 +04:00
|
|
|
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    # the diff payload is accumulated into a temp file that the caller
    # must unlink (see docstring)
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading '[PATCH n/m]' style tag
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = subject.replace('\n\t', ' ')
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # this part contains a diff: everything before the diff
                # start is (candidate) commit message text
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        # parse the '# Key value' lines hg export writes
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            # NOTE(review): [10:] skips one char past the
                            # 9-char prefix — presumably hg export writes a
                            # double space after '# Parent'; confirm
                            parents.append(line[10:])
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---' and gitsendmail:
                        # git-send-email separator: everything after it is
                        # diffstat noise, not commit message
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff: append to the message
                message += '\n' + payload
    except:
        # bare except is deliberate: clean up the temp file, then re-raise
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        # no patch found: discard the temp file and return message parts only
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2009-06-10 17:10:21 +04:00
|
|
|
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path        # current (patched) file path
        self.oldpath = None     # origin path for COPY/RENAME
        self.mode = None        # (islink, isexec) or None if unchanged
        self.op = 'MODIFY'      # default until a header says otherwise
        self.binary = False     # True once a 'GIT binary patch' is seen

    def setmode(self, mode):
        # 'mode' is a git-style octal file mode (e.g. 0100755 for an
        # executable file, 0120000 for a symlink); mask out the symlink
        # type bit and the owner-executable bit.
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
|
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    # gp is the patchmeta currently being filled in; it is flushed to
    # gitpatches when the next file header (or '--- ') is reached.
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of the hunk section: metadata for this file is done
                gitpatches.append(gp)
                gp = None
                continue
            # each slice offset below is the length of the matched prefix
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last six characters are the octal git mode, e.g. 100755
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
|
2006-08-12 02:50:16 +04:00
|
|
|
|
2009-06-22 14:05:11 +04:00
|
|
|
class linereader(object):
    """Line reader over a file object with one-or-more-line push-back.

    When 'textmode' is true, CRLF terminators are rewritten to LF on
    read.  The first terminator observed on a line read from the
    underlying file is remembered in self.eol ('\r\n', '\n', or None
    while nothing conclusive has been read).
    """
    def __init__(self, fp, textmode=False):
        self.fp = fp
        self.buf = []
        self.textmode = textmode
        self.eol = None

    def push(self, line):
        # queue 'line' to be handed out again by readline(); None is a no-op
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # pushed-back lines are served first, oldest first, untouched
        if self.buf:
            return self.buf.pop(0)
        l = self.fp.readline()
        if not self.eol:
            # remember the first line terminator the file shows us
            if l.endswith('\r\n'):
                self.eol = '\r\n'
            elif l.endswith('\n'):
                self.eol = '\n'
        if self.textmode and l.endswith('\r\n'):
            l = l[:-2] + '\n'
        return l

    def __iter__(self):
        line = self.readline()
        while line:
            yield line
            line = self.readline()
|
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
# context-diff range lines: '*** start[,end] ***' or '--- start[,end] ---'
contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
# supported line-ending handling modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2009-06-10 17:10:21 +04:00
|
|
|
class patchfile(object):
    """A file being patched: holds its lines, applies hunks, and tracks
    offsets, fuzz skew and rejected hunks until close() writes the result.
    """
    def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
        self.fname = fname
        self.eolmode = eolmode
        self.eol = None
        self.opener = opener
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = missing
        if not missing:
            try:
                self.lines = self.readlines(fname)
                self.exists = True
            except IOError:
                # file does not exist yet; a creating hunk may add it
                pass
        else:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}          # line text -> list of line numbers (for fuzz)
        self.dirty = False      # True once any hunk modified self.lines
        self.offset = 0         # cumulative line-count drift from prior hunks
        self.skew = 0           # offset at which the last hunk actually landed
        self.rej = []           # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def readlines(self, fname):
        """Read the target's lines; a symlink reads as its single target."""
        if os.path.islink(fname):
            return [os.readlink(fname)]
        fp = self.opener(fname, 'r')
        try:
            # non-strict modes normalize CRLF on read and record the
            # original eol so it can be restored on write
            lr = linereader(fp, self.eolmode != 'strict')
            lines = list(lr)
            self.eol = lr.eol
            return lines
        finally:
            fp.close()

    def writelines(self, fname, lines):
        # Ensure supplied data ends in fname, being a regular file or
        # a symlink. cmdutil.updatedir will -too magically- take care
        # of setting it to the proper type afterwards.
        st_mode = None
        islink = os.path.islink(fname)
        if islink:
            # buffer symlink content; it becomes the link target below
            fp = cStringIO.StringIO()
        else:
            try:
                # preserve the existing permission bits across the rewrite
                st_mode = os.lstat(fname).st_mode & 0777
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
            fp = self.opener(fname, 'w')
        try:
            # pick the eol to emit: original eol in 'auto' mode, forced
            # CRLF in 'crlf', LF otherwise
            if self.eolmode == 'auto':
                eol = self.eol
            elif self.eolmode == 'crlf':
                eol = '\r\n'
            else:
                eol = '\n'

            if self.eolmode != 'strict' and eol and eol != '\n':
                for l in lines:
                    if l and l[-1] == '\n':
                        l = l[:-1] + eol
                    fp.write(l)
            else:
                fp.writelines(lines)
            if islink:
                self.opener.symlink(fp.getvalue(), fname)
            if st_mode is not None:
                os.chmod(fname, st_mode)
        finally:
            fp.close()

    def unlink(self, fname):
        os.unlink(fname)

    def printfile(self, warn):
        """Print 'patching file X' once, as warning or note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def makerejlines(self, fname):
        """Yield the lines of a .rej file for the rejected hunks."""
        base = os.path.basename(fname)
        yield "--- %s\n+++ %s\n" % (base, base)
        for x in self.rej:
            for l in x.hunk:
                yield l
                if l[-1] != '\n':
                    yield "\n\ No newline at end of file\n"

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.

        if not self.rej:
            return

        fname = self.fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (len(self.rej), self.hunks, fname))

        fp = self.opener(fname, 'w')
        fp.writelines(self.makerejlines(self.fname))
        fp.close()

    def apply(self, h):
        """Apply hunk 'h' to self.lines.

        Returns 0 on a clean apply, a positive fuzz amount when fuzzing
        was needed, and -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace or delete the whole file
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush modified lines and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines)
        self.write_rej()
        return len(self.rej)
|
|
|
|
|
2009-06-10 17:10:21 +04:00
|
|
|
class hunk(object):
|
2008-03-16 02:35:12 +03:00
|
|
|
    def __init__(self, desc, num, lr, context, create=False, remove=False):
        """Parse one hunk from line reader 'lr' (context or unified format).

        'lr' may be None to build an empty shell (see getnormalized).
        """
        self.number = num
        self.desc = desc
        self.hunk = [desc]      # raw hunk lines, starting with the @@ line
        self.a = []             # old-side lines, prefixed with '-' or ' '
        self.b = []             # new-side lines
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2009-12-23 21:31:47 +03:00
|
|
|
def getnormalized(self):
|
|
|
|
"""Return a copy with line endings normalized to LF."""
|
|
|
|
|
|
|
|
def normalize(lines):
|
|
|
|
nlines = []
|
|
|
|
for line in lines:
|
|
|
|
if line.endswith('\r\n'):
|
|
|
|
line = line[:-2] + '\n'
|
|
|
|
nlines.append(line)
|
|
|
|
return nlines
|
|
|
|
|
|
|
|
# Dummy object, it is rebuilt manually
|
|
|
|
nh = hunk(self.desc, self.number, None, None, False, False)
|
|
|
|
nh.number = self.number
|
|
|
|
nh.desc = self.desc
|
2010-02-23 00:55:58 +03:00
|
|
|
nh.hunk = self.hunk
|
2009-12-23 21:31:47 +03:00
|
|
|
nh.a = normalize(self.a)
|
|
|
|
nh.b = normalize(self.b)
|
|
|
|
nh.starta = self.starta
|
|
|
|
nh.startb = self.startb
|
|
|
|
nh.lena = self.lena
|
|
|
|
nh.lenb = self.lenb
|
|
|
|
nh.create = self.create
|
|
|
|
nh.remove = self.remove
|
|
|
|
return nh
|
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk whose '@@' line is in self.desc."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # a missing ',len' in the @@ line means a length of 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
    def read_context_hunk(self, lr):
        """Parse a context-format hunk and convert it to unified form.

        Fills self.a/self.b/self.hunk and rewrites self.desc as a '@@'
        unified header at the end.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side ('*** a,b ****') block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # read the new-side ('--- a,b ----') block, merging it into
        # self.hunk at the right positions; hunki indexes into self.hunk
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # note: the message says "old" here too (historical wording)
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki to where this new-side line belongs, skipping
            # '-' lines and inserting '+'/context lines as needed
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2011-03-20 02:08:44 +03:00
|
|
|
def _fixnewline(self, lr):
|
|
|
|
l = lr.readline()
|
|
|
|
if l.startswith('\ '):
|
|
|
|
diffhelpers.fix_newline(self.hunk, self.a, self.b)
|
|
|
|
else:
|
|
|
|
lr.push(l)
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
def complete(self):
|
|
|
|
return len(self.a) == self.lena and len(self.b) == self.lenb
|
|
|
|
|
|
|
|
def createfile(self):
|
2008-03-16 02:35:12 +03:00
|
|
|
return self.starta == 0 and self.lena == 0 and self.create
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
def rmfile(self):
|
2008-03-16 02:35:12 +03:00
|
|
|
return self.startb == 0 and self.lenb == 0 and self.remove
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
def fuzzit(self, l, fuzz, toponly):
|
|
|
|
# this removes context lines from the top and bottom of list 'l'. It
|
|
|
|
# checks the hunk to make sure only context lines are removed, and then
|
|
|
|
# returns a new shortened list of lines.
|
|
|
|
fuzz = min(fuzz, len(l)-1)
|
|
|
|
if fuzz:
|
|
|
|
top = 0
|
|
|
|
bot = 0
|
|
|
|
hlen = len(self.hunk)
|
2010-01-25 09:05:27 +03:00
|
|
|
for x in xrange(hlen - 1):
|
2007-07-17 20:39:30 +04:00
|
|
|
# the hunk starts with the @@ line, so use x+1
|
2010-01-25 09:05:27 +03:00
|
|
|
if self.hunk[x + 1][0] == ' ':
|
2007-07-17 20:39:30 +04:00
|
|
|
top += 1
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
if not toponly:
|
2010-01-25 09:05:27 +03:00
|
|
|
for x in xrange(hlen - 1):
|
|
|
|
if self.hunk[hlen - bot - 1][0] == ' ':
|
2007-07-17 20:39:30 +04:00
|
|
|
bot += 1
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
|
|
|
|
# top and bot now count context in the hunk
|
|
|
|
# adjust them if either one is short
|
|
|
|
context = max(top, bot, 3)
|
|
|
|
if bot < context:
|
|
|
|
bot = max(0, fuzz - (context - bot))
|
|
|
|
else:
|
|
|
|
bot = min(fuzz, bot)
|
|
|
|
if top < context:
|
|
|
|
top = max(0, fuzz - (context - top))
|
|
|
|
else:
|
|
|
|
top = min(fuzz, top)
|
|
|
|
|
|
|
|
return l[top:len(l)-bot]
|
|
|
|
return l
|
|
|
|
|
|
|
|
def old(self, fuzz=0, toponly=False):
|
|
|
|
return self.fuzzit(self.a, fuzz, toponly)
|
2007-08-07 12:28:43 +04:00
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
def new(self, fuzz=0, toponly=False):
|
|
|
|
return self.fuzzit(self.b, fuzz, toponly)
|
|
|
|
|
2009-10-16 01:15:30 +04:00
|
|
|
class binhunk:
|
|
|
|
'A binary patch file. Only understands literals so far.'
|
2007-07-17 20:39:30 +04:00
|
|
|
def __init__(self, gitpatch):
|
|
|
|
self.gitpatch = gitpatch
|
|
|
|
self.text = None
|
2009-10-16 01:15:30 +04:00
|
|
|
self.hunk = ['GIT binary patch\n']
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
def createfile(self):
|
|
|
|
return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
|
|
|
|
|
|
|
|
def rmfile(self):
|
|
|
|
return self.gitpatch.op == 'DELETE'
|
|
|
|
|
|
|
|
def complete(self):
|
|
|
|
return self.text is not None
|
|
|
|
|
|
|
|
def new(self):
|
|
|
|
return [self.text]
|
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
def extract(self, lr):
|
|
|
|
line = lr.readline()
|
2007-07-17 20:39:30 +04:00
|
|
|
self.hunk.append(line)
|
2006-10-12 20:17:16 +04:00
|
|
|
while line and not line.startswith('literal '):
|
2008-10-19 01:45:46 +04:00
|
|
|
line = lr.readline()
|
2007-07-17 20:39:30 +04:00
|
|
|
self.hunk.append(line)
|
2006-10-12 20:17:16 +04:00
|
|
|
if not line:
|
2007-07-17 20:39:30 +04:00
|
|
|
raise PatchError(_('could not extract binary patch'))
|
2007-07-17 20:39:30 +04:00
|
|
|
size = int(line[8:].rstrip())
|
2006-10-12 20:17:16 +04:00
|
|
|
dec = []
|
2008-10-19 01:45:46 +04:00
|
|
|
line = lr.readline()
|
2007-07-17 20:39:30 +04:00
|
|
|
self.hunk.append(line)
|
|
|
|
while len(line) > 1:
|
2006-10-13 00:39:14 +04:00
|
|
|
l = line[0]
|
|
|
|
if l <= 'Z' and l >= 'A':
|
|
|
|
l = ord(l) - ord('A') + 1
|
|
|
|
else:
|
|
|
|
l = ord(l) - ord('a') + 27
|
2007-07-17 20:39:30 +04:00
|
|
|
dec.append(base85.b85decode(line[1:-1])[:l])
|
2008-10-19 01:45:46 +04:00
|
|
|
line = lr.readline()
|
2007-07-17 20:39:30 +04:00
|
|
|
self.hunk.append(line)
|
2006-10-12 20:17:16 +04:00
|
|
|
text = zlib.decompress(''.join(dec))
|
|
|
|
if len(text) != size:
|
2007-07-17 20:39:30 +04:00
|
|
|
raise PatchError(_('binary patch is %d bytes, not %d') %
|
2007-07-17 20:39:30 +04:00
|
|
|
len(text), size)
|
|
|
|
self.text = text
|
|
|
|
|
|
|
|
def parsefilename(str):
|
|
|
|
# --- filename \t|space stuff
|
2008-01-12 22:43:09 +03:00
|
|
|
s = str[4:].rstrip('\r\n')
|
2007-07-17 20:39:30 +04:00
|
|
|
i = s.find('\t')
|
|
|
|
if i < 0:
|
|
|
|
i = s.find(' ')
|
|
|
|
if i < 0:
|
|
|
|
return s
|
|
|
|
return s[:i]
|
|
|
|
|
2010-04-26 15:21:03 +04:00
|
|
|
def pathstrip(path, strip):
|
|
|
|
pathlen = len(path)
|
|
|
|
i = 0
|
|
|
|
if strip == 0:
|
|
|
|
return '', path.rstrip()
|
|
|
|
count = strip
|
|
|
|
while count > 0:
|
|
|
|
i = path.find('/', i)
|
|
|
|
if i == -1:
|
|
|
|
raise PatchError(_("unable to strip away %d of %d dirs from %s") %
|
|
|
|
(count, strip, path))
|
|
|
|
i += 1
|
|
|
|
# consume '//' in the path
|
|
|
|
while i < pathlen - 1 and path[i] == '/':
|
2007-07-17 20:39:30 +04:00
|
|
|
i += 1
|
2010-04-26 15:21:03 +04:00
|
|
|
count -= 1
|
|
|
|
return path[:i].lstrip(), path[i:].rstrip()
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2010-04-26 15:21:03 +04:00
|
|
|
def selectfile(afile_orig, bfile_orig, hunk, strip):
|
2007-07-17 20:39:30 +04:00
|
|
|
nulla = afile_orig == "/dev/null"
|
|
|
|
nullb = bfile_orig == "/dev/null"
|
2008-03-18 01:36:45 +03:00
|
|
|
abase, afile = pathstrip(afile_orig, strip)
|
2010-08-25 18:23:32 +04:00
|
|
|
gooda = not nulla and os.path.lexists(afile)
|
2008-03-18 01:36:45 +03:00
|
|
|
bbase, bfile = pathstrip(bfile_orig, strip)
|
2007-07-17 20:39:30 +04:00
|
|
|
if afile == bfile:
|
|
|
|
goodb = gooda
|
|
|
|
else:
|
2010-09-20 23:42:11 +04:00
|
|
|
goodb = not nullb and os.path.lexists(bfile)
|
2007-07-17 20:39:30 +04:00
|
|
|
createfunc = hunk.createfile
|
2007-12-18 01:42:46 +03:00
|
|
|
missing = not goodb and not gooda and not createfunc()
|
2009-08-08 06:27:54 +04:00
|
|
|
|
2010-08-12 19:58:03 +04:00
|
|
|
# some diff programs apparently produce patches where the afile is
|
|
|
|
# not /dev/null, but afile starts with bfile
|
2010-03-20 00:52:38 +03:00
|
|
|
abasedir = afile[:afile.rfind('/') + 1]
|
|
|
|
bbasedir = bfile[:bfile.rfind('/') + 1]
|
|
|
|
if missing and abasedir == bbasedir and afile.startswith(bfile):
|
2009-08-08 06:27:54 +04:00
|
|
|
# this isn't very pretty
|
|
|
|
hunk.create = True
|
|
|
|
if createfunc():
|
|
|
|
missing = False
|
|
|
|
else:
|
|
|
|
hunk.create = False
|
|
|
|
|
2008-03-18 01:36:45 +03:00
|
|
|
# If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
|
|
|
|
# diff is between a file and its backup. In this case, the original
|
|
|
|
# file should be patched (see original mpatch code).
|
|
|
|
isbackup = (abase == bbase and bfile.startswith(afile))
|
2007-12-18 01:42:46 +03:00
|
|
|
fname = None
|
|
|
|
if not missing:
|
|
|
|
if gooda and goodb:
|
2008-03-18 01:36:45 +03:00
|
|
|
fname = isbackup and afile or bfile
|
2007-12-18 01:42:46 +03:00
|
|
|
elif gooda:
|
2007-07-17 20:39:30 +04:00
|
|
|
fname = afile
|
2007-12-29 21:49:48 +03:00
|
|
|
|
2007-12-18 01:42:46 +03:00
|
|
|
if not fname:
|
|
|
|
if not nullb:
|
2008-03-18 01:36:45 +03:00
|
|
|
fname = isbackup and afile or bfile
|
2007-12-18 01:42:46 +03:00
|
|
|
elif not nulla:
|
2007-07-17 20:39:30 +04:00
|
|
|
fname = afile
|
2007-12-18 01:42:46 +03:00
|
|
|
else:
|
|
|
|
raise PatchError(_("undefined source and destination files"))
|
2007-12-29 21:49:48 +03:00
|
|
|
|
2007-12-18 01:42:46 +03:00
|
|
|
return fname, missing
|
2007-07-17 20:39:30 +04:00
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
def scangitpatch(lr, firstline):
|
2008-10-20 16:57:04 +04:00
|
|
|
"""
|
2008-10-19 01:45:46 +04:00
|
|
|
Git patches can emit:
|
|
|
|
- rename a to b
|
|
|
|
- change b
|
|
|
|
- copy a to c
|
|
|
|
- change c
|
2008-10-20 16:57:04 +04:00
|
|
|
|
2008-10-19 01:45:46 +04:00
|
|
|
We cannot apply this sequence as-is, the renamed 'a' could not be
|
|
|
|
found for it would have been renamed already. And we cannot copy
|
|
|
|
from 'b' instead because 'b' would have been changed already. So
|
|
|
|
we scan the git patch for copy and rename commands so we can
|
|
|
|
perform the copies ahead of time.
|
|
|
|
"""
|
|
|
|
pos = 0
|
|
|
|
try:
|
|
|
|
pos = lr.fp.tell()
|
|
|
|
fp = lr.fp
|
|
|
|
except IOError:
|
|
|
|
fp = cStringIO.StringIO(lr.fp.read())
|
2009-06-15 02:03:26 +04:00
|
|
|
gitlr = linereader(fp, lr.textmode)
|
2008-10-19 01:45:46 +04:00
|
|
|
gitlr.push(firstline)
|
2010-10-10 00:13:08 +04:00
|
|
|
gitpatches = readgitpatch(gitlr)
|
2008-10-19 01:45:46 +04:00
|
|
|
fp.seek(pos)
|
2010-10-10 00:13:08 +04:00
|
|
|
return gitpatches
|
2008-10-19 01:45:46 +04:00
|
|
|
|
2011-05-06 19:45:12 +04:00
|
|
|
def iterhunks(fp):
|
2007-12-18 01:06:01 +03:00
|
|
|
"""Read a patch and yield the following events:
|
|
|
|
- ("file", afile, bfile, firsthunk): select a new target file.
|
|
|
|
- ("hunk", hunk): a new hunk is ready to be applied, follows a
|
|
|
|
"file" event.
|
|
|
|
- ("git", gitchanges): current diff is in git format, gitchanges
|
|
|
|
maps filenames to gitpatch records. Unique event.
|
|
|
|
"""
|
|
|
|
changed = {}
|
2007-07-17 20:39:30 +04:00
|
|
|
afile = ""
|
|
|
|
bfile = ""
|
|
|
|
state = None
|
|
|
|
hunknum = 0
|
2011-04-26 23:22:14 +04:00
|
|
|
emitfile = newfile = False
|
2007-07-17 20:39:30 +04:00
|
|
|
git = False
|
|
|
|
|
|
|
|
# our states
|
|
|
|
BFILE = 1
|
|
|
|
context = None
|
2009-12-23 21:31:48 +03:00
|
|
|
lr = linereader(fp)
|
2007-07-17 20:39:30 +04:00
|
|
|
|
|
|
|
while True:
|
|
|
|
x = lr.readline()
|
|
|
|
if not x:
|
|
|
|
break
|
2010-11-03 23:11:07 +03:00
|
|
|
if (state == BFILE and ((not context and x[0] == '@') or
|
2009-05-20 02:43:23 +04:00
|
|
|
((context is not False) and x.startswith('***************')))):
|
2010-10-10 00:13:08 +04:00
|
|
|
if context is None and x.startswith('***************'):
|
|
|
|
context = True
|
|
|
|
gpatch = changed.get(bfile)
|
|
|
|
create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
|
|
|
|
remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
|
2011-03-20 02:08:44 +03:00
|
|
|
h = hunk(x, hunknum + 1, lr, context, create, remove)
|
2007-07-17 20:39:30 +04:00
|
|
|
hunknum += 1
|
2007-12-18 01:06:01 +03:00
|
|
|
if emitfile:
|
|
|
|
emitfile = False
|
2011-03-20 02:08:44 +03:00
|
|
|
yield 'file', (afile, bfile, h)
|
|
|
|
yield 'hunk', h
|
2007-07-17 20:39:30 +04:00
|
|
|
elif state == BFILE and x.startswith('GIT binary patch'):
|
2011-03-20 02:08:44 +03:00
|
|
|
h = binhunk(changed[bfile])
|
2007-07-17 20:39:30 +04:00
|
|
|
hunknum += 1
|
2007-12-18 01:06:01 +03:00
|
|
|
if emitfile:
|
|
|
|
emitfile = False
|
2011-03-20 02:08:44 +03:00
|
|
|
yield 'file', ('a/' + afile, 'b/' + bfile, h)
|
|
|
|
h.extract(lr)
|
|
|
|
yield 'hunk', h
|
2007-07-17 20:39:30 +04:00
|
|
|
elif x.startswith('diff --git'):
|
|
|
|
# check for git diff, scanning the whole patch file if needed
|
|
|
|
m = gitre.match(x)
|
|
|
|
if m:
|
|
|
|
afile, bfile = m.group(1, 2)
|
|
|
|
if not git:
|
|
|
|
git = True
|
2010-10-10 00:13:08 +04:00
|
|
|
gitpatches = scangitpatch(lr, x)
|
2007-12-18 01:06:01 +03:00
|
|
|
yield 'git', gitpatches
|
2007-07-17 20:39:30 +04:00
|
|
|
for gp in gitpatches:
|
2008-10-19 01:45:45 +04:00
|
|
|
changed[gp.path] = gp
|
2007-07-17 20:39:30 +04:00
|
|
|
# else error?
|
|
|
|
# copy/rename + modify should modify target, not source
|
2008-10-22 14:56:28 +04:00
|
|
|
gp = changed.get(bfile)
|
2010-03-20 16:47:05 +03:00
|
|
|
if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
|
|
|
|
or gp.mode):
|
2007-07-17 20:39:30 +04:00
|
|
|
afile = bfile
|
2011-04-26 23:22:14 +04:00
|
|
|
newfile = True
|
2007-07-17 20:39:30 +04:00
|
|
|
elif x.startswith('---'):
|
|
|
|
# check for a unified diff
|
|
|
|
l2 = lr.readline()
|
|
|
|
if not l2.startswith('+++'):
|
|
|
|
lr.push(l2)
|
|
|
|
continue
|
|
|
|
newfile = True
|
|
|
|
context = False
|
|
|
|
afile = parsefilename(x)
|
|
|
|
bfile = parsefilename(l2)
|
|
|
|
elif x.startswith('***'):
|
|
|
|
# check for a context diff
|
|
|
|
l2 = lr.readline()
|
|
|
|
if not l2.startswith('---'):
|
|
|
|
lr.push(l2)
|
|
|
|
continue
|
|
|
|
l3 = lr.readline()
|
|
|
|
lr.push(l3)
|
|
|
|
if not l3.startswith("***************"):
|
|
|
|
lr.push(l2)
|
|
|
|
continue
|
|
|
|
newfile = True
|
|
|
|
context = True
|
|
|
|
afile = parsefilename(x)
|
|
|
|
bfile = parsefilename(l2)
|
|
|
|
|
2011-04-26 23:22:14 +04:00
|
|
|
if newfile:
|
|
|
|
newfile = False
|
2007-12-18 01:06:01 +03:00
|
|
|
emitfile = True
|
2007-07-17 20:39:30 +04:00
|
|
|
state = BFILE
|
|
|
|
hunknum = 0
|
2007-12-18 01:06:01 +03:00
|
|
|
|
2010-11-03 23:11:07 +03:00
|
|
|
def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
|
2010-04-17 22:23:24 +04:00
|
|
|
"""Reads a patch from fp and tries to apply it.
|
2009-06-15 02:03:26 +04:00
|
|
|
|
|
|
|
The dict 'changed' is filled in with all of the filenames changed
|
|
|
|
by the patch. Returns 0 for a clean patch, -1 if any rejects were
|
|
|
|
found and 1 if there was any fuzz.
|
2007-12-18 01:06:01 +03:00
|
|
|
|
2009-12-20 19:18:02 +03:00
|
|
|
If 'eolmode' is 'strict', the patch content and patched file are
|
|
|
|
read in binary mode. Otherwise, line endings are ignored when
|
|
|
|
patching then normalized according to 'eolmode'.
|
2010-04-17 22:23:24 +04:00
|
|
|
|
2010-09-13 15:08:09 +04:00
|
|
|
Callers probably want to call 'cmdutil.updatedir' after this to
|
|
|
|
apply certain categories of changes not done by this function.
|
2009-06-15 02:03:26 +04:00
|
|
|
"""
|
2010-11-03 23:11:07 +03:00
|
|
|
return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
|
|
|
|
eolmode=eolmode)
|
2010-04-17 22:23:24 +04:00
|
|
|
|
2010-11-03 23:11:07 +03:00
|
|
|
def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
|
2007-12-18 01:06:01 +03:00
|
|
|
rejects = 0
|
|
|
|
err = 0
|
|
|
|
current_file = None
|
2010-04-26 15:21:03 +04:00
|
|
|
cwd = os.getcwd()
|
2011-04-20 21:54:57 +04:00
|
|
|
opener = scmutil.opener(cwd)
|
2007-12-18 01:06:01 +03:00
|
|
|
|
2011-05-06 19:45:12 +04:00
|
|
|
for state, values in iterhunks(fp):
|
2007-12-18 01:06:01 +03:00
|
|
|
if state == 'hunk':
|
|
|
|
if not current_file:
|
|
|
|
continue
|
2010-04-26 15:21:03 +04:00
|
|
|
ret = current_file.apply(values)
|
2007-07-17 20:39:30 +04:00
|
|
|
if ret >= 0:
|
2008-10-19 01:45:45 +04:00
|
|
|
changed.setdefault(current_file.fname, None)
|
2007-07-17 20:39:30 +04:00
|
|
|
if ret > 0:
|
|
|
|
err = 1
|
2007-12-18 01:06:01 +03:00
|
|
|
elif state == 'file':
|
2011-03-20 02:22:47 +03:00
|
|
|
if current_file:
|
|
|
|
rejects += current_file.close()
|
2007-12-18 01:06:01 +03:00
|
|
|
afile, bfile, first_hunk = values
|
|
|
|
try:
|
2010-11-03 23:11:07 +03:00
|
|
|
current_file, missing = selectfile(afile, bfile,
|
|
|
|
first_hunk, strip)
|
|
|
|
current_file = patcher(ui, current_file, opener,
|
|
|
|
missing=missing, eolmode=eolmode)
|
2011-05-06 13:31:40 +04:00
|
|
|
except PatchError, inst:
|
|
|
|
ui.warn(str(inst) + '\n')
|
2010-04-26 15:21:03 +04:00
|
|
|
current_file = None
|
2007-12-18 01:06:01 +03:00
|
|
|
rejects += 1
|
|
|
|
continue
|
|
|
|
elif state == 'git':
|
2010-04-26 15:21:03 +04:00
|
|
|
for gp in values:
|
2010-04-26 15:21:03 +04:00
|
|
|
gp.path = pathstrip(gp.path, strip - 1)[1]
|
|
|
|
if gp.oldpath:
|
|
|
|
gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
|
2010-09-28 00:47:10 +04:00
|
|
|
# Binary patches really overwrite target files, copying them
|
|
|
|
# will just make it fails with "target file exists"
|
|
|
|
if gp.op in ('COPY', 'RENAME') and not gp.binary:
|
2010-04-17 22:23:24 +04:00
|
|
|
copyfn(gp.oldpath, gp.path, cwd)
|
2008-10-19 01:45:45 +04:00
|
|
|
changed[gp.path] = gp
|
2007-07-17 20:39:30 +04:00
|
|
|
else:
|
2007-12-18 01:06:01 +03:00
|
|
|
raise util.Abort(_('unsupported parser state: %s') % state)
|
2007-12-18 00:19:21 +03:00
|
|
|
|
2011-03-20 02:22:47 +03:00
|
|
|
if current_file:
|
|
|
|
rejects += current_file.close()
|
2007-12-18 01:06:01 +03:00
|
|
|
|
2007-07-17 20:39:30 +04:00
|
|
|
if rejects:
|
|
|
|
return -1
|
|
|
|
return err
|
2006-08-13 03:13:27 +04:00
|
|
|
|
2011-05-08 19:48:30 +04:00
|
|
|
def updatedir(ui, repo, patches, similarity=0):
|
|
|
|
'''Update dirstate after patch application according to metadata'''
|
|
|
|
if not patches:
|
|
|
|
return []
|
|
|
|
copies = []
|
|
|
|
removes = set()
|
|
|
|
cfiles = patches.keys()
|
|
|
|
cwd = repo.getcwd()
|
|
|
|
if cwd:
|
|
|
|
cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
|
|
|
|
for f in patches:
|
|
|
|
gp = patches[f]
|
|
|
|
if not gp:
|
|
|
|
continue
|
|
|
|
if gp.op == 'RENAME':
|
|
|
|
copies.append((gp.oldpath, gp.path))
|
|
|
|
removes.add(gp.oldpath)
|
|
|
|
elif gp.op == 'COPY':
|
|
|
|
copies.append((gp.oldpath, gp.path))
|
|
|
|
elif gp.op == 'DELETE':
|
|
|
|
removes.add(gp.path)
|
|
|
|
|
|
|
|
wctx = repo[None]
|
|
|
|
for src, dst in copies:
|
|
|
|
wdutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
|
|
|
|
if (not similarity) and removes:
|
|
|
|
wctx.remove(sorted(removes), True)
|
|
|
|
|
|
|
|
for f in patches:
|
|
|
|
gp = patches[f]
|
|
|
|
if gp and gp.mode:
|
|
|
|
islink, isexec = gp.mode
|
|
|
|
dst = repo.wjoin(gp.path)
|
|
|
|
# patch won't create empty files
|
|
|
|
if gp.op == 'ADD' and not os.path.lexists(dst):
|
|
|
|
flags = (isexec and 'x' or '') + (islink and 'l' or '')
|
|
|
|
repo.wwrite(gp.path, '', flags)
|
|
|
|
util.setflags(dst, islink, isexec)
|
|
|
|
wdutil.addremove(repo, cfiles, similarity=similarity)
|
|
|
|
files = patches.keys()
|
|
|
|
files.extend([r for r in removes if r not in files])
|
|
|
|
return sorted(files)
|
|
|
|
|
patch: deprecate ui.patch / external patcher feature
Why?
- Mercurial internal patcher works correctly for regular patches and git
patches, is much faster at least on Windows and is more extensible.
- In theory, the external patcher can be used to handle exotic patch formats. I
do not know any and have not heard about any such use in years.
- Most patch programs cannot handle git format patches, which makes the API
caller to decide either to ignore ui.patch by calling patch.internalpatch()
directly, or take the risk of random failures with valid inputs.
- One thing a patch program could do that the Mercurial patcher cannot is applying with
--reverse. Apparently several shelve like extensions try to use that,
including passing the "reverse" option to Mercurial patcher, which has been
removed mid-2009. I never heard anybody complain about that, and would prefer
reimplementing it anyway.
And from the technical perspective:
- The external patcher makes everything harder to maintain and implement. EOL
normalization is not implemented, and I would bet file renames, if supported
by the patcher, are not correctly recorded in the dirstate.
- No tests.
How?
- Remove related documentation
- Clearly mark patch.externalpatch() as private
- Remove the debuginstall check. This deprecation request was actually
triggered by this last point. debuginstall is the only piece of code patching
without a repository. When migrating to an integrated patch() + updatedir()
call, this was really a showstopper, all workarounds were either ugly or
uselessly complicated to implement. If we do not support external patcher
anymore, the debuginstall check is not useful anymore.
- Remove patch.externalpatch() after 1.9 release.
2011-03-24 12:28:29 +03:00
|
|
|
def _externalpatch(patcher, patchname, ui, strip, cwd, files):
|
2008-10-19 01:45:45 +04:00
|
|
|
"""use <patcher> to apply <patchname> to the working directory.
|
|
|
|
returns whether patch was applied with fuzz factor."""
|
|
|
|
|
|
|
|
fuzz = False
|
2010-10-10 00:13:08 +04:00
|
|
|
args = []
|
2008-10-19 01:45:45 +04:00
|
|
|
if cwd:
|
|
|
|
args.append('-d %s' % util.shellquote(cwd))
|
|
|
|
fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
|
|
|
|
util.shellquote(patchname)))
|
|
|
|
|
|
|
|
for line in fp:
|
|
|
|
line = line.rstrip()
|
|
|
|
ui.note(line + '\n')
|
|
|
|
if line.startswith('patching file '):
|
2011-05-06 17:19:48 +04:00
|
|
|
pf = util.parsepatchoutput(line)
|
2008-10-19 01:45:45 +04:00
|
|
|
printed_file = False
|
2008-10-26 19:26:28 +03:00
|
|
|
files.setdefault(pf, None)
|
2008-10-19 01:45:45 +04:00
|
|
|
elif line.find('with fuzz') >= 0:
|
|
|
|
fuzz = True
|
|
|
|
if not printed_file:
|
|
|
|
ui.warn(pf + '\n')
|
|
|
|
printed_file = True
|
|
|
|
ui.warn(line + '\n')
|
|
|
|
elif line.find('saving rejects to file') >= 0:
|
|
|
|
ui.warn(line + '\n')
|
|
|
|
elif line.find('FAILED') >= 0:
|
|
|
|
if not printed_file:
|
|
|
|
ui.warn(pf + '\n')
|
|
|
|
printed_file = True
|
|
|
|
ui.warn(line + '\n')
|
|
|
|
code = fp.close()
|
|
|
|
if code:
|
|
|
|
raise PatchError(_("patch command failed: %s") %
|
2011-05-06 17:31:09 +04:00
|
|
|
util.explainexit(code)[0])
|
2008-10-19 01:45:45 +04:00
|
|
|
return fuzz
|
|
|
|
|
2009-10-31 20:02:13 +03:00
|
|
|
def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
|
2008-10-19 01:45:45 +04:00
|
|
|
"""use builtin patch to apply <patchobj> to the working directory.
|
|
|
|
returns whether patch was applied with fuzz factor."""
|
2009-06-15 02:03:26 +04:00
|
|
|
|
2009-10-31 20:02:13 +03:00
|
|
|
if files is None:
|
|
|
|
files = {}
|
2009-06-15 02:03:26 +04:00
|
|
|
if eolmode is None:
|
|
|
|
eolmode = ui.config('patch', 'eol', 'strict')
|
2009-12-20 19:18:02 +03:00
|
|
|
if eolmode.lower() not in eolmodes:
|
2010-08-30 00:37:58 +04:00
|
|
|
raise util.Abort(_('unsupported line endings type: %s') % eolmode)
|
2009-12-20 19:18:02 +03:00
|
|
|
eolmode = eolmode.lower()
|
2009-06-19 15:47:50 +04:00
|
|
|
|
2008-10-19 01:45:45 +04:00
|
|
|
try:
|
2009-07-05 13:01:30 +04:00
|
|
|
fp = open(patchobj, 'rb')
|
2008-10-19 01:45:45 +04:00
|
|
|
except TypeError:
|
|
|
|
fp = patchobj
|
|
|
|
if cwd:
|
|
|
|
curdir = os.getcwd()
|
|
|
|
os.chdir(cwd)
|
|
|
|
try:
|
2009-12-20 19:18:02 +03:00
|
|
|
ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
|
2008-10-19 01:45:45 +04:00
|
|
|
finally:
|
|
|
|
if cwd:
|
|
|
|
os.chdir(curdir)
|
2010-01-03 21:47:07 +03:00
|
|
|
if fp != patchobj:
|
|
|
|
fp.close()
|
2008-10-19 01:45:45 +04:00
|
|
|
if ret < 0:
|
2010-10-10 00:13:08 +04:00
|
|
|
raise PatchError(_('patch failed to apply'))
|
2008-10-19 01:45:45 +04:00
|
|
|
return ret > 0
|
|
|
|
|
2009-10-31 20:02:13 +03:00
|
|
|
def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
|
2009-06-15 02:03:26 +04:00
|
|
|
"""Apply <patchname> to the working directory.
|
|
|
|
|
|
|
|
'eolmode' specifies how end of lines should be handled. It can be:
|
|
|
|
- 'strict': inputs are read in binary mode, EOLs are preserved
|
|
|
|
- 'crlf': EOLs are ignored when patching and reset to CRLF
|
|
|
|
- 'lf': EOLs are ignored when patching and reset to LF
|
|
|
|
- None: get it from user settings, default to 'strict'
|
|
|
|
'eolmode' is ignored when using an external patcher program.
|
|
|
|
|
|
|
|
Returns whether patch was applied with fuzz factor.
|
|
|
|
"""
|
2008-10-19 01:45:45 +04:00
|
|
|
patcher = ui.config('ui', 'patch')
|
2009-10-31 20:02:13 +03:00
|
|
|
if files is None:
|
|
|
|
files = {}
|
2008-10-19 01:45:45 +04:00
|
|
|
try:
|
|
|
|
if patcher:
|
patch: deprecate ui.patch / external patcher feature
Why?
- Mercurial internal patcher works correctly for regular patches and git
patches, is much faster at least on Windows and is more extensible.
- In theory, the external patcher can be used to handle exotic patch formats. I
do not know any and have not heard about any such use in years.
- Most patch programs cannot handle git format patches, which makes the API
caller to decide either to ignore ui.patch by calling patch.internalpatch()
directly, or take the risk of random failures with valid inputs.
- One thing a patch program could do Mercurial patcher cannot is applying with
--reverse. Apparently several shelve like extensions try to use that,
including passing the "reverse" option to Mercurial patcher, which has been
removed mid-2009. I never heard anybody complain about that, and would prefer
reimplementing it anyway.
And from the technical perspective:
- The external patcher makes everything harder to maintain and implement. EOL
normalization is not implemented, and I would bet file renames, if supported
by the patcher, are not correctly recorded in the dirstate.
- No tests.
How?
- Remove related documentation
- Clearly mark patch.externalpatch() as private
- Remove the debuginstall check. This deprecation request was actually
triggered by this last point. debuginstall is the only piece of code patching
without a repository. When migrating to an integrated patch() + updatedir()
call, this was really a showstopper, all workarounds were either ugly or
uselessly complicated to implement. If we do not support external patcher
anymore, the debuginstall check is not useful anymore.
- Remove patch.externalpatch() after 1.9 release.
2011-03-24 12:28:29 +03:00
|
|
|
return _externalpatch(patcher, patchname, ui, strip, cwd, files)
|
2010-10-10 00:13:08 +04:00
|
|
|
return internalpatch(patchname, ui, strip, cwd, files, eolmode)
|
2008-10-19 01:45:45 +04:00
|
|
|
except PatchError, err:
|
2010-10-10 00:13:08 +04:00
|
|
|
raise util.Abort(str(err))
|
2008-10-19 01:45:45 +04:00
|
|
|
|
2011-05-06 20:03:41 +04:00
|
|
|
def changedfiles(patchpath, strip=1):
|
|
|
|
fp = open(patchpath, 'rb')
|
|
|
|
try:
|
|
|
|
changed = set()
|
|
|
|
for state, values in iterhunks(fp):
|
|
|
|
if state == 'hunk':
|
|
|
|
continue
|
|
|
|
elif state == 'file':
|
|
|
|
afile, bfile, first_hunk = values
|
|
|
|
current_file, missing = selectfile(afile, bfile,
|
|
|
|
first_hunk, strip)
|
|
|
|
changed.add(current_file)
|
|
|
|
elif state == 'git':
|
|
|
|
for gp in values:
|
|
|
|
gp.path = pathstrip(gp.path, strip - 1)[1]
|
|
|
|
changed.add(gp.path)
|
|
|
|
else:
|
|
|
|
raise util.Abort(_('unsupported parser state: %s') % state)
|
|
|
|
return changed
|
|
|
|
finally:
|
|
|
|
fp.close()
|
|
|
|
|
2007-07-31 07:49:08 +04:00
|
|
|
def b85diff(to, tn):
|
2006-10-12 20:17:16 +04:00
|
|
|
'''print base85-encoded binary diff'''
|
|
|
|
def gitindex(text):
|
|
|
|
if not text:
|
2010-09-02 14:08:13 +04:00
|
|
|
return hex(nullid)
|
2006-10-12 20:17:16 +04:00
|
|
|
l = len(text)
|
2008-04-05 00:36:40 +04:00
|
|
|
s = util.sha1('blob %d\0' % l)
|
2006-10-12 20:17:16 +04:00
|
|
|
s.update(text)
|
|
|
|
return s.hexdigest()
|
|
|
|
|
|
|
|
def fmtline(line):
|
|
|
|
l = len(line)
|
|
|
|
if l <= 26:
|
|
|
|
l = chr(ord('A') + l - 1)
|
|
|
|
else:
|
|
|
|
l = chr(l - 26 + ord('a') - 1)
|
|
|
|
return '%c%s\n' % (l, base85.b85encode(line, True))
|
|
|
|
|
|
|
|
def chunk(text, csize=52):
|
|
|
|
l = len(text)
|
|
|
|
i = 0
|
|
|
|
while i < l:
|
2010-01-25 09:05:27 +03:00
|
|
|
yield text[i:i + csize]
|
2006-10-12 20:17:16 +04:00
|
|
|
i += csize
|
|
|
|
|
2007-02-17 14:54:59 +03:00
|
|
|
tohash = gitindex(to)
|
|
|
|
tnhash = gitindex(tn)
|
|
|
|
if tohash == tnhash:
|
2007-02-17 14:55:00 +03:00
|
|
|
return ""
|
2006-10-12 20:17:16 +04:00
|
|
|
|
2007-02-17 14:55:00 +03:00
|
|
|
# TODO: deltas
|
|
|
|
ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
|
|
|
|
(tohash, tnhash, len(tn))]
|
|
|
|
for l in chunk(zlib.compress(tn)):
|
|
|
|
ret.append(fmtline(l))
|
|
|
|
ret.append('\n')
|
|
|
|
return ''.join(ret)
|
2006-10-12 20:17:16 +04:00
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
class GitDiffRequired(Exception):
|
|
|
|
pass
|
2008-10-22 11:29:26 +04:00
|
|
|
|
2010-03-09 21:04:18 +03:00
|
|
|
def diffopts(ui, opts=None, untrusted=False):
|
|
|
|
def get(key, name=None, getter=ui.configbool):
|
|
|
|
return ((opts and opts.get(key)) or
|
|
|
|
getter('diff', name or key, None, untrusted=untrusted))
|
|
|
|
return mdiff.diffopts(
|
|
|
|
text=opts and opts.get('text'),
|
|
|
|
git=get('git'),
|
|
|
|
nodates=get('nodates'),
|
|
|
|
showfunc=get('show_function', 'showfunc'),
|
|
|
|
ignorews=get('ignore_all_space', 'ignorews'),
|
|
|
|
ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
|
|
|
|
ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
|
|
|
|
context=get('unified', getter=ui.config))
|
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
|
2010-09-03 14:58:51 +04:00
|
|
|
losedatafn=None, prefix=''):
|
2008-11-03 18:48:23 +03:00
|
|
|
'''yields diff of changes to files between two nodes, or node and
|
2006-08-13 03:13:27 +04:00
|
|
|
working directory.
|
|
|
|
|
|
|
|
if node1 is None, use first dirstate parent instead.
|
2010-01-01 22:54:05 +03:00
|
|
|
if node2 is None, compare node1 with working directory.
|
|
|
|
|
|
|
|
losedatafn(**kwarg) is a callable run when opts.upgrade=True and
|
|
|
|
every time some change cannot be represented with the current
|
|
|
|
patch format. Return False to upgrade to git patch format, True to
|
|
|
|
accept the loss or raise an exception to abort the diff. It is
|
|
|
|
called with the name of current file being diffed as 'fn'. If set
|
|
|
|
to None, patches will always be upgraded to git format when
|
|
|
|
necessary.
|
2010-09-03 14:58:51 +04:00
|
|
|
|
|
|
|
prefix is a filename prefix that is prepended to all filenames on
|
|
|
|
display (used for subrepos).
|
2010-01-01 22:54:05 +03:00
|
|
|
'''
|
2006-08-13 03:13:27 +04:00
|
|
|
|
|
|
|
if opts is None:
|
|
|
|
opts = mdiff.defaultopts
|
|
|
|
|
2009-11-05 17:18:56 +03:00
|
|
|
if not node1 and not node2:
|
2011-04-05 01:21:59 +04:00
|
|
|
node1 = repo.dirstate.p1()
|
2006-08-17 06:49:45 +04:00
|
|
|
|
2009-07-15 03:50:37 +04:00
|
|
|
def lrugetfilectx():
|
|
|
|
cache = {}
|
|
|
|
order = []
|
|
|
|
def getfilectx(f, ctx):
|
|
|
|
fctx = ctx.filectx(f, filelog=cache.get(f))
|
|
|
|
if f not in cache:
|
|
|
|
if len(cache) > 20:
|
|
|
|
del cache[order.pop(0)]
|
2009-10-31 20:02:34 +03:00
|
|
|
cache[f] = fctx.filelog()
|
2009-07-15 03:50:37 +04:00
|
|
|
else:
|
|
|
|
order.remove(f)
|
|
|
|
order.append(f)
|
|
|
|
return fctx
|
|
|
|
return getfilectx
|
|
|
|
getfilectx = lrugetfilectx()
|
2006-08-17 06:49:45 +04:00
|
|
|
|
2008-06-26 23:35:46 +04:00
|
|
|
ctx1 = repo[node1]
|
2008-10-13 00:21:08 +04:00
|
|
|
ctx2 = repo[node2]
|
2006-08-13 03:13:27 +04:00
|
|
|
|
|
|
|
if not changes:
|
2008-10-13 00:21:08 +04:00
|
|
|
changes = repo.status(ctx1, ctx2, match=match)
|
2008-06-27 22:43:29 +04:00
|
|
|
modified, added, removed = changes[:3]
|
2006-08-13 03:13:27 +04:00
|
|
|
|
|
|
|
if not modified and not added and not removed:
|
2010-01-01 22:54:05 +03:00
|
|
|
return []
|
2006-12-25 19:43:49 +03:00
|
|
|
|
2009-12-29 18:00:38 +03:00
|
|
|
revs = None
|
2010-01-01 22:54:05 +03:00
|
|
|
if not repo.ui.quiet:
|
2006-10-13 22:34:35 +04:00
|
|
|
hexfunc = repo.ui.debugflag and hex or short
|
2009-12-29 18:00:38 +03:00
|
|
|
revs = [hexfunc(node) for node in [node1, node2] if node]
|
2006-08-13 03:13:27 +04:00
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
copy = {}
|
|
|
|
if opts.git or opts.upgrade:
|
|
|
|
copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
|
2006-08-15 09:48:03 +04:00
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
|
2010-09-03 14:58:51 +04:00
|
|
|
modified, added, removed, copy, getfilectx, opts, losedata, prefix)
|
2010-01-01 22:54:05 +03:00
|
|
|
if opts.upgrade and not opts.git:
|
|
|
|
try:
|
|
|
|
def losedata(fn):
|
|
|
|
if not losedatafn or not losedatafn(fn=fn):
|
|
|
|
raise GitDiffRequired()
|
|
|
|
# Buffer the whole output until we are sure it can be generated
|
|
|
|
return list(difffn(opts.copy(git=False), losedata))
|
|
|
|
except GitDiffRequired:
|
|
|
|
return difffn(opts.copy(git=True), None)
|
|
|
|
else:
|
|
|
|
return difffn(opts, None)
|
|
|
|
|
2010-04-03 00:22:06 +04:00
|
|
|
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # map a line prefix to the label its line should carry; order matters:
    # '---'/'+++' must be tried before the bare '-'/'+' entries.
    labels = [('diff', 'diff.diffline'),
              ('copy', 'diff.extended'),
              ('rename', 'diff.extended'),
              ('old', 'diff.extended'),
              ('new', 'diff.extended'),
              ('deleted', 'diff.extended'),
              ('---', 'diff.file_a'),
              ('+++', 'diff.file_b'),
              ('@@', 'diff.hunk'),
              ('-', 'diff.deleted'),
              ('+', 'diff.inserted')]
    for chunk in func(*args, **kw):
        for n, line in enumerate(chunk.split('\n')):
            if n:
                # re-emit the newline each split() consumed
                yield ('\n', '')
            core = line
            if line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                core = line.rstrip()
            matched = False
            for prefix, label in labels:
                if core.startswith(prefix):
                    yield (core, label)
                    matched = True
                    break
            if not matched:
                yield (line, '')
            if core != line:
                # the stripped tail, labelled so it can be highlighted
                yield (line[len(core):], 'diff.trailingwhitespace')
|
|
|
|
|
|
|
|
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: run diff() through difflabel() so callers can colorize
    return difflabel(diff, *args, **kw)
|
|
|
|
|
|
|
|
|
2010-01-01 22:54:05 +03:00
|
|
|
def _addmodehdr(header, omode, nmode):
|
|
|
|
if omode != nmode:
|
|
|
|
header.append('old mode %s\n' % omode)
|
|
|
|
header.append('new mode %s\n' % nmode)
|
|
|
|
|
|
|
|
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''Yield diff text (preceded by any git extended headers) for each
    file in modified + added + removed, comparing ctx1 against ctx2.

    copy maps copy/rename destinations to their sources (a = copy[f]
    gives the old path).  getfilectx(f, ctx) supplies the filectx used
    to read file data.  prefix is prepended to every path via join().
    When opts.git is false and losedatafn is set, losedatafn(f) is
    called each time the plain diff format would lose information that
    only the git format can express (copies, renames, flag changes,
    binary files, empty adds/removes).
    '''

    def join(f):
        # apply the caller-supplied path prefix to a repo-relative name
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # copy sources already reported as the origin of a rename; ensures a
    # removed source yields at most one 'rename from' record
    gone = set()
    # manifest flag ('l'ink, e'x'ec, plain) -> git file mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse of the copy map: source -> destination
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        # git-style diffs carry no '-r rev' components in the diff line
        revs = None

    for f in sorted(modified + added + removed):
        to = None       # old data; stays None when f is not in ctx1
        tn = None       # new data; stays None when f was removed
        dodiff = True   # True, False, or 'binary'
        header = []     # git extended header lines for this file
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f     # old/new path names used in the diff header
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # first reference to a removed source is a rename,
                        # any further reference is a copy
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        # diff against the copy source, not an empty file
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        # plain diffs cannot record exec/symlink flags
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # f was modified in place
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    # plain diffs cannot express binary data or flag changes
                    losedatafn(f)
            if opts.git:
                header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                    # ctx2 date may be dynamic
                                    tn, util.datestr(ctx2.date()),
                                    join(a), join(b), revs, opts=opts)
            # a lone 'diff --git' line with no hunk text carries no
            # information; longer headers (modes, copies) stand alone
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
|
2006-08-13 03:13:27 +04:00
|
|
|
|
2008-12-25 11:48:24 +03:00
|
|
|
def diffstatdata(lines):
    '''parse diff lines into (filename, adds, removes, isbinary) tuples,
    one per file; a file with no added and no removed lines is reported
    as binary.'''
    # hg-style header: the filename follows the last '-r <hash>' pair and
    # may itself contain spaces
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    fname = None
    plus = minus = 0
    for current in lines:
        if current.startswith('diff'):
            if fname:
                # flush the file we were counting before starting a new one
                yield (fname, plus, minus, plus == 0 and minus == 0)
            # set numbers to 0 anyway when starting new file
            plus = minus = 0
            if current.startswith('diff --git'):
                fname = gitre.search(current).group(1)
            elif current.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(current).group(1)
        elif current.startswith('+') and not current.startswith('+++'):
            plus += 1
        elif current.startswith('-') and not current.startswith('---'):
            minus += 1
    if fname:
        # flush the trailing file
        yield (fname, plus, minus, plus == 0 and minus == 0)
|
2008-12-25 11:48:24 +03:00
|
|
|
|
2009-10-25 03:53:33 +03:00
|
|
|
def diffstat(lines, width=80, git=False):
    '''render a diffstat table for the given diff lines: one histogram
    row per file plus a summary line; returns the whole table as a
    string.  With git=True, binary files show 'Bin' instead of a count.'''
    stats = list(diffstatdata(lines))

    # attach each file's display width once (colwidth accounts for
    # double-width characters) so both passes below can share it
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    totaladds = sum([s[1] for s in sized])
    totalremoves = sum([s[2] for s in sized])
    maxname = max([0] + [s[4] for s in sized])
    maxtotal = max([0] + [s[1] + s[2] for s in sized])
    hasbinary = bool([s for s in sized if s[3]])

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # make room for the literal 'Bin'
        countwidth = 3
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
|
2010-04-03 00:22:06 +04:00
|
|
|
|
|
|
|
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: '<name> | <count> <graph>'; label the runs
            # of '+' and '-' in the graph separately
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                found = re.search(pattern, graph)
                if found:
                    yield (found.group(0), label)
        else:
            # summary (or empty) line: no highlighting
            yield (row, '')
        yield ('\n', '')
|