2008-02-04 04:29:05 +03:00
|
|
|
# Copyright (C) 2004, 2005 Canonical Ltd
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2012-01-06 19:27:13 +04:00
|
|
|
# along with this program; if not, see <http://www.gnu.org/licenses/>.
|
2008-02-04 04:29:05 +03:00
|
|
|
|
|
|
|
# mbp: "you know that thing where cvs gives you conflict markers?"
|
|
|
|
# s: "i hate that."
|
|
|
|
|
2015-08-09 05:54:11 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from . import (
|
2015-10-08 22:55:45 +03:00
|
|
|
error,
|
2015-08-09 05:54:11 +03:00
|
|
|
mdiff,
|
2017-06-22 00:46:16 +03:00
|
|
|
pycompat,
|
2015-08-09 05:54:11 +03:00
|
|
|
util,
|
|
|
|
)
|
2008-02-04 04:29:05 +03:00
|
|
|
|
flake8: enable F821 check
Summary:
This check is useful and detects real errors (ex. fbconduit). Unfortunately
`arc lint` will run it with both py2 and py3 so a lot of py2 builtins will
still be warned.
I didn't find a clean way to disable py3 check. So this diff tries to fix them.
For `xrange`, the change was done by a script:
```
import sys
import redbaron
headertypes = {'comment', 'endl', 'from_import', 'import', 'string',
'assignment', 'atomtrailers'}
xrangefix = '''try:
xrange(0)
except NameError:
xrange = range
'''
def isxrange(x):
try:
return x[0].value == 'xrange'
except Exception:
return False
def main(argv):
for i, path in enumerate(argv):
print('(%d/%d) scanning %s' % (i + 1, len(argv), path))
content = open(path).read()
try:
red = redbaron.RedBaron(content)
except Exception:
print(' warning: failed to parse')
continue
hasxrange = red.find('atomtrailersnode', value=isxrange)
hasxrangefix = 'xrange = range' in content
if hasxrangefix or not hasxrange:
print(' no need to change')
continue
# find a place to insert the compatibility statement
changed = False
for node in red:
if node.type in headertypes:
continue
# node.insert_before is an easier API, but it has bugs changing
# other "finally" and "except" positions. So do the insert
# manually.
# # node.insert_before(xrangefix)
line = node.absolute_bounding_box.top_left.line - 1
lines = content.splitlines(1)
content = ''.join(lines[:line]) + xrangefix + ''.join(lines[line:])
changed = True
break
if changed:
# "content" is faster than "red.dumps()"
open(path, 'w').write(content)
print(' updated')
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
For other py2 builtins that do not have a py3 equivalent, some `# noqa`
were added as a workaround for now.
Reviewed By: DurhamG
Differential Revision: D6934535
fbshipit-source-id: 546b62830af144bc8b46788d2e0fd00496838939
2018-02-10 04:31:44 +03:00
|
|
|
# Compatibility shim: Python 3 removed the xrange builtin.  Probe for it
# and alias it to range so the xrange() calls below work on both
# interpreters (on py2, xrange exists and the except branch is skipped).
try:
    xrange(0)
except NameError:
    xrange = range
|
2008-02-04 04:29:05 +03:00
|
|
|
class CantReprocessAndShowBase(Exception):
    # NOTE(review): not raised anywhere in this module; the name suggests
    # "reprocess" and "show base" merge options are mutually exclusive —
    # confirm against callers before relying on that reading.
    pass
|
def intersect(ra, rb):
    """Given two ranges return the range where they intersect or None.

    Each range is a (start, end) pair with start <= end; a zero-length
    overlap (ranges that merely touch) yields None.

    >>> intersect((0, 10), (0, 6))
    (0, 6)
    >>> intersect((0, 10), (5, 15))
    (5, 10)
    >>> intersect((0, 10), (10, 15))
    >>> intersect((0, 9), (10, 15))
    >>> intersect((0, 9), (7, 15))
    (7, 9)
    """
    lo_a, hi_a = ra
    lo_b, hi_b = rb
    assert lo_a <= hi_a
    assert lo_b <= hi_b

    start = max(lo_a, lo_b)
    end = min(hi_a, hi_b)
    return (start, end) if start < end else None
|
|
|
|
def compare_range(a, astart, aend, b, bstart, bend):
    """Compare a[astart:aend] == b[bstart:bend], without slicing.

    Returns True when the two sub-sequences have equal length and
    element-wise equal contents; empty ranges compare equal.
    """
    length = aend - astart
    if length != bend - bstart:
        return False
    return all(a[astart + k] == b[bstart + k] for k in range(length))
|
|
|
class Merge3Text(object):
    """3-way merge of texts.

    Given strings BASE, OTHER, THIS, tries to produce a combined text
    incorporating the changes from both BASE->OTHER and BASE->THIS."""
    def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
        # Raw texts are kept for diffing (find_sync_regions /
        # find_unconflicted diff the texts, not the line lists).
        self.basetext = basetext
        self.atext = atext
        self.btext = btext
        # Line lists may be supplied by the caller; otherwise derive them
        # from the raw texts, preserving line endings on each line.
        if base is None:
            base = mdiff.splitnewlines(basetext)
        if a is None:
            a = mdiff.splitnewlines(atext)
        if b is None:
            b = mdiff.splitnewlines(btext)
        self.base = base
        self.a = a
        self.b = b

    def merge_lines(self,
                    name_a=None,
                    name_b=None,
                    name_base=None,
                    start_marker='<<<<<<<',
                    mid_marker='=======',
                    end_marker='>>>>>>>',
                    base_marker=None,
                    localorother=None,
                    minimize=False):
        """Return merge in cvs-like form.

        Generator yielding merged lines one at a time.  Conflicting
        regions are bracketed by start/mid/end markers (and, when
        base_marker is given, the base lines too), unless localorother
        resolves each conflict wholly to one side.  Sets self.conflicts
        as a side effect while the generator is consumed.
        """
        self.conflicts = False
        # Mirror the line-ending style of the first line of `a` so the
        # synthesized marker lines match the file's convention.
        newline = '\n'
        if len(self.a) > 0:
            if self.a[0].endswith('\r\n'):
                newline = '\r\n'
            elif self.a[0].endswith('\r'):
                newline = '\r'
        # Append the side labels to their markers, when both are given.
        if name_a and start_marker:
            start_marker = start_marker + ' ' + name_a
        if name_b and end_marker:
            end_marker = end_marker + ' ' + name_b
        if name_base and base_marker:
            base_marker = base_marker + ' ' + name_base
        merge_regions = self.merge_regions()
        if minimize:
            merge_regions = self.minimize(merge_regions)
        for t in merge_regions:
            what = t[0]
            if what == 'unchanged':
                for i in range(t[1], t[2]):
                    yield self.base[i]
            elif what == 'a' or what == 'same':
                for i in range(t[1], t[2]):
                    yield self.a[i]
            elif what == 'b':
                for i in range(t[1], t[2]):
                    yield self.b[i]
            elif what == 'conflict':
                # 'local'/'other' resolution takes one side wholesale,
                # emitting no markers and not flagging a conflict.
                if localorother == 'local':
                    for i in range(t[3], t[4]):
                        yield self.a[i]
                elif localorother == 'other':
                    for i in range(t[5], t[6]):
                        yield self.b[i]
                else:
                    self.conflicts = True
                    if start_marker is not None:
                        yield start_marker + newline
                    for i in range(t[3], t[4]):
                        yield self.a[i]
                    if base_marker is not None:
                        yield base_marker + newline
                    for i in range(t[1], t[2]):
                        yield self.base[i]
                    if mid_marker is not None:
                        yield mid_marker + newline
                    for i in range(t[5], t[6]):
                        yield self.b[i]
                    if end_marker is not None:
                        yield end_marker + newline
            else:
                raise ValueError(what)

    def merge_groups(self):
        """Yield sequence of line groups.  Each one is a tuple:

        'unchanged', lines
             Lines unchanged from base

        'a', lines
             Lines taken from a

        'same', lines
             Lines taken from a (and equal to b)

        'b', lines
             Lines taken from b

        'conflict', base_lines, a_lines, b_lines
             Lines from base were changed to either a or b and conflict.
        """
        # Thin wrapper over merge_regions() that slices the actual line
        # lists instead of returning index ranges.
        for t in self.merge_regions():
            what = t[0]
            if what == 'unchanged':
                yield what, self.base[t[1]:t[2]]
            elif what == 'a' or what == 'same':
                yield what, self.a[t[1]:t[2]]
            elif what == 'b':
                yield what, self.b[t[1]:t[2]]
            elif what == 'conflict':
                yield (what,
                       self.base[t[1]:t[2]],
                       self.a[t[3]:t[4]],
                       self.b[t[5]:t[6]])
            else:
                raise ValueError(what)

    def merge_regions(self):
        """Return sequences of matching and conflicting regions.

        This returns tuples, where the first value says what kind we
        have:

        'unchanged', start, end
             Take a region of base[start:end]

        'same', astart, aend
             b and a are different from base but give the same result

        'a', start, end
             Non-clashing insertion from a[start:end]

        'conflict', zstart, zend, astart, aend, bstart, bend
             Conflict between a and b, with z as common ancestor

        Method is as follows:

        The two sequences align only on regions which match the base
        and both descendants.  These are found by doing a two-way diff
        of each one against the base, and then finding the
        intersections between those regions.  These "sync regions"
        are by definition unchanged in both and easily dealt with.

        The regions in between can be in any of three cases:
        conflicted, or changed on only one side.
        """

        # section a[0:ia] has been disposed of, etc
        iz = ia = ib = 0

        for region in self.find_sync_regions():
            zmatch, zend, amatch, aend, bmatch, bend = region
            #print 'match base [%d:%d]' % (zmatch, zend)

            # Sync regions match all three texts, so their lengths agree.
            matchlen = zend - zmatch
            assert matchlen >= 0
            assert matchlen == (aend - amatch)
            assert matchlen == (bend - bmatch)

            # Lengths of the unprocessed gap before this sync region.
            len_a = amatch - ia
            len_b = bmatch - ib
            len_base = zmatch - iz
            assert len_a >= 0
            assert len_b >= 0
            assert len_base >= 0

            #print 'unmatched a=%d, b=%d' % (len_a, len_b)

            if len_a or len_b:
                # try to avoid actually slicing the lists
                equal_a = compare_range(self.a, ia, amatch,
                                        self.base, iz, zmatch)
                equal_b = compare_range(self.b, ib, bmatch,
                                        self.base, iz, zmatch)
                same = compare_range(self.a, ia, amatch,
                                     self.b, ib, bmatch)

                # Classify the gap: both sides identical -> 'same';
                # only one side changed -> take that side; both changed
                # differently -> conflict.
                if same:
                    yield 'same', ia, amatch
                elif equal_a and not equal_b:
                    yield 'b', ib, bmatch
                elif equal_b and not equal_a:
                    yield 'a', ia, amatch
                elif not equal_a and not equal_b:
                    yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
                else:
                    raise AssertionError("can't handle a=b=base but unmatched")

                ia = amatch
                ib = bmatch
            iz = zmatch

            # if the same part of the base was deleted on both sides
            # that's OK, we can just skip it.

            if matchlen > 0:
                assert ia == amatch
                assert ib == bmatch
                assert iz == zmatch

                yield 'unchanged', zmatch, zend
                iz = zend
                ia = aend
                ib = bend

    def minimize(self, merge_regions):
        """Trim conflict regions of lines where A and B sides match.

        Lines where both A and B have made the same changes at the beginning
        or the end of each merge region are eliminated from the conflict
        region and are instead considered the same.
        """
        for region in merge_regions:
            if region[0] != "conflict":
                yield region
                continue
            issue, z1, z2, a1, a2, b1, b2 = region
            alen = a2 - a1
            blen = b2 - b1

            # find matches at the front
            ii = 0
            while ii < alen and ii < blen and \
                  self.a[a1 + ii] == self.b[b1 + ii]:
                ii += 1
            startmatches = ii

            # find matches at the end
            ii = 0
            while ii < alen and ii < blen and \
                  self.a[a2 - ii - 1] == self.b[b2 - ii - 1]:
                ii += 1
            endmatches = ii

            # Emit trimmed-off leading/trailing matches as 'same' regions
            # around the (possibly now smaller) conflict.
            if startmatches > 0:
                yield 'same', a1, a1 + startmatches

            yield ('conflict', z1, z2,
                   a1 + startmatches, a2 - endmatches,
                   b1 + startmatches, b2 - endmatches)

            if endmatches > 0:
                yield 'same', a2 - endmatches, a2

    def find_sync_regions(self):
        """Return a list of sync regions, where both descendants match the base.

        Generates a list of (base1, base2, a1, a2, b1, b2).  There is
        always a zero-length sync region at the end of all the files.
        """

        ia = ib = 0
        amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
        bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
        len_a = len(amatches)
        len_b = len(bmatches)

        sl = []

        while ia < len_a and ib < len_b:
            abase, amatch, alen = amatches[ia]
            bbase, bmatch, blen = bmatches[ib]

            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            i = intersect((abase, abase + alen), (bbase, bbase + blen))
            if i:
                intbase = i[0]
                intend = i[1]
                intlen = intend - intbase

                # found a match of base[i[0], i[1]]; this may be less than
                # the region that matches in either one
                assert intlen <= alen
                assert intlen <= blen
                assert abase <= intbase
                assert bbase <= intbase

                # Translate the base offsets into a/b offsets via the
                # match positions reported by the diffs.
                asub = amatch + (intbase - abase)
                bsub = bmatch + (intbase - bbase)
                aend = asub + intlen
                bend = bsub + intlen

                assert self.base[intbase:intend] == self.a[asub:aend], \
                       (self.base[intbase:intend], self.a[asub:aend])

                assert self.base[intbase:intend] == self.b[bsub:bend]

                sl.append((intbase, intend,
                           asub, aend,
                           bsub, bend))

            # advance whichever one ends first in the base text
            if (abase + alen) < (bbase + blen):
                ia += 1
            else:
                ib += 1

        # Sentinel: zero-length sync region at end-of-file, so callers
        # always see a final aligned point.
        intbase = len(self.base)
        abase = len(self.a)
        bbase = len(self.b)
        sl.append((intbase, intbase, abase, abase, bbase, bbase))

        return sl

    def find_unconflicted(self):
        """Return a list of ranges in base that are not conflicted."""
        am = mdiff.get_matching_blocks(self.basetext, self.atext)
        bm = mdiff.get_matching_blocks(self.basetext, self.btext)

        unc = []

        while am and bm:
            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            a1 = am[0][0]
            a2 = a1 + am[0][2]
            b1 = bm[0][0]
            b2 = b1 + bm[0][2]
            i = intersect((a1, a2), (b1, b2))
            if i:
                unc.append(i)

            # Drop whichever block ends first in the base and retry.
            if a2 < b2:
                del am[0]
            else:
                del bm[0]

        return unc
|
2017-08-14 06:06:52 +03:00
|
|
|
def _verifytext(text, path, ui, opts):
    """verifies that text is non-binary (unless opts[text] is passed,
    then we just warn)"""
    if not util.binary(text):
        return text
    # Binary content: warn (unless quiet) and abort unless the caller
    # explicitly asked to treat everything as text.
    msg = _("%s looks like a binary file.") % path
    if not opts.get('quiet'):
        ui.warn(_('warning: %s\n') % msg)
    if not opts.get('text'):
        raise error.Abort(msg)
    return text
|
2017-08-14 08:46:16 +03:00
|
|
|
def _picklabels(defaults, overrides):
|
|
|
|
if len(overrides) > 3:
|
|
|
|
raise error.Abort(_("can only specify three labels."))
|
2017-08-25 23:49:17 +03:00
|
|
|
result = defaults[:]
|
|
|
|
for i, override in enumerate(overrides):
|
|
|
|
result[i] = override
|
|
|
|
return result
|
2017-08-14 08:46:16 +03:00
|
|
|
|
2017-09-01 20:35:43 +03:00
|
|
|
def simplemerge(ui, localctx, basectx, otherctx, **opts):
    """Performs the simplemerge algorithm.

    The merged result is written into `localctx`.

    Returns 1 when there are unresolved conflicts or when an input is
    rejected as binary; returns None on a clean merge.
    """
    opts = pycompat.byteskwargs(opts)

    def readctx(ctx):
        # Merges were always run in the working copy before, which means
        # they used decoded data, if the user defined any repository
        # filters.
        #
        # Maintain that behavior today for BC, though perhaps in the future
        # it'd be worth considering whether merging encoded data (what the
        # repository usually sees) might be more useful.
        return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts)

    mode = opts.get('mode','merge')
    # Union mode emits no conflict markers, so labels are unnecessary.
    name_a, name_b, name_base = None, None, None
    if mode != 'union':
        name_a, name_b, name_base = _picklabels([localctx.path(),
                                                 otherctx.path(), None],
                                                opts.get('label', []))

    try:
        localtext = readctx(localctx)
        basetext = readctx(basectx)
        othertext = readctx(otherctx)
    except error.Abort:
        # _verifytext already reported the binary file to the user.
        return 1

    m3 = Merge3Text(basetext, localtext, othertext)
    extrakwargs = {
        "localorother": opts.get("localorother", None),
        'minimize': True,
    }
    if mode == 'union':
        # Union mode: keep both sides of each conflict, suppress markers.
        extrakwargs['start_marker'] = None
        extrakwargs['mid_marker'] = None
        extrakwargs['end_marker'] = None
    elif name_base is not None:
        # A third label was supplied: show base content inside conflicts
        # and disable minimization so the base region stays aligned.
        extrakwargs['base_marker'] = '|||||||'
        extrakwargs['name_base'] = name_base
        extrakwargs['minimize'] = False

    mergedtext = ""
    for line in m3.merge_lines(name_a=name_a, name_b=name_b,
                               **pycompat.strkwargs(extrakwargs)):
        if opts.get('print'):
            ui.fout.write(line)
        else:
            mergedtext += line

    if not opts.get('print'):
        localctx.write(mergedtext, localctx.flags())

    # m3.conflicts is only meaningful after merge_lines() has been fully
    # consumed (it is set as a side effect of the generator above).
    if m3.conflicts and not mode == 'union':
        return 1