sapling/mercurial/filemerge.py


# filemerge.py - file-level merge handling for Mercurial
#
# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import re
import tempfile
from .i18n import _
from .node import nullid, short
from . import (
encoding,
error,
formatter,
match,
pycompat,
registrar,
scmutil,
simplemerge,
tagmerge,
templatekw,
templater,
util,
)
def _toolstr(ui, tool, part, *args):
return ui.config("merge-tools", tool + "." + part, *args)
def _toolbool(ui, tool, part, *args):
return ui.configbool("merge-tools", tool + "." + part, *args)
def _toollist(ui, tool, part):
return ui.configlist("merge-tools", tool + "." + part)
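# A minimal sketch of the ``merge-tools`` configuration read by the helpers
# above (the tool name "mytool" and its values are hypothetical):
#
#   [merge-tools]
#   mytool.executable = /usr/bin/mytool
#   mytool.priority = 10
#   mytool.binary = False
#   mytool.check = conflicts, changed
#
# ``executable`` and ``priority`` are fetched with _toolstr, ``binary`` with
# _toolbool, and ``check`` with _toollist.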
internals = {}
# Merge tools to document.
internalsdoc = {}
internaltool = registrar.internalmerge()
# internal tool merge types
nomerge = internaltool.nomerge
mergeonly = internaltool.mergeonly # just the full merge, no premerge
fullmerge = internaltool.fullmerge # both premerge and merge
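# In the prompt strings below, the "$$ &Changed $$ ..." suffixes are the
# choice lists consumed by ui.promptchoice(): each "&" marks the response
# key, and the call returns the zero-based index of the chosen answer.  For
# example, answering 'd' to the first prompt returns index 1, which _iprompt
# later maps to the 'other' (deleted) side.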
_localchangedotherdeletedmsg = _(
"local%(l)s changed %(fd)s which other%(o)s deleted\n"
"use (c)hanged version, (d)elete, or leave (u)nresolved?"
"$$ &Changed $$ &Delete $$ &Unresolved")
_otherchangedlocaldeletedmsg = _(
"other%(o)s changed %(fd)s which local%(l)s deleted\n"
"use (c)hanged version, leave (d)eleted, or "
"leave (u)nresolved?"
"$$ &Changed $$ &Deleted $$ &Unresolved")
class absentfilectx(object):
"""Represents a file that's ostensibly in a context but is actually not
present in it.
This is here because it's very specific to the filemerge code for now --
other code is likely going to break with the values this returns."""
def __init__(self, ctx, f):
self._ctx = ctx
self._f = f
def path(self):
return self._f
def size(self):
return None
def data(self):
return None
def filenode(self):
return nullid
_customcmp = True
def cmp(self, fctx):
"""compare with other file context
returns True if different from fctx.
"""
return not (fctx.isabsent() and
fctx.ctx() == self.ctx() and
fctx.path() == self.path())
def flags(self):
return ''
def changectx(self):
return self._ctx
def isbinary(self):
return False
def isabsent(self):
return True
def _findtool(ui, tool):
if tool in internals:
return tool
return findexternaltool(ui, tool)
def findexternaltool(ui, tool):
for kn in ("regkey", "regkeyalt"):
k = _toolstr(ui, tool, kn)
if not k:
continue
p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
if p:
p = util.findexe(p + _toolstr(ui, tool, "regappend", ""))
if p:
return p
exe = _toolstr(ui, tool, "executable", tool)
return util.findexe(util.expandpath(exe))
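# _picktool() below chooses a tool roughly in this order of precedence
# (paraphrased from the code; see the function body for the details):
#   1. ui.forcemerge (set from the command line)
#   2. the HGMERGE environment variable
#   3. [merge-patterns] entries whose pattern matches the file
#   4. [merge-tools] entries ordered by descending ``priority``, with
#      ui.merge treated as the highest-priority entry
#   5. the internal ":merge" or ":prompt" tools as a last resort
#
# Illustrative configuration (the pattern and tool name are hypothetical):
#
#   [merge-patterns]
#   **.xml = mytool
#
#   [merge-tools]
#   mytool.priority = 100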
def _picktool(repo, ui, path, binary, symlink, changedelete):
def supportscd(tool):
return tool in internals and internals[tool].mergetype == nomerge
def check(tool, pat, symlink, binary, changedelete):
tmsg = tool
if pat:
tmsg = _("%s (for pattern %s)") % (tool, pat)
if not _findtool(ui, tool):
if pat: # explicitly requested tool deserves a warning
ui.warn(_("couldn't find merge tool %s\n") % tmsg)
else: # configured but non-existing tools are more silent
ui.note(_("couldn't find merge tool %s\n") % tmsg)
elif symlink and not _toolbool(ui, tool, "symlink"):
ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
elif binary and not _toolbool(ui, tool, "binary"):
ui.warn(_("tool %s can't handle binary\n") % tmsg)
elif changedelete and not supportscd(tool):
# the nomerge tools are the only tools that support change/delete
# conflicts
pass
elif not util.gui() and _toolbool(ui, tool, "gui"):
ui.warn(_("tool %s requires a GUI\n") % tmsg)
else:
return True
return False
# internal config: ui.forcemerge
# forcemerge comes from command line arguments, highest priority
force = ui.config('ui', 'forcemerge')
if force:
toolpath = _findtool(ui, force)
if changedelete and not supportscd(toolpath):
return ":prompt", None
else:
if toolpath:
return (force, util.shellquote(toolpath))
else:
# mimic HGMERGE if given tool not found
return (force, force)
# HGMERGE takes next precedence
hgmerge = encoding.environ.get("HGMERGE")
if hgmerge:
if changedelete and not supportscd(hgmerge):
return ":prompt", None
else:
return (hgmerge, hgmerge)
# then patterns
for pat, tool in ui.configitems("merge-patterns"):
mf = match.match(repo.root, '', [pat])
if mf(path) and check(tool, pat, symlink, False, changedelete):
toolpath = _findtool(ui, tool)
return (tool, util.shellquote(toolpath))
# then merge tools
tools = {}
disabled = set()
for k, v in ui.configitems("merge-tools"):
t = k.split('.')[0]
if t not in tools:
tools[t] = int(_toolstr(ui, t, "priority"))
if _toolbool(ui, t, "disabled"):
disabled.add(t)
names = tools.keys()
tools = sorted([(-p, tool) for tool, p in tools.items()
if tool not in disabled])
uimerge = ui.config("ui", "merge")
if uimerge:
# external tools defined in uimerge won't be able to handle
# change/delete conflicts
if uimerge not in names and not changedelete:
return (uimerge, uimerge)
tools.insert(0, (None, uimerge)) # highest priority
tools.append((None, "hgmerge")) # the old default, if found
for p, t in tools:
if check(t, None, symlink, binary, changedelete):
toolpath = _findtool(ui, t)
return (t, util.shellquote(toolpath))
# internal merge or prompt as last resort
if symlink or binary or changedelete:
if not changedelete and len(tools):
# every tool was rejected by a capability check for symlink or binary
ui.warn(_("no tool found to merge %s\n") % path)
return ":prompt", None
return ":merge", None
def _eoltype(data):
"Guess the EOL type of a file"
if '\0' in data: # binary
return None
if '\r\n' in data: # Windows
return '\r\n'
if '\r' in data: # Old Mac
return '\r'
if '\n' in data: # UNIX
return '\n'
return None # unknown
def _matcheol(file, back):
"Convert EOL markers in a file to match origfile"
tostyle = _eoltype(back.data()) # No repo.wread filters?
if tostyle:
data = util.readfile(file)
style = _eoltype(data)
if style:
newdata = data.replace(style, tostyle)
if newdata != data:
util.writefile(file, newdata)
@internaltool('prompt', nomerge)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Asks the user which of the local `p1()` or the other `p2()` version to
keep as the merged version."""
ui = repo.ui
fd = fcd.path()
# Avoid prompting during an in-memory merge since it doesn't support merge
# conflicts.
if fcd.changectx().isinmemory():
raise error.InMemoryMergeConflictsError('in-memory merge does not '
'support file conflicts')
prompts = partextras(labels)
prompts['fd'] = fd
try:
if fco.isabsent():
index = ui.promptchoice(
_localchangedotherdeletedmsg % prompts, 2)
choice = ['local', 'other', 'unresolved'][index]
elif fcd.isabsent():
index = ui.promptchoice(
_otherchangedlocaldeletedmsg % prompts, 2)
choice = ['other', 'local', 'unresolved'][index]
else:
index = ui.promptchoice(
_("keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved"
" for %(fd)s?"
"$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
choice = ['local', 'other', 'unresolved'][index]
if choice == 'other':
return _iother(repo, mynode, orig, fcd, fco, fca, toolconf,
labels)
elif choice == 'local':
return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf,
labels)
elif choice == 'unresolved':
return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
labels)
except error.ResponseExpected:
ui.write("\n")
return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
labels)
@internaltool('local', nomerge)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the local `p1()` version of files as the merged version."""
return 0, fcd.isabsent()
@internaltool('other', nomerge)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the other `p2()` version of files as the merged version."""
if fco.isabsent():
# local changed, remote deleted -- 'deleted' picked
_underlyingfctxifabsent(fcd).remove()
deleted = True
else:
_underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
deleted = False
return 0, deleted
@internaltool('fail', nomerge)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
used to resolve these conflicts."""
# for change/delete conflicts write out the changed version, then fail
if fcd.isabsent():
_underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
return 1, False
def _underlyingfctxifabsent(filectx):
"""Sometimes when resolving, our fcd is actually an absentfilectx, but
we want to write to it (to do the resolve). This helper returns the
underlying workingfilectx in that case.
"""
if filectx.isabsent():
return filectx.changectx()[filectx.path()]
else:
return filectx
def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink = toolconf
if symlink or fcd.isabsent() or fco.isabsent():
return 1
unused, unused, unused, back = files
ui = repo.ui
validkeep = ['keep', 'keep-merge3']
# do we attempt to simplemerge first?
try:
premerge = _toolbool(ui, tool, "premerge", not binary)
except error.ConfigError:
premerge = _toolstr(ui, tool, "premerge", "").lower()
if premerge not in validkeep:
_valid = ', '.join(["'" + v + "'" for v in validkeep])
raise error.ConfigError(_("%s.premerge not valid "
"('%s' is neither boolean nor %s)") %
(tool, premerge, _valid))
if premerge:
if premerge == 'keep-merge3':
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append('base')
r = simplemerge.simplemerge(ui, fcd, fca, fco, quiet=True, label=labels)
if not r:
ui.debug(" premerge successful\n")
return 0
if premerge not in validkeep:
# restore from backup and try again
_restorebackup(fcd, back)
return 1 # continue merging
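# The per-tool ``premerge`` setting handled above accepts a boolean or one of
# the ``validkeep`` strings, e.g. (hypothetical tool name):
#
#   [merge-tools]
#   mytool.premerge = keep-merge3
#
# True tries simplemerge first and restores the backup on conflict, False
# skips straight to the real tool, "keep" leaves the premerge conflict
# markers in the tool's input, and "keep-merge3" additionally includes a
# base-content section.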
def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
tool, toolpath, binary, symlink = toolconf
if symlink:
repo.ui.warn(_('warning: internal %s cannot merge symlinks '
'for %s\n') % (tool, fcd.path()))
return False
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(_('warning: internal %s cannot merge change/delete '
'conflict for %s\n') % (tool, fcd.path()))
return False
return True
def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have two sections, one for each side
of the merge, unless mode equals 'union', which suppresses the markers."""
ui = repo.ui
r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
return True, r, False
@internaltool('union', fullmerge,
_("warning: conflicts while merging %s! "
"(edit, then use 'hg resolve --mark')\n"),
precheck=_mergecheck)
def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will use both left and right sides for conflict regions.
No markers are inserted."""
return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
files, labels, 'union')
@internaltool('merge', fullmerge,
_("warning: conflicts while merging %s! "
"(edit, then use 'hg resolve --mark')\n"),
precheck=_mergecheck)
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have two sections, one for each side
of the merge."""
return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
files, labels, 'merge')
@internaltool('merge3', fullmerge,
_("warning: conflicts while merging %s! "
"(edit, then use 'hg resolve --mark')\n"),
precheck=_mergecheck)
def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have three sections, one from each
side of the merge and one for the base content."""
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append('base')
return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
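# Illustrative :merge3 marker layout (the content lines are made up; label
# text depends on ui.mergemarkers / ui.mergemarkertemplate):
#
#   <<<<<<< local
#   text from the local side
#   ||||||| base
#   text from the common ancestor
#   =======
#   text from the other side
#   >>>>>>> other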
def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
labels=None, localorother=None):
"""
Generic driver for _imergelocal and _imergeother
"""
assert localorother is not None
tool, toolpath, binary, symlink = toolconf
r = simplemerge.simplemerge(repo.ui, fcd, fca, fco, label=labels,
localorother=localorother)
return True, r
@internaltool('merge-local', mergeonly, precheck=_mergecheck)
def _imergelocal(*args, **kwargs):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the local `p1()` changes."""
success, status = _imergeauto(localorother='local', *args, **kwargs)
return success, status, False
@internaltool('merge-other', mergeonly, precheck=_mergecheck)
def _imergeother(*args, **kwargs):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the other `p2()` changes."""
success, status = _imergeauto(localorother='other', *args, **kwargs)
return success, status, False
@internaltool('tagmerge', mergeonly,
_("automatic tag merging of %s failed! "
"(use 'hg resolve --tool :merge' or another merge "
filemerge: add internal:tagmerge merge tool Add a new internal:tagmerge merge tool which implements an automatic merge algorithm for mercurial's tag files The tagmerge algorithm is able to resolve most merge conflicts that currently would trigger a .hgtags merge conflict. The only case that it does not (and cannot) handle is that in which two tags point to different revisions on each merge parent _and_ their corresponding tag histories have the same rank (i.e. the same length). In all other cases the merge algorithm will choose the revision belonging to the parent with the highest ranked tag history. The merged tag history is the combination of both tag histories (special care is taken to try to combine common tag histories where possible). The algorithm also handles cases in which tags have been manually removed from the .hgtags file and other similar corner cases. In addition to actually merging the tags from two parents, taking into account the base, the algorithm also tries to minimize the difference between the merged tag file and the first parent's tag file (i.e. it tries to make the merged tag order as as similar as possible to the first parent's tag file order). The algorithm works as follows: 1. read the tags from p1, p2 and the base - when reading the p1 tags, also get the line numbers associated to each tag node (these will be used to sort the merged tags in a way that minimizes the diff to p1). Ignore the file numbers when reading p2 and the base 2. recover the "lost tags" (i.e. those that are found in the base but not on p1 or p2) and add them back to p1 and/or p2 - at this point the only tags that are on p1 but not on p2 are those new tags that were introduced in p1. Same thing for the tags that are on p2 but not on p2 3. take all tags that are only on p1 or only on p2 (but not on the base) - Note that these are the tags that were introduced between base and p1 and between base and p2, possibly on separate clones 4. for each tag found both on p1 and p2 perform the following merge algorithm: - the tags conflict if their tag "histories" have the same "rank" (i.e. length) _AND_ the last (current) tag is _NOT_ the same - for non conflicting tags: - choose which are the high and the low ranking nodes - the high ranking list of nodes is the one that is longer. In case of draw favor p1 - the merged node list is made of 3 parts: - first the nodes that are common to the beginning of both the low and the high ranking nodes - second the non common low ranking nodes - finally the non common high ranking nodes (with the last one being the merged tag node) - note that this is equivalent to putting the whole low ranking node list first, followed by the non common high ranking nodes - note that during the merge we keep the "node line numbers", which will be used when writing the merged tags to the tag file 5. write the merged tags taking into account to their positions in the first parent (i.e. try to keep the relative ordering of the nodes that come from p1). This minimizes the diff between the merged and the p1 tag files This is done by using the following algorithm - group the nodes for a given tag that must be written next to each other - A: nodes that come from consecutive lines on p1 - B: nodes that come from p2 (i.e. 
whose associated line number is None) and are next to one of the a nodes in A - each group is associated with a line number coming from p1 - generate a "tag block" for each of the groups - a tag block is a set of consecutive "node tag" lines belonging to the same tag and which will be written next to each other on the merged tags file - sort the "tag blocks" according to their associated number line - put blocks whose nodes come all from p2 first - write the tag blocks in the sorted order Notes: - A few tests have been added to test-tag.t. These tests are very specific to the new internal:tagmerge tool, so perhaps they should be moved to their own test file. - The merge algorithm was discussed in a thread on the mercurial mailing list. In http://markmail.org/message/anqaxldup4tmgyrx a slightly different algorithm was suggested. In it the p1 and p2 tags would have been interleaved instead of put one before the other. It would be possible to implement that but my tests suggest that the merge result would be more confusing and harder to understand.
2014-06-26 03:20:25 +04:00
"tool of your choice)\n"))
def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Uses the internal tag merge algorithm (experimental).
"""
success, status = tagmerge.merge(repo, fcd, fco, fca)
return success, status, False
@internaltool('dump', fullmerge)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
"""
Creates three versions of the files to merge, containing the
contents of local, other and base. These files can then be used to
perform a merge manually. If the file to be merged is named
``a.txt``, these files will accordingly be named ``a.txt.local``,
``a.txt.other`` and ``a.txt.base`` and they will be placed in the
same directory as ``a.txt``.
This implies premerge. Therefore, files aren't dumped if premerge
runs successfully. Use :forcedump to forcibly write files out.
"""
a = _workingpath(repo, fcd)
fd = fcd.path()
from . import context
if isinstance(fcd, context.overlayworkingfilectx):
raise error.InMemoryMergeConflictsError('in-memory merge does not '
'support the :dump tool.')
util.writefile(a + ".local", fcd.decodeddata())
repo.wwrite(fd + ".other", fco.data(), fco.flags())
repo.wwrite(fd + ".base", fca.data(), fca.flags())
return False, 1, False
@internaltool('forcedump', mergeonly)
def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
labels=None):
"""
Creates three versions of the files, the same as :dump, but omits premerge.
"""
return _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
labels=labels)
def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
# In-memory merge simply raises an exception on all external merge tools,
# for now.
#
# It would be possible to run most tools with temporary files, but this
# raises the question of what to do if the user only partially resolves the
# file -- we can't leave a merge state. (Copy to somewhere in the .hg/
# directory and tell the user how to get it is my best idea, but it's
# clunky.)
raise error.InMemoryMergeConflictsError('in-memory merge does not support '
'external merge tools')
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink = toolconf
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(_('warning: %s cannot merge change/delete conflict '
'for %s\n') % (tool, fcd.path()))
return False, 1, None
unused, unused, unused, back = files
a = _workingpath(repo, fcd)
b, c = _maketempfiles(repo, fco, fca)
try:
out = ""
env = {'HG_FILE': fcd.path(),
'HG_MY_NODE': short(mynode),
'HG_OTHER_NODE': str(fco.changectx()),
'HG_BASE_NODE': str(fca.changectx()),
'HG_MY_ISLINK': 'l' in fcd.flags(),
'HG_OTHER_ISLINK': 'l' in fco.flags(),
'HG_BASE_ISLINK': 'l' in fca.flags(),
}
ui = repo.ui
args = _toolstr(ui, tool, "args")
if "$output" in args:
# read input from backup, write to original
out = a
a = repo.wvfs.join(back.path())
replace = {'local': a, 'base': b, 'other': c, 'output': out}
args = util.interpolate(r'\$', replace, args,
lambda s: util.shellquote(util.localpath(s)))
cmd = toolpath + ' ' + args
if _toolbool(ui, tool, "gui"):
repo.ui.status(_('running merge tool %s for file %s\n') %
(tool, fcd.path()))
repo.ui.debug('launching merge tool: %s\n' % cmd)
r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool')
repo.ui.debug('merge tool returned: %d\n' % r)
return True, r, False
finally:
util.unlink(b)
util.unlink(c)
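# A sketch of an external-tool configuration consumed by _xmerge above (the
# tool name and path are hypothetical):
#
#   [merge-tools]
#   mytool.executable = /usr/local/bin/mytool
#   mytool.args = $local $base $other -o $output
#   mytool.gui = False
#
# $local, $base, $other and $output are substituted by util.interpolate();
# when $output is present, the tool reads the premerged input from the backup
# copy and writes the result to the original path.  The tool also sees
# HG_FILE, HG_MY_NODE, HG_OTHER_NODE, HG_BASE_NODE and the *_ISLINK variables
# in its environment.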
def _formatconflictmarker(repo, ctx, template, label, pad):
"""Applies the given template to the ctx, prefixed by the label.
Pad is the minimum width of the label prefix, so that multiple markers
can have aligned templated parts.
"""
if ctx.node() is None:
ctx = ctx.p1()
props = templatekw.keywords.copy()
props['templ'] = template
props['ctx'] = ctx
props['repo'] = repo
templateresult = template.render(props)
label = ('%s:' % label).ljust(pad + 1)
mark = '%s %s' % (label, templateresult)
if mark:
mark = mark.splitlines()[0] # split for safety
# 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
return util.ellipsis(mark, 80 - 8)
_defaultconflictlabels = ['local', 'other']
def _formatlabels(repo, fcd, fco, fca, labels):
"""Formats the given labels using the conflict marker template.
Returns a list of formatted labels.
"""
cd = fcd.changectx()
co = fco.changectx()
ca = fca.changectx()
ui = repo.ui
template = ui.config('ui', 'mergemarkertemplate')
template = templater.unquotestring(template)
tmpl = formatter.maketemplater(ui, template)
pad = max(len(l) for l in labels)
newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
_formatconflictmarker(repo, co, tmpl, labels[1], pad)]
if len(labels) > 2:
newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
return newlabels
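# Illustrative marker-template configuration (not the built-in default; the
# template keywords shown are standard ones, and the example hash and summary
# are made up):
#
#   [ui]
#   mergemarkers = detailed
#   mergemarkertemplate = {node|short} {desc|firstline}
#
# With labels ['local', 'other'] the rendered markers look like
# "<<<<<<< local: 1234567890ab fix the frobnicator", truncated by
# _formatconflictmarker() to leave room for the 8-character marker prefix.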
def partextras(labels):
"""Return a dictionary of extra labels for use in prompts to the user
Intended use is in strings of the form "(l)ocal%(l)s".
"""
if labels is None:
return {
"l": "",
"o": "",
}
return {
"l": " [%s]" % labels[0],
"o": " [%s]" % labels[1],
}
def _restorebackup(fcd, back):
# TODO: Add a workingfilectx.write(otherfilectx) path so we can use
# util.copy here instead.
fcd.write(back.data(), fcd.flags())
def _makebackup(repo, ui, wctx, fcd, premerge):
"""Makes and returns a filectx-like object for ``fcd``'s backup file.
In addition to preserving the user's pre-existing modifications to `fcd`
(if any), the backup is used to undo certain premerges, confirm whether a
merge changed anything, and determine what line endings the new file should
have.
Backups only need to be written once (right before the premerge) since their
content doesn't change afterwards.
"""
if fcd.isabsent():
return None
# TODO: Break this import cycle somehow. (filectx -> ctx -> fileset ->
# merge -> filemerge). (I suspect the fileset import is the weakest link)
from . import context
a = _workingpath(repo, fcd)
back = scmutil.origpath(ui, repo, a)
inworkingdir = (back.startswith(repo.wvfs.base) and not
back.startswith(repo.vfs.base))
if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
# If the backup file is to be in the working directory, and we're
# merging in-memory, we must redirect the backup to the memory context
# so we don't disturb the working directory.
relpath = back[len(repo.wvfs.base) + 1:]
if premerge:
wctx[relpath].write(fcd.data(), fcd.flags())
return wctx[relpath]
else:
if premerge:
# Otherwise, write to whatever path the user specified the backups
# should go. We still need to switch based on whether the source is
# in-memory so we can use the fast path of ``util.copy`` if both are
# on disk.
if isinstance(fcd, context.overlayworkingfilectx):
util.writefile(back, fcd.data())
else:
util.copyfile(a, back)
# An arbitraryfilectx is returned, so we can run the same functions on
# the backup context regardless of where it lives.
return context.arbitraryfilectx(back, repo=repo)
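# Backup placement: scmutil.origpath() normally returns "<file>.orig" next to
# the file; a configuration such as the (hypothetical) one below redirects
# backups elsewhere, which is why the in-working-directory check above is
# needed for in-memory merges:
#
#   [ui]
#   origbackuppath = .hg/origbackups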
def _maketempfiles(repo, fco, fca):
"""Writes out `fco` and `fca` as temporary files, so an external merge
tool may use them.
"""
def temp(prefix, ctx):
fullbase, ext = os.path.splitext(ctx.path())
pre = "%s~%s." % (os.path.basename(fullbase), prefix)
(fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
data = repo.wwritedata(ctx.path(), ctx.data())
f = os.fdopen(fd, pycompat.sysstr("wb"))
f.write(data)
f.close()
return name
b = temp("base", fca)
c = temp("other", fco)
return b, c
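# For a file named "dir/a.txt", temp() above yields names roughly of the form
# "a~base.XXXXXXXX.txt" and "a~other.XXXXXXXX.txt" in the system temporary
# directory; the random middle component comes from tempfile.mkstemp().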
def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
"""perform a 3-way merge in the working directory
premerge = whether this is a premerge
mynode = parent node before merge
orig = original local filename before merge
fco = other file context
fca = ancestor file context
fcd = local file context for current/destination file
Returns whether the merge is complete, the return value of the merge, and
a boolean indicating whether the file was deleted from disk."""
if not fco.cmp(fcd): # files identical?
return True, None, False
ui = repo.ui
fd = fcd.path()
binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
symlink = 'l' in fcd.flags() + fco.flags()
changedelete = fcd.isabsent() or fco.isabsent()
tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete)
if tool in internals and tool.startswith('internal:'):
# normalize to new-style names (':merge' etc)
tool = tool[len('internal'):]
ui.debug("picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n"
% (tool, fd, pycompat.bytestr(binary), pycompat.bytestr(symlink),
pycompat.bytestr(changedelete)))
if tool in internals:
func = internals[tool]
mergetype = func.mergetype
onfailure = func.onfailure
precheck = func.precheck
else:
if wctx.isinmemory():
func = _xmergeimm
else:
func = _xmerge
mergetype = fullmerge
onfailure = _("merging %s failed!\n")
precheck = None
toolconf = tool, toolpath, binary, symlink
if mergetype == nomerge:
r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
return True, r, deleted
if premerge:
if orig != fco.path():
ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
else:
ui.status(_("merging %s\n") % fd)
ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
toolconf):
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError('in-memory merge does '
'not support merge '
'conflicts')
ui.warn(onfailure % fd)
return True, 1, False
back = _makebackup(repo, ui, wctx, fcd, premerge)
files = (None, None, None, back)
r = 1
try:
markerstyle = ui.config('ui', 'mergemarkers')
if not labels:
labels = _defaultconflictlabels
if markerstyle != 'basic':
labels = _formatlabels(repo, fcd, fco, fca, labels)
if premerge and mergetype == fullmerge:
r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels)
# complete if premerge successful (r is 0)
return not r, r, False
needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
toolconf, files, labels=labels)
if needcheck:
r = _check(repo, r, ui, tool, fcd, files)
if r:
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError('in-memory merge '
'does not support '
'merge conflicts')
ui.warn(onfailure % fd)
_onfilemergefailure(ui)
return True, r, deleted
finally:
if not r and back is not None:
back.remove()
def _haltmerge():
msg = _('merge halted after failed merge (see hg resolve)')
raise error.InterventionRequired(msg)
def _onfilemergefailure(ui):
action = ui.config('merge', 'on-failure')
if action == 'prompt':
msg = _('continue merge operation (yn)?' '$$ &Yes $$ &No')
if ui.promptchoice(msg, 0) == 1:
_haltmerge()
if action == 'halt':
_haltmerge()
# default action is 'continue', in which case we neither prompt nor halt
def _check(repo, r, ui, tool, fcd, files):
fd = fcd.path()
unused, unused, unused, back = files
if not r and (_toolbool(ui, tool, "checkconflicts") or
'conflicts' in _toollist(ui, tool, "check")):
if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
re.MULTILINE):
r = 1
checked = False
if 'prompt' in _toollist(ui, tool, "check"):
checked = True
if ui.promptchoice(_("was merge of '%s' successful (yn)?"
"$$ &Yes $$ &No") % fd, 1):
r = 1
if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
'changed' in
_toollist(ui, tool, "check")):
if back is not None and not fcd.cmp(back):
if ui.promptchoice(_(" output file %s appears unchanged\n"
"was merge successful (yn)?"
"$$ &Yes $$ &No") % fd, 1):
r = 1
if back is not None and _toolbool(ui, tool, "fixeol"):
_matcheol(_workingpath(repo, fcd), back)
return r
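# The post-merge checks above are driven by per-tool settings such as
# (hypothetical tool name):
#
#   [merge-tools]
#   mytool.check = conflicts, changed, prompt
#   mytool.checkconflicts = True
#   mytool.checkchanged = True
#   mytool.fixeol = True
#
# "conflicts"/"checkconflicts" rejects output that still contains marker
# lines, "changed"/"checkchanged" asks the user when the output matches the
# backup, "prompt" always asks, and "fixeol" renormalizes line endings
# against the backup via _matcheol().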
def _workingpath(repo, ctx):
return repo.wjoin(ctx.path())
def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
return _filemerge(True, repo, wctx, mynode, orig, fcd, fco, fca,
labels=labels)
def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
return _filemerge(False, repo, wctx, mynode, orig, fcd, fco, fca,
labels=labels)
def loadinternalmerge(ui, extname, registrarobj):
"""Load internal merge tool from specified registrarobj
"""
for name, func in registrarobj._table.iteritems():
fullname = ':' + name
internals[fullname] = func
internals['internal:' + name] = func
internalsdoc[fullname] = func
# load built-in merge tools explicitly to setup internalsdoc
loadinternalmerge(None, None, internaltool)
# tell hggettext to extract docstrings from these functions:
i18nfunctions = internals.values()