2006-08-04 00:24:41 +04:00
|
|
|
# merge.py - directory-level update/merge handling for Mercurial
|
|
|
|
#
|
2007-06-19 10:51:34 +04:00
|
|
|
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
|
2006-08-04 00:24:41 +04:00
|
|
|
#
|
2009-04-26 03:08:54 +04:00
|
|
|
# This software may be used and distributed according to the terms of the
|
2010-01-20 07:20:08 +03:00
|
|
|
# GNU General Public License version 2 or any later version.
|
2006-08-04 00:24:41 +04:00
|
|
|
|
2015-08-09 05:41:25 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import errno
|
|
|
|
import os
|
|
|
|
import shutil
|
2014-02-26 06:37:06 +04:00
|
|
|
import struct
|
|
|
|
|
2015-08-09 05:41:25 +03:00
|
|
|
from .i18n import _
|
|
|
|
from .node import (
|
|
|
|
bin,
|
|
|
|
hex,
|
|
|
|
nullid,
|
|
|
|
nullrev,
|
|
|
|
)
|
|
|
|
from . import (
|
|
|
|
copies,
|
2015-09-29 08:11:23 +03:00
|
|
|
destutil,
|
2015-10-08 22:55:45 +03:00
|
|
|
error,
|
2015-08-09 05:41:25 +03:00
|
|
|
filemerge,
|
|
|
|
obsolete,
|
|
|
|
subrepo,
|
|
|
|
util,
|
|
|
|
worker,
|
|
|
|
)
|
2008-04-11 00:02:24 +04:00
|
|
|
|
2014-02-26 06:37:06 +04:00
|
|
|
# Short aliases for the binary record (de)serialization used by the v2
# merge-state file format below.
_pack = struct.pack
_unpack = struct.unpack
|
|
|
|
|
2014-02-26 06:54:47 +04:00
|
|
|
def _droponode(data):
|
|
|
|
# used for compatibility for v1
|
2014-11-21 03:39:32 +03:00
|
|
|
bits = data.split('\0')
|
2014-02-26 06:54:47 +04:00
|
|
|
bits = bits[:-2] + bits[-1:]
|
2014-11-21 03:39:32 +03:00
|
|
|
return '\0'.join(bits)
|
2014-02-26 06:54:47 +04:00
|
|
|
|
2008-04-11 00:02:24 +04:00
|
|
|
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two file are used, one with an old
    format, one with a new format. Both contains similar data, but the new
    format can store new kinds of field.

    Current new format is a list of arbitrary record of the form:

        [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letter are mandatory record, Mercurial
    should abort if they are unknown. lower case record can be safely ignored.

    Currently known record:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more
    '''
    # paths (relative to .hg) of the two on-disk state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        # _read() populates _state, _local, _other and _mdstate from disk
        self._repo = repo
        self._dirty = False
        self._read()

    def reset(self, node=None, other=None):
        """Throw away any existing state and optionally start a new merge
        between `node` (local) and `other`."""
        self._state = {}
        self._local = None
        self._other = None
        if node:
            self._local = node
            self._other = other
        self._mdstate = 'u'
        # remove the saved copies of local file versions (best effort)
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        self._mdstate = 'u'
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                # record is "<drivername>\0<runstate>"
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                # protect against the following:
                # - A configures a malicious merge driver in their hgrc, then
                #   pauses the merge
                # - A edits their hgrc to remove references to the merge driver
                # - A gives a copy of their entire repo, including .hg, to B
                # - B inspects .hgrc and finds it to be clean
                # - B then continues the merge and the malicious merge driver
                #   gets invoked
                if self.mergedriver != bits[0]:
                    raise error.ConfigError(
                        _("merge driver changed since merge started"),
                        hint=_("revert merge driver change or abort merge"))

                self._mdstate = mdstate
            elif rtype in 'FD':
                # per-file entry: "<path>\0<state>\0<hash>\0..."
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # capital letters are mandatory records; unknown ones abort
                raise error.Abort(_('unsupported merge state record: %s')
                                  % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        # True when every v1 record is also present in v2 (after stripping
        # the v2-only "other node" field), i.e. the two files agree.
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node; the rest are file entries
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            # binary framing: 1-byte type, 4-byte big-endian length, payload
            while off < end:
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # experimental: name of the external merge driver, if configured
        return self._repo.ui.config('experimental', 'mergedriver')

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            if self.mergedriver:
                records.append(('m', '\0'.join([
                    self.mergedriver, self._mdstate])))
            for d, v in self._state.iteritems():
                # 'd' state means driver-resolved -> 'D' record type
                if v[0] == 'd':
                    records.append(('D', '\0'.join([d] + v)))
                else:
                    records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = irecords.next()
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            # v1 only knows about plain file entries
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            # 1-byte type, 4-byte big-endian length, payload
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd:  file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        hash = util.sha1(fcl.path()).hexdigest()
        # snapshot the local version so resolve can restore it later
        self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # returns the one-letter resolution state ('u', 'r', 'd', ...)
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        # already resolved or driver-resolved: nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            f = self._repo.vfs('merge/' + hash)
            self._repo.wwrite(dfile, f.read(), flags)
            f.close()
            complete, r = filemerge.premerge(self._repo, self._local, lfile,
                                             fcd, fco, fca, labels=labels)
        else:
            complete, r = filemerge.filemerge(self._repo, self._local, lfile,
                                              fcd, fco, fca, labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return complete, r

    def preresolve(self, dfile, wctx, labels=None):
        return self._resolve(True, dfile, wctx, labels=labels)

    def resolve(self, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        return self._resolve(False, dfile, wctx, labels=labels)[1]
|
2015-10-12 04:29:50 +03:00
|
|
|
|
2014-12-13 10:18:36 +03:00
|
|
|
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
|
|
|
|
if f2 is None:
|
|
|
|
f2 = f
|
2014-11-17 10:41:44 +03:00
|
|
|
return (os.path.isfile(repo.wjoin(f))
|
2015-01-11 03:51:52 +03:00
|
|
|
and repo.wvfs.audit.check(f)
|
2012-03-27 01:41:54 +04:00
|
|
|
and repo.dirstate.normalize(f) not in repo.dirstate
|
2014-12-13 10:18:36 +03:00
|
|
|
and mctx[f2].cmp(wctx[f]))
|
2012-02-10 02:50:19 +04:00
|
|
|
|
2014-12-14 10:52:22 +03:00
|
|
|
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates `actions` in place; returns nothing. Raises error.Abort when an
    untracked file with differing content blocks the update (unless `force`).
    """
    # first pass: collect untracked files that would be clobbered
    aborts = []
    if not force:
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    aborts.append(f)
            elif m == 'dg':
                # directory rename: compare against the source path args[0]
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    aborts.append(f)

    for f in sorted(aborts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if aborts:
        raise error.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

    # second pass: downgrade the remaining tentative actions to concrete ones
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if different:
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
|
|
|
|
|
2008-03-15 18:02:31 +03:00
|
|
|
def _forgetremoved(wctx, mctx, branchmerge):
|
2006-09-18 01:47:33 +04:00
|
|
|
"""
|
|
|
|
Forget removed files
|
|
|
|
|
|
|
|
If we're jumping between revisions (as opposed to merging), and if
|
|
|
|
neither the working directory nor the target rev has the file,
|
|
|
|
then we need to remove it from the dirstate, to prevent the
|
|
|
|
dirstate from listing the file when it is no longer in the
|
|
|
|
manifest.
|
2008-03-13 01:44:08 +03:00
|
|
|
|
|
|
|
If we're merging, and the other revision has removed a file
|
|
|
|
that is not present in the working directory, we need to mark it
|
|
|
|
as removed.
|
2006-09-18 01:47:33 +04:00
|
|
|
"""
|
|
|
|
|
2014-12-12 08:58:49 +03:00
|
|
|
actions = {}
|
|
|
|
m = 'f'
|
2014-02-28 05:25:58 +04:00
|
|
|
if branchmerge:
|
2014-12-12 08:58:49 +03:00
|
|
|
m = 'r'
|
2008-03-13 01:44:08 +03:00
|
|
|
for f in wctx.deleted():
|
2008-03-15 18:02:31 +03:00
|
|
|
if f not in mctx:
|
2014-12-12 08:58:49 +03:00
|
|
|
actions[f] = m, None, "forget deleted"
|
2008-03-13 01:44:08 +03:00
|
|
|
|
|
|
|
if not branchmerge:
|
|
|
|
for f in wctx.removed():
|
2008-03-15 18:02:31 +03:00
|
|
|
if f not in mctx:
|
2014-12-12 08:58:49 +03:00
|
|
|
actions[f] = 'f', None, "forget removed"
|
2006-09-18 01:47:33 +04:00
|
|
|
|
2014-12-12 08:58:49 +03:00
|
|
|
return actions
|
2006-09-18 01:47:33 +04:00
|
|
|
|
2014-03-02 21:36:02 +04:00
|
|
|
def _checkcollision(repo, wmf, actions):
    """Abort if applying `actions` to working manifest `wmf` would produce
    a case-folding collision (two paths that normalize to the same name).

    Raises error.Abort on collision; returns nothing.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        # actions that add a file to the merged manifest
        for m in 'a', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory-rename move: source disappears, destination appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                             % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
|
|
|
|
|
2013-03-15 22:23:29 +04:00
|
|
|
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting
    followcopies = run copy/rename detection before merging

    Returns a tuple (actions, diverge, renamedelete) where `actions` maps
    file path -> (action type, args, message).
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                # mark substate as dirty so it always compares as changed
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', None, "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (fl2,), "prompt deleted/changed")

    return actions, diverge, renamedelete
|
2006-09-18 01:13:03 +04:00
|
|
|
|
2014-12-04 00:50:28 +03:00
|
|
|
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""

    # NOTE: actions.items() materializes a list here (Python 2), which is
    # what makes mutating `actions` inside the loop safe.
    for fname, act in actions.items():
        kind = act[0]
        if fname not in ancestor:
            continue
        if kind == 'cd' and not wctx[fname].cmp(ancestor[fname]):
            # local did change but ended up with same content as the
            # ancestor: demote the changed/deleted conflict to a remove
            actions[fname] = 'r', None, "prompt same"
        elif kind == 'dc' and not mctx[fname].cmp(ancestor[fname]):
            # remote did change but ended up with same content: drop the
            # action entirely (don't get = keep local deleted)
            del actions[fname]
|
2014-12-04 00:50:28 +03:00
|
|
|
|
2014-11-22 00:06:04 +03:00
|
|
|
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a plain manifest merge.  With several
    ancestors (only when merge.preferancestor=*) each ancestor produces a
    "bid" of per-file actions, and an auction picks one action per file.

    Returns (actions, diverge, renamedelete) where actions maps
    filename -> (action type, args, message).
    """

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            # record this ancestor's proposal for each file, grouped by
            # action type
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(l) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    # downgrade conflicts whose content is actually unchanged vs ancestor
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory update: forget files removed remotely
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
|
2014-11-22 00:06:04 +03:00
|
|
|
|
2014-05-09 14:01:56 +04:00
|
|
|
def batchremove(repo, actions):
    """apply removes to the working directory

    yields (count, filename) tuples for progress updates
    """
    ui = repo.ui
    verbose = ui.verbose
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    pending = 0
    for fname, args, msg in actions:
        ui.debug(" %s: %s -> r\n" % (fname, msg))
        if verbose:
            ui.note(_("removing %s\n") % fname)
        audit(fname)
        try:
            unlink(wjoin(fname), ignoremissing=True)
        except OSError as inst:
            ui.warn(_("update failed to remove %s: %s!\n") %
                    (fname, inst.strerror))
        # flush a progress tuple every 100 removals
        if pending == 100:
            yield pending, fname
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, fname
|
|
|
|
|
|
|
|
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields (count, filename) tuples for progress updates
    """
    ui = repo.ui
    verbose = ui.verbose
    fctx = mctx.filectx
    wwrite = repo.wwrite
    pending = 0
    for fname, args, msg in actions:
        ui.debug(" %s: %s -> g\n" % (fname, msg))
        if verbose:
            ui.note(_("getting %s\n") % fname)
        # args[0] carries the file flags to apply
        wwrite(fname, fctx(fname).data(), args[0])
        # flush a progress tuple every 100 gets
        if pending == 100:
            yield pending, fname
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, fname
|
|
|
|
|
2014-05-09 03:54:23 +04:00
|
|
|
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    `actions` maps action type ('r', 'g', 'm', 'dm', 'dg', 'e', 'f', 'a',
    'k', ...) to lists of (filename, args, message) tuples; phases below
    must run in this order (removes before gets, merges last).

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    # deterministic processing order within each action type
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # ancestor file is missing: merge against an empty base
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # 'k' (keep) actions are no-ops and don't count towards progress
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    # premerge
    tocomplete = []
    for f, args, msg in actions['m']:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        # preresolve returns (complete, returncode); r semantics below:
        # None -> updated, 0 -> merged, >0 -> unresolved
        complete, r = ms.preresolve(f, wctx, labels=labels)
        if complete:
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        else:
            # needs a real merge pass; count it again for progress
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
|
|
|
|
|
2013-01-09 03:01:33 +04:00
|
|
|
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    ds = repo.dirstate

    # remove (must come first)
    for fname, args, msg in actions['r']:
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # forget (must come first)
    for fname, args, msg in actions['f']:
        ds.drop(fname)

    # re-add
    for fname, args, msg in actions['a']:
        if not branchmerge:
            ds.add(fname)

    # exec change
    for fname, args, msg in actions['e']:
        ds.normallookup(fname)

    # keep
    for fname, args, msg in actions['k']:
        pass

    # get
    for fname, args, msg in actions['g']:
        if branchmerge:
            ds.otherparent(fname)
        else:
            ds.normal(fname)

    # merge
    for fname, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            ds.merge(fname)
            if f1 != f2: # copy/rename
                if move:
                    ds.remove(f1)
                if f1 != fname:
                    ds.copy(f1, fname)
                else:
                    ds.copy(f2, fname)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == fname: # file not locally copied/moved
                ds.normallookup(fname)
            if move:
                ds.drop(f1)

    # directory rename, move local
    for fname, args, msg in actions['dm']:
        f0, flag = args
        if branchmerge:
            ds.add(fname)
            ds.remove(f0)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
            ds.drop(f0)

    # directory rename, get
    for fname, args, msg in actions['dg']:
        f0, flag = args
        if branchmerge:
            ds.add(fname)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
|
2006-09-18 02:39:19 +04:00
|
|
|
|
rebase: allow collapsing branches in place (issue3111)
We allow rebase plus collapse, but not collapse only? I imagine people would
rebase first then collapse once they are sure the rebase is correct and it is
the right time to finish it.
I was reluctant to submit this patch for reasons detailed below, but it
improves rebase --collapse usefulness so much it is worth the ugliness.
The fix is ugly because we should be fixing the collapse code path rather than
the merge. Collapsing by merging changesets repeatedly is inefficient compared
to what commit --amend does: commitctx(), update, strip. The problem with the
latter is, to generate the synthetic changeset, copy records are gathered with
copies.pathcopies(). copies.pathcopies() is still implemented with merging in
mind and discards information like file replaced by the copy of another,
criss-cross copies and so forth. I believe this information should not be lost,
even if we decide not to interpret it fully later, at merge time.
The second issue with improving rebase --collapse is the option should not be
there to begin with. Rebasing and collapsing are orthogonal and a dedicated
command would probably enable a better, simpler ui. We should avoid advertizing
rebase --collapse, but with this fix it becomes the best shipped solution to
collapse changesets.
And for the record, available techniques are:
- revert + commit + strip: lose copies
- mq/qfold: repeated patching() (mostly correct, fragile)
- rebase: repeated merges (mostly correct, fragile)
- collapse: revert + tag rewriting wizardry, lose copies
- histedit: repeated patching() (mostly correct, fragile)
- amend: copies.pathcopies() + commitctx() + update + strip
2012-05-03 17:14:58 +04:00
|
|
|
def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c -C dirty rev  |  linear   same  cross
     n  n    n   n   |    ok     (1)     x
     n  n    n   y   |    ok     ok     ok
     n  n    y   n   |   merge   (2)    (2)
     n  n    y   y   |   merge   (3)    (3)
     n  y    *   *   |    ---  discard ---
     y  n    y   *   |    ---    (4)    ---
     y  n    n   *   |    ---    ok     ---
     y  y    *   *   |    ---    (5)    ---

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
                 discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    # remember whether a target rev was explicitly requested
    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        if node is None:
            # no explicit target: pick a default destination
            if (repo.ui.configbool('devel', 'all-warnings')
                    or repo.ui.configbool('devel', 'oldapi')):
                repo.ui.develwarn('update with no target')
            rev, _mark, _act = destutil.destupdate(repo)
            node = repo[rev].node()

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                # bid merge: use all common ancestor heads
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise error.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise error.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise error.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
            followcopies = True

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
            followcopies)
        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.checkcase(repo.path):
            # case-insensitive filesystem: guard against case collisions
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # Prompt and create actions. TODO: Move this towards resolve phase.
        for f, args, msg in sorted(actions['cd']):
            if repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?"
                  "$$ &Changed $$ &Delete") % f, 0):
                actions['r'].append((f, None, "prompt delete"))
            else:
                actions['a'].append((f, None, "prompt keep"))
        del actions['cd'][:]

        for f, args, msg in sorted(actions['dc']):
            flags, = args
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?"
                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
                actions['g'].append((f, (flags,), "prompt recreating"))
        del actions['dc'][:]

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:
        # fire the update hook only after the wlock is fully released, so
        # external hooks see the written dirstate
        def updatehook(parent1=xp1, parent2=xp2, error=stats[3]):
            repo.hook('update', parent1=parent1, parent2=parent2, error=error)
        repo._afterlock(updatehook)
    return stats
|
2014-10-14 02:12:12 +04:00
|
|
|
|
|
|
|
def graft(repo, ctx, pctx, labels):
    """Perform a graft-style merge of ctx onto the working directory.

    The merge ancestor is forced to pctx so that one or more
    changesets are effectively replayed onto the current changeset.
    After the merge, the dirstate is collapsed back to a single
    parent and any copies/renames recorded between pctx and ctx are
    duplicated so they survive the graft.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    """
    # When grafting a descendant onto an ancestor, pass
    # mergeancestor=True to update. This does two things: 1) it allows
    # the merge when the destination is the same as ctx's parent (so
    # graft can be used to copy commits), and 2) it tells update that
    # the incoming changes are newer than the destination, which
    # avoids prompting about "remote changed foo which local deleted".
    wdirparent = repo['.'].node()
    mergeancestor = repo.changelog.isancestor(wdirparent, ctx.node())
    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    repo.dirstate.beginparentchange()
    # collapse back to a single parent: drop the second merge parent
    # (note: '.' is re-read here because update() moved the working
    # directory parent above)
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write()
    # duplicate renames/copies so the dirstate reflects the graft
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
|