2014-11-08 03:27:47 +03:00
|
|
|
# pushrebase.py - server-side rebasing of pushed commits
|
|
|
|
#
|
|
|
|
# Copyright 2014 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
|
|
|
import errno, os, tempfile, sys, operator, resource
|
|
|
|
|
|
|
|
try:
|
|
|
|
import json
|
|
|
|
except ImportError:
|
|
|
|
import simplejson as json
|
|
|
|
|
|
|
|
from mercurial import bundle2, cmdutil, hg, scmutil, exchange, commands
|
|
|
|
from mercurial import util, error, discovery, changegroup, context, revset
|
2014-11-19 03:36:46 +03:00
|
|
|
from mercurial import obsolete, pushkey, phases
|
2014-11-08 03:27:47 +03:00
|
|
|
from mercurial.extensions import wrapcommand, wrapfunction
|
|
|
|
from mercurial.bundlerepo import bundlerepository
|
2014-11-19 03:36:46 +03:00
|
|
|
from mercurial.node import nullid, hex, bin
|
2014-11-08 03:27:47 +03:00
|
|
|
from mercurial.i18n import _
|
|
|
|
|
|
|
|
# Command table for any commands this extension registers.
cmdtable = {}
command = cmdutil.command(cmdtable)

# bundle2 part type carrying the changegroup the server should rebase.
rebaseparttype = 'b2x:rebase'
# bundle2 part type carrying the heads the client believes are common
# with the server (used to compute the pushback changegroup).
commonheadsparttype = 'b2x:commonheads'

# Config section/name the push command uses to forward --onto to the
# exchange machinery.
experimental = 'experimental'
configonto = 'server-rebase-onto'
|
|
|
|
|
|
|
|
def extsetup(ui):
    """Wire the extension into Mercurial at load time.

    Adds --onto to push, reorders bundle2 part generation so our parts go
    out in the right place, bypasses the head check for rebasing pushes,
    and wraps the b2x:pushkey handler so rebased nodes are remapped.
    """
    entry = wrapcommand(commands.table, 'push', _push)
    entry[1].append(('', 'onto', '', _('server revision to rebase onto')))

    partorder = exchange.b2partsgenorder
    # The rebase part replaces the changeset part, so generate it in the
    # same position the changeset part would normally occupy.
    partorder.insert(partorder.index('changeset'),
                     partorder.pop(partorder.index(rebaseparttype)))

    # The common-heads part must be generated before everything else.
    partorder.insert(0, partorder.pop(partorder.index(commonheadsparttype)))

    wrapfunction(discovery, 'checkheads', _checkheads)

    origpushkeyhandler = bundle2.parthandlermapping['b2x:pushkey']

    def newpushkeyhandler(*args, **kwargs):
        return bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    # bundle2 reads the accepted parameter names off the handler.
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['b2x:pushkey'] = newpushkeyhandler
|
|
|
|
|
2014-12-05 22:02:01 +03:00
|
|
|
def validaterevset(repo, revset):
    "Abort if this is not a rebasable revset, return None otherwise"
    # Nothing selected at all is not a rebase.
    if not repo.revs(revset):
        raise util.Abort(_('nothing to rebase'))

    # Merge commits cannot be recreated on top of a new parent here.
    if repo.revs('%r and merge()', revset):
        raise util.Abort(_('cannot rebase merge changesets'))

    # The revset must be a single linear-rooted stack: exactly one tail
    # (a revision that is an ancestor of every revision in the set).
    tails = repo.revs('%r and ancestor(%r)', revset, revset)
    if not tails:
        raise util.Abort(_('cannot rebase unrelated changesets'))
    if len(tails) != 1:
        raise util.Abort(_('logic error: multiple tails not possible'))
    tail = repo[tails.first()]

    # ... and exactly one head (no divergent branches inside the set).
    heads = repo.revs('heads(%r)', revset)
    if len(heads) > 1:
        raise util.Abort(_('cannot rebase divergent changesets'))
    head = repo[heads.first()]

    repo.ui.note(_('validated revset %r::%r for rebase\n') %
                 (head.hex(), tail.hex()))
|
|
|
|
|
|
|
|
def revsettail(repo, revset):
    """Return the root changectx of a revset.

    Raises ValueError when the revset does not have exactly one revision
    that is an ancestor of all the others.
    """
    candidates = repo.revs('%r and ancestor(%r)', revset, revset)
    root = candidates.first()
    if root is None:
        raise ValueError(_("revset doesn't have a single tail"))
    return repo[root]
|
2014-11-08 03:27:47 +03:00
|
|
|
|
|
|
|
def getrebasepart(repo, peer, outgoing, onto, newhead=False):
    """Build the client-side b2x:rebase bundle2 part for a push.

    repo/outgoing describe what is being pushed, peer is the remote (must
    advertise rebase support), onto names the server revision to rebase
    onto, and newhead says whether creating a new server head is allowed.
    """
    if not outgoing.missing:
        raise util.Abort(_('no commits to rebase'))

    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise util.Abort(_('no server support for %r') % rebaseparttype)

    # Refuse to build a part the server would have to reject anyway.
    validaterevset(repo, revset.formatspec('%ln', outgoing.missing))

    cg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    params = {'onto': onto,
              'newhead': repr(newhead),
              }
    return bundle2.bundlepart(rebaseparttype.upper(),
                              mandatoryparams=params.items(),
                              data=cg)
|
2014-11-08 03:27:47 +03:00
|
|
|
|
|
|
|
def _checkheads(orig, repo, remote, *args, **kwargs):
    """Wrapper for discovery.checkheads.

    For a rebasing push (--onto was given) the new-head check is skipped:
    the push intentionally looks like it adds a head, and the real checks
    happen while handling the bundle2 rebase part. Both ends must have
    bundle2 enabled for that to work. Non-rebasing pushes are checked as
    usual.
    """
    onto = repo.ui.config(experimental, configonto)
    if onto: # This is a rebasing push
        # The rest of the checks are performed during bundle2 part processing;
        # we need to bypass the regular push checks because it will look like
        # we're pushing a new head, which isn't normally allowed
        # (use the module-level section constant for consistency)
        if not repo.ui.configbool(experimental, 'bundle2-exp', False):
            raise util.Abort(_('bundle2 needs to be enabled on client'))
        if not remote.capable('bundle2-exp'):
            raise util.Abort(_('bundle2 needs to be enabled on server'))
        return
    else:
        return orig(repo, remote, *args, **kwargs)
|
|
|
|
|
|
|
|
def _push(orig, ui, repo, *args, **opts):
    """Wrapped push command: forward --onto through the config and,
    for rebasing pushes, stop phases from being marked public locally
    (the server rewrites the commits, so their successors get published,
    not these).

    Config and the phase-move wrapper are restored after the push.
    """
    oldonto = ui.backupconfig(experimental, configonto)

    ui.setconfig(experimental, configonto, opts.get('onto'), '--onto')

    # Fix: oldphasemove was previously unbound when --onto was not given,
    # making the 'if oldphasemove' check below raise NameError.
    oldphasemove = None
    if ui.config(experimental, configonto):
        oldphasemove = wrapfunction(exchange, '_localphasemove', _phasemove)

    result = orig(ui, repo, *args, **opts)

    ui.restoreconfig(oldonto)

    if oldphasemove:
        exchange._localphasemove = oldphasemove

    return result
|
|
|
|
|
2014-11-19 03:36:46 +03:00
|
|
|
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to be mutated on the server, they aren't really being
    published, their successors are. If we mark these as public now, hg evolve
    will refuse to fix them for us later."""

    if phase == phases.public:
        # Silently drop the move to public; every other phase change
        # goes through unchanged.
        return
    orig(pushop, nodes, phase)
|
|
|
|
|
2014-12-05 22:40:14 +03:00
|
|
|
@exchange.b2partsgenerator(commonheadsparttype)
def commonheadspartgen(pushop, bundler):
    """Emit the client's common heads as raw concatenated binary nodes."""
    payload = ''.join(pushop.outgoing.commonheads)
    bundler.newpart(commonheadsparttype, data=payload)
|
|
|
|
|
|
|
|
@bundle2.parthandler(commonheadsparttype)
def commonheadshandler(op, inpart):
    """Record each 20-byte binary node from the common-heads part."""
    while True:
        nodeid = inpart.read(20)
        if len(nodeid) < 20:
            break
        op.records.add(commonheadsparttype, nodeid)
    assert not nodeid # data should split evenly into blocks of 20 bytes
|
|
|
|
|
2014-11-08 03:27:47 +03:00
|
|
|
@exchange.b2partsgenerator(rebaseparttype)
def partgen(pushop, bundler):
    """Generate the b2x:rebase part for a rebasing push.

    Takes over the 'changesets' push step when --onto was given; returns
    a reply handler that records the push result.
    """
    onto = pushop.ui.config(experimental, configonto)
    if 'changesets' in pushop.stepsdone or not onto:
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        # Fix: this previously read 'upshop.ui.note', an undefined name,
        # so the no-op push path raised NameError instead of noting.
        pushop.ui.note(_('no changes to push'))
        pushop.cgresult = 0
        return

    rebasepart = getrebasepart(pushop.repo,
                               pushop.remote,
                               pushop.outgoing,
                               onto,
                               pushop.newbranch)

    bundler.addpart(rebasepart)

    def handlereply(op):
        # TODO: read result from server?
        pushop.cgresult = 1

    return handlereply
|
2014-11-08 03:27:47 +03:00
|
|
|
|
|
|
|
# Advertise server-side support for the rebase part to pushing clients.
bundle2.capabilities[rebaseparttype] = ()
|
|
|
|
|
extract temporary bundle file creation into its own function
Summary:
Transferring the part data into a bundle file on disk for bundlerepo to read
is a self-contained operation
Test Plan: ##run_tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1726767
Signature: t1:1726767:1418241560:8afac123afc847e61e6f46b6216d15b003024504
2014-12-09 04:10:24 +03:00
|
|
|
def _makebundlefile(part):
|
|
|
|
"""constructs a temporary bundle file
|
|
|
|
|
|
|
|
part.data should be an uncompressed v1 changegroup"""
|
|
|
|
|
|
|
|
fp = None
|
|
|
|
fd, bundlefile = tempfile.mkstemp()
|
|
|
|
try: # guards bundlefile
|
|
|
|
try: # guards fp
|
|
|
|
fp = os.fdopen(fd, 'wb')
|
|
|
|
magic = 'HG10UN'
|
|
|
|
fp.write(magic)
|
|
|
|
data = part.read(resource.getpagesize() - len(magic))
|
|
|
|
while data:
|
|
|
|
fp.write(data)
|
|
|
|
data = part.read(resource.getpagesize())
|
|
|
|
finally:
|
|
|
|
fp.close()
|
|
|
|
except:
|
|
|
|
try:
|
|
|
|
os.unlink(bundlefile)
|
|
|
|
except:
|
|
|
|
# we would rather see the original exception
|
|
|
|
pass
|
|
|
|
raise
|
|
|
|
|
|
|
|
return bundlefile
|
|
|
|
|
pushrebase: extract commit grafting into its own function
Summary: Breaking up the part handler into smaller pieces
Test Plan: ##run_tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1726701
Signature: t1:1726701:1418241313:52edc229bc635d7e703bcc2b5ee5673ece6373fa
2014-12-09 03:54:14 +03:00
|
|
|
def _graft(repo, onto, rev):
    '''duplicate changeset "rev" with parent "onto"'''
    if rev.p2().node() != nullid:
        raise util.Abort(_('cannot graft commit with a non-null p2'))

    def filectxfn(repo, memctx, path):
        # file contents are taken verbatim from the original revision
        return context.memfilectx(repo, path, rev[path].data())

    mctx = context.memctx(repo,
                          [onto.node(), nullid],
                          rev.description(),
                          rev.files(),
                          filectxfn,
                          rev.user(),
                          rev.date(),
                          rev.extra())
    return mctx.commit()
|
|
|
|
|
2014-11-19 03:36:46 +03:00
|
|
|
# TODO: split this function into smaller pieces
|
2014-11-08 03:27:47 +03:00
|
|
|
# TODO: split this function into smaller pieces
@bundle2.parthandler(rebaseparttype, ('onto', 'newhead'))
def bundle2rebase(op, part):
    '''unbundle a bundle2 containing a changegroup to rebase

    Server-side handler: materializes the pushed changegroup as a bundle
    repo, validates it, grafts each commit onto the 'onto' revision, fires
    the usual changegroup hooks, and (when the client supports pushback)
    replies with a changegroup of the rebased commits plus obsolescence
    markers. Records an old-node -> new-node mapping for the pushkey
    handler.
    '''

    params = part.params

    # Open the transaction early so hookargs reflects its state.
    tr = op.gettransaction()
    hookargs = dict(tr.hookargs)

    bundlefile = None

    try: # guards bundlefile
        # Spool the part payload to disk so bundlerepository can read it.
        bundlefile = _makebundlefile(part)
        bundle = bundlerepository(op.repo.ui, op.repo.root, bundlefile)
        # Reject merges / divergence / unrelated stacks up front.
        validaterevset(bundle, 'bundle()')
        tail = revsettail(bundle, 'bundle()')

        onto = scmutil.revsingle(op.repo, params['onto'])
        bundleonto = bundle[onto.hex()]

        # Unless the client explicitly allowed it, refuse a rebase that
        # would create a new head on the server.
        if not params['newhead']:
            if not op.repo.revs('%r and head()', params['onto']):
                raise util.Abort(_('rebase would produce a new head on server'))

        # The pushed stack must sit directly on an ancestor of 'onto';
        # otherwise some intermediate changesets were not pushed.
        if bundleonto.ancestor(tail).hex() != tail.p1().hex():
            raise util.Abort(_('missing changesets between %r and %r') %
                             (bundleonto.ancestor(tail).hex(),
                              tail.p1().hex()))

        revs = [bundle[r] for r in bundle.revs('sort(bundle())')]

        #TODO: Is there a more efficient way to do this check?
        # Conflict check: any file touched by the push that also changed
        # between the stack's base and 'onto' means a manual merge is
        # needed, so abort.
        files = reduce(operator.or_, [set(rev.files()) for rev in revs], set())
        commonmanifest = tail.p1().manifest().intersectfiles(files)
        ontomanifest = bundleonto.manifest().intersectfiles(files)
        conflicts = ontomanifest.diff(commonmanifest).keys()
        if conflicts:
            raise util.Abort(_('conflicting changes in %r') % conflicts)

        op.repo.hook("prechangegroup", **hookargs)

        # old node -> new node for everything we graft
        replacements = {}
        added = []

        # Graft bottom-up; each new commit becomes the parent ('onto')
        # of the next one.
        for rev in revs:
            newrev = _graft(op.repo, onto, rev)

            onto = op.repo[newrev]
            replacements[rev.node()] = onto.node()
            added.append(onto.node())

        if obsolete.isenabled(op.repo, obsolete.createmarkersopt):
            markers = [(bundle[oldrev], (op.repo[newrev],))
                       for oldrev, newrev in replacements.items()
                       if newrev != oldrev]

            # TODO: make sure these weren't public originally
            # HACK: force the precursors to look mutable so markers can
            # be created for them.
            for old, new in markers:
                old.mutable = lambda *args: True

            obsolete.createmarkers(op.repo, markers)

    finally:
        # Best-effort cleanup of the spooled bundle; a missing file is fine.
        try:
            if bundlefile:
                os.unlink(bundlefile)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

    p = lambda: tr.writepending() and op.repo.root or ""
    op.repo.hook("pretxnchangegroup", throw=True, pending=p, **hookargs)

    def runhooks():
        # Post-transaction notification hooks for the grafted commits.
        op.repo.hook("changegroup", **hookargs)
        for n in added:
            args = hookargs.copy()
            args['node'] = hex(n)
            op.repo.hook("incoming", **args)

    tr.addpostclose('serverrebase-cg-hooks',
                    lambda tr: op.repo._afterlock(runhooks))

    # Pushback: if the client told us its common heads and accepts a
    # reply bundle, send the rebased commits back so the client can
    # update itself without a pull.
    if (op.records[commonheadsparttype]
        and op.reply
        and 'b2x:pushback' in op.reply.capabilities):
        outgoing = discovery.outgoing(op.repo.changelog,
                                      op.records[commonheadsparttype],
                                      [new for old, new in replacements.items()
                                       if old != new])

        if outgoing.missing:
            # Negotiate the highest changegroup version both sides know;
            # '01' is the implicit default.
            cgversions = set(op.reply.capabilities.get('b2x:changegroup'))
            if not cgversions:
                cgversions.add('01')
            version = max(cgversions & set(changegroup.packermap.keys()))

            cg = changegroup.getlocalchangegroupraw(op.repo,
                                                    'rebase:reply',
                                                    outgoing,
                                                    version = version)

            cgpart = op.reply.newpart('B2X:CHANGEGROUP', data = cg)
            if version != '01':
                cgpart.addparam('version', version)

        # Ship obsolescence markers for the replaced commits when the
        # client exchanges markers.
        if (obsolete.isenabled(op.repo, obsolete.exchangeopt)
            and op.repo.obsstore):
            try:
                exchange.buildobsmarkerspart(
                    op.reply,
                    op.repo.obsstore.relevantmarkers(replacements.values())
                )
            except ValueError, exc:
                op.repo.ui.status(_("can't send obsolete markers: %s") %
                                  exc.message)

    # Record the mapping in hex for the pushkey handler (keys arrive
    # hex-encoded there); binary entries are kept alongside.
    for k in replacements.keys():
        replacements[hex(k)] = hex(replacements[k])

    op.records.add(rebaseparttype, replacements)

    return 1
|
|
|
|
|
|
|
|
def bundle2pushkey(orig, op, part):
    """Wrapped b2x:pushkey handler.

    A rebasing push refers to the pre-rebase node hashes; rewrite phase
    keys and bookmark targets to the corresponding rebased nodes before
    delegating to the original handler.
    """
    # Merge every recorded old->new mapping; later records win, matching
    # the original dict(sum(...)) construction.
    replacements = {}
    for record in op.records[rebaseparttype]:
        replacements.update(record)

    namespace = pushkey.decode(part.params['namespace'])
    if namespace == 'phases':
        key = pushkey.decode(part.params['key'])
        part.params['key'] = pushkey.encode(replacements.get(key, key))
    if namespace == 'bookmarks':
        new = pushkey.decode(part.params['new'])
        part.params['new'] = pushkey.encode(replacements.get(new, new))

    return orig(op, part)
|