2016-09-21 17:45:25 +03:00
|
|
|
# pushrebase.py - server-side rebasing of pushed changesets
|
2014-11-08 03:27:47 +03:00
|
|
|
#
|
|
|
|
# Copyright 2014 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
2017-03-23 21:51:56 +03:00
|
|
|
import errno, os, tempfile, mmap, time
|
2014-11-08 03:27:47 +03:00
|
|
|
|
2017-05-22 23:38:37 +03:00
|
|
|
from mercurial import bundle2, hg, scmutil, exchange, commands
|
2017-02-23 15:08:18 +03:00
|
|
|
from mercurial import util, error, discovery, changegroup, context, revsetlang
|
2016-11-16 23:11:11 +03:00
|
|
|
from mercurial import obsolete, pushkey, phases, extensions, manifest
|
2017-05-22 23:38:37 +03:00
|
|
|
from mercurial import encoding, registrar
|
2017-04-26 00:46:36 +03:00
|
|
|
from mercurial.extensions import wrapcommand, wrapfunction, unwrapfunction
|
pushrebase: use mercurial.hg.repository() rather than mercurial.bundlerepo.bundlerepository()
Summary: This change is done so that important extension wrapping is not skipped while creating a bundle. Using bundlerepository, some extensions were getting skipped. Extensions like inhibit would fail without this change.
Test Plan: I ran pushrebase test and it failed with Attribute Error in newmancache._order = collections.deque(oldmancache._order). It failed without the changes made in this diff as well. Also, when I ran all the tests, some of them failed due to not having getdb.sh, which isn't related to this change.
Reviewers: #sourcecontrol, ttung, pyd, lcharignon, rmcelroy
Reviewed By: rmcelroy
Subscribers: durham, lcharignon, ericsumner, mitrandir, trunkagent, pyd, rmcelroy
Differential Revision: https://phabricator.fb.com/D2838849
Tasks: 7916714
Signature: t1:2838849:1453299050:142328bf8f0dbc8814cd3bd25ac47714c4bf99d7
2016-01-21 00:11:27 +03:00
|
|
|
from mercurial.hg import repository
|
2014-11-19 03:36:46 +03:00
|
|
|
from mercurial.node import nullid, hex, bin
|
2014-11-08 03:27:47 +03:00
|
|
|
from mercurial.i18n import _
|
|
|
|
|
2016-11-29 16:24:07 +03:00
|
|
|
# Declare compatibility: this extension ships with (and is tested against)
# the fb-hgext release train rather than a specific Mercurial version.
testedwith = 'ships-with-fb-hgext'

# Command registration table, populated via the @command decorator.
cmdtable = {}
command = registrar.command(cmdtable)

# bundle2 part types that make up the pushrebase wire protocol.
rebaseparttype = 'b2x:rebase'
commonheadsparttype = 'b2x:commonheads'

# Config (section, name) the client sets to request a server-side rebase
# onto the named bookmark/revision.
experimental = 'experimental'
configonto = 'server-rebase-onto'

# Config flag checked by blocknonpushrebase to verify that an incoming
# changegroup went through the pushrebase code path.
pushrebasemarker = '__pushrebase_processed__'
# Sentinel "onto" value sent on force pushes, meaning: do not rebase,
# keep the existing parent (see partgen).
donotrebasemarker = '__pushrebase_donotrebase__'
|
2014-11-08 03:27:47 +03:00
|
|
|
|
2015-07-07 03:01:25 +03:00
|
|
|
def uisetup(ui):
    """Move pushrebase to the end of the extension load order.

    remotenames circumvents the default push implementation entirely, so
    pushrebase must load after it in order to wrap it.
    """
    loadorder = extensions._order
    loadorder.remove('pushrebase')
    loadorder.append('pushrebase')
    extensions._order = loadorder
|
2015-07-07 03:01:25 +03:00
|
|
|
|
|
|
|
def extsetup(ui):
    """Hook pushrebase into the push command and the bundle2 machinery.

    Order matters throughout: part-generator ordering, the heads-check
    wrappers, and the pushkey handler replacement all rely on running
    exactly once at extension-setup time.
    """
    entry = wrapcommand(commands.table, 'push', _push)
    try:
        # Don't add the 'to' arg if it already exists
        extensions.find('remotenames')
    except KeyError:
        entry[1].append(('', 'to', '', _('server revision to rebase onto')))

    # Reorder the bundle2 part generators: the rebase part must be produced
    # before the stock 'changeset' part, and the common-heads part first of
    # all.
    partorder = exchange.b2partsgenorder
    partorder.insert(partorder.index('changeset'),
                     partorder.pop(partorder.index(rebaseparttype)))

    partorder.insert(0, partorder.pop(partorder.index(commonheadsparttype)))

    wrapfunction(discovery, 'checkheads', _checkheads)
    # we want to disable the heads check because in pushrebase repos, we
    # expect the heads to change during the push and we should not abort.

    # The check heads functions are used to verify that the heads haven't
    # changed since the client did the initial discovery. Pushrebase is meant
    # to allow concurrent pushes, so the heads may have very well changed.
    # So let's not do this check.
    wrapfunction(exchange, 'check_heads', _exchangecheckheads)
    wrapfunction(exchange, '_pushb2ctxcheckheads', _skipcheckheads)

    # Route pushkey parts (bookmark moves, phases) through bundle2pushkey,
    # preserving the original handler's declared params.
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']
    newpushkeyhandler = lambda *args, **kwargs: \
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
    bundle2.parthandlermapping['b2x:pushkey'] = newpushkeyhandler

    # Server side: preload manifests named by the client before taking the
    # lock (see unbundle below).
    wrapfunction(exchange, 'unbundle', unbundle)

    # Server side: let hooks observe the incoming bundle as a repo (see
    # _peerorrepo below).
    wrapfunction(hg, '_peerorrepo', _peerorrepo)
|
|
|
|
|
2015-10-29 04:19:15 +03:00
|
|
|
def reposetup(ui, repo):
    """Install the hook rejecting non-pushrebase pushes when configured."""
    blocking = repo.ui.configbool('pushrebase', 'blocknonpushrebase')
    if not blocking:
        return
    repo.ui.setconfig(
        'hooks', 'prechangegroup.blocknonpushrebase', blocknonpushrebase)
|
|
|
|
|
|
|
|
def blocknonpushrebase(ui, repo, **kwargs):
    """prechangegroup hook: abort pushes that bypassed pushrebase."""
    processed = repo.ui.configbool('pushrebase', pushrebasemarker)
    if processed:
        return
    raise error.Abort("this repository requires that you push using "
                      "'hg push --to'")
|
|
|
|
|
2017-05-02 05:11:08 +03:00
|
|
|
def _peerorrepo(orig, ui, path, create=False, **kwargs):
    """Wrapper for hg._peerorrepo that redirects hooks to a bundle repo.

    If the HG_HOOK_BUNDLEPATH environment variable is set (presumably by
    the pushrebase server while running hooks on incoming data — confirm
    against the hook-running code path), open the repo at that bundle path
    instead of the requested one so hooks see the pending changesets.
    """
    # Force hooks to use a bundle repo
    bundlepath = encoding.environ.get("HG_HOOK_BUNDLEPATH")
    if bundlepath:
        return orig(ui, bundlepath, create=create, **kwargs)
    # Pass 'create' by keyword here too, for consistency with the branch
    # above (it was previously passed positionally; behavior is unchanged).
    return orig(ui, path, create=create, **kwargs)
|
2015-10-06 04:56:46 +03:00
|
|
|
|
2015-09-28 21:31:24 +03:00
|
|
|
def unbundle(orig, repo, cg, heads, source, url):
    """Wrapper for exchange.unbundle that pre-warms requested manifests.

    The client lists manifest nodes in the bundle's 'preloadmanifests'
    parameter. Reading them here happens outside the lock, which cuts
    down on lock time and increases commit throughput.
    """
    if util.safehasattr(cg, 'params'):
        requested = cg.params.get('preloadmanifests')
        if requested:
            for mfnode in requested.split(','):
                repo.manifestlog[bin(mfnode)].read()

    return orig(repo, cg, heads, source, url)
|
|
|
|
|
2014-12-05 22:02:01 +03:00
|
|
|
def validaterevset(repo, revset):
    """Abort if the revset cannot be pushrebased; return None when it can.

    A revset is rebasable when it is non-empty, contains no public or
    obsolete changesets, and has exactly one head.
    """
    if not repo.revs(revset):
        raise error.Abort(_('nothing to rebase'))

    if repo.revs('%r and public()', revset):
        raise error.Abort(_('cannot rebase public changesets'))

    if repo.revs('%r and obsolete()', revset):
        raise error.Abort(_('cannot rebase obsolete changesets'))

    # Multiple heads would make the rebase destination ambiguous.
    heads = repo.revs('heads(%r)', revset)
    if len(heads) > 1:
        raise error.Abort(_('cannot rebase divergent changesets'))

    repo.ui.note(_('validated revset for rebase\n'))
|
2014-12-05 22:02:01 +03:00
|
|
|
|
2015-10-30 01:42:54 +03:00
|
|
|
def getrebasepart(repo, peer, outgoing, onto, newhead):
    """Build the mandatory b2x:rebase bundle2 part for a rebasing push.

    'outgoing' describes the changesets being pushed, 'onto' names the
    server-side rebase destination, and 'newhead' says whether creating a
    new head is allowed.  Aborts when there is nothing to push, the server
    lacks rebase support, or the outgoing set fails validaterevset.
    """
    if not outgoing.missing:
        raise error.Abort(_('no changesets to rebase'))

    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing))

    cg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)

    # Explicitly notify the server what obsmarker versions the client supports
    # so the client could receive marker from the server.
    #
    # The core mercurial logic will do the right thing (enable obsmarker
    # capabilities in the pushback bundle) if obsmarker exchange is enabled
    # client-side.
    #
    # But we want the marker without enabling marker exchange, and our server
    # could reply a marker without exchange or even obsstore enabled. So we
    # bypass the "standard" way of capabilities check by sending the supported
    # versions directly in our own part. Note: do not enable "exchange" because
    # it has an unwanted side effect: pushing markers from client to server.
    #
    # "createmarkers" is all we need to be able to write a new marker.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        obsmarkerversions = '\0'.join(str(v) for v in obsolete.formats)
    else:
        obsmarkerversions = ''

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    return bundle2.bundlepart(
        rebaseparttype.upper(),
        mandatoryparams={
            'onto': onto,
            'newhead': repr(newhead),
        }.items(),
        advisoryparams={
            # advisory: (old) server could ignore this without error
            'obsmarkerversions': obsmarkerversions,
        }.items(),
        data = cg)
|
2014-11-08 03:27:47 +03:00
|
|
|
|
2015-11-18 04:54:31 +03:00
|
|
|
def _checkheads(orig, pushop):
    """Wrapper for discovery.checkheads that skips checks on rebasing pushes."""
    repo = pushop.repo
    destination = repo.ui.config(experimental, configonto)
    if not destination:
        # Not a rebasing push: defer to the stock heads check.
        return orig(pushop)

    # With remotenames loaded we don't want to abort on --to even if the
    # server doesn't support pushrebase.
    if checkremotenames():
        return

    # The rest of the checks are performed during bundle2 part processing;
    # the regular push checks must be bypassed because the push will look
    # like a new head, which isn't normally allowed.  Only verify bundle2
    # availability on both ends here.
    if not repo.ui.configbool('experimental', 'bundle2-exp', False):
        raise error.Abort(_('bundle2 needs to be enabled on client'))
    if not pushop.remote.capable('bundle2-exp'):
        raise error.Abort(_('bundle2 needs to be enabled on server'))
    return
|
2015-04-03 21:38:55 +03:00
|
|
|
|
2015-09-08 04:26:53 +03:00
|
|
|
def _exchangecheckheads(orig, repo, *args, **kwargs):
    """Wrapper for exchange.check_heads: a no-op during a rebasing push."""
    rebasing = repo.ui.config(experimental, configonto)
    if rebasing:
        # Heads may legitimately move while the server rebases; skip.
        return None
    return orig(repo, *args, **kwargs)
|
|
|
|
|
2015-10-01 21:15:37 +03:00
|
|
|
def _skipcheckheads(orig, pushop, bundler):
    """Wrapper for exchange._pushb2ctxcheckheads: skipped while rebasing."""
    rebasing = pushop.ui.config(experimental, configonto)
    if rebasing:
        # no check if we rebase
        return None
    return orig(pushop, bundler)
|
|
|
|
|
2014-11-08 03:27:47 +03:00
|
|
|
def _push(orig, ui, repo, *args, **opts):
    """Wrapper for the push command adding --to / pushrebase behavior.

    Resolves the rebase destination (explicit --to, or the remotenames
    tracking bookmark when no destination was given), then runs the
    original push under config overrides.  For rebasing pushes it also
    temporarily wraps phase/obsmarker handling so the pushback from the
    server is applied to the right nodes; the wrappers are removed in the
    finally block to keep wrap/unwrap symmetric even on error.
    """
    onto = opts.get('to')
    if not onto and not opts.get('rev') and not opts.get('dest'):
        try:
            # If it's a tracking bookmark, remotenames will push there,
            # so let's set that up as our --to.
            remotenames = extensions.find('remotenames')
            active = remotenames.bmactive(repo)
            tracking = remotenames._readtracking(repo)
            if active and active in tracking:
                track = tracking[active]
                path, book = remotenames.splitremotename(track)
                onto = book
        except KeyError:
            # No remotenames? No big deal.
            pass

    overrides = {(experimental, configonto): onto,
                 ('remotenames', 'allownonfastforward'): True}
    if onto:
        # Allow the server's reply bundle (rebased commits, markers) back.
        overrides[(experimental, 'bundle2.pushback')] = True
        wrapfunction(exchange, '_localphasemove', _phasemove)
        wrapfunction(obsolete.obsstore, 'mergemarkers', _mergemarkers)

    try:
        with ui.configoverride(overrides, 'pushrebase'):
            result = orig(ui, repo, *args, **opts)
    finally:
        if onto:
            unwrapfunction(exchange, '_localphasemove', _phasemove)
            unwrapfunction(obsolete.obsstore, 'mergemarkers', _mergemarkers)

    return result
|
|
|
|
|
2017-04-26 00:46:36 +03:00
|
|
|
def _mergemarkers(orig, self, transaction, data):
    """Record incoming markers so _phasemove can map old nodes to new ones."""
    version, markers = obsolete._readmarkers(data)
    self._pushrebasereplaces = {}
    if version == obsolete._fm1version:
        # only support fm1 1:1 replacements for now, record prec -> sucs
        for prec, sucs, _flags, _meta, _date, _parents in markers:
            if len(sucs) == 1:
                self._pushrebasereplaces[prec] = sucs[0]
    return orig(self, transaction, data)
|
2015-04-03 21:38:55 +03:00
|
|
|
|
2017-04-26 00:46:36 +03:00
|
|
|
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent original changesets from being marked public

    When marking changesets as public, we need to mark the replaced nodes
    returned from the server instead. This is done by looking at the new
    obsmarkers we received during "_mergemarkers" and mapping old nodes to
    new ones.

    See exchange.push for the order of this and bundle2 pushback:

        _pushdiscovery(pushop)
        _pushbundle2(pushop)
        # bundle2 pushback is processed here, but the client receiving the
        # pushback cannot affect pushop.*heads (which affects phasemove),
        # because it only gets "repo", and creates a separate "op":
        bundle2.processbundle(pushop.repo, reply, trgetter)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _localphasemove(...) # this method always gets called
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    The least hacky way to get things "right" seems to be:

        1. In core, allow the bundle2 pushback handler to affect the original
           "pushop" somehow (so the original pushop's (common|future)heads
           could be updated accordingly and phasemove logic is affected)
        2. In the pushrebase extension, add a new bundle2 part handler to
           receive the new relationship, correct pushop.*heads, and write
           obsmarkers.
        3. Migrate the obsmarker part to the new bundle2 part added in step 2,
           i.e. the server won't send obsmarkers directly.

    For now, we don't have "1" so things are done in a bit hacky way.
    """
    # find replacements. note: _pushrebasereplaces could be empty if obsstore
    # is not enabled locally.
    mapping = getattr(pushop.repo.obsstore, '_pushrebasereplaces', {})
    nodes = [mapping.get(n, n) for n in nodes]
    if phase == phases.public:
        # only allow new nodes to become public
        allowednodes = set(mapping.values())
        nodes = [n for n in nodes if n in allowednodes]
    orig(pushop, nodes, phase)
|
2014-11-19 03:36:46 +03:00
|
|
|
|
2014-12-05 22:40:14 +03:00
|
|
|
@exchange.b2partsgenerator(commonheadsparttype)
def commonheadspartgen(pushop, bundler):
    """Emit the common-heads part; skipped for non-pushrebase servers."""
    servercaps = bundle2.bundle2caps(pushop.remote)
    if rebaseparttype not in servercaps:
        # Server doesn't support pushrebase, so just fallback to normal push.
        return
    payload = ''.join(pushop.outgoing.commonheads)
    bundler.newpart(commonheadsparttype, data=payload)
|
|
|
|
|
|
|
|
@bundle2.parthandler(commonheadsparttype)
def commonheadshandler(op, inpart):
    """Record each 20-byte node id carried by a common-heads part."""
    while True:
        nodeid = inpart.read(20)
        if len(nodeid) < 20:
            break
        op.records.add(commonheadsparttype, nodeid)
    assert not nodeid # data should split evenly into blocks of 20 bytes
|
|
|
|
|
2016-03-22 20:57:29 +03:00
|
|
|
def checkremotenames():
    """Return True when the remotenames extension is loaded."""
    try:
        extensions.find('remotenames')
    except KeyError:
        return False
    return True
|
|
|
|
|
2014-11-08 03:27:47 +03:00
|
|
|
@exchange.b2partsgenerator(rebaseparttype)
def partgen(pushop, bundler):
    """Generate the b2x:rebase part for a rebasing push.

    Skips entirely when changesets were already handled, when no --to was
    given, or when the server lacks pushrebase but remotenames can honor
    --to.  Returns a reply handler that marks the push successful (the
    server either succeeds or aborts).
    """
    onto = pushop.ui.config(experimental, configonto)
    if 'changesets' in pushop.stepsdone or not onto:
        return

    if (rebaseparttype not in bundle2.bundle2caps(pushop.remote) and
        checkremotenames()):
        # Server doesn't support pushrebase, but --to is valid in remotenames as
        # well, so just let it through.
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        # It's important that this text match the text found in upstream
        # Mercurial, since some tools rely on this string to know if a push
        # succeeded despite not pushing commits.
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # Force push means no rebasing, so let's just take the existing parent.
    if pushop.force:
        onto = donotrebasemarker

    rebasepart = getrebasepart(pushop.repo,
                               pushop.remote,
                               pushop.outgoing,
                               onto,
                               pushop.newbranch)

    bundler.addpart(rebasepart)

    # Tell the server which manifests to load before taking the lock.
    # This helps shorten the duration of the lock, which increases our potential
    # commit rate.
    missing = pushop.outgoing.missing
    roots = pushop.repo.set('parents(%ln) - %ln', missing, missing)
    preloadnodes = [hex(r.manifestnode()) for r in roots]
    bundler.addparam("preloadmanifests", ','.join(preloadnodes))

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
|
2014-11-08 03:27:47 +03:00
|
|
|
|
|
|
|
# Advertise the rebase part in the server's bundle2 capabilities so clients
# can detect pushrebase support (see getrebasepart/commonheadspartgen).
bundle2.capabilities[rebaseparttype] = ()
|
|
|
|
|
extract temporary bundle file creation into its own function
Summary:
Transferring the part data into a bundle file on disk for bundlerepo to read
is a self-contained operation
Test Plan: ##run_tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1726767
Signature: t1:1726767:1418241560:8afac123afc847e61e6f46b6216d15b003024504
2014-12-09 04:10:24 +03:00
|
|
|
def _makebundlefile(part):
    """constructs a temporary bundle file

    part.data should be an uncompressed v1 changegroup"""
    handle = None
    fd, bundlefile = tempfile.mkstemp()
    try:  # guards bundlefile
        try:  # guards handle
            handle = os.fdopen(fd, 'wb')
            magic = 'HG10UN'
            handle.write(magic)
            # First read is shortened so subsequent reads stay page-aligned
            # after the magic header.
            chunk = part.read(mmap.PAGESIZE - len(magic))
            while chunk:
                handle.write(chunk)
                chunk = part.read(mmap.PAGESIZE)
        finally:
            handle.close()
    except Exception:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
        raise

    return bundlefile
|
|
|
|
|
pushrebase: extract revlist creation and validation into its own function
Summary: None of these temporary variables should be needed later
Test Plan: ##run-tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1726850
Signature: t1:1726850:1418242127:64da1168b9f11897f6018082292e7f6285141bfd
2014-12-09 04:30:26 +03:00
|
|
|
def _getrevs(bundle, onto):
    '''extracts and validates the revs to be imported

    Returns a pair ``(revs, dest)`` where ``revs`` is the list of incoming
    changectxs in revision order, and ``dest`` is the changeset they should
    be rebased onto: ``onto`` itself for a fast-forward, ``bundle[nullid]``
    for a completely distinct incoming history, or the old location of the
    destination otherwise.  Raises error.Abort when the incoming commits do
    not branch from an ancestor of the destination or when they conflict
    with changes made between the old and new destination.
    '''
    validaterevset(bundle, 'bundle()')

    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
    # Re-resolve onto inside the bundle repo so the revsets below can see it.
    onto = bundle[onto.hex()]
    # Fast forward update, no rebase needed
    if list(bundle.set('bundle() & %d::', onto.rev())):
        return revs, onto

    if revs:
        # We want to rebase the highest bundle root that is an ancestor of
        # `onto`.
        oldonto = list(bundle.set('max(parents(bundle()) - bundle() & ::%d)',
                                  onto.rev()))
        if not oldonto:
            # If there's no shared history, only allow the rebase if the
            # incoming changes are completely distinct.
            sharedparents = list(bundle.set('parents(bundle()) - bundle()'))
            if not sharedparents:
                return revs, bundle[nullid]
            raise error.Abort(_('pushed changesets do not branch from an '
                                'ancestor of the desired destination %s')
                              % onto.hex())
        oldonto = oldonto[0]

        # Computes a list of all the incoming file changes
        bundlefiles = set()
        for bundlerev in revs:
            bundlefiles.update(bundlerev.files())

        def findconflicts():
            # Returns all the files touched in the bundle that are also touched
            # between the old onto (ex: our old bookmark location) and the new
            # onto (ex: the server's actual bookmark location).
            filematcher = scmutil.matchfiles(bundle, bundlefiles)
            return onto.manifest().diff(oldonto.manifest(), filematcher).keys()

        def findconflictsfast():
            # Fast path for detecting conflicting files. Inspects the changelog
            # file list instead of loading manifests. This only works for
            # non-merge commits, since merge commit file lists do not include
            # all the files changed in the merge.
            ontofiles = set()
            for betweenctx in bundle.set('%d %% %d', onto.rev(), oldonto.rev()):
                ontofiles.update(betweenctx.files())

            return bundlefiles.intersection(ontofiles)

        if bundle.revs('(%d %% %d) - not merge()', onto.rev(), oldonto.rev()):
            # If anything between oldonto and newonto is a merge commit, use the
            # slower manifest diff path.
            conflicts = findconflicts()
        else:
            conflicts = findconflictsfast()

        if conflicts:
            raise error.Abort(_('conflicting changes in:\n%s') %
                              ''.join(' %s\n' % f for f in sorted(conflicts)))

    # NOTE(review): if `revs` is empty, `oldonto` was never assigned and this
    # raises NameError -- presumably pushes always carry at least one rev;
    # confirm before relying on the empty-bundle path.
    return revs, oldonto
|
|
|
|
|
2016-12-16 21:24:12 +03:00
|
|
|
def _graft(repo, rev, mapping, lastdestnode):
    '''duplicate changeset "rev" with parents from "mapping"

    Creates a memctx in ``repo`` whose parents are the rebased equivalents
    (per ``mapping``: old node -> new node) of rev's parents, commits it,
    and returns the new node.  May mutate ``mapping`` (removes the nullid
    entry once the first root commit has been translated).
    '''
    oldp1 = rev.p1().node()
    oldp2 = rev.p2().node()
    # Parents not present in the mapping are kept as-is (already on the
    # destination side).
    newp1 = mapping.get(oldp1, oldp1)
    newp2 = mapping.get(oldp2, oldp2)
    m = rev.manifest()
    def getfilectx(repo, memctx, path):
        # Provide file content for the new commit; None signals removal.
        if path in m:
            fctx = rev[path]
            flags = fctx.flags()
            copied = fctx.renamed()
            if copied:
                # renamed() returns (source path, filenode); memfilectx only
                # wants the source path.
                copied = copied[0]
            return context.memfilectx(repo, fctx.path(), fctx.data(),
                                      islink='l' in flags,
                                      isexec='x' in flags,
                                      copied=copied)
        else:
            return None

    # If the incoming commit has no parents, but requested a rebase,
    # allow it only for the first commit. The null/null commit will always
    # be the first commit since we only allow a nullid->nonnullid mapping if the
    # incoming commits are a completely distinct history (see `sharedparents` in
    # getrevs()), so there's no risk of commits with a single null parent
    # accidentally getting translated first.
    if oldp1 == nullid and oldp2 == nullid:
        if newp1 != nullid:
            newp2 = nullid
            # One-shot: later roots must not be re-parented onto the
            # destination.
            del mapping[nullid]

    if oldp1 != nullid and oldp2 != nullid:
        # The way commits work is they copy p1, then apply the necessary changes
        # to get to the new state. In a pushrebase situation, we are applying
        # changes from the pre-rebase commit to a post-rebase commit, which
        # means we need to ensure that changes caused by the rebase are
        # preserved. In a merge commit, if p2 is the post-rebase commit that
        # contains all the files from the rebase destination, those changes will
        # be lost, since the newp1 doesn't have those changes, and
        # oldp1.diff(oldrev) doesn't have them either. The solution is to ensure
        # that the parent that contains all the original rebase destination
        # files is always p1. We do that by just swapping them here.
        if newp2 == lastdestnode:
            newtemp = newp1
            oldtemp = oldp1
            oldp1 = oldp2
            oldp2 = oldtemp
            newp1 = newp2
            newp2 = newtemp

        # If it's a merge commit, Mercurial's rev.files() only returns the files
        # that are different from both p1 and p2, so it would not capture all of
        # the incoming changes from p2 (for instance, new files in p2). The fix
        # is to manually diff the rev manifest and its p1 to get the list of
        # files that have changed. We only need to diff against p1, and not p2,
        # because Mercurial constructs new commits by applying our specified
        # files on top of a copy of the p1 manifest, so we only need the diff
        # against p1.
        bundlerepo = rev._repo
        files = rev.manifest().diff(bundlerepo[oldp1].manifest()).keys()
    else:
        files = rev.files()

    date = rev.date()
    if repo.ui.configbool('pushrebase', 'rewritedates'):
        # Keep the original timezone offset but stamp the rebase time.
        date = (time.time(), date[1])
    return context.memctx(repo,
                          [newp1, newp2],
                          rev.description(),
                          files,
                          getfilectx,
                          rev.user(),
                          date,
                          rev.extra(),
                          ).commit()
|
|
|
|
|
2017-04-11 23:21:27 +03:00
|
|
|
def _buildobsolete(replacements, oldrepo, newrepo, date):
    '''return obsmarkers, add them locally (server-side) if obsstore enabled'''
    # One marker per rewritten changeset: (precursor ctx, (successor ctx,),
    # metadata).  Unchanged revs produce no marker.
    markers = []
    for oldrev, newrev in replacements.items():
        if newrev == oldrev:
            continue
        successor = newrepo[newrev]
        metadata = {'operation': 'push', 'user': successor.user()}
        markers.append((oldrepo[oldrev], (successor,), metadata))
    # Persist server-side only when the repo actually keeps an obsstore.
    if obsolete.isenabled(newrepo, obsolete.createmarkersopt):
        obsolete.createmarkers(newrepo, markers, date=date)
    return markers
|
pushrebase: extract obsolete marker generation into a function
Test Plan: ##run-tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1726944
Signature: t1:1726944:1418242222:a91974f41b5883eb659dd69135176adf066b8b47
2014-12-09 04:49:48 +03:00
|
|
|
|
pushrebase: extract pushback into its own function
Summary: Also, remove some spurious TODOs
Test Plan: ##run-tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1732036
Signature: t1:1732036:1418250584:6922035a05df012445b0092f8646eb982602f07d
2014-12-11 01:22:34 +03:00
|
|
|
def _addpushbackchangegroup(repo, reply, outgoing):
    '''adds changegroup part to reply containing revs from outgoing.missing'''
    # Negotiate the changegroup version: highest format both the client
    # (reply capabilities) and the server support; '01' is the fallback.
    clientversions = set(reply.capabilities.get('changegroup'))
    if not clientversions:
        clientversions.add('01')
    serverversions = set(changegroup.supportedoutgoingversions(repo))
    version = max(clientversions & serverversions)

    rawcg = changegroup.getlocalchangegroupraw(
        repo, 'rebase:reply', outgoing, version=version)

    part = reply.newpart('CHANGEGROUP', data=rawcg)
    # Version '01' is the implicit default and must not carry the param.
    if version != '01':
        part.addparam('version', version)
|
|
|
|
|
pushrebase: do not require exchange for getting markers
Summary:
D4865150 and D4934720 aren't effective in our current setup. The direct
cause in the code is because the server couldn't find common marker version:
```
# old server-side code, returns empty in our current setup
obsolete.commonversion(bundle2.obsmarkersversion(reply.capabilities))
```
Upon investigation, it's because there is no `exchange` enabled client-side.
But we do want one-way (server->client) markers for the rebased commits, as
long as obsstore is enabled (createmarkers is set, without exchange).
The upstream expects the server to have obsstore enabled, and exchange
enabled, to send markers. Since we are generating markers without an
obsstore (see D4865150), we are on our own way. This diff makes it one step
further.
This diff adds an explicit parameter to the `b2x:rebase` part to tell the
server what obsmarker format the client supports so the server could make a
right decision without relying on the "standard" `reply.capabilities`, which
is affected by the exchange option.
Test Plan: Change the existing test, make sure the old code fails.
Reviewers: #mercurial, durham
Reviewed By: durham
Subscribers: durham, mjpieters
Differential Revision: https://phabricator.intern.facebook.com/D4997972
Signature: t1:4997972:1493848751:14c29654b2e8246bd12a8de8820af5b3773e2fb7
2017-05-04 02:08:35 +03:00
|
|
|
def _addpushbackobsolete(repo, reply, markers, markerdate,
                         clientobsmarkerversions):
    '''adds obsmarkers to reply'''
    # experimental config: pushrebase.pushback.obsmarkers
    # if set to False, the server will not push back obsmarkers.
    if not repo.ui.configbool('pushrebase', 'pushback.obsmarkers', True):
        return

    # _buildobsolete has hard-coded obsolete._fm1version raw markers, so client
    # needs to support it, and the reply needs to have the correct capabilities
    if obsolete._fm1version not in clientobsmarkerversions:
        return
    reply.capabilities['obsmarkers'] = ['V1']

    flag = 0
    parents = None
    try:
        # Lower the (precursor, successors, metadata) tuples into the raw
        # fm1 wire representation expected by buildobsmarkerspart.
        rawmarkers = []
        for pre, sucs, meta in markers:
            sucnodes = tuple(s.node() for s in sucs)
            metaitems = tuple(sorted(meta.items()))
            rawmarkers.append(
                (pre.node(), sucnodes, flag, metaitems, markerdate, parents))
        bundle2.buildobsmarkerspart(reply, rawmarkers)
    except ValueError as exc:
        repo.ui.status(_("can't send obsolete markers: %s") % exc.message)
|
|
|
|
|
pushrebase: do not require exchange for getting markers
Summary:
D4865150 and D4934720 aren't effective in our current setup. The direct
cause in the code is because the server couldn't find common marker version:
```
# old server-side code, returns empty in our current setup
obsolete.commonversion(bundle2.obsmarkersversion(reply.capabilities))
```
Upon investigation, it's because there is no `exchange` enabled client-side.
But we do want one-way (server->client) markers for the rebased commits, as
long as obsstore is enabled (createmarkers is set, without exchange).
The upstream expects the server to have obsstore enabled, and exchange
enabled, to send markers. Since we are generating markers without an
obsstore (see D4865150), we are on our own way. This diff makes it one step
further.
This diff adds an explicit parameter to the `b2x:rebase` part to tell the
server what obsmarker format the client supports so the server could make a
right decision without relying on the "standard" `reply.capabilities`, which
is affected by the exchange option.
Test Plan: Change the existing test, make sure the old code fails.
Reviewers: #mercurial, durham
Reviewed By: durham
Subscribers: durham, mjpieters
Differential Revision: https://phabricator.intern.facebook.com/D4997972
Signature: t1:4997972:1493848751:14c29654b2e8246bd12a8de8820af5b3773e2fb7
2017-05-04 02:08:35 +03:00
|
|
|
def _addpushbackparts(op, replacements, markers, markerdate,
                      clientobsmarkerversions):
    '''adds pushback to reply if supported by the client'''
    # Pushback requires: the client sent its common heads, there is a reply
    # bundle to append to, and the client advertised the capability.
    commonheads = op.records[commonheadsparttype]
    if not commonheads or not op.reply:
        return
    if 'pushback' not in op.reply.capabilities:
        return

    rebasedheads = [new for old, new in replacements.items() if old != new]
    outgoing = discovery.outgoing(op.repo, commonheads, rebasedheads)

    if outgoing.missing:
        plural = 's' if len(outgoing.missing) > 1 else ''
        op.repo.ui.warn(_("%s new changeset%s from the server will be "
                          "downloaded\n") % (len(outgoing.missing), plural))
        _addpushbackchangegroup(op.repo, op.reply, outgoing)
        _addpushbackobsolete(op.repo, op.reply, markers, markerdate,
                             clientobsmarkerversions)
|
pushrebase: extract pushback into its own function
Summary: Also, remove some spurious TODOs
Test Plan: ##run-tests.py##
Reviewers: pyd, durham
Reviewed By: durham
Subscribers: calvinb, mitrandir, rmcelroy, daviser, mpm, davidsp, sid0, akushner, pyd, durham
Differential Revision: https://phabricator.fb.com/D1732036
Signature: t1:1732036:1418250584:6922035a05df012445b0092f8646eb982602f07d
2014-12-11 01:22:34 +03:00
|
|
|
|
2017-03-29 03:18:59 +03:00
|
|
|
def resolveonto(repo, ontoarg):
    """Resolve ``ontoarg`` to a changectx, or None when no rebase is wanted.

    ``donotrebasemarker`` (or an unresolvable revision, e.g. a brand-new
    bookmark) yields None, which downstream code treats as "do not rebase".
    """
    if ontoarg != donotrebasemarker:
        try:
            return scmutil.revsingle(repo, ontoarg)
        except error.RepoLookupError:
            # Probably a new bookmark. Leave onto as None to not do any rebasing
            pass
    # onto is None means don't do rebasing
    return None
|
|
|
|
|
pushrebase: do not require exchange for getting markers
Summary:
D4865150 and D4934720 aren't effective in our current setup. The direct
cause in the code is because the server couldn't find common marker version:
```
# old server-side code, returns empty in our current setup
obsolete.commonversion(bundle2.obsmarkersversion(reply.capabilities))
```
Upon investigation, it's because there is no `exchange` enabled client-side.
But we do want one-way (server->client) markers for the rebased commits, as
long as obsstore is enabled (createmarkers is set, without exchange).
The upstream expects the server to have obsstore enabled, and exchange
enabled, to send markers. Since we are generating markers without an
obsstore (see D4865150), we are on our own way. This diff makes it one step
further.
This diff adds an explicit parameter to the `b2x:rebase` part to tell the
server what obsmarker format the client supports so the server could make a
right decision without relying on the "standard" `reply.capabilities`, which
is affected by the exchange option.
Test Plan: Change the existing test, make sure the old code fails.
Reviewers: #mercurial, durham
Reviewed By: durham
Subscribers: durham, mjpieters
Differential Revision: https://phabricator.intern.facebook.com/D4997972
Signature: t1:4997972:1493848751:14c29654b2e8246bd12a8de8820af5b3773e2fb7
2017-05-04 02:08:35 +03:00
|
|
|
@bundle2.parthandler(rebaseparttype, ('onto', 'newhead', 'obsmarkerversions'))
def bundle2rebase(op, part):
    '''unbundle a bundle2 containing a changegroup to rebase

    Writes the pushed commits into a temporary bundle repo, runs the
    prepushrebase hook before taking the lock, then grafts every pushed
    revision onto the requested destination inside a transaction.  Records
    old->new node replacements on ``op.records`` so later pushkey parts can
    be remapped, and emits obsolescence markers / pushback parts for the
    client.
    '''
    params = part.params

    bundlefile = None
    bundle = None
    # Obsmarker formats the client said it can read (see the
    # 'obsmarkerversions' part parameter); empty when the client sent none.
    clientobsmarkerversions = [
        int(v) for v in params.get('obsmarkerversions', '').split('\0') if v]
    markers = []
    markerdate = util.makedate()

    try: # guards bundlefile
        bundlefile = _makebundlefile(part)
        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = repository(op.repo.ui, bundlepath)

        # Resolve the destination before the lock purely for hook reporting;
        # it is re-resolved after the lock is taken.
        prelockonto = resolveonto(op.repo,
                                  params.get('onto', donotrebasemarker))
        prelockontonode = prelockonto.hex() if prelockonto else None

        # Allow running hooks on the new commits before we take the lock
        prelockrebaseargs = op.hookargs.copy()
        prelockrebaseargs['source'] = 'push'
        prelockrebaseargs['bundle2'] = '1'
        prelockrebaseargs['node'] = scmutil.revsingle(bundle,
                                                      'min(bundle())').hex()
        prelockrebaseargs['node_onto'] = prelockontonode
        prelockrebaseargs['hook_bundlepath'] = bundlefile
        op.repo.hook("prepushrebase", throw=True, **prelockrebaseargs)

        # Mark this unbundle as a pushrebase so other wrapped code can tell.
        op.repo.ui.setconfig('pushrebase', pushrebasemarker, True)

        # We will need the bundle revs after the lock is taken, so let's
        # precache all the bundle rev manifests.
        bundlerepocache = {}
        bundlectxs = list(bundle.set('bundle()'))
        manifestcachesize = op.repo.ui.configint('format',
                                                 'manifestcachesize') or 10
        if len(bundlectxs) < manifestcachesize:
            for ctx in bundlectxs:
                bundlerepocache[ctx.manifestnode()] = ctx.manifestctx().read()

        # Capture the destination's manifest fulltext now so it can seed the
        # revlog cache after the bundle repo is recreated below.
        preonto = resolveonto(bundle, params.get('onto', donotrebasemarker))
        preontocache = None
        if preonto:
            cache = bundle.manifestlog._revlog._cache
            if cache:
                cachenode, cacherev, cachetext = cache
                if cachenode == preonto.node():
                    preontocache = cache
            if not preontocache:
                cachenode = preonto.manifestnode()
                cacherev = bundle.manifestlog._revlog.rev(cachenode)
                cachetext = bundle.manifestlog[cachenode].read().text()
                preontocache = (cachenode, cacherev, cachetext)

        tr = op.gettransaction()
        hookargs = dict(tr.hookargs)

        # Recreate the bundle repo, since taking the lock in gettransaction()
        # may have caused it to become out of date.
        # (but grab a copy of the cache first)
        bundle.close()
        bundle = repository(op.repo.ui, bundlepath)

        # Preload the caches with data we already have. We need to make copies
        # here so that original repo caches don't get tainted with bundle
        # specific data.
        newdirmancache = bundle.manifestlog._dirmancache
        for dir, dircache in op.repo.manifestlog._dirmancache.iteritems():
            for mfnode in dircache:
                mfctx = dircache[mfnode]
                newmfctx = manifest.manifestctx(bundle, mfnode)
                newmfctx._data = mfctx._data
                newdirmancache[dir][mfnode] = newmfctx

        for mfnode, mfdict in bundlerepocache.iteritems():
            newmfctx = manifest.manifestctx(bundle, mfnode)
            newmfctx._data = mfdict
            newdirmancache[""][mfnode] = newmfctx

        newfulltextcache = op.repo.manifestlog._revlog._fulltextcache.copy()
        bundle.manifestlog._revlog._fulltextcache = newfulltextcache

        onto = resolveonto(op.repo, params.get('onto', donotrebasemarker))

        # Unless the client explicitly allowed it, refuse pushes whose
        # destination is not currently a head (would create a new one).
        if not params['newhead']:
            if not op.repo.revs('%r and head()', params['onto']):
                raise error.Abort(_('rebase would create a new head on server'))

        # onto is None means the push targets a new name: fall back to the
        # latest common ancestor of the pushed commits, or the null rev.
        if onto is None:
            maxcommonanc = list(bundle.set('max(parents(bundle()) - bundle())'))
            if not maxcommonanc:
                onto = op.repo[nullid]
            else:
                onto = maxcommonanc[0]

        revs, oldonto = _getrevs(bundle, onto)

        op.repo.hook("prechangegroup", **hookargs)

        mapping = {}

        # Seed the mapping with oldonto->onto
        mapping[oldonto.node()] = onto.node()

        # Notify the user of what is being pushed
        plural = 's' if len(revs) > 1 else ''
        # Fixed typo: "changset" -> "changeset" in the user-facing message.
        op.repo.ui.warn(_("pushing %s changeset%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        # Prepopulate the revlog _cache with the original onto's fulltext. This
        # means reading the new onto's manifest will likely have a much shorter
        # delta chain to traverse.
        if preontocache:
            op.repo.manifestlog._revlog._cache = preontocache
        onto.manifest()

        replacements = {}
        added = []

        lastdestnode = None
        for rev in revs:
            newrev = _graft(op.repo, rev, mapping, lastdestnode)

            new = op.repo[newrev]
            oldnode = rev.node()
            newnode = new.node()
            replacements[oldnode] = newnode
            mapping[oldnode] = newnode
            added.append(newnode)

            # Track which commit contains the original rebase destination
            # contents, so we can preserve the appropriate side's content during
            # merges.
            if not lastdestnode or oldnode == lastdestnode:
                lastdestnode = newnode

            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(newnode)
            hookargs['node'] = hex(newnode)

        markers = _buildobsolete(replacements, bundle, op.repo, markerdate)
    finally:
        try:
            if bundlefile:
                os.unlink(bundlefile)
        except OSError as e:
            # ENOENT means the temp bundle was never created; anything else
            # is a real cleanup failure.
            if e.errno != errno.ENOENT:
                raise
        if bundle:
            bundle.close()

    # A publishing server immediately makes the rebased commits public.
    publishing = op.repo.ui.configbool('phases', 'publish', True)
    if publishing:
        phases.advanceboundary(op.repo, tr, phases.public, [added[-1]])

    p = lambda: tr.writepending() and op.repo.root or ""
    op.repo.hook("pretxnchangegroup", throw=True, pending=p, **hookargs)

    def runhooks():
        # changegroup fires once with the first added node; incoming fires
        # once per added node.
        args = hookargs.copy()
        args['node'] = hex(added[0])
        op.repo.hook("changegroup", **args)

        for n in added:
            args = hookargs.copy()
            args['node'] = hex(n)
            op.repo.hook("incoming", **args)

    # Defer the changegroup/incoming hooks until the transaction closes and
    # the lock is released.
    tr.addpostclose('serverrebase-cg-hooks',
                    lambda tr: op.repo._afterlock(runhooks))

    _addpushbackparts(op, replacements, markers, markerdate,
                      clientobsmarkerversions)

    # Record hex replacements alongside the binary ones so pushkey parts
    # (which speak hex) can be remapped too.
    for k in replacements.keys():
        replacements[hex(k)] = hex(replacements[k])
    op.records.add(rebaseparttype, replacements)

    return 1
|
|
|
|
|
|
|
|
def bundle2pushkey(orig, op, part):
    '''Wrapper for the bundle2 pushkey part handler.

    Pushrebase may have rewritten the pushed commits to new hashes, but the
    pushkey parts (phase moves, bookmark updates) in the same bundle still
    name the client-side, pre-rebase nodes.  Remap those node arguments to
    the rebased replacements recorded by bundle2rebase, then delegate to
    the original handler.
    '''
    # Flatten the (old node -> new node) mappings recorded by every rebase
    # part processed in this bundle operation into a single dict.
    replacements = dict(sum([record.items()
                             for record
                             in op.records[rebaseparttype]],
                            []))

    namespace = pushkey.decode(part.params['namespace'])
    if namespace == 'phases':
        # Phase moves carry the node in 'key'; remap it if it was rebased.
        key = pushkey.decode(part.params['key'])
        part.params['key'] = pushkey.encode(replacements.get(key, key))
    if namespace == 'bookmarks':
        # Bookmark moves carry the target node in 'new'; remap likewise.
        new = pushkey.decode(part.params['new'])
        part.params['new'] = pushkey.encode(replacements.get(new, new))
        serverbin = op.repo._bookmarks.get(part.params['key'])
        clienthex = pushkey.decode(part.params['old'])

        if serverbin and clienthex:
            cl = op.repo.changelog
            revserver = cl.rev(serverbin)
            revclient = cl.rev(bin(clienthex))
            if revclient in cl.ancestors([revserver]):
                # if the client's bookmark origin is lagging behind the
                # server's location for that bookmark (usual for pushrebase)
                # then update the old location to match the real location
                #
                # TODO: We would prefer to only do this for pushrebase pushes
                # but that isn't straightforward so we just do it always here.
                # This forbids moving bookmarks backwards from clients.
                part.params['old'] = pushkey.encode(hex(serverbin))

    return orig(op, part)
|