commitcloud: refactor sync processing

Summary:
Refactor how commit cloud sync works.

Sync is simplified by delegating backup processing to the existing backup code.
This happens first, which means the user's work is backed up earlier, and the
sync processing can assume that all backed up commits are available in the
cloud storage.

Sync no longer attempts to handle the case where cloud storage has changed.
Instead, backup processing should ensure that all local commits are backed up
to the current cloud storage.

If a commit can't be backed up, then treat this as a normal failure to
sync and ignore that commit for this sync attempt.  If a commit can't be
downloaded from the server then the sync fails.

Reviewed By: mitrandir77

Differential Revision: D15295499

fbshipit-source-id: d371c5bf0daedbbe42e8c7d4a0c3d1a40c21a36f
This commit is contained in:
Mark Thomas 2019-05-20 06:11:59 -07:00 committed by Facebook Github Bot
parent 7f84c86805
commit 7359c3df5d
12 changed files with 501 additions and 505 deletions

View File

@ -178,6 +178,9 @@ def pushbackupbookmarks(repo, dest=None, **opts):
name = "/".join((prefix, "heads", hexhead))
infinitepushbookmarks[name] = hexhead
if not infinitepushbookmarks:
return
def getconnection():
return repo.connectionpool.get(remotepath, opts)

View File

@ -610,9 +610,7 @@ def cloudsync(ui, repo, cloudrefs=None, **opts):
ui.setconfig("ui", "ssh", bgssh)
with backuplock.lock(repo):
currentnode = repo["."].node()
sync.docloudsync(ui, repo, cloudrefs, **opts)
ret = sync.maybeupdateworkingcopy(ui, repo, currentnode)
ret = sync.sync(repo, cloudrefs, **opts)
background.backgroundbackupother(repo, **opts)
return ret

View File

@ -70,4 +70,4 @@ def getsyncingobsmarkers(repo):
def clearsyncingobsmarkers(repo):
"""Clears all syncing obsmarkers. The caller must hold the backup lock."""
repo.sharedvfs.unlink(_obsmarkerssyncing)
repo.sharedvfs.tryunlink(_obsmarkerssyncing)

View File

@ -17,17 +17,16 @@ from edenscm.mercurial import (
hintutil,
node as nodemod,
obsolete,
templatefilters,
util,
visibility,
)
from edenscm.mercurial.i18n import _
from . import (
backup,
backupbookmarks,
backuplock,
backupstate,
dependencies,
error as ccerror,
obsmarkers as obsmarkersmod,
service,
@ -56,26 +55,45 @@ def _getbookmarks(repo):
return {n: nodemod.hex(v) for n, v in repo._bookmarks.items()}
def docloudsync(ui, repo, cloudrefs=None, dest=None, **opts):
def sync(repo, cloudrefs=None, dest=None, **opts):
ui = repo.ui
start = time.time()
tokenlocator = token.TokenLocator(ui)
startnode = repo["."].node()
if opts.get("full"):
maxage = None
else:
maxage = ui.configint("commitcloud", "max_sync_age", None)
# Work out which repo and workspace we are synchronizing with.
reponame = ccutil.getreponame(repo)
workspacename = workspace.currentworkspace(repo)
if workspacename is None:
raise ccerror.WorkspaceError(ui, _("undefined workspace"))
# Connect to the commit cloud service.
tokenlocator = token.TokenLocator(ui)
serv = service.get(ui, tokenlocator.token)
ui.status(
_("synchronizing '%s' with '%s'\n") % (reponame, workspacename),
component="commitcloud",
)
backuplock.progress(repo, "starting synchronizing with '%s'" % workspacename)
# Work out what version to fetch updates from.
lastsyncstate = syncstate.SyncState(repo, workspacename)
fetchversion = lastsyncstate.version
if maxage != lastsyncstate.maxage:
# We are doing a full sync, or maxage has changed since the last sync,
# so get a fresh copy of the full state.
fetchversion = 0
remotepath = ccutil.getremotepath(repo, dest)
# external services can run cloud sync and know the latest version
# External services may already know the version number. Check if we're
# already up-to-date.
version = opts.get("workspace_version")
if version and version.isdigit() and int(version) <= lastsyncstate.version:
ui.status(
@ -83,260 +101,76 @@ def docloudsync(ui, repo, cloudrefs=None, dest=None, **opts):
)
return 0
if opts.get("full"):
maxage = None
else:
maxage = ui.configint("commitcloud", "max_sync_age", None)
fetchversion = lastsyncstate.version
# Back up all local commits that are not already backed up.
backedup, failed = backup.backup(repo, dest=dest, **opts)
# the remote backend for storing Commit Cloud commits has been changed
# when switching between Mercurial <-> Mononoke
if lastsyncstate.remotepath and remotepath != lastsyncstate.remotepath:
ui.status(
_(
"commit storage has been switched\n"
" from: %s\n"
" to: %s\n"
)
% (lastsyncstate.remotepath, remotepath),
component="commitcloud",
)
fetchversion = 0
# cloudrefs are passed in cloud rejoin
# On cloud rejoin we already know what the cloudrefs are. Otherwise,
# fetch them from the commit cloud service.
if cloudrefs is None:
# if we are doing a full sync, or maxage has changed since the last
# sync, use 0 as the last version to get a fresh copy of the full state.
if maxage != lastsyncstate.maxage:
fetchversion = 0
cloudrefs = serv.getreferences(reponame, workspacename, fetchversion)
def getconnection():
return repo.connectionpool.get(remotepath, opts)
# the remote backend for storing Commit Cloud commits has been changed
if lastsyncstate.remotepath and remotepath != lastsyncstate.remotepath:
backuplock.progress(repo, "verifying backed up heads at '%s'" % remotepath)
# make sure cloudrefs.heads have been backed up at this remote path
verifybackedupheads(
repo, remotepath, lastsyncstate.remotepath, getconnection, cloudrefs.heads
)
# if verification succeeded, update remote path in the local state and go on
lastsyncstate.updateremotepath(remotepath)
synced = False
pushfailures = set()
prevsyncversion = lastsyncstate.version
prevsyncheads = lastsyncstate.heads
prevsyncbookmarks = lastsyncstate.bookmarks
prevsynctime = lastsyncstate.lastupdatetime or 0
while not synced:
# Apply any changes from the cloud to the local repo.
if cloudrefs.version != fetchversion:
_applycloudchanges(ui, repo, remotepath, lastsyncstate, cloudrefs, maxage)
_applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage)
# Check if any omissions are now included in the repo
_checkomissions(ui, repo, remotepath, lastsyncstate)
_checkomissions(repo, remotepath, lastsyncstate)
localheads = _getheads(repo)
localbookmarks = _getbookmarks(repo)
obsmarkers = obsmarkersmod.getsyncingobsmarkers(repo)
# Send updates to the cloud. If this fails then we have lost the race
# to update the server and must start again.
synced, cloudrefs = _submitlocalchanges(
repo, reponame, workspacename, lastsyncstate, failed, serv
)
# Work out what we should have synced locally (and haven't deliberately
# omitted)
omittedheads = set(lastsyncstate.omittedheads)
omittedbookmarks = set(lastsyncstate.omittedbookmarks)
localsyncedheads = [
head for head in lastsyncstate.heads if head not in omittedheads
]
localsyncedbookmarks = {
name: node
for name, node in lastsyncstate.bookmarks.items()
if name not in omittedbookmarks
}
if not obsmarkers:
# If the heads have changed, and we don't have any obsmarkers to
# send, then it's possible we have some obsoleted versions of
# commits that are visible in the cloud workspace that need to
# be revived.
cloudvisibleonly = list(
repo.unfiltered().set("draft() & ::%ls & hidden()", localsyncedheads)
)
repo._commitcloudskippendingobsmarkers = True
obsolete.revive(cloudvisibleonly)
repo._commitcloudskippendingobsmarkers = False
localheads = _getheads(repo)
if (
set(localheads) == set(localsyncedheads)
and localbookmarks == localsyncedbookmarks
and lastsyncstate.version != 0
and not obsmarkers
):
synced = True
if not synced:
# The local repo has changed. We must send these changes to the
# cloud.
# Push commits that the server doesn't have.
newheads = list(set(localheads) - set(lastsyncstate.heads))
# If there are too many heads to backup,
# it is faster to check with the server first
backuplimitnocheck = ui.configint("commitcloud", "backuplimitnocheck")
if len(newheads) > backuplimitnocheck:
isbackedupremote = dependencies.infinitepush.isbackedupnodes(
getconnection, newheads
)
newheads = [
head for i, head in enumerate(newheads) if not isbackedupremote[i]
]
# all pushed to the server except maybe obsmarkers
allpushed = (not newheads) and (localbookmarks == localsyncedbookmarks)
failedheads = []
unfi = repo.unfiltered()
if not allpushed:
oldheads = list(
set(lastsyncstate.heads) - set(lastsyncstate.omittedheads)
)
backingup = [
nodemod.hex(n)
for n in unfi.nodes("draft() & ::%ls - ::%ls", newheads, oldheads)
]
backuplock.progressbackingup(
repo, [nodemod.bin(node) for node in backingup]
)
newheads, failedheads = dependencies.infinitepush.pushbackupbundlestacks(
ui, repo, getconnection, newheads
)
if failedheads:
pushfailures |= set(failedheads)
# Some heads failed to be pushed. Work out what is actually
# available on the server
localheads = [
ctx.hex()
for ctx in unfi.set(
"heads((draft() & ::%ls) + (draft() & ::%ls & ::%ls))",
newheads,
localheads,
localsyncedheads,
)
]
failedcommits = {
ctx.hex()
for ctx in unfi.set(
"(draft() & ::%ls) - (draft() & ::%ls) - (draft() & ::%ls)",
failedheads,
newheads,
localsyncedheads,
)
}
# Revert any bookmark updates that refer to failed commits to
# the available commits.
for name, bookmarknode in localbookmarks.items():
if bookmarknode in failedcommits:
if name in lastsyncstate.bookmarks:
localbookmarks[name] = lastsyncstate.bookmarks[name]
else:
del localbookmarks[name]
# Update the infinitepush backup bookmarks to point to the new
# local heads and bookmarks. This must be done after all
# referenced commits have been pushed to the server.
if not allpushed:
backupbookmarks.pushbackupbookmarks(repo, dest, **opts)
state = backupstate.BackupState(repo, remotepath)
state.update([nodemod.bin(head) for head in newheads])
# Work out the new cloud heads and bookmarks by merging in the
# omitted items. We need to preserve the ordering of the cloud
# heads so that smartlogs generally match.
newcloudheads = [
head
for head in lastsyncstate.heads
if head in set(localheads) | set(lastsyncstate.omittedheads)
]
newcloudheads.extend(
[head for head in localheads if head not in set(newcloudheads)]
)
newcloudbookmarks = {
name: localbookmarks.get(name, lastsyncstate.bookmarks.get(name))
for name in set(localbookmarks.keys())
| set(lastsyncstate.omittedbookmarks)
}
newomittedheads = list(set(newcloudheads) - set(localheads))
newomittedbookmarks = list(
set(newcloudbookmarks.keys()) - set(localbookmarks.keys())
)
if (
prevsyncversion == lastsyncstate.version - 1
and prevsyncheads == newcloudheads
and prevsyncbookmarks == newcloudbookmarks
and prevsynctime > time.time() - 60
):
raise ccerror.SynchronizationError(
ui,
_(
"oscillating commit cloud workspace detected.\n"
"check for commits that are visible in one repo but hidden in another,\n"
"and hide or unhide those commits in all places."
),
)
# Update the cloud heads, bookmarks and obsmarkers.
backuplock.progress(
repo, "finishing synchronizing with '%s'" % workspacename
)
synced, cloudrefs = serv.updatereferences(
reponame,
workspacename,
lastsyncstate.version,
lastsyncstate.heads,
newcloudheads,
lastsyncstate.bookmarks.keys(),
newcloudbookmarks,
obsmarkers,
)
if synced:
lastsyncstate.update(
cloudrefs.version,
newcloudheads,
newcloudbookmarks,
newomittedheads,
newomittedbookmarks,
maxage,
remotepath,
)
if obsmarkers:
obsmarkersmod.clearsyncingobsmarkers(repo)
# Update the backup bookmarks with any changes we have made by syncing.
backupbookmarks.pushbackupbookmarks(repo, dest, **opts)
backuplock.progresscomplete(repo)
if pushfailures:
raise ccerror.SynchronizationError(
ui, _("%d heads could not be pushed") % len(pushfailures)
)
ui.status(_("commits synchronized\n"), component="commitcloud")
# check that Scm Service is running and a subscription exists
subscription.SubscriptionManager(repo).checksubscription()
if failed:
failedset = set(repo.nodes("%ld::", failed))
if len(failedset) == 1:
repo.ui.warn(
_("failed to synchronize %s\n") % nodemod.short(failedset.pop()),
component="commitcloud",
)
else:
repo.ui.warn(
_("failed to synchronize %d commits\n") % len(failedset),
component="commitcloud",
)
else:
ui.status(_("commits synchronized\n"), component="commitcloud")
elapsed = time.time() - start
ui.status(_("finished in %0.2f sec\n") % elapsed)
# Check that Scm Service is running and a subscription exists
subscription.SubscriptionManager(repo).checksubscription()
return _maybeupdateworkingcopy(repo, startnode)
def _maybeupdateworkingcopy(repo, currentnode):
ui = repo.ui
def maybeupdateworkingcopy(ui, repo, currentnode):
if repo["."].node() != currentnode:
return 0
destination = finddestinationnode(repo, currentnode)
successors = list(repo.nodes("successors(%n) - obsolete()", currentnode))
if destination == currentnode:
if len(successors) == 0:
return 0
if destination and destination in repo:
if len(successors) == 1:
destination = successors[0]
if destination not in repo or destination == currentnode:
return 0
ui.status(
_("current revision %s has been moved remotely to %s\n")
% (nodemod.short(currentnode), nodemod.short(destination)),
@ -353,7 +187,10 @@ def maybeupdateworkingcopy(ui, repo, currentnode):
nodemod.short(destination),
),
)
return _update(ui, repo, destination)
ui.status(_("updating to %s\n") % nodemod.short(destination))
return hg.updatetotally(
ui, repo, destination, destination, updatecheck="noconflict"
)
else:
hintutil.trigger("commitcloud-update-on-move")
else:
@ -368,67 +205,7 @@ def maybeupdateworkingcopy(ui, repo, currentnode):
return 0
def verifybackedupheads(repo, remotepath, oldremotepath, getconnection, heads):
if not heads:
return
backedupheadsremote = {
head
for head, backedup in zip(
heads, dependencies.infinitepush.isbackedupnodes(getconnection, heads)
)
if backedup
}
notbackedupheads = set(heads) - backedupheadsremote
notbackeduplocalheads = {head for head in notbackedupheads if head in repo}
if notbackeduplocalheads:
backingup = list(notbackeduplocalheads)
backuplock.progressbackingup(repo, [nodemod.bin(node) for node in backingup])
repo.ui.status(_("pushing to %s\n") % remotepath)
dependencies.infinitepush.pushbackupbundlestacks(
repo.ui, repo, getconnection, backingup
)
recordbackup(repo.ui, repo, remotepath, backingup)
if len(notbackedupheads) != len(notbackeduplocalheads):
missingheads = list(notbackedupheads - notbackeduplocalheads)
repo.ui.status(
_("some heads are missing at %s\n") % remotepath, component="commitcloud"
)
backuplock.progresspulling(repo, [nodemod.bin(node) for node in missingheads])
pullcmd, pullopts = ccutil.getcommandandoptions("^pull")
pullopts["rev"] = missingheads
pullcmd(repo.ui, repo.unfiltered(), oldremotepath, **pullopts)
backingup = list(missingheads)
backuplock.progressbackingup(repo, [nodemod.bin(node) for node in backingup])
repo.ui.status(_("pushing to %s\n") % remotepath)
dependencies.infinitepush.pushbackupbundlestacks(
repo.ui, repo, getconnection, backingup
)
recordbackup(repo.ui, repo, remotepath, backingup)
return 0
def finddestinationnode(repo, startnode):
nodes = list(repo.nodes("successors(%n) - obsolete()", startnode))
if len(nodes) == 0:
return startnode
elif len(nodes) == 1:
return nodes[0]
else:
return None
def recordbackup(ui, repo, remotepath, newheads):
"""Record that the given heads are already backed up."""
state = backupstate.BackupState(repo, remotepath)
state.update([nodemod.bin(head) for head in newheads])
def _applycloudchanges(ui, repo, remotepath, lastsyncstate, cloudrefs, maxage=None):
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage):
pullcmd, pullopts = ccutil.getcommandandoptions("^pull")
try:
@ -515,7 +292,8 @@ def _applycloudchanges(ui, repo, remotepath, lastsyncstate, cloudrefs, maxage=No
), extensions.wrappedfunction(
remotenames, "pullremotenames", _pullremotenames
) if remotenames else util.nullcontextmanager():
pullcmd(ui, repo, remotepath, **pullopts)
pullcmd(repo.ui, repo, remotepath, **pullopts)
else:
with repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:
omittedbookmarks.extend(
@ -527,6 +305,46 @@ def _applycloudchanges(ui, repo, remotepath, lastsyncstate, cloudrefs, maxage=No
repo, [nodemod.bin(n) for n in newvisibleheads]
)
# Obsmarker sharing is unreliable. Some of the commits that should now
# be visible might be hidden still, and some commits that should be
# hidden might still be visible. Create local obsmarkers to resolve
# this.
if obsolete.isenabled(repo, obsolete.createmarkersopt):
unfi = repo.unfiltered()
# Commits that are only visible in the cloud are commits that are
# ancestors of the cloud heads but are hidden locally.
cloudvisibleonly = list(
unfi.set(
"draft() & ::%ls & hidden()",
[head for head in cloudrefs.heads if head not in omittedheads],
)
)
# Commits that are only hidden in the cloud are commits that are
# ancestors of the previous cloud heads that are not ancestors of the
# current cloud heads, but have not been hidden or obsoleted locally.
cloudhiddenonly = list(
unfi.set(
"(draft() & ::%ls) - (draft() & ::%ls) - hidden() - obsolete()",
[head for head in lastsyncstate.heads if head not in omittedheads],
[head for head in cloudrefs.heads if head not in omittedheads],
)
)
if cloudvisibleonly or cloudhiddenonly:
repo.ui.warn(
_(
"detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
)
% (
", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
)
)
repo._commitcloudskippendingobsmarkers = True
with repo.lock():
obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
obsolete.revive(cloudvisibleonly)
repo._commitcloudskippendingobsmarkers = False
# We have now synced the repo to the cloud version. Store this.
lastsyncstate.update(
cloudrefs.version,
@ -535,82 +353,12 @@ def _applycloudchanges(ui, repo, remotepath, lastsyncstate, cloudrefs, maxage=No
omittedheads,
omittedbookmarks,
maxage,
remotepath,
)
# Also update infinitepush state. These new heads are already backed up,
# Also update backup state. These new heads are already backed up,
# otherwise the server wouldn't have told us about them.
recordbackup(ui, repo, remotepath, newheads)
def _checkomissions(ui, repo, remotepath, lastsyncstate):
"""check omissions are still not available locally
Check that the commits that have been deliberately omitted are still not
available locally. If they are now available (e.g. because the user pulled
them manually), then remove the tracking of those heads being omitted, and
restore any bookmarks that can now be restored.
"""
unfi = repo.unfiltered()
lastomittedheads = set(lastsyncstate.omittedheads)
lastomittedbookmarks = set(lastsyncstate.omittedbookmarks)
omittedheads = set()
omittedbookmarks = set()
changes = []
for head in lastomittedheads:
if head not in repo:
omittedheads.add(head)
for name in lastomittedbookmarks:
# bookmark might be removed from cloud workspace by someone else
if name not in lastsyncstate.bookmarks:
continue
node = lastsyncstate.bookmarks[name]
if node in unfi:
changes.append((name, nodemod.bin(node)))
else:
omittedbookmarks.add(name)
if omittedheads != lastomittedheads or omittedbookmarks != lastomittedbookmarks:
lastsyncstate.update(
lastsyncstate.version,
lastsyncstate.heads,
lastsyncstate.bookmarks,
list(omittedheads),
list(omittedbookmarks),
lastsyncstate.maxage,
remotepath,
)
if changes:
with repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:
repo._bookmarks.applychanges(repo, tr, changes)
def _update(ui, repo, destination):
# update to new head with merging local uncommited changes
ui.status(_("updating to %s\n") % nodemod.short(destination))
updatecheck = "noconflict"
return hg.updatetotally(ui, repo, destination, destination, updatecheck=updatecheck)
def _filterpushside(ui, repo, pushheads, localheads, lastsyncstateheads):
"""filter push side to include only the specified push heads to the delta"""
# local - allowed - synced
skipped = set(localheads) - set(pushheads) - set(lastsyncstateheads)
if skipped:
def firstline(hexnode):
return templatefilters.firstline(repo[hexnode].description())[:50]
skippedlist = "\n".join(
[" %s %s" % (hexnode[:16], firstline(hexnode)) for hexnode in skipped]
)
ui.status(
_("push filter: list of unsynced local heads that will be skipped\n%s\n")
% skippedlist,
component="commitcloud",
)
return list(set(localheads) & (set(lastsyncstateheads) | set(pushheads)))
state = backupstate.BackupState(repo, remotepath)
state.update([nodemod.bin(head) for head in newheads])
def _mergebookmarks(repo, tr, cloudbookmarks, lastsyncstate):
@ -696,11 +444,6 @@ def _mergebookmarks(repo, tr, cloudbookmarks, lastsyncstate):
return list(omittedbookmarks)
def _mergeobsmarkers(repo, tr, obsmarkers):
tr._commitcloudskippendingobsmarkers = True
repo.obsstore.add(tr, obsmarkers)
def _forkname(ui, name, othernames):
hostname = ui.config("commitcloud", "hostname", socket.gethostname())
@ -715,3 +458,152 @@ def _forkname(ui, name, othernames):
candidate = "%s-%s%s" % (name, hostname, "-%s" % n if n != 0 else "")
if candidate not in othernames:
return candidate
def _mergeobsmarkers(repo, tr, obsmarkers):
if obsolete.isenabled(repo, obsolete.createmarkersopt):
tr._commitcloudskippendingobsmarkers = True
repo.obsstore.add(tr, obsmarkers)
def _checkomissions(repo, remotepath, lastsyncstate):
"""check omissions are still not available locally
Check that the commits that have been deliberately omitted are still not
available locally. If they are now available (e.g. because the user pulled
them manually), then remove the tracking of those heads being omitted, and
restore any bookmarks that can now be restored.
"""
unfi = repo.unfiltered()
lastomittedheads = set(lastsyncstate.omittedheads)
lastomittedbookmarks = set(lastsyncstate.omittedbookmarks)
omittedheads = set()
omittedbookmarks = set()
changes = []
for head in lastomittedheads:
if head not in repo:
omittedheads.add(head)
for name in lastomittedbookmarks:
# bookmark might be removed from cloud workspace by someone else
if name not in lastsyncstate.bookmarks:
continue
node = lastsyncstate.bookmarks[name]
if node in unfi:
changes.append((name, nodemod.bin(node)))
else:
omittedbookmarks.add(name)
if omittedheads != lastomittedheads or omittedbookmarks != lastomittedbookmarks:
lastsyncstate.update(
lastsyncstate.version,
lastsyncstate.heads,
lastsyncstate.bookmarks,
list(omittedheads),
list(omittedbookmarks),
lastsyncstate.maxage,
)
if changes:
with repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:
repo._bookmarks.applychanges(repo, tr, changes)
def _submitlocalchanges(repo, reponame, workspacename, lastsyncstate, failed, serv):
localheads = _getheads(repo)
localbookmarks = _getbookmarks(repo)
obsmarkers = obsmarkersmod.getsyncingobsmarkers(repo)
# If any commits failed to back up, exclude them. Revert any bookmark changes
# that point to failed commits.
if failed:
localheads = [
nodemod.hex(head)
for head in repo.nodes("heads(draft() & ::%ls - %ld::)", localheads, failed)
]
failedset = set(repo.nodes("draft() & %ld::", failed))
for name, bookmarknode in localbookmarks.items():
if nodemod.bin(bookmarknode) in failedset:
if name in lastsyncstate.bookmarks:
localbookmarks[name] = lastsyncstate.bookmarks[name]
else:
del localbookmarks[name]
# Work out what we should have synced locally (and haven't deliberately
# omitted)
omittedheads = set(lastsyncstate.omittedheads)
omittedbookmarks = set(lastsyncstate.omittedbookmarks)
localsyncedheads = [
head for head in lastsyncstate.heads if head not in omittedheads
]
localsyncedbookmarks = {
name: node
for name, node in lastsyncstate.bookmarks.items()
if name not in omittedbookmarks
}
if (
set(localheads) == set(localsyncedheads)
and localbookmarks == localsyncedbookmarks
and lastsyncstate.version != 0
and not obsmarkers
):
# Nothing to send.
return True, None
# The local repo has changed. We must send these changes to the
# cloud.
# Work out the new cloud heads and bookmarks by merging in the
# omitted items. We need to preserve the ordering of the cloud
# heads so that smartlogs generally match.
localandomittedheads = set(localheads).union(lastsyncstate.omittedheads)
newcloudheads = util.removeduplicates(
[head for head in lastsyncstate.heads if head in localandomittedheads]
+ localheads
)
newcloudbookmarks = {
name: localbookmarks.get(name, lastsyncstate.bookmarks.get(name))
for name in set(localbookmarks.keys()).union(lastsyncstate.omittedbookmarks)
}
# Work out what the new omitted heads and bookmarks are.
newomittedheads = list(set(newcloudheads).difference(localheads))
newomittedbookmarks = list(
set(newcloudbookmarks.keys()).difference(localbookmarks.keys())
)
# Check for workspace oscillation. This is where we try to revert the
# workspace back to how it was immediately prior to applying the cloud
# changes at the start of the sync. This is usually an error caused by
# inconsistent obsmarkers.
if lastsyncstate.oscillating(newcloudheads, newcloudbookmarks):
raise ccerror.SynchronizationError(
repo.ui,
_(
"oscillating commit cloud workspace detected.\n"
"check for commits that are visible in one repo but hidden in another,\n"
"and hide or unhide those commits in all places."
),
)
backuplock.progress(repo, "finishing synchronizing with '%s'" % workspacename)
synced, cloudrefs = serv.updatereferences(
reponame,
workspacename,
lastsyncstate.version,
lastsyncstate.heads,
newcloudheads,
lastsyncstate.bookmarks.keys(),
newcloudbookmarks,
obsmarkers,
)
if synced:
lastsyncstate.update(
cloudrefs.version,
newcloudheads,
newcloudbookmarks,
newomittedheads,
newomittedbookmarks,
lastsyncstate.maxage,
)
obsmarkersmod.clearsyncingobsmarkers(repo)
return synced, cloudrefs

View File

@ -40,6 +40,7 @@ class SyncState(object):
def __init__(self, repo, workspacename):
self.filename = self._filename(workspacename)
self.repo = repo
self.prevstate = None
if repo.svfs.exists(self.filename):
with repo.svfs.open(self.filename, "r") as f:
try:
@ -59,7 +60,6 @@ class SyncState(object):
n.encode("utf-8") for n in data.get("omittedbookmarks", ())
]
self.maxage = data.get("maxage", None)
self.remotepath = data.get("remotepath", None)
self.lastupdatetime = data.get("lastupdatetime", None)
else:
self.version = 0
@ -68,7 +68,6 @@ class SyncState(object):
self.omittedheads = []
self.omittedbookmarks = []
self.maxage = None
self.remotepath = None
self.lastupdatetime = None
def update(
@ -79,7 +78,6 @@ class SyncState(object):
newomittedheads,
newomittedbookmarks,
newmaxage,
remotepath,
):
data = {
"version": newversion,
@ -88,11 +86,11 @@ class SyncState(object):
"omittedheads": newomittedheads,
"omittedbookmarks": newomittedbookmarks,
"maxage": newmaxage,
"remotepath": remotepath,
"lastupdatetime": time.time(),
}
with self.repo.svfs.open(self.filename, "w", atomictemp=True) as f:
json.dump(data, f)
self.prevstate = (self.version, self.heads, self.bookmarks)
self.version = newversion
self.heads = newheads
self.bookmarks = newbookmarks
@ -100,13 +98,19 @@ class SyncState(object):
self.omittedbookmarks = newomittedbookmarks
self.maxage = newmaxage
def updateremotepath(self, remotepath):
self.update(
self.version,
self.heads,
self.bookmarks,
self.omittedheads,
self.omittedbookmarks,
self.maxage,
remotepath,
)
def oscillating(self, newheads, newbookmarks):
"""detect oscillating workspaces
Returns true if updating the cloud state to the new heads or bookmarks
would be equivalent to updating back to the immediate previous
version.
"""
if self.prevstate is not None and self.lastupdatetime is not None:
prevversion, prevheads, prevbookmarks = self.prevstate
return (
prevversion == self.version - 1
and prevheads == newheads
and prevbookmarks == newbookmarks
and self.lastupdatetime > time.time() - 60
)
return False

View File

@ -105,17 +105,8 @@ Check that backup doesn't interfere with commit cloud
$ hg cloud join
commitcloud: this repository is now connected to the 'user/test/default' workspace for the 'master' repo
commitcloud: synchronizing 'master' with 'user/test/default'
backing up stack rooted at * (glob)
remote: pushing 3 commits:
remote: * A1 (glob)
remote: * A2 (glob)
remote: * A3 (glob)
backing up stack rooted at * (glob)
remote: pushing 2 commits:
remote: * B1 (glob)
remote: * B2 (glob)
commitcloud: commits synchronized
finished in *.*sec (glob)
finished in * (glob)
$ hg up $B2
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
@ -130,12 +121,18 @@ Check that backup doesn't interfere with commit cloud
$ hg cloud sync
commitcloud: synchronizing 'master' with 'user/test/default'
backing up stack rooted at * (glob)
remote: pushing 3 commits:
remote: * B1 (glob)
remote: * B2 (glob)
remote: * B3 (glob)
commitcloud: commits synchronized
finished in *.* (glob)
finished in * (glob)
$ mkcommit B4
7b520430ff426d7f4a6c305bef4a90507afe1b32
$ hg cloud sync
commitcloud: synchronizing 'master' with 'user/test/default'
backing up stack rooted at 458a3fc7650d
remote: pushing 4 commits:
remote: 458a3fc7650d B1
remote: ecd738f5fb6c B2
remote: 901656c16420 B3
remote: 7b520430ff42 B4
commitcloud: commits synchronized
finished in * (glob)

View File

@ -137,9 +137,6 @@ Sync from the second client and `hg unamend` there
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
backing up stack rooted at 1cf4a5a0e8fc
remote: pushing 1 commit:
remote: 1cf4a5a0e8fc feature1
commitcloud: commits synchronized
finished in * (glob)

View File

@ -100,6 +100,8 @@ Fake land the commit
@ 0: df4f53cec30a public 'base'
Rebasing the bookmark will make the draft commit disappear.
$ cd ../client1
$ hg rebase -b foo -d 4
note: not rebasing 1:00422fad0026 "draft-commit" (foo), already in destination as 3:441f69264760 "landed-commit"
@ -123,13 +125,8 @@ Fake land the commit
| Differential Revision: https://phabricator.fb.com/D1234'
o 2: 031d760782fb public 'public-commit-1'
|
| o 1: 00422fad0026 draft 'draft-commit
|/ Differential Revision: https://phabricator.fb.com/D1234'
@ 0: df4f53cec30a public 'base'
BUG! Commit 1 shouldn't be visible anymore.
Sync in client2. This will fail because we don't have the landed commit, so
we will need to pull.
@ -160,6 +157,9 @@ we will need to pull.
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
The draft commit is also gone from here, and the workspace is stable.
$ tglogp
o 4: 67d363c9001e public 'public-commit-2' foo
|
@ -167,13 +167,9 @@ we will need to pull.
| Differential Revision: https://phabricator.fb.com/D1234'
o 2: 031d760782fb public 'public-commit-1'
|
| o 1: 00422fad0026 draft 'draft-commit
|/ Differential Revision: https://phabricator.fb.com/D1234'
@ 0: df4f53cec30a public 'base'
BUG! Commit 1 shouldn't be visible here, either!
$ cd ../client1
$ hg cloud sync -q
$ tglogp
@ -183,7 +179,5 @@ BUG! Commit 1 shouldn't be visible here, either!
| Differential Revision: https://phabricator.fb.com/D1234'
o 2: 031d760782fb public 'public-commit-1'
|
| o 1: 00422fad0026 draft 'draft-commit
|/ Differential Revision: https://phabricator.fb.com/D1234'
@ 0: df4f53cec30a public 'base'

View File

@ -97,80 +97,74 @@ Make a commit in the second client, and sync it
$ cd ..
Return to the first client and configure a different paths.infinitepush
See how the migration is going
It will push its commit to the new server, but will fail to sync
because it can't access the second commit.
$ cd client1
$ mkcommit "commit3"
$ hg cloud sync --config paths.infinitepush=ssh://user@dummy/server1
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commit storage has been switched
from: ssh://user@dummy/server
to: ssh://user@dummy/server1
commitcloud: some heads are missing at ssh://user@dummy/server1
pulling from ssh://user@dummy/server
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 2 files (+1 heads)
new changesets 02f6fc2b7154
(run 'hg heads' to see heads, 'hg merge' to merge)
pushing to ssh://user@dummy/server1
backing up stack rooted at fa5d62c46fd7
remote: pushing 2 commits:
remote: fa5d62c46fd7 commit1
remote: 02f6fc2b7154 commit2
backing up stack rooted at fa5d62c46fd7
remote: pushing 2 commits:
remote: fa5d62c46fd7 commit1
remote: 26d5a99991bd commit3
commitcloud: commits synchronized
finished in * sec (glob)
pulling from ssh://user@dummy/server1
abort: unknown revision '02f6fc2b715444d7df09bd859e1d4877f9ef9946'!
[255]
$ cd ..
Return to client2. The old path will not work, since the new commits have not been backed up there
New path should work fine
Return to client2. We can still sync using the old server.
$ cd client2
$ mkcommit "commit4"
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
pulling from ssh://user@dummy/server
abort: unknown revision '26d5a99991bd2ef9c7e76874a58f8a4dca6f6710'!
[255]
$ hg cloud sync --config paths.infinitepush=ssh://user@dummy/server1
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commit storage has been switched
from: ssh://user@dummy/server
to: ssh://user@dummy/server1
pulling from ssh://user@dummy/server1
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 2 files (+1 heads)
new changesets 26d5a99991bd
(run 'hg heads' to see heads, 'hg merge' to merge)
backing up stack rooted at fa5d62c46fd7
remote: pushing 3 commits:
remote: fa5d62c46fd7 commit1
remote: 02f6fc2b7154 commit2
remote: c701070be855 commit4
commitcloud: commits synchronized
finished in * sec (glob)
finished in * (glob)
$ hg cloud sync # backwards migration
Configure the new server on this client. It will now send all of its commits.
$ hg cloud sync --config paths.infinitepush=ssh://user@dummy/server1
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commit storage has been switched
from: ssh://user@dummy/server1
to: ssh://user@dummy/server
pushing to ssh://user@dummy/server
backing up stack rooted at fa5d62c46fd7
remote: pushing 4 commits:
remote: pushing 3 commits:
remote: fa5d62c46fd7 commit1
remote: 02f6fc2b7154 commit2
remote: c701070be855 commit4
commitcloud: commits synchronized
finished in * (glob)
$ cd ..
The first client can now successfully sync using the new server.
$ cd client1
$ hg cloud sync --config paths.infinitepush=ssh://user@dummy/server1
commitcloud: synchronizing 'server' with 'user/test/default'
pulling from ssh://user@dummy/server1
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 3 files (+1 heads)
new changesets 02f6fc2b7154:c701070be855
(run 'hg heads' to see heads, 'hg merge' to merge)
commitcloud: commits synchronized
finished in * (glob)
Switching back to the previous server still works, and the missing commits
are backed up there.
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
backing up stack rooted at fa5d62c46fd7
remote: pushing 2 commits:
remote: fa5d62c46fd7 commit1
remote: 26d5a99991bd commit3
commitcloud: commits synchronized
finished in * sec (glob)
finished in * (glob)

View File

@ -71,7 +71,7 @@ Connect the first client
finished in * (glob)
Make some commits
$ hg debugdrawdag <<EOS
$ drawdag <<EOS
> C E G
> | | |
> B D F
@ -117,7 +117,7 @@ Create a new client that isn't connected yet
$ cat shared.rc >> client2/.hg/hgrc
Share commits A B C D and E into the repo manually with a bundle
$ hg bundle -q -R client1 --base 0 -r 'A+B+C+D+E' ABCDE.hg
$ hg bundle -q -R client1 --base 0 -r "$A+$B+$C+$D+$E" ABCDE.hg
$ hg unbundle -R client2 ABCDE.hg
adding changesets
adding manifests
@ -159,11 +159,11 @@ Connect to commit cloud
added 2 changesets with 2 changes to 3 files (+1 heads)
new changesets 64b4d9634423:878302dcadc7
(run 'hg heads' to see heads, 'hg merge' to merge)
detected obsmarker inconsistency (fixing by obsoleting [] and reviving [449486ddff7a, 65299708466c, 27ad02806080])
commitcloud: commits synchronized
finished in * (glob)
Syncing in the two repos causes the commits to be revived, and the cloud
workspace does not oscillate between the two views.
The commits have been revived, so syncing does not oscillate between the two views.
$ cd ..
$ hg -R client1 cloud sync
@ -364,47 +364,47 @@ Ensure everything is synced
Create a commit that was obsoleted without the commitcloud extension loaded, but is bookmarked.
$ hg hide 5 --config extensions.commitcloud=!
hiding commit 27ad02806080 "E"
$ hg hide $G --config extensions.commitcloud=!
hiding commit 878302dcadc7 "G"
1 changesets hidden
$ hg book --hidden -r 5 hiddenbook
$ tglogp -r 3::
x 5: 27ad02806080 draft 'E' hiddenbook
$ hg book --hidden -r $G hiddenbook
$ tglogp -r $F::
x 7: 878302dcadc7 draft 'G' hiddenbook
|
o 3: 449486ddff7a draft 'D'
o 6: 64b4d9634423 draft 'F'
|
~
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r 3::
x 5: 27ad02806080 draft 'E' hiddenbook
$ tglogp -r $F::
x 7: 878302dcadc7 draft 'G' hiddenbook
|
o 3: 449486ddff7a draft 'D'
o 6: 64b4d9634423 draft 'F'
|
~
$ python $TESTTMP/dumpcommitcloudmetadata.py
version: 4
bookmarks:
foo => 5817a557f93f46ab290e8571c89624ff856130c0
hiddenbook => 27ad028060800678c2de95fea2e826bbd4bf2c21
hiddenbook => 878302dcadc7a800f326d8e06a5e9beec77e5a1c
heads:
65299708466caa8f13c05d82e76d611c183defee
27ad028060800678c2de95fea2e826bbd4bf2c21
878302dcadc7a800f326d8e06a5e9beec77e5a1c
Clients are now in sync.
Clients are now in sync, except for the obsoletion state of the commit.
$ cd ../client1
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r 3::
o 6: 27ad02806080 draft 'E' hiddenbook
$ tglogp -r $F::
o 7: 878302dcadc7 draft 'G' hiddenbook
|
o 3: 449486ddff7a draft 'D'
o 4: 64b4d9634423 draft 'F'
|
~
@ -413,10 +413,10 @@ Clients are now in sync.
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r 3::
x 5: 27ad02806080 draft 'E' hiddenbook
$ tglogp -r $F::
x 7: 878302dcadc7 draft 'G' hiddenbook
|
o 3: 449486ddff7a draft 'D'
o 6: 64b4d9634423 draft 'F'
|
~
@ -425,10 +425,129 @@ Clients are now in sync.
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r 3::
o 6: 27ad02806080 draft 'E' hiddenbook
$ tglogp -r $F::
o 7: 878302dcadc7 draft 'G' hiddenbook
|
o 3: 449486ddff7a draft 'D'
o 4: 64b4d9634423 draft 'F'
|
~
$ python $TESTTMP/dumpcommitcloudmetadata.py
version: 4
bookmarks:
foo => 5817a557f93f46ab290e8571c89624ff856130c0
hiddenbook => 878302dcadc7a800f326d8e06a5e9beec77e5a1c
heads:
65299708466caa8f13c05d82e76d611c183defee
27ad028060800678c2de95fea2e826bbd4bf2c21
878302dcadc7a800f326d8e06a5e9beec77e5a1c
Delete the bookmark on client 1, and sync it.
$ hg book -d hiddenbook
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
On client 2, cloud sync will remove the bookmark. Since the commit is obsolete
it is also removed as a head. (It remains visible in smartlog because of
hiddenoverride, but commit cloud ignores it).
$ cd ../client2
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r $F::
x 7: 878302dcadc7 draft 'G'
|
o 6: 64b4d9634423 draft 'F'
|
~
In client 1 the obsmarker inconsistency is finally detected and fixed.
$ cd ../client1
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
detected obsmarker inconsistency (fixing by obsoleting [878302dcadc7] and reviving [])
commitcloud: commits synchronized
finished in * (glob)
$ tglogp -r $F::
o 4: 64b4d9634423 draft 'F'
|
~
Everything is stable now.
$ cd ../client2
$ hg cloud sync -q
$ cd ../client1
$ hg cloud sync -q
$ cd ../client2
$ hg cloud sync -q
$ tglogp -r $F::
x 7: 878302dcadc7 draft 'G'
|
o 6: 64b4d9634423 draft 'F'
|
~
$ python $TESTTMP/dumpcommitcloudmetadata.py
version: 6
bookmarks:
foo => 5817a557f93f46ab290e8571c89624ff856130c0
heads:
65299708466caa8f13c05d82e76d611c183defee
27ad028060800678c2de95fea2e826bbd4bf2c21
64b4d963442377cb7aa4b0997eeca249ac8643c9
Make a new commit. Copy it to the other client via a bundle, and then hide it
with commit cloud inactive.
$ cd ../client1
$ hg up -q 0
$ echo x > X
$ hg commit -Aqm X
$ cd ..
$ hg bundle -q -R client1 --base 0 -r tip X.hg
$ hg unbundle -R client2 X.hg
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
new changesets 48be23e24839
(run 'hg heads .' to see heads, 'hg merge' to merge)
$ cd client1
$ hg hide tip --config extensions.commitcloud=!
hiding commit 48be23e24839 "X"
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
working directory now at df4f53cec30a
1 changesets hidden
Cloud sync should act as if it never saw the commit.
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
commitcloud: commits synchronized
finished in * (glob)
But client2 will push it as if it was a new commit.
$ cd ../client2
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
backing up stack rooted at 48be23e24839
remote: pushing 1 commit:
remote: 48be23e24839 X
commitcloud: commits synchronized
finished in * (glob)
Now client1 will revive the commit.
$ cd ../client1
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
detected obsmarker inconsistency (fixing by obsoleting [] and reviving [48be23e24839])
commitcloud: commits synchronized
finished in * (glob)

View File

@ -821,10 +821,8 @@ Simulate failure to backup a commit by setting the server maxbundlesize limit ve
remote: 715c1454ae33 stack commit 2
remote: 9bd68ef10d6b toobig
push of head 9bd68ef10d6b failed: bundle is too big: 1695 bytes. max allowed size is 0 MB
abort: commitcloud: failed to synchronize commits: '2 heads could not be pushed'
(please retry 'hg cloud sync')
(please contact The Test Team @ FB if this error persists)
[255]
commitcloud: failed to synchronize 2 commits
finished in * (glob)
$ hg cloud check -r .
9bd68ef10d6bdb8ebf3273a7b91bc4f3debe2a87 not backed up

View File

@ -150,6 +150,12 @@ Now cloud sync. The sets of commits should be merged.
$ hg cloud sync
commitcloud: synchronizing 'server' with 'user/test/default'
backing up stack rooted at dae3b312bb78
remote: pushing 4 commits:
remote: dae3b312bb78 Z
remote: c70a9bd6bfd1 E
remote: ba83c5428cb2 F
remote: 6caded0e9807 D
pulling from ssh://user@dummy/server
searching for changes
adding changesets
@ -162,12 +168,6 @@ Now cloud sync. The sets of commits should be merged.
added 1 changesets with 1 changes to 2 files (+1 heads)
new changesets d8fc5ae9b7ef:dd114d9b2f9e
(run 'hg heads .' to see heads, 'hg merge' to merge)
backing up stack rooted at dae3b312bb78
remote: pushing 4 commits:
remote: dae3b312bb78 Z
remote: c70a9bd6bfd1 E
remote: ba83c5428cb2 F
remote: 6caded0e9807 D
commitcloud: commits synchronized
finished in * sec (glob)
$ tglogm