# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

""" store draft commits in the cloud
|
|
|
|
|
2018-09-10 21:55:43 +03:00
|
|
|
Configs::
|
|
|
|
|
2016-11-01 11:44:38 +03:00
|
|
|
[infinitepush]
|
|
|
|
# Server-side and client-side option. Pattern of the infinitepush bookmark
|
|
|
|
branchpattern = PATTERN
|
|
|
|
|
|
|
|
# Server or client
|
|
|
|
server = False
|
|
|
|
|
|
|
|
# Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
|
|
|
|
indextype = disk
|
|
|
|
|
|
|
|
# Server-side option. Used only if indextype=sql.
|
|
|
|
# Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
|
|
|
|
sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
|
|
|
|
|
|
|
|
# Server-side option. Used only if indextype=disk.
|
|
|
|
# Filesystem path to the index store
|
|
|
|
indexpath = PATH
|
|
|
|
|
|
|
|
# Server-side option. Possible values: 'disk' or 'external'
|
|
|
|
# Fails if not set
|
|
|
|
storetype = disk
|
|
|
|
|
|
|
|
# Server-side option.
|
|
|
|
# Path to the binary that will save bundle to the bundlestore
|
|
|
|
# Formatted cmd line will be passed to it (see `put_args`)
|
|
|
|
put_binary = put
|
|
|
|
|
2018-01-10 19:10:27 +03:00
|
|
|
# Server-side option. Used only if storetype=external.
|
2016-11-01 11:44:38 +03:00
|
|
|
# Format cmd-line string for put binary. Placeholder: {filename}
|
|
|
|
put_args = {filename}
|
|
|
|
|
|
|
|
# Server-side option.
|
|
|
|
# Path to the binary that get bundle from the bundlestore.
|
|
|
|
# Formatted cmd line will be passed to it (see `get_args`)
|
|
|
|
get_binary = get
|
|
|
|
|
2018-01-10 19:10:27 +03:00
|
|
|
# Server-side option. Used only if storetype=external.
|
2016-11-01 11:44:38 +03:00
|
|
|
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
|
|
|
|
get_args = {filename} {handle}
|
|
|
|
|
|
|
|
# Server-side option
|
|
|
|
logfile = FIlE
|
|
|
|
|
|
|
|
# Server-side option
|
|
|
|
loglevel = DEBUG
|
2016-11-21 19:25:10 +03:00
|
|
|
|
2017-01-24 21:32:44 +03:00
|
|
|
# Server-side option. Used only if indextype=sql.
|
|
|
|
# Sets mysql wait_timeout option.
|
|
|
|
waittimeout = 300
|
|
|
|
|
|
|
|
# Server-side option. Used only if indextype=sql.
|
|
|
|
# Sets mysql innodb_lock_wait_timeout option.
|
|
|
|
locktimeout = 120
|
|
|
|
|
2018-03-12 22:59:03 +03:00
|
|
|
# Server-side option. Used only if indextype=sql.
|
|
|
|
# limit number of days to generate warning on trying to
|
|
|
|
# fetch too old commit for hg up / hg pull with short hash rev
|
|
|
|
shorthasholdrevthreshold = 31
|
|
|
|
|
2017-02-23 15:25:02 +03:00
|
|
|
# Server-side option. Used only if indextype=sql.
|
|
|
|
# Name of the repository
|
|
|
|
reponame = ''
|
|
|
|
|
2017-02-24 16:41:53 +03:00
|
|
|
# Client-side option. Used by --list-remote option. List of remote scratch
|
|
|
|
# patterns to list if no patterns are specified.
|
|
|
|
defaultremotepatterns = ['*']
|
|
|
|
|
2017-06-20 15:40:01 +03:00
|
|
|
# Server-side option. If bookmark that was pushed matches
|
|
|
|
# `fillmetadatabranchpattern` then background
|
|
|
|
# `hg debugfillinfinitepushmetadata` process will save metadata
|
|
|
|
# in infinitepush index for nodes that are ancestor of the bookmark.
|
|
|
|
fillmetadatabranchpattern = ''
|
|
|
|
|
2017-09-22 18:55:56 +03:00
|
|
|
# Instructs infinitepush to forward all received bundle2 parts to the
|
|
|
|
# bundle for storage. Defaults to False.
|
|
|
|
storeallparts = True
|
|
|
|
|
2018-04-04 14:50:08 +03:00
|
|
|
# Server-side option. Maximum acceptable bundle size in megabytes.
|
|
|
|
maxbundlesize = 500
|
|
|
|
|
2018-04-30 12:46:59 +03:00
|
|
|
# Which compression algorithm to use for infinitepush bundles.
|
|
|
|
bundlecompression = ZS
|
|
|
|
|
2017-02-23 15:25:02 +03:00
|
|
|
[remotenames]
|
|
|
|
# Client-side option
|
|
|
|
# This option should be set only if remotenames extension is enabled.
|
|
|
|
# Whether remote bookmarks are tracked by remotenames extension.
|
|
|
|
bookmarks = True
|
2018-05-29 21:13:08 +03:00
|
|
|
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import json
import logging
import os
import random
import re
import socket
import struct
import subprocess
import sys
import tempfile
import time

from edenscm.mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    hintutil,
    i18n,
    localrepo,
    node as nodemod,
    obsolete,
    peer,
    phases,
    pushkey,
    ui as uimod,
    util,
    wireproto,
)

from . import bundleparts, common, infinitepushcommands

copiedpart = bundleparts.copiedpart
getscratchbranchparts = bundleparts.getscratchbranchparts
scratchbookmarksparttype = bundleparts.scratchbookmarksparttype
scratchbranchparttype = bundleparts.scratchbranchparttype

batchable = peer.batchable
bin = nodemod.bin
decodelist = wireproto.decodelist
encodelist = wireproto.encodelist
future = peer.future
hex = nodemod.hex
_ = i18n._
wrapcommand = extensions.wrapcommand
wrapfunction = extensions.wrapfunction
unwrapfunction = extensions.unwrapfunction
repository = hg.repository

pushrebaseparttype = "b2x:rebase"
experimental = "experimental"
configbookmark = "server-bundlestore-bookmark"
configcreate = "server-bundlestore-create"
configscratchpush = "infinitepush-scratchpush"
confignonforwardmove = "non-forward-move"

cmdtable = infinitepushcommands.cmdtable
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r"^[a-f0-9]+$").search

colortable = {
    "commitcloud.changeset": "green",
    "commitcloud.meta": "bold",
    "commitcloud.commitcloud": "yellow",
}


def _buildexternalbundlestore(ui):
    put_args = ui.configlist("infinitepush", "put_args", [])
    put_binary = ui.config("infinitepush", "put_binary")
    if not put_binary:
        raise error.Abort("put binary is not specified")
    get_args = ui.configlist("infinitepush", "get_args", [])
    get_binary = ui.config("infinitepush", "get_binary")
    if not get_binary:
        raise error.Abort("get binary is not specified")
    from . import store

    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)


def _buildsqlindex(ui):
    sqlhost = ui.config("infinitepush", "sqlhost")
    if not sqlhost:
        raise error.Abort(_("please set infinitepush.sqlhost"))
    host, port, db, user, password = sqlhost.split(":")
    reponame = ui.config("infinitepush", "reponame")
    if not reponame:
        raise error.Abort(_("please set infinitepush.reponame"))

    logfile = ui.config("infinitepush", "logfile", "")
    waittimeout = ui.configint("infinitepush", "waittimeout", 300)
    locktimeout = ui.configint("infinitepush", "locktimeout", 120)
    shorthasholdrevthreshold = ui.configint(
        "infinitepush", "shorthasholdrevthreshold", 60
    )
    from . import sqlindexapi

    return sqlindexapi.sqlindexapi(
        reponame,
        host,
        port,
        db,
        user,
        password,
        logfile,
        _getloglevel(ui),
        shorthasholdrevthreshold=shorthasholdrevthreshold,
        waittimeout=waittimeout,
        locktimeout=locktimeout,
    )


def _getloglevel(ui):
    loglevel = ui.config("infinitepush", "loglevel", "DEBUG")
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_("invalid log level %s") % loglevel)
    return numeric_loglevel


def _tryhoist(ui, remotebookmark):
    """returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows using remote
    bookmarks without specifying the remote path. For example, 'hg update
    master' works as well as 'hg update remote/master'. We want to allow the
    same in infinitepush.
    """

    if common.isremotebooksenabled(ui):
        hoist = ui.config("remotenames", "hoist") + "/"
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
    return remotebookmark
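
# For example, if remotenames.hoist is set to "remote" (an illustrative
# value), _tryhoist(ui, "remote/master") returns "master", while a bookmark
# under a different path such as "other/master" is returned unchanged.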


class bundlestore(object):
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config("infinitepush", "storetype", "")
        if storetype == "disk":
            from . import store

            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == "external":
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _("unknown infinitepush store type specified %s") % storetype
            )

        indextype = self._repo.ui.config("infinitepush", "indextype", "")
        if indextype == "disk":
            from . import fileindexapi

            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == "sql":
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _("unknown infinitepush index type specified %s") % indextype
            )
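
# Note on the layout above: the bundlestore pairs a blob `store` (bundle
# payloads, kept on disk or behind external put/get binaries) with an `index`
# (bookmark and node lookups, kept on disk or in MySQL). The two halves are
# configured independently via `storetype` and `indextype`.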


def _isserver(ui):
    return ui.configbool("infinitepush", "server")


def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)


def uisetup(ui):
    # remotenames circumvents the default push implementation entirely, so make
    # sure we load after it so that we wrap it.
    order = extensions._order
    order.remove("infinitepush")
    order.append("infinitepush")
    extensions._order = order


def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)


def commonsetup(ui):
    wireproto.commands["listkeyspatterns"] = (
        wireprotolistkeyspatterns,
        "namespace patterns",
    )
    scratchbranchpat = ui.config("infinitepush", "branchpattern")
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)


def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping["pushkey"]

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping["pushkey"] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping["phase-heads"]
    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
        orighandlephasehandler, *args, **kwargs
    )
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping["phase-heads"] = newphaseheadshandler

    wrapfunction(localrepo.localrepository, "listkeys", localrepolistkeys)
    wireproto.commands["lookup"] = (_lookupwrap(wireproto.commands["lookup"][0]), "key")
    wrapfunction(exchange, "getbundlechunks", getbundlechunks)

    wrapfunction(bundle2, "processparts", processparts)

    if util.safehasattr(wireproto, "_capabilities"):
        extensions.wrapfunction(wireproto, "_capabilities", _capabilities)
    else:
        extensions.wrapfunction(wireproto, "capabilities", _capabilities)


def _capabilities(orig, repo, proto):
    caps = orig(repo, proto)
    caps.append("listkeyspatterns")
    return caps


def clientextsetup(ui):
    entry = wrapcommand(commands.table, "push", _push)
    # Don't add the 'to' arg if it already exists
    if not any(a for a in entry[1] if a[1] == "to"):
        entry[1].append(("", "to", "", _("push revs to this bookmark")))

    if not any(a for a in entry[1] if a[1] == "non-forward-move"):
        entry[1].append(
            (
                "",
                "non-forward-move",
                None,
                _("allows moving a remote bookmark to an " "arbitrary place"),
            )
        )

    if not any(a for a in entry[1] if a[1] == "create"):
        entry[1].append(("", "create", None, _("create a new remote bookmark")))

    entry[1].append(
        ("", "bundle-store", None, _("force push to go to bundle store (EXPERIMENTAL)"))
    )

    bookcmd = extensions.wrapcommand(commands.table, "bookmarks", exbookmarks)
    bookcmd[1].append(
        (
            "",
            "list-remote",
            None,
            "list remote bookmarks. "
            "Positional arguments are interpreted as wildcard patterns. "
            "The only allowed wildcard is '*' at the end of the pattern. "
            "If no positional arguments are specified then it will list "
            'the most "important" remote bookmarks. '
            "Otherwise it will list remote bookmarks "
            "that match at least one pattern "
            "",
        )
    )
    bookcmd[1].append(
        ("", "remote-path", "", "name of the remote path to list the bookmarks")
    )

    wrapcommand(commands.table, "pull", _pull)
    wrapcommand(commands.table, "update", _update)

    wrapfunction(bundle2, "_addpartsfromopts", _addpartsfromopts)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

    # Move infinitepush part before pushrebase part
    # to avoid generation of both parts.
    partorder = exchange.b2partsgenorder
    index = partorder.index("changeset")
    if pushrebaseparttype in partorder:
        index = min(index, partorder.index(pushrebaseparttype))
    partorder.insert(index, partorder.pop(partorder.index(scratchbranchparttype)))


def _showbookmarks(ui, bookmarks, **opts):
    # Copy-paste from commands.py
    fm = ui.formatter("bookmarks", opts)
    for bmark, n in sorted(bookmarks.iteritems()):
        fm.startitem()
        if not ui.quiet:
            fm.plain(" ")
        fm.write("bookmark", "%s", bmark)
        pad = " " * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, "node", pad + " %s", n)
        fm.plain("\n")
    fm.end()


def exbookmarks(orig, ui, repo, *names, **opts):
    pattern = opts.get("list_remote")
    delete = opts.get("delete")
    remotepath = opts.get("remote_path")
    path = ui.paths.getpath(remotepath or None, default=("default"))
    if pattern:
        destpath = path.pushloc or path.loc
        other = hg.peer(repo, opts, destpath)
        if not names:
            raise error.Abort(
                "--list-remote requires a bookmark pattern",
                hint='use "hg book" to get a list of your local bookmarks',
            )
        else:
            fetchedbookmarks = other.listkeyspatterns("bookmarks", patterns=names)
            _showbookmarks(ui, fetchedbookmarks, **opts)
            return
    elif delete and "remotenames" in extensions._extensions:
        existing_local_bms = set(repo._bookmarks.keys())
        scratch_bms = []
        other_bms = []
        for name in names:
            if _scratchbranchmatcher(name) and name not in existing_local_bms:
                scratch_bms.append(name)
            else:
                other_bms.append(name)

        if len(scratch_bms) > 0:
            if remotepath == "":
                remotepath = "default"
            _deleteinfinitepushbookmarks(ui, repo, remotepath, scratch_bms)

        if len(other_bms) > 0 or len(scratch_bms) == 0:
            return orig(ui, repo, *other_bms, **opts)
    else:
        return orig(ui, repo, *names, **opts)


def _addpartsfromopts(orig, ui, repo, bundler, *args, **kwargs):
    """ adds a stream level part to bundle2 storing whether this is an
    infinitepush bundle or not """
    if ui.configbool("infinitepush", "bundle-stream", False):
        bundler.addparam("infinitepush", True)
    return orig(ui, repo, bundler, *args, **kwargs)


def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)


def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == "bookmarks" and patterns:
        index = self.bundlestore.index
        # Using sortdict instead of a dictionary to ensure that bookmarks are
        # restored in the same order after a pullbackup. See T24417531
        results = util.sortdict()
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith("*"):
                pattern = "re:^" + pattern[:-1] + ".*"
            kind, pat, matcher = util.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
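
# For example (bookmark names are illustrative): a query for the pattern
# "scratch/joe/*" is matched against the infinitepush index as-is and is then
# rewritten to "re:^scratch/joe/.*" before being matched against the regular
# bookmarks returned by the wrapped listkeys call.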


@batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable("pushkey"):
        yield {}, None
    f = future()
    self.ui.debug(
        'preparing listkeys for "%s" with pattern "%s"\n' % (namespace, patterns)
    )
    yield {
        "namespace": encoding.fromlocal(namespace),
        "patterns": encodelist(patterns),
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n' % (namespace, len(d)))
    yield pushkey.decodekeys(d)


def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs("bundle()"))


def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    """Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup.
    But we need to include it if we are fetching from the bundlestore.
    """
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = "\0".join("path:%s" % p for p in changedfiles)
    newcaps = []
    appended = False
    for cap in bundlecaps or []:
        if cap.startswith("excludepattern="):
            newcaps.append("\0".join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # excludepattern cap not found, so just append it
        newcaps.append("excludepattern=" + changedfiles)

    return newcaps
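
# For example, if the bundle's revisions touched files "a" and "b" (names
# illustrative), the cap built above looks like
# "excludepattern=path:a\0path:b"; per the docstring, remotefilelog uses this
# to include content for exactly those paths in the changegroup.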


def _rebundle(bundlerepo, bundleroots, unknownhead, cgversion, bundlecaps):
    """
    A bundle may include more revisions than the user requested. For example,
    if the user asks for a revision, the bundle may also contain its
    descendants. This function filters out all revisions that the user did
    not request.
    """
    parts = []

    outgoing = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
    )
    cgstream = changegroup.makestream(
        bundlerepo, outgoing, cgversion, "pull", bundlecaps=bundlecaps
    )
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart("changegroup", data=cgstream)
    cgpart.addparam("version", cgversion)
    parts.append(cgpart)

    try:
        treemod = extensions.find("treemanifest")
        remotefilelog = extensions.find("remotefilelog")
    except KeyError:
        pass
    else:
        # This parsing should be refactored to be shared with
        # exchange.getbundlechunks. But I'll do that in a separate diff.
        if bundlecaps is None:
            bundlecaps = set()
        b2caps = {}
        for bcaps in bundlecaps:
            if bcaps.startswith("bundle2="):
                blob = util.urlreq.unquote(bcaps[len("bundle2=") :])
                b2caps.update(bundle2.decodecaps(blob))

        missing = outgoing.missing
        if remotefilelog.shallowbundle.cansendtrees(
            bundlerepo, missing, source="pull", bundlecaps=bundlecaps, b2caps=b2caps
        ):
            try:
                treepart = treemod.createtreepackpart(
                    bundlerepo, outgoing, treemod.TREEGROUP_PARTTYPE2
                )
                parts.append(treepart)
            except BaseException as ex:
                parts.append(bundle2.createerrorpart(str(ex)))

    return parts


def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that the bundle can be applied client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots


def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs("heads(bundle())"))
    return not (len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head)


# TODO(stash): remove copy-paste from upstream hg
def _decodebundle2caps(bundlecaps):
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith("bundle2="):
            blob = util.urlreq.unquote(bcaps[len("bundle2=") :])
            b2caps.update(bundle2.decodecaps(blob))
    return b2caps


def _getsupportedcgversion(repo, bundlecaps):
    b2caps = _decodebundle2caps(bundlecaps)

    cgversion = "01"
    cgversions = b2caps.get("changegroup")
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v for v in cgversions if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise ValueError(_("no common changegroup version"))
        cgversion = max(cgversions)
    return cgversion


def _generateoutputparts(
    head, cgversion, bundlecaps, bundlerepo, bundleroots, bundlefile
):
    """generates the bundle parts that will be sent to the user

    returns a list of bundle2 parts
    """
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart("changegroup", data=unbundler._stream.read())
                part.addparam("version", "01")
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == "changegroup":
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        "unexpected bundle without changegroup part, "
                        + "head: %s" % hex(head),
                        hint="report to administrator",
                    )
            else:
                raise error.Abort("unknown bundle type")
    else:
        parts = _rebundle(bundlerepo, bundleroots, head, cgversion, bundlecaps)

    return parts


def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []

    cgversion = _getsupportedcgversion(repo, bundlecaps or [])
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui
                    )
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    draftcommits = set()
                    bundleheads = set([head])
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        draftcommits.add(node)
                        if node in heads:
                            bundleheads.add(node)
                        nodestobundle[node] = (
                            bundlerepo,
                            bundleroots,
                            newbundlefile,
                        )

                    if draftcommits:
                        # Filter down to roots of this head, so we don't report
                        # non-roots as phase roots and we don't report commits
                        # that aren't related to the requested head.
                        for rev in bundlerepo.revs(
                            "roots((%ln) & ::%ln)", draftcommits, bundleheads
                        ):
                            newphases[bundlerepo[rev].hex()] = str(phases.draft)

                scratchbundles.append(
                    _generateoutputparts(
                        head, cgversion, bundlecaps, *nodestobundle[head]
                    )
                )
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping["changegroup"]
    try:

        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping["changegroup"] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == "phases" and pullfrombundlestore:
                if origvalues.get("publishing") == "True":
                    # Make repo non-publishing to preserve draft phase
                    del origvalues["publishing"]
                origvalues.update(newphases)
            return origvalues

        wrapfunction(localrepo.localrepository, "listkeys", _listkeys)
        wrappedlistkeys = True
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs)
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping["changegroup"] = oldchangegrouppart
        if wrappedlistkeys:
            unwrapfunction(localrepo.localrepository, "listkeys", _listkeys)
    return result
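
# In short: for each requested head missing from the local changelog, the
# server downloads the scratch bundle containing it, generates parts for just
# the requested revisions, and splices them in after the regular changegroup
# part, while temporarily reporting the repo as non-publishing so the scratch
# commits stay draft on the client.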


def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, "scratch branch %s not found" % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                try:
                    node = repo.bundlestore.index.getnodebyprefix(localkey)
                    if node:
                        return "%s %s\n" % (1, node)
                    else:
                        return "%s %s\n" % (0, str(inst))
                except Exception as inst:
                    return "%s %s\n" % (0, str(inst))

    return _lookup


def _decodebookmarks(stream):
    sizeofjsonsize = struct.calcsize(">i")
    size = struct.unpack(">i", stream.read(sizeofjsonsize))[0]
    unicodedict = json.loads(stream.read(size))
    # python json module always returns unicode strings. We need to convert
    # them back to byte strings
    result = {}
    for bookmark, node in unicodedict.iteritems():
        bookmark = bookmark.encode("ascii")
        node = node.encode("ascii")
        result[bookmark] = node
    return result
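
# The matching wire encoding is a big-endian 4-byte length prefix followed by
# that many bytes of JSON, e.g. (a sketch; the bookmark name is illustrative):
#
#     data = json.dumps({"scratch/book": hexnode})
#     stream = struct.pack(">i", len(data)) + data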


def _update(orig, ui, repo, node=None, rev=None, **opts):
    """commit cloud (infinitepush) extension for hg up
    `hg up` will access:
    * local repo
    * hidden commits
    * remote commits
    * commit cloud (infinitepush) storage
    """
    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    unfi = repo.unfiltered()
    if not opts.get("date") and (rev or node) not in unfi:
        mayberemote = rev or node
        mayberemote = _tryhoist(ui, mayberemote)
        dopull = False
        kwargs = {}
        if _scratchbranchmatcher(mayberemote):
            dopull = True
            kwargs["bookmark"] = [mayberemote]
        elif _maybehash(mayberemote):
            dopull = True
            kwargs["rev"] = [mayberemote]

        if dopull:
            ui.warn(
                _("'%s' does not exist locally - looking for it " + "remotely...\n")
                % mayberemote
            )
            # Try pulling node from remote repo
            pullstarttime = time.time()

            try:
                (pullcmd, pullopts) = cmdutil.getcmdanddefaultopts(
                    "pull", commands.table
                )
                pullopts.update(kwargs)
                # Prefer to pull from the 'infinitepush' path if it exists.
                # The 'infinitepush' path has both infinitepush and
                # non-infinitepush revisions, so pulling from it is safer.
                # This is useful for dogfooding other hg backends that store
                # only public commits (e.g. Mononoke)
                with _resetinfinitepushpath(ui):
                    pullcmd(ui, unfi, **pullopts)
            except Exception:
                remoteerror = str(sys.exc_info()[1])
                replacements = {
                    "commitcloud.changeset": ("changeset:",),
                    "commitcloud.meta": ("date:", "summary:", "author:"),
                    "commitcloud.commitcloud": ("#commitcloud",),
                }
                for label, keywords in replacements.iteritems():
                    for kw in keywords:
                        remoteerror = remoteerror.replace(kw, ui.label(kw, label))

                ui.warn(_("pull failed: %s\n") % remoteerror)

                # User updates to own commit from Commit Cloud
                if ui.username() in remoteerror:
                    hintutil.trigger("commitcloud-sync-education", ui)
            else:
                ui.warn(_("'%s' found remotely\n") % mayberemote)
                pulltime = time.time() - pullstarttime
                ui.warn(_("pull finished in %.3f sec\n") % pulltime)

    try:
        return orig(ui, repo, node, rev, **opts)
    except Exception:
        # Show the triggered hints anyway
        hintutil.show(ui)
        raise


@contextlib.contextmanager
def _resetinfinitepushpath(ui):
    """
    Sets the "default" path to the "infinitepush" path and deletes the
    "infinitepush" path. In some cases (e.g. when testing a new hg backend
    which doesn't have commit cloud commits) we want to do a normal `hg pull`
    from the "default" path but `hg pull -r HASH` from the "infinitepush" path
    if it's present. This is better than just setting another path because of
    the "remotenames" extension. Pulling from or pushing to another path would
    add lots of new remote bookmarks, and that can be slow and slow down
    smartlog.
    """

    overrides = {}
    if "infinitepush" in ui.paths:
        overrides[("paths", "default")] = ui.paths["infinitepush"].loc
        overrides[("paths", "infinitepush")] = "!"
        with ui.configoverride(overrides, "infinitepush"):
            loc, sub = ui.configsuboptions("paths", "default")
            ui.paths["default"] = uimod.path(ui, "default", rawloc=loc, suboptions=sub)
            del ui.paths["infinitepush"]
            yield
    else:
        yield


def _pull(orig, ui, repo, source="default", **opts):
    # If the '-r' or '-B' option is set, then prefer to pull from the
    # 'infinitepush' path if it exists. The 'infinitepush' path has both
    # infinitepush and non-infinitepush revisions, so pulling from it is
    # safer. This is useful for dogfooding other hg backends that store only
    # public commits (e.g. Mononoke)
    if opts.get("rev") or opts.get("bookmark"):
        with _resetinfinitepushpath(ui):
            return _dopull(orig, ui, repo, source, **opts)

    return _dopull(orig, ui, repo, source, **opts)


def _dopull(orig, ui, repo, source="default", **opts):
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get("branch"))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get("rev", []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get("bookmark"):
        bookmarks = []
        revs = opts.get("rev") or []
        for bookmark in opts.get("bookmark"):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = "REVTOFETCH"
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                "bookmarks", patterns=scratchbookmarks
            )
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort("remote bookmark %s not found!" % bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts["bookmark"] = bookmarks
        opts["rev"] = revs

    # Pulling revisions that were filtered would result in an error.
    # Let's revive them.
    unfi = repo.unfiltered()
    torevive = []
    for rev in opts.get("rev", []):
        try:
            repo[rev]
        except error.FilteredRepoLookupError:
            node = unfi[rev].node()
            torevive.append(unfi[node])
        except error.RepoLookupError:
            pass
    obsolete.revive(torevive)

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        wrapfunction(discovery, "findcommonincoming", _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **opts)
        # TODO(stash): race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            unwrapfunction(discovery, "findcommonincoming")
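
# Example flow (bookmark name illustrative): `hg pull -B scratch/joe/feature`
# resolves the scratch bookmark to a node via listkeyspatterns above, pulls
# that node explicitly, and then re-saves the remote scratch bookmark that the
# underlying pull would otherwise have dropped.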


def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find("remotenames")
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have them up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names["remotebookmarks"].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names["remotebookmarks"].nodes(repo, remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}


def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find("remotenames")
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == "bookmarks":
            if rname in newbookmarks:
                # This is possible if we have a normal bookmark that matches
                # the scratch branch pattern. In this case just use the
                # current bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == "branches":
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)


def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction("bookmark") as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)


def _findcommonincoming(orig, *args, **kwargs):
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads
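
# Forcing the "incoming" flag to True makes the client proceed to getbundle
# even when discovery reports nothing new; scratch revisions live only in the
# bundlestore, so normal discovery cannot see them.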


def _push(orig, ui, repo, dest=None, *args, **opts):
    bookmark = opts.get("to") or ""
    create = opts.get("create") or False

    oldphasemove = None
    overrides = {
        (experimental, configbookmark): bookmark,
        (experimental, configcreate): create,
    }

    with ui.configoverride(overrides, "infinitepush"):
        scratchpush = opts.get("bundle_store")
        if _scratchbranchmatcher(bookmark):
            # Hack to fix interaction with remotenames. Remotenames push
            # '--to' bookmark to the server but we don't want to push scratch
            # bookmark to the server. Let's delete '--to' and '--create' and
            # also set allow_anon to True (because if --to is not set
            # remotenames will think that we are pushing an anonymous head)
            if "to" in opts:
                del opts["to"]
            if "create" in opts:
                del opts["create"]
            opts["allow_anon"] = True
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, "bundle2.pushback", True)

        ui.setconfig(
            experimental,
            confignonforwardmove,
            opts.get("non_forward_move"),
            "--non-forward-move",
        )
        if scratchpush:
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = wrapfunction(exchange, "_localphasemove", _phasemove)
            path = ui.paths.getpath(
                dest, default=("infinitepush", "default-push", "default")
            )
        else:
            path = ui.paths.getpath(dest, default=("default-push", "default"))
        # Copy-paste from `push` command
        if not path:
            raise error.Abort(
                _("default repository not configured!"),
                hint=_("see 'hg help config.paths'"),
            )
        dest = path.pushloc or path.loc
        if dest.startswith("svn+") and scratchpush:
            raise error.Abort(
                "infinite push does not work with svn repo",
                hint="did you forget to `hg push default`?",
            )
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, dest)
        result = orig(ui, repo, dest, *args, **opts)
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, dest)
                fetchedbookmarks = other.listkeyspatterns(
                    "bookmarks", patterns=[bookmark]
                )
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, dest)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
|
|
|
|
|
2018-05-29 21:13:08 +03:00
|
|
|
|
2017-11-07 22:43:56 +03:00
|
|
|
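# Client-side sketch: with a scratch pattern configured, e.g.
#
#   [infinitepush]
#   branchpattern = re:scratch/.+
#
# `hg push --to scratch/mytask --create` takes the scratchpush path above:
# '--to' and '--create' are stripped before remotenames sees them, and the
# commits go to the bundlestore instead of the main repository. (The exact
# pattern is deployment policy; `re:scratch/.+` is only an illustration.)

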
def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find("remotenames")

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [
        remotename
        for remotename in remotenamesext.readremotenames(repo)
        if remotename[remote_idx] == path
    ]
    remote_bm_names = [
        remotename[name_idx]
        for remotename in remotenames
        if remotename[nametype_idx] == "bookmarks"
    ]

    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(
                _("infinitepush bookmark '{}' does not exist in path '{}'").format(
                    name, path
                )
            )

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == "bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == "branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)


def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""

    if phase != phases.public:
        orig(pushop, nodes, phase)


@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    bookmark = pushop.ui.config(experimental, configbookmark)
    create = pushop.ui.configbool(experimental, configcreate)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if "changesets" in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add("changesets")
    pushop.stepsdone.add("treepack")
    if not pushop.outgoing.missing:
        pushop.ui.status(_("no changes found\n"))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam("infinitepush", "True")

    nonforwardmove = pushop.force or pushop.ui.configbool(
        experimental, confignonforwardmove
    )
    scratchparts = getscratchbranchparts(
        pushop.repo,
        pushop.remote,
        pushop.outgoing,
        nonforwardmove,
        pushop.ui,
        bookmark,
        create,
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply


bundle2.capabilities[scratchbranchparttype] = ()
bundle2.capabilities[scratchbookmarksparttype] = ()


def _getrevs(bundle, oldnode, force, bookmark):
    "extracts and validates the revs to be imported"
    revs = [bundle[r] for r in bundle.revs("sort(bundle())")]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set("bundle() & %s::", oldnode)):
        return revs

    # Forced non-fast forward update
    if force:
        return revs
    else:
        raise error.Abort(
            _("non-forward push"), hint=_("use --non-forward-move to override")
        )


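# Summary of the cases above: the update is accepted when the bookmark is new,
# when the old node is in the bundle with descendants in it (a fast-forward),
# or when the push was forced; everything else aborts as a non-forward push.

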
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype="start", **kwargs)
    try:
        yield
        logger(
            service,
            eventtype="success",
            elapsedms=(time.time() - start) * 1000,
            **kwargs
        )
    except Exception as e:
        logger(
            service,
            eventtype="failure",
            elapsedms=(time.time() - start) * 1000,
            errormsg=str(e),
            **kwargs
        )
        raise


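# Usage sketch, mirroring the call sites later in this file:
#
#   log = _getorcreateinfinitepushlogger(op)
#   with logservicecall(log, "bundlestore", bundlesize=len(bundledata)):
#       key = store.write(bundledata)
#
# One "start" event is emitted on entry and either a "success" or a "failure"
# event (with elapsed milliseconds) on exit.

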
def _getorcreateinfinitepushlogger(op):
    logger = op.records["infinitepushlogger"]
    if not logger:
        ui = op.repo.ui
        try:
            username = util.getuser()
        except Exception:
            username = "unknown"
        # Generate random request id to be able to find all logged entries
        # for the same request. Since requestid is pseudo-generated it may
        # not be unique, but we assume that (hostname, username, requestid)
        # is unique.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(
            ui.log,
            "infinitepush",
            user=username,
            requestid=requestid,
            hostname=hostname,
            reponame=ui.config("infinitepush", "reponame"),
        )
        op.records.add("infinitepushlogger", logger)
    else:
        logger = logger[0]
    return logger


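# Every record produced through the returned logger shares the same
# (hostname, username, requestid) triple and the configured reponame, which is
# what makes all log entries of a single request greppable as a group.

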
def processparts(orig, repo, op, unbundler):
    if unbundler.params.get("infinitepush") != "True":
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool("infinitepush", "storeallparts")

    partforwardingwhitelist = []
    try:
        treemfmod = extensions.find("treemanifest")
        partforwardingwhitelist.append(treemfmod.TREEGROUP_PARTTYPE2)
    except KeyError:
        pass

    bundler = bundle2.bundle20(repo.ui)
    compress = repo.ui.config("infinitepush", "bundlecompression", "UN")
    bundler.setcompression(compress)
    cgparams = None
    scratchbookpart = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == "replycaps":
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get("cgversion", "01")
                bundlepart = bundle2.bundlepart("changegroup", data=part.read())
                bundlepart.addparam("version", cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + "_skippushkey", True)
                    op.records.add(scratchbranchparttype + "_skipphaseheads", True)
            elif part.type == scratchbookmarksparttype:
                # Save this for later processing. Details below.
                #
                # Upstream https://phab.mercurial-scm.org/D1389 and its
                # follow-ups stop part.seek support to reduce memory usage
                # (https://bz.mercurial-scm.org/5691). So we need to copy
                # the part so it can be consumed later.
                scratchbookpart = copiedpart(part)
            else:
                if handleallparts or part.type in partforwardingwhitelist:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == "pushkey":
                        if op.reply is not None:
                            rpart = op.reply.newpart("reply:pushkey")
                            rpart.addparam(
                                "in-reply-to", str(part.id), mandatory=False
                            )
                            rpart.addparam("return", "1", mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {"return": 1})
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, "wb")
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass

    # The scratch bookmark part is sent as part of a push backup. It needs to be
    # processed after the main bundle has been stored, so that any commits it
    # references are available in the store.
    if scratchbookpart:
        bundle2._processpart(op, scratchbookpart)


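# Server-side sketch of the rollout flag used above:
#
#   [infinitepush]
#   storeallparts = True
#
# When set, all parts are forwarded into the stored bundle and recorded as
# handled; otherwise only whitelisted part types are forwarded.

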
def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype="start")
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + "_skippushkey", True)

    bundle = None
    try:  # guards bundle
        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = repository(op.repo.ui, bundlepath)

        bookmark = params.get("bookmark")
        create = params.get("create")
        force = params.get("force")

        if bookmark:
            oldnode = index.getnode(bookmark)

            if not oldnode and not create:
                raise error.Abort(
                    "unknown bookmark %s" % bookmark,
                    hint="use --create if you want to create one",
                )
        else:
            oldnode = None
        bundleheads = bundle.revs("heads(bundle())")
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(_("cannot push more than one head to a scratch branch"))

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = "s" if len(revs) > 1 else ""
        op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split("\n")[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        if len(revs) > maxoutput:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split("\n")[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, "rb") as f:
                bundledata = f.read()
                with logservicecall(log, "bundlestore", bundlesize=len(bundledata)):
                    bundlesizelimitmb = op.repo.ui.configint(
                        "infinitepush", "maxbundlesize", 100
                    )
                    if len(bundledata) > bundlesizelimitmb * 1024 * 1024:
                        raise error.Abort(
                            "bundle is too big: %d bytes. "
                            "max allowed size is %d MB"
                            % (len(bundledata), bundlesizelimitmb)
                        )
                    key = store.write(bundledata)

        with logservicecall(log, "index", newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
        log(
            scratchbranchparttype,
            eventtype="success",
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

        fillmetadatabranchpattern = op.repo.ui.config(
            "infinitepush", "fillmetadatabranchpattern", ""
        )
        if bookmark and fillmetadatabranchpattern:
            __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
            if matcher(bookmark):
                _asyncsavemetadata(op.repo.root, [ctx.hex() for ctx in nodesctx])
    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype="failure",
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()


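# Server-side sketch: the 100 MB default enforced above can be tuned, e.g.
#
#   [infinitepush]
#   maxbundlesize = 512

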
@bundle2.b2streamparamhandler("infinitepush")
def processinfinitepush(unbundler, param, value):
    """process the bundle2 stream level parameter containing whether this push
    is an infinitepush or not."""
    if value and unbundler.ui.configbool("infinitepush", "bundle-stream", False):
        pass


@bundle2.parthandler(
    scratchbranchparttype, ("bookmark", "create", "force", "cgversion")
)
def bundle2scratchbranch(op, part):
    """unbundle a bundle2 part containing a changegroup to store"""

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get("cgversion", "01")
    cgpart = bundle2.bundlepart("changegroup", data=part.read())
    cgpart.addparam("version", cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, "wb")
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1


@bundle2.parthandler(scratchbookmarksparttype)
def bundle2scratchbookmarks(op, part):
    """Handler deletes bookmarks first then adds new bookmarks."""
    index = op.repo.bundlestore.index
    decodedbookmarks = _decodebookmarks(part)
    toinsert = {}
    todelete = []
    for bookmark, node in decodedbookmarks.iteritems():
        if node:
            toinsert[bookmark] = node
        else:
            todelete.append(bookmark)
    log = _getorcreateinfinitepushlogger(op)
    with logservicecall(log, scratchbookmarksparttype), index:
        if todelete:
            index.deletebookmarks(todelete)
        if toinsert:
            index.addmanybookmarks(toinsert)


def bundle2pushkey(orig, op, part):
    """Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    """
    if op.records[scratchbranchparttype + "_skippushkey"]:
        if op.reply is not None:
            rpart = op.reply.newpart("reply:pushkey")
            rpart.addparam("in-reply-to", str(part.id), mandatory=False)
            rpart.addparam("return", "1", mandatory=False)
        return 1

    return orig(op, part)


def bundle2handlephases(orig, op, part):
    """Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    """

    if op.records[scratchbranchparttype + "_skipphaseheads"]:
        return

    return orig(op, part)


def _asyncsavemetadata(root, nodes):
    """starts a separate process that fills metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes.
    """

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append("--node")
        nodesargs.append(node)
    with open(os.devnull, "w+b") as devnull:
        cmdline = [
            util.hgexecutable(),
            "debugfillinfinitepushmetadata",
            "-R",
            root,
        ] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(
            cmdline,
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )


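# The background invocation is equivalent to running something like
#
#   hg debugfillinfinitepushmetadata -R <repo-root> --node <hex1> --node <hex2>
#
# with stdin/stdout/stderr detached; <repo-root> and the node hashes stand in
# for the values assembled above.

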
def _deltaparent(orig, self, revlog, rev, p1, p2, prev):
    # This version of deltaparent prefers p1 over prev to use less space
    dp = revlog.deltaparent(rev)
    if dp == nodemod.nullrev and not revlog.storedeltachains:
        # send full snapshot only if revlog configured to do so
        return nodemod.nullrev
    return p1


def _createbundler(ui, repo, other):
    bundler = bundle2.bundle20(ui, bundle2.bundle2caps(other))
    compress = ui.config("infinitepush", "bundlecompression", "UN")
    bundler.setcompression(compress)
    # Disallow pushback because we want to avoid taking repo locks.
    # And we don't need pushback anyway
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, allowpushback=False))
    bundler.newpart("replycaps", data=capsblob)
    return bundler


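# Configuration sketch: backup bundles default to no compression ("UN"); a
# deployment can trade CPU for bandwidth with, e.g.
#
#   [infinitepush]
#   bundlecompression = GZ

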
def _sendbundle(bundler, other):
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = other.unbundle(stream, ["force"], other.url())
        # Look for an error part in the response. Note that we don't apply
        # the reply bundle, as we're not expecting any response, except maybe
        # an error. If we receive any extra parts, that is an error.
        for part in reply.iterparts():
            if part.type == "error:abort":
                raise bundle2.AbortFromPart(
                    part.params["message"], hint=part.params.get("hint")
                )
            elif part.type == "reply:changegroup":
                pass
            else:
                raise error.Abort(_("unexpected part in reply: %s") % part.type)
    except error.BundleValueError as exc:
        raise error.Abort(_("missing support for %s") % exc)


def pushbackupbundle(ui, repo, other, outgoing, bookmarks):
    """
    push a backup bundle to the server

    Pushes an infinitepush bundle containing the commits described in `outgoing`
    and the bookmarks described in `bookmarks` to the `other` server.
    """
    # Wrap deltaparent function to make sure that bundle takes less space
    # See _deltaparent comments for details
    extensions.wrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)
    try:
        bundler = _createbundler(ui, repo, other)
        bundler.addparam("infinitepush", "True")
        pushvarspart = bundler.newpart("pushvars")
        pushvarspart.addparam("BYPASS_READONLY", "True", mandatory=False)

        backup = False

        if outgoing and not outgoing.missing and not bookmarks:
            ui.status(_("nothing to back up\n"))
            return True

        if outgoing and outgoing.missing:
            backup = True
            parts = bundleparts.getscratchbranchparts(
                repo,
                other,
                outgoing,
                confignonforwardmove=False,
                ui=ui,
                bookmark=None,
                create=False,
            )
            for part in parts:
                bundler.addpart(part)

        if bookmarks:
            backup = True
            bundler.addpart(bundleparts.getscratchbookmarkspart(other, bookmarks))

        if backup:
            _sendbundle(bundler, other)
        return backup
    finally:
        extensions.unwrapfunction(changegroup.cg2packer, "deltaparent", _deltaparent)


def pushbackupbundlewithdiscovery(ui, repo, other, heads, bookmarks):

    if heads:
        with ui.configoverride({("remotenames", "fastheaddiscovery"): False}):
            outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=heads)
    else:
        outgoing = None

    return pushbackupbundle(ui, repo, other, outgoing, bookmarks)


def isbackedupnodes(getconnection, nodes):
    """
    check on the server side if the nodes are backed up using 'lookup'

    TODO: deprecate this after supporting 'known' in infinitepush.
    """

    def isbackedup(node):
        try:
            with getconnection() as conn:
                conn.peer.lookup(node)
            return True
        except error.RepoError:
            return False

    return [isbackedup(node) for node in nodes]


def isbackedupnodes2(getconnection, nodes):
    """
    check on the server side if the nodes are backed up using 'known'

    TODO: support 'known' in infinitepush.
    """
    with getconnection() as conn:
        return conn.peer.known([nodemod.bin(n) for n in nodes])


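# Usage sketch (hypothetical connection factory):
#
#   backedup = isbackedupnodes(getconnection, ["<hex node>"])
#
# returns one boolean per node and costs one 'lookup' round trip each, while
# isbackedupnodes2 answers the same question with a single 'known' call.

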
def pushbackupbundledraftheads(ui, repo, getconnection, heads):
    """
    push a backup bundle containing draft heads to the server

    Pushes an infinitepush bundle containing the commits that are draft
    ancestors of `heads` to the server reached via `getconnection`.
    """
    if heads:
        # Calculate the commits to back-up. The bundle needs to cleanly
        # apply to the server, so we need to include the whole draft stack.
        commitstobackup = [ctx.node() for ctx in repo.set("draft() & ::%ln", heads)]

        # Calculate the parent commits of the commits we are backing up.
        # These are the public commits that should be on the server.
        parentcommits = [
            ctx.node() for ctx in repo.set("parents(roots(%ln))", commitstobackup)
        ]

        # Build a discovery object encapsulating the commits to backup.
        # Skip the actual discovery process, as we know exactly which
        # commits are missing. For the common commits, include all the
        # parents of the commits we are sending. In the unlikely event that
        # the server is missing public commits, we will try again with
        # discovery enabled.
        og = discovery.outgoing(repo, commonheads=parentcommits, missingheads=heads)
        og._missing = commitstobackup
        og._common = parentcommits
    else:
        og = None

    try:
        with getconnection() as conn:
            return pushbackupbundle(ui, repo, conn.peer, og, None)
    except Exception as e:
        ui.warn(_("push failed: %s\n") % e)
        ui.warn(_("retrying push with discovery\n"))
    with getconnection() as conn:
        return pushbackupbundlewithdiscovery(ui, repo, conn.peer, heads, None)


def pushbackupbundlestacks(ui, repo, getconnection, heads):
    # Push bundles containing the commits. Initially attempt to push one
    # bundle for each stack (commits that share a single root). If a stack is
    # too large, or if the push fails, and the stack has multiple heads, push
    # head-by-head.
    roots = repo.set("roots(draft() & ::%ls)", heads)
    newheads = set()
    failedheads = set()
    for root in roots:
        ui.status(_("backing up stack rooted at %s\n") % root)
        stack = [ctx.hex() for ctx in repo.set("(%n::%ls)", root.node(), heads)]
        if len(stack) == 0:
            continue

        stackheads = [ctx.hex() for ctx in repo.set("heads(%ls)", stack)]
        if len(stack) > 1000:
            # This stack is too large, something must have gone wrong
            ui.warn(
                _("not backing up excessively large stack rooted at %s (%d commits)\n")
                % (root, len(stack))
            )
            failedheads |= set(stackheads)
            continue

        if len(stack) < 20 and len(stackheads) > 1:
            # Attempt to push the whole stack. This makes it easier on the
            # server when accessing one of the head commits, as the ancestors
            # will always be in the same bundle.
            try:
                if pushbackupbundledraftheads(
                    ui, repo, getconnection, [nodemod.bin(h) for h in stackheads]
                ):
                    newheads |= set(stackheads)
                    continue
                else:
                    ui.warn(_("failed to push stack bundle rooted at %s\n") % root)
            except Exception as e:
                ui.warn(_("push of stack %s failed: %s\n") % (root, e))
            ui.warn(_("retrying each head individually\n"))

        # The stack only has one head, is large, or pushing the whole stack
        # failed, so push each head in turn.
        for head in stackheads:
            try:
                if pushbackupbundledraftheads(
                    ui, repo, getconnection, [nodemod.bin(head)]
                ):
                    newheads.add(head)
                    continue
                else:
                    ui.warn(
                        _("failed to push stack bundle with head %s\n")
                        % nodemod.short(nodemod.bin(head))
                    )
            except Exception as e:
                ui.warn(
                    _("push of head %s failed: %s\n")
                    % (nodemod.short(nodemod.bin(head)), e)
                )
            failedheads.add(head)

    return newheads, failedheads
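

# Caller-side sketch of the contract:
#
#   newheads, failedheads = pushbackupbundlestacks(ui, repo, getconnection, heads)
#   # `newheads`: hex heads now known to be backed up on the server
#   # `failedheads`: hex heads that should be retried by a later backup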