2017-01-06 15:15:16 +03:00
|
|
|
# Copyright 2017 Facebook, Inc.
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
2017-03-13 11:32:32 +03:00
|
|
|
"""
|
|
|
|
[infinitepushbackup]
|
2017-03-14 12:13:16 +03:00
|
|
|
# path to the directory where pushback logs should be stored
|
|
|
|
logdir = path/to/dir
|
|
|
|
|
|
|
|
# max number of logs for one repo for one user
|
|
|
|
maxlognumber = 5
|
|
|
|
|
2017-03-13 11:32:32 +03:00
|
|
|
# There can be at most one backup process per repo. This config options
|
|
|
|
# determines how much time to wait on the lock. If timeout happens then
|
|
|
|
# backups process aborts.
|
|
|
|
waittimeout = 30
|
2017-03-16 12:22:39 +03:00
|
|
|
|
|
|
|
# Backup at most maxheadstobackup heads, other heads are ignored.
|
|
|
|
# Negative number means backup everything.
|
|
|
|
maxheadstobackup = -1
|
2017-03-13 11:32:32 +03:00
|
|
|
"""
|
2017-01-06 15:15:16 +03:00
|
|
|
|
|
|
|
from __future__ import absolute_import
|
2017-03-13 11:32:32 +03:00
|
|
|
import errno
|
2017-03-16 12:22:39 +03:00
|
|
|
import json
|
2017-01-06 15:15:16 +03:00
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import socket
|
2017-03-14 12:13:16 +03:00
|
|
|
import time
|
2017-01-06 15:15:16 +03:00
|
|
|
|
|
|
|
from .bundleparts import (
|
|
|
|
getscratchbookmarkspart,
|
|
|
|
getscratchbranchpart,
|
|
|
|
)
|
|
|
|
from mercurial import (
|
|
|
|
bundle2,
|
2017-01-09 12:42:02 +03:00
|
|
|
changegroup,
|
2017-01-06 15:15:16 +03:00
|
|
|
cmdutil,
|
|
|
|
commands,
|
|
|
|
discovery,
|
|
|
|
encoding,
|
|
|
|
error,
|
|
|
|
hg,
|
2017-03-13 11:32:32 +03:00
|
|
|
lock as lockmod,
|
2017-03-14 12:13:16 +03:00
|
|
|
osutil,
|
2017-03-16 12:22:39 +03:00
|
|
|
phases,
|
2017-01-06 15:15:16 +03:00
|
|
|
util,
|
|
|
|
)
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
from collections import defaultdict, namedtuple
|
2017-01-06 15:15:16 +03:00
|
|
|
from hgext3rd.extutil import runshellcommand
|
2017-01-09 12:42:02 +03:00
|
|
|
from mercurial.extensions import wrapfunction, unwrapfunction
|
|
|
|
from mercurial.node import bin, hex, nullrev
|
2017-01-06 15:15:16 +03:00
|
|
|
from mercurial.i18n import _
|
|
|
|
|
|
|
|
# Command registration table for this extension; populated by the
# @command decorator instances created below.
cmdtable = {}
command = cmdutil.command(cmdtable)

# Parsed form of a remote backup bookmark name:
#   hostname      - host the backup was made on
#   reporoot      - root path of the backed-up repo on that host
#   localbookmark - local bookmark name, or None when the entry is a head
backupbookmarktuple = namedtuple('backupbookmarktuple',
                                 ['hostname', 'reporoot', 'localbookmark'])
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
class backupstate(object):
    """In-memory snapshot of what has been backed up for one repo.

    heads          -- set of hex node strings of backed-up heads
    localbookmarks -- dict mapping bookmark name to hex node string
    """

    def __init__(self):
        self.heads = set()
        self.localbookmarks = {}

    def empty(self):
        """Return True when neither heads nor bookmarks are recorded."""
        return not (self.heads or self.localbookmarks)
|
|
|
|
|
2017-03-07 12:21:44 +03:00
|
|
|
# Options shared by the restore-style commands (pullbackup, debugcheckbackup)
# to pick one backup when a user has several (different hosts / repo roots).
restoreoptions = [
    ('', 'reporoot', '', 'root of the repo to restore'),
    ('', 'user', '', 'user who ran the backup'),
    ('', 'hostname', '', 'hostname of the repo to restore'),
]

# Lock file name guaranteeing at most one backup process per repo.
_backuplockname = 'infinitepushbackup.lock'
|
|
|
|
|
2017-01-06 15:15:16 +03:00
|
|
|
@command('pushbackup',
         [('', 'background', None, 'run backup in background')])
def backup(ui, repo, dest=None, **opts):
    """
    Pushes commits, bookmarks and heads to infinitepush.
    New non-extinct commits are saved since the last `hg pushbackup`
    or since 0 revision if this backup is the first.
    Local bookmarks are saved remotely as:
        infinitepush/backups/USERNAME/HOST/REPOROOT/bookmarks/LOCAL_BOOKMARK
    Local heads are saved remotely as:
        infinitepush/backups/USERNAME/HOST/REPOROOT/heads/HEAD_HASH
    """

    if opts.get('background'):
        # Re-invoke ourselves without --background as a detached shell
        # command, optionally redirecting output to a per-user log file.
        background_cmd = ['hg', 'pushbackup']
        if dest:
            background_cmd.append(dest)
        logdir = ui.config('infinitepushbackup', 'logdir')
        if logdir:
            # Any filesystem problem only disables logging; the backup
            # itself still runs.
            try:
                try:
                    username = ui.shortuser(ui.username())
                except Exception:
                    username = 'unknown'
                userlogdir = os.path.join(logdir, username)
                util.makedirs(userlogdir)
                reporoot = repo.origroot
                reponame = os.path.basename(reporoot)

                # Prune old log files before creating today's.
                maxlogfilenumber = ui.configint('infinitepushbackup',
                                                'maxlognumber', 5)
                _removeoldlogfiles(userlogdir, reponame, maxlogfilenumber)
                logfile = _getlogfilename(logdir, username, reponame)
                # Shell redirection tokens; the joined string is run by
                # runshellcommand through a shell.
                background_cmd.extend(('>>', logfile, '2>&1'))
            except (OSError, IOError) as e:
                ui.warn(_('infinitepush backup log is disabled: %s\n') % e)
        runshellcommand(' '.join(background_cmd), os.environ)
        return 0

    try:
        # Serialize with any other backup of this repo; give up after
        # `waittimeout` seconds.
        timeout = ui.configint('infinitepushbackup', 'waittimeout', 30)
        with lockmod.lock(repo.vfs, _backuplockname, timeout=timeout):
            return _dobackup(ui, repo, dest, **opts)
    except error.LockHeld as e:
        if e.errno == errno.ETIMEDOUT:
            # Timing out on the lock is not an error: another backup is
            # already running.
            ui.warn(_('timeout waiting on backup lock'))
            return 0
        else:
            raise
|
2017-01-06 15:15:16 +03:00
|
|
|
|
2017-03-07 12:21:44 +03:00
|
|
|
@command('pullbackup', restoreoptions)
def restore(ui, repo, dest=None, **opts):
    """
    Pulls commits from infinitepush that were previously saved with
    `hg pushbackup`.
    If user has only one backup for the `dest` repo then it will be restored.
    But user may have backed up many local repos that points to `dest` repo.
    These local repos may reside on different hosts or in different
    repo roots. It makes restore ambiguous; `--reporoot` and `--hostname`
    options are used to disambiguate.
    """

    other = _getremote(repo, ui, dest, **opts)

    sourcereporoot = opts.get('reporoot')
    sourcehostname = opts.get('hostname')
    username = opts.get('user') or ui.shortuser(ui.username())

    allbackupstates = _downloadbackupstate(ui, other, sourcereporoot,
                                           sourcehostname, username)
    if len(allbackupstates) == 0:
        ui.warn(_('no backups found!'))
        return 1
    # Aborts unless exactly one (hostname, reporoot) pair remains.
    _checkbackupstates(allbackupstates)

    # NOTE: this local name shadows the module-level `backupstate` class.
    __, backupstate = allbackupstates.popitem()
    pullcmd, pullopts = _getcommandandoptions('^pull')
    # pull backed-up heads and the nodes that bookmarks point to
    pullopts['rev'] = list(backupstate.heads |
                           set(backupstate.localbookmarks.values()))
    if dest:
        pullopts['source'] = dest
    result = pullcmd(ui, repo, **pullopts)

    # Recreate the backed-up bookmarks locally, skipping any whose target
    # node did not make it into the local repo.
    with repo.wlock():
        with repo.lock():
            with repo.transaction('bookmark') as tr:
                for book, hexnode in backupstate.localbookmarks.iteritems():
                    if hexnode in repo:
                        repo._bookmarks[book] = bin(hexnode)
                    else:
                        ui.warn(_('%s not found, not creating %s bookmark') %
                                (hexnode, book))
                repo._bookmarks.recordchange(tr)

    return result
|
|
|
|
|
2017-03-07 12:21:44 +03:00
|
|
|
@command('debugcheckbackup', restoreoptions)
def checkbackup(ui, repo, dest=None, **opts):
    """
    Checks that all the nodes that backup needs are available in bundlestore
    """
    other = _getremote(repo, ui, dest, **opts)

    sourcereporoot = opts.get('reporoot')
    sourcehostname = opts.get('hostname')
    username = opts.get('user') or ui.shortuser(ui.username())

    allbackupstates = _downloadbackupstate(ui, other, sourcereporoot,
                                           sourcehostname, username)

    # Aborts unless exactly one (hostname, reporoot) backup remains.
    _checkbackupstates(allbackupstates)
    __, bkpstate = allbackupstates.popitem()
    # Batch remote lookups of every backed-up head and bookmark target.
    batch = other.iterbatch()
    for hexnode in list(bkpstate.heads) + bkpstate.localbookmarks.values():
        batch.lookup(hexnode)
    batch.submit()
    lookupresults = batch.results()
    for r in lookupresults:
        # iterate over results to make it throw if revision was not found
        pass
|
|
|
|
|
2017-03-13 11:35:30 +03:00
|
|
|
@command('debugwaitbackup', [('', 'timeout', '', 'timeout value')])
def waitbackup(ui, repo, timeout):
    """Block until any in-flight backup of this repo has finished."""
    # An empty/omitted timeout means wait indefinitely (-1 for lockmod.lock).
    try:
        timeout = int(timeout) if timeout else -1
    except ValueError:
        raise error.Abort('timeout should be integer')

    try:
        # Successfully taking (and immediately dropping) the backup lock
        # proves no backup process is currently running.
        with lockmod.lock(repo.vfs, _backuplockname, timeout=timeout):
            pass
    except error.LockHeld as e:
        if e.errno == errno.ETIMEDOUT:
            raise error.Abort(_('timeout while waiting for backup'))
        raise
|
|
|
|
|
2017-03-13 11:32:32 +03:00
|
|
|
def _dobackup(ui, repo, dest, **opts):
    """Perform one backup pass: push new heads/bookmarks, delete stale ones.

    Called with the backup lock held. Returns 0.
    """
    ui.status(_('starting backup %s\n') % time.strftime('%H:%M:%S %d %b %Y %Z'))
    start = time.time()
    username = ui.shortuser(ui.username())
    # State recorded after the last successful backup; empty on first run.
    bkpstate = _readlocalbackupstate(ui, repo)

    maxheadstobackup = ui.configint('infinitepushbackup',
                                    'maxheadstobackup', -1)

    currentheads = [ctx.hex() for ctx in repo.set('head() & draft()')]
    # Keep only the last maxheadstobackup heads; 0 disables head backup,
    # negative means back up everything.
    if maxheadstobackup > 0:
        currentheads = currentheads[-maxheadstobackup:]
    elif maxheadstobackup == 0:
        currentheads = []
    currentheads = set(currentheads)
    newheads = currentheads - bkpstate.heads
    removedheads = bkpstate.heads - currentheads
    other = _getremote(repo, ui, dest, **opts)
    # badhexnodes are nodes that cannot be bundled (e.g. missing filelogs);
    # they and everything referencing them are excluded from the backup.
    outgoing, badhexnodes = _getrevstobackup(repo, other, newheads)

    newheads = set(filter(lambda hex: hex not in badhexnodes, newheads))
    currentheads = set(filter(lambda hex: hex not in badhexnodes, currentheads))

    localbookmarks = _getlocalbookmarks(repo)
    localbookmarks = _filterbookmarks(localbookmarks, repo, currentheads,
                                      badhexnodes)

    # Bookmarks to (re)create remotely, and bookmarks to delete remotely.
    newbookmarks = _dictdiff(localbookmarks, bkpstate.localbookmarks)
    removedbookmarks = _dictdiff(bkpstate.localbookmarks, localbookmarks)

    bookmarkstobackup = _getbookmarkstobackup(
        username, repo, newbookmarks, removedbookmarks,
        newheads, removedheads)

    # Special case if backup state is empty. Clean all backup bookmarks from the
    # server.
    if bkpstate.empty():
        bookmarkstobackup[_getbackupheadprefix(username, repo) + '/*'] = ''
        bookmarkstobackup[_getbackupbookmarkprefix(username, repo) + '/*'] = ''

    # Wrap deltaparent function to make sure that bundle takes less space
    # See _deltaparent comments for details
    wrapfunction(changegroup.cg2packer, 'deltaparent', _deltaparent)
    try:
        bundler = _createbundler(ui, repo, other)
        backup = False
        if outgoing and outgoing.missing:
            backup = True
            bundler.addpart(getscratchbranchpart(repo, other, outgoing,
                                                 confignonforwardmove=False,
                                                 ui=ui, bookmark=None,
                                                 create=False))

        if bookmarkstobackup:
            backup = True
            bundler.addpart(getscratchbookmarkspart(other, bookmarkstobackup))

        if backup:
            _sendbundle(bundler, other)
            # Record the new baseline only after the bundle was accepted.
            _writelocalbackupstate(repo.vfs, currentheads, localbookmarks)
        else:
            ui.status(_('nothing to backup\n'))
    finally:
        # Always restore the original deltaparent implementation.
        ui.status(_('finished in %f seconds\n') % (time.time() - start))
        unwrapfunction(changegroup.cg2packer, 'deltaparent', _deltaparent)
    return 0
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
# Name of the file (inside repo.vfs, i.e. .hg/) that records the last
# successful backup as JSON: {'heads': [...], 'bookmarks': {...}}.
_backupstatefile = 'infinitepushbackupstate'
|
2017-03-07 12:21:44 +03:00
|
|
|
|
|
|
|
# Common helper functions
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
def _getlocalbookmarks(repo):
|
|
|
|
localbookmarks = {}
|
|
|
|
for bookmark, node in repo._bookmarks.iteritems():
|
|
|
|
hexnode = hex(node)
|
|
|
|
localbookmarks[bookmark] = hexnode
|
|
|
|
return localbookmarks
|
|
|
|
|
|
|
|
def _filterbookmarks(localbookmarks, repo, headstobackup, badhexnodes):
    '''Filters out some bookmarks from being backed up

    Filters out bookmarks that point to secret commits and bookmarks that do not
    point to ancestors of headstobackup or public commits
    '''

    # Revisions reachable from the heads being backed up (inclusive).
    headrevstobackup = [repo[hexhead].rev() for hexhead in headstobackup]
    ancestors = repo.changelog.ancestors(headrevstobackup, inclusive=True)
    secret = set(ctx.hex() for ctx in repo.set('secret()'))
    filteredbooks = {}
    for bookmark, hexnode in localbookmarks.iteritems():
        # Keep the bookmark only when its target is non-secret, bundleable,
        # and either covered by the backed-up heads or already public.
        if (hexnode not in secret and hexnode not in badhexnodes and
                (repo[hexnode].rev() in ancestors or
                 repo[hexnode].phase() == phases.public)):
            filteredbooks[bookmark] = hexnode
    return filteredbooks
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
def _downloadbackupstate(ui, other, sourcereporoot, sourcehostname, username):
    """Fetch the user's backup bookmarks from the server and group them.

    Returns a dict mapping (hostname, reporoot) -> backupstate, optionally
    restricted to a given source repo root and/or hostname.
    """
    pattern = _getcommonuserprefix(username) + '/*'
    fetchedbookmarks = other.listkeyspatterns('bookmarks', patterns=[pattern])
    allbackupstates = defaultdict(backupstate)
    for book, hexnode in fetchedbookmarks.iteritems():
        # parsed is None when the remote bookmark name is malformed.
        parsed = _parsebackupbookmark(username, book)
        if parsed:
            if sourcereporoot and sourcereporoot != parsed.reporoot:
                continue
            if sourcehostname and sourcehostname != parsed.hostname:
                continue
            key = (parsed.hostname, parsed.reporoot)
            if parsed.localbookmark:
                # Entry describes a backed-up local bookmark.
                bookname = parsed.localbookmark
                allbackupstates[key].localbookmarks[bookname] = hexnode
            else:
                # Entry describes a backed-up head.
                allbackupstates[key].heads.add(hexnode)
        else:
            ui.warn(_('wrong format of backup bookmark: %s') % book)

    return allbackupstates
|
|
|
|
|
|
|
|
def _checkbackupstates(allbackupstates):
    """Abort unless the states identify exactly one (hostname, reporoot)."""
    if not allbackupstates:
        raise error.Abort('no backups found!')

    hostnames = set()
    reporoots = set()
    for hostname, reporoot in allbackupstates.iterkeys():
        hostnames.add(hostname)
        reporoots.add(reporoot)

    if len(hostnames) > 1:
        raise error.Abort(
            _('ambiguous hostname to restore: %s') % sorted(hostnames),
            hint=_('set --hostname to disambiguate'))

    if len(reporoots) > 1:
        raise error.Abort(
            _('ambiguous repo root to restore: %s') % sorted(reporoots),
            hint=_('set --reporoot to disambiguate'))
|
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getcommonuserprefix(username):
|
2017-01-06 15:15:16 +03:00
|
|
|
return '/'.join(('infinitepush', 'backups', username))
|
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getcommonprefix(username, repo):
    """Return the user/host/reporoot prefix for this repo's backup bookmarks."""
    root = repo.origroot
    prefix = '/'.join((_getcommonuserprefix(username), socket.gethostname()))
    if root.startswith('/'):
        # absolute path already carries its own leading slash
        prefix += root
    else:
        prefix += '/' + root
    # drop a trailing slash left by a root ending in '/'
    if prefix.endswith('/'):
        prefix = prefix[:-1]
    return prefix
|
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getbackupbookmarkprefix(username, repo):
    """Return the remote prefix under which local bookmarks are backed up."""
    return _getcommonprefix(username, repo) + '/bookmarks'
|
2017-01-06 15:15:16 +03:00
|
|
|
|
|
|
|
def _escapebookmark(bookmark):
    '''Encode a local bookmark name for use in a remote backup bookmark.

    If `bookmark` contains "bookmarks" as a substring then replace it with
    "bookmarksbookmarks". This will make parsing remote bookmark name
    unambigious.
    '''
    escaped = encoding.fromlocal(bookmark)
    return escaped.replace('bookmarks', 'bookmarksbookmarks')
|
|
|
|
|
|
|
|
def _unescapebookmark(bookmark):
    """Inverse of _escapebookmark: recover the local bookmark name."""
    unescaped = encoding.tolocal(bookmark)
    return unescaped.replace('bookmarksbookmarks', 'bookmarks')
|
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getbackupbookmarkname(username, bookmark, repo):
    """Return the full remote backup bookmark name for a local bookmark."""
    escaped = _escapebookmark(bookmark)
    return '%s/%s' % (_getbackupbookmarkprefix(username, repo), escaped)
|
2017-01-06 15:15:16 +03:00
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getbackupheadprefix(username, repo):
    """Return the remote prefix under which heads are backed up."""
    return _getcommonprefix(username, repo) + '/heads'
|
2017-01-06 15:15:16 +03:00
|
|
|
|
2017-03-06 11:44:18 +03:00
|
|
|
def _getbackupheadname(username, hexhead, repo):
    """Return the full remote backup bookmark name for a head."""
    return '%s/%s' % (_getbackupheadprefix(username, repo), hexhead)
|
2017-01-06 15:15:16 +03:00
|
|
|
|
|
|
|
def _getremote(repo, ui, dest, **opts):
    """Resolve `dest` (falling back to default-push/default) to a peer."""
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    # Prefer the push location when one is configured.
    return hg.peer(repo, opts, path.pushloc or path.loc)
|
|
|
|
|
|
|
|
def _getcommandandoptions(command):
    """Look up `command` in the global table; return (func, default opts)."""
    entry = commands.table[command]
    # Each option entry is (shortname, name, default, help, ...);
    # keep name -> default.
    opts = dict((opt[1], opt[2]) for opt in entry[1])
    return entry[0], opts
|
2017-01-06 15:15:16 +03:00
|
|
|
|
2017-01-06 15:21:23 +03:00
|
|
|
# Backup helper functions
|
|
|
|
|
2017-01-09 12:42:02 +03:00
|
|
|
def _deltaparent(orig, self, revlog, rev, p1, p2, prev):
    """Delta-base chooser that prefers p1 over prev to shrink backup bundles."""
    if revlog.deltaparent(rev) == nullrev and not revlog.storedeltachains:
        # send full snapshot only if revlog configured to do so
        return nullrev
    return p1
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
def _getbookmarkstobackup(username, repo, newbookmarks, removedbookmarks,
                          newheads, removedheads):
    """Build the {remote backup bookmark: value} map for one backup pass.

    An empty-string value tells the server to delete that bookmark.
    """
    tobackup = {}

    # Deleted local bookmarks are removed remotely.
    for bookmark in removedbookmarks:
        tobackup[_getbackupbookmarkname(username, bookmark, repo)] = ''

    # New/changed local bookmarks point at their hex node.
    for bookmark, hexnode in newbookmarks.items():
        tobackup[_getbackupbookmarkname(username, bookmark, repo)] = hexnode

    # Heads that disappeared locally are removed remotely.
    for hexhead in removedheads:
        tobackup[_getbackupheadname(username, hexhead, repo)] = ''

    # New heads are recorded as bookmarks named after their own hash.
    for hexhead in newheads:
        tobackup[_getbackupheadname(username, hexhead, repo)] = hexhead

    return tobackup
|
|
|
|
|
|
|
|
def _createbundler(ui, repo, other):
    """Create a bundle2 bundler advertising the peer's capabilities."""
    bundler = bundle2.bundle20(ui, bundle2.bundle2caps(other))
    # Disallow pushback because we want to avoid taking repo locks.
    # And we don't need pushback anyway
    repocaps = bundle2.getrepocaps(repo, allowpushback=False)
    bundler.newpart('replycaps', data=bundle2.encodecaps(repocaps))
    return bundler
|
|
|
|
|
|
|
|
def _sendbundle(bundler, other):
    """Push the assembled bundle to the remote, translating bundle errors."""
    try:
        other.unbundle(util.chunkbuffer(bundler.getchunks()), ['force'],
                       other.url())
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)
|
|
|
|
|
|
|
|
def findcommonoutgoing(repo, other, heads):
    """Compute outgoing changes limited to `heads`; None when no heads."""
    if not heads:
        return None
    nodes = map(repo.changelog.node, heads)
    return discovery.findcommonoutgoing(repo, other, onlyheads=nodes)
|
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
def _getrevstobackup(repo, other, headstobackup):
    """Compute the outgoing set for the heads to back up.

    Returns (outgoing, badhexnodes) where badhexnodes are hex nodes that
    cannot be bundled and were excluded (with their descendants).
    """
    revs = list(repo[hexnode].rev() for hexnode in headstobackup)

    outgoing = findcommonoutgoing(repo, other, revs)
    rootstofilter = []
    if outgoing:
        # In rare cases it's possible to have node without filelogs only
        # locally. It is possible if remotefilelog is enabled and if node was
        # stripped server-side. In this case we want to filter this
        # nodes and all ancestors out
        for node in outgoing.missing:
            changectx = repo[node]
            for file in changectx.files():
                try:
                    changectx.filectx(file)
                except error.ManifestLookupError:
                    rootstofilter.append(changectx.rev())

    badhexnodes = set()
    if rootstofilter:
        # Exclude the broken roots together with all their descendants and
        # recompute the outgoing set without them.
        revstofilter = list(repo.revs('%ld::', rootstofilter))
        badhexnodes = set(repo[rev].hex() for rev in revstofilter)
        revs = set(revs) - set(revstofilter)
        outgoing = findcommonoutgoing(repo, other, revs)

    return outgoing, badhexnodes
|
|
|
|
|
|
|
|
def _readlocalbackupstate(ui, repo):
    """Read the last backup state recorded in `_backupstatefile`.

    The file stores JSON of the form {'heads': [...], 'bookmarks': {...}}.
    Returns an empty backupstate (and warns) when the file is missing,
    unparseable, or structurally wrong, so a corrupt file only forces a
    full re-backup instead of aborting.
    """
    if not repo.vfs.exists(_backupstatefile):
        return backupstate()

    errormsg = 'corrupt %s file' % _backupstatefile
    with repo.vfs(_backupstatefile) as f:
        try:
            state = json.loads(f.read())
            if 'bookmarks' not in state or 'heads' not in state:
                ui.warn(_('%s\n') % errormsg)
                return backupstate()
            # isinstance instead of type(...) != type({}) comparisons;
            # json.loads only ever produces plain dict/list here.
            if (not isinstance(state['bookmarks'], dict) or
                    not isinstance(state['heads'], list)):
                ui.warn(_('%s\n') % errormsg)
                return backupstate()

            result = backupstate()
            result.heads = set(state['heads'])
            result.localbookmarks = state['bookmarks']
            return result
        except ValueError:
            # json.loads raises ValueError on malformed input.
            ui.warn(_('%s\n') % errormsg)
            return backupstate()
    # NOTE: the original had an unreachable trailing `return backupstate()`
    # here (every path above already returns); it has been removed.
|
2017-01-06 15:21:23 +03:00
|
|
|
|
2017-03-16 12:22:39 +03:00
|
|
|
def _writelocalbackupstate(vfs, heads, bookmarks):
    """Persist backed-up heads and bookmarks as JSON in the repo's vfs."""
    state = {'heads': list(heads), 'bookmarks': bookmarks}
    with vfs(_backupstatefile, 'w') as fd:
        fd.write(json.dumps(state))
|
2017-01-06 15:21:23 +03:00
|
|
|
|
|
|
|
# Restore helper functions
|
2017-03-06 11:44:18 +03:00
|
|
|
def _parsebackupbookmark(username, backupbookmark):
    '''Parses backup bookmark and returns info about it

    Backup bookmark may represent either a local bookmark or a head.
    Returns None if backup bookmark has wrong format or tuple.
    First entry is a hostname where this bookmark came from.
    Second entry is a root of the repo where this bookmark came from.
    Third entry in a tuple is local bookmark if backup bookmark
    represents a local bookmark and None otherwise.
    '''

    backupbookmarkprefix = _getcommonuserprefix(username)
    # group(1): hostname; group(2): repo root (includes its leading '/').
    commonre = '^{0}/([-\w.]+)(/.*)'.format(re.escape(backupbookmarkprefix))
    bookmarkre = commonre + '/bookmarks/(.*)$'
    headsre = commonre + '/heads/[a-f0-9]{40}$'

    match = re.search(bookmarkre, backupbookmark)
    if not match:
        match = re.search(headsre, backupbookmark)
        if not match:
            return None
        # It's a local head not a local bookmark.
        # That's why localbookmark is None
        return backupbookmarktuple(hostname=match.group(1),
                                   reporoot=match.group(2),
                                   localbookmark=None)

    # Bookmark names are stored escaped (see _escapebookmark); undo that.
    return backupbookmarktuple(hostname=match.group(1),
                               reporoot=match.group(2),
                               localbookmark=_unescapebookmark(match.group(3)))
|
2017-03-14 12:13:16 +03:00
|
|
|
|
|
|
|
_timeformat = '%Y%m%d'
|
|
|
|
|
|
|
|
def _getlogfilename(logdir, username, reponame):
|
|
|
|
'''Returns name of the log file for particular user and repo
|
|
|
|
|
|
|
|
Different users have different directories inside logdir. Log filename
|
|
|
|
consists of reponame (basename of repo path) and current day
|
|
|
|
(see _timeformat). That means that two different repos with the same name
|
|
|
|
can share the same log file. This is not a big problem so we ignore it.
|
|
|
|
'''
|
|
|
|
|
|
|
|
currentday = time.strftime(_timeformat)
|
|
|
|
return os.path.join(logdir, username, reponame + currentday)
|
|
|
|
|
|
|
|
def _removeoldlogfiles(userlogdir, reponame, maxlogfilenumber):
    """Delete this repo's oldest log files, keeping at most maxlogfilenumber."""
    logfiles = []
    for entry in osutil.listdir(userlogdir):
        name = entry[0]
        if not name.startswith(reponame):
            continue
        if not os.path.isfile(os.path.join(userlogdir, name)):
            continue
        # Only names of the form <reponame><_timeformat date> are ours.
        try:
            time.strptime(name[len(reponame):], _timeformat)
        except ValueError:
            continue
        logfiles.append(name)

    # _timeformat gives us a property that if we sort log file names in
    # descending order then newer files are going to be in the beginning
    logfiles.sort(reverse=True)
    for name in logfiles[maxlogfilenumber:]:
        os.unlink(os.path.join(userlogdir, name))
|
2017-03-16 12:22:39 +03:00
|
|
|
|
|
|
|
def _dictdiff(first, second):
|
|
|
|
'''Returns new dict that contains items from the first dict that are missing
|
|
|
|
from the second dict.
|
|
|
|
'''
|
|
|
|
result = {}
|
|
|
|
for book, hexnode in first.items():
|
|
|
|
if second.get(book) != hexnode:
|
|
|
|
result[book] = hexnode
|
|
|
|
return result
|