"""
remotenames: a mercurial extension for improving client/server workflows

The remotenames extension provides additional information to clients that is
particularly useful when pushing and pulling to peer repositories.

Before diving in to using remotebookmarks, we suggest you read the included
README file, which explains the changes to expect, the configuration knobs
available (note: almost everything is configurable), and gives examples of
how to set up the configuration options in useful ways.

This extension is the work of Sean Farley forked from Augie Fackler's seminal
remotebranches extension. Ryan McElroy of Facebook also contributed.
"""
|
|
|
import os
|
2015-02-10 03:08:18 +03:00
|
|
|
import errno
|
2015-04-23 01:54:29 +03:00
|
|
|
import shutil
|
2010-01-04 07:37:45 +03:00
|
|
|
|
2015-02-10 22:43:07 +03:00
|
|
|
from mercurial import bookmarks
|
2015-01-17 01:48:56 +03:00
|
|
|
from mercurial import commands
|
|
|
|
from mercurial import encoding
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import error
|
|
|
|
from mercurial import exchange
|
2014-10-03 20:43:48 +04:00
|
|
|
from mercurial import extensions
|
2010-01-04 07:37:45 +03:00
|
|
|
from mercurial import hg
|
2015-02-10 20:18:13 +03:00
|
|
|
from mercurial import localrepo
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import namespaces
|
2015-02-12 10:34:56 +03:00
|
|
|
from mercurial import obsolete
|
2014-04-01 06:22:23 +04:00
|
|
|
from mercurial import repoview
|
2014-03-18 22:46:10 +04:00
|
|
|
from mercurial import revset
|
2015-02-10 22:52:19 +03:00
|
|
|
from mercurial import scmutil
|
2014-03-18 22:46:10 +04:00
|
|
|
from mercurial import templatekw
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import url
|
|
|
|
from mercurial import util
|
2015-02-10 22:52:19 +03:00
|
|
|
from mercurial.i18n import _
|
|
|
|
from mercurial.node import hex, short
|
2010-01-11 02:24:02 +03:00
|
|
|
from hgext import schemes
|
2015-08-29 00:30:30 +03:00
|
|
|
from hgext.convert import hg as converthg
|
2010-01-04 07:37:45 +03:00
|
|
|
|
2014-10-03 20:43:48 +04:00
|
|
|
def expush(orig, repo, remote, *args, **kwargs):
    """Wrap exchange.push so remote names are refreshed after every push."""
    result = orig(repo, remote, *args, **kwargs)
    pullremotenames(repo, remote)
    return result
|
2014-10-03 20:43:48 +04:00
|
|
|
|
|
|
|
def expull(orig, repo, remote, *args, **kwargs):
    """Wrap exchange.pull so remote names are refreshed after every pull."""
    result = orig(repo, remote, *args, **kwargs)
    pullremotenames(repo, remote)
    return result
|
|
|
|
|
|
|
|
def pullremotenames(repo, remote):
    """Record the remote's branch heads and bookmarks under its path name."""
    path = activepath(repo.ui, remote)
    if path:
        # on a push, we don't want to keep obsolete heads since they won't
        # show up as heads on the next pull, so we remove them here;
        # otherwise we would require the user to issue a pull to refresh
        # .hg/remotenames
        repo = repo.unfiltered()
        branchheads = {}
        for branch, nodes in remote.branchmap().iteritems():
            branchheads[branch] = [node for node in nodes
                                   if node in repo
                                   and not repo[node].obsolete()]
        saveremotenames(repo, path, branchheads, remote.listkeys('bookmarks'))

    precachedistance(repo)
|
2015-03-13 23:21:29 +03:00
|
|
|
|
2014-04-01 06:22:23 +04:00
|
|
|
def blockerhook(orig, repo, *args, **kwargs):
    """Extend the hidden-changeset blockers with revs carrying a remote name."""
    blocked = orig(repo)

    # only unhide when a command (e.g. log --remote) explicitly asked for it
    if not util.safehasattr(repo, '_unblockhiddenremotenames'):
        return blocked

    # add remotenames to blockers by looping over all names in our own cache
    changelog = repo.changelog
    for kind in repo._remotenames.keys():
        try:
            ns = repo.names['remote' + kind]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            blocked.update(changelog.rev(n) for n in ns.nodes(repo, name))

    return blocked
|
|
|
|
|
2015-01-29 01:03:19 +03:00
|
|
|
def exupdatefromremote(orig, ui, repo, remotemarks, path, trfunc, explicit=()):
    """Skip syncing local bookmarks from the remote unless configured to."""
    if not ui.configbool('remotenames', 'syncbookmarks', False):
        ui.debug('remotenames: skipped syncing local bookmarks\n')
        return
    return orig(ui, repo, remotemarks, path, trfunc, explicit)
|
2015-01-29 01:03:19 +03:00
|
|
|
|
2015-01-29 04:45:48 +03:00
|
|
|
def exclone(orig, ui, *args, **opts):
    """Wrap hg.clone.

    We may not want local bookmarks on clone... but we always want
    remotenames!  After the clone completes we record the source's names
    and, unless remotenames.syncbookmarks is set, remove the bookmarks the
    clone copied over.
    """
    srcpeer, dstpeer = orig(ui, *args, **opts)

    pullremotenames(dstpeer.local(), srcpeer)

    if not ui.configbool('remotenames', 'syncbookmarks', False):
        ui.debug('remotenames: removing cloned bookmarks\n')
        repo = dstpeer.local()
        wlock = repo.wlock()
        try:
            try:
                repo.vfs.unlink('bookmarks')
            # 'except E, v' is Python-2-only syntax; the 'as' form works on
            # Python 2.6+ and stays valid going forward
            except OSError as inst:
                # a clone without bookmarks has no file to remove
                if inst.errno != errno.ENOENT:
                    raise
        finally:
            wlock.release()

    return (srcpeer, dstpeer)
|
|
|
|
|
2015-02-10 20:18:13 +03:00
|
|
|
def excommit(orig, repo, *args, **opts):
    """Wrap localrepository.commit to keep the distance cache warm."""
    result = orig(repo, *args, **opts)
    precachedistance(repo)
    return result
|
|
|
|
|
2015-02-10 07:16:15 +03:00
|
|
|
def exupdate(orig, repo, *args, **opts):
    """Wrap hg.updaterepo to keep the distance cache warm."""
    result = orig(repo, *args, **opts)
    precachedistance(repo)
    return result
|
|
|
|
|
2015-05-10 04:42:34 +03:00
|
|
|
def exactivate(orig, repo, mark):
    """Wrap bookmark activation to keep the distance cache warm."""
    result = orig(repo, mark)
    precachedistance(repo)
    return result
|
|
|
|
|
2015-08-29 00:30:30 +03:00
|
|
|
def exconvertbookmarks(orig, source):
    """Make hg convert map remote bookmarks in the source to normal bookmarks
    in the target.

    This is useful for instance if you need to convert a repo from server A to
    server B. You clone the repo from A (now you have remote bookmarks),
    convert to a local version of B, and push those bookmarks to server B.
    """
    marks = orig(source)

    repo = source.repo
    namespace = 'remotebookmarks'
    if namespace in repo.names:
        ns = repo.names[namespace]
        for name in ns.listnames(repo):
            nodes = ns.nodes(repo, name)
            if nodes:
                # a local bookmark of the same name wins
                marks.setdefault(name, hex(nodes[0]))

    return marks
|
|
|
|
|
2010-01-04 07:37:45 +03:00
|
|
|
def reposetup(ui, repo):
    """Register the remotebookmarks/hoistednames/remotebranches namespaces.

    Loads the cached remote names from disk and exposes them through
    repo.names so log templates, revsets and name lookup can see them.
    Each namespace can be disabled via remotenames.bookmarks /
    remotenames.branches configuration.
    """
    # namespaces only make sense on a local repository
    if not repo.local():
        return

    loadremotenames(repo)
    ns = namespaces.namespace

    if ui.configbool('remotenames', 'bookmarks', True):
        mark2nodes = repo._remotenames.get('bookmarks')
        # invert the mapping so nodemap lookups are O(1); node is a list,
        # a bookmark points at a single node so we key on node[0]
        node2marks = {}
        for name, node in mark2nodes.iteritems():
            node2marks.setdefault(node[0], []).append(name)
        # NOTE: the lambdas close over mark2nodes/node2marks built above,
        # so the namespace reflects the state loaded at reposetup time
        remotebookmarkns = ns(
            'remotebookmarks',
            templatename='remotebookmarks',
            logname='bookmark',
            colorname='remotebookmark',
            listnames=lambda repo: mark2nodes.keys(),
            namemap=lambda repo, name: mark2nodes.get(name),
            nodemap=lambda repo, node: node2marks.get(node, []))
        repo.names.addnamespace(remotebookmarkns)

        # hoisting only works if there are remote bookmarks
        hoist = ui.config('remotenames', 'hoist', 'default')
        if hoist:
            # names are stored as "<path>/<bookmark>"; add the separator so
            # startswith() below matches whole path components only
            hoist += '/'

        # second check is redundant (hoist is non-empty once '/' was
        # appended) but kept for clarity/safety
        if hoist:
            hoist2nodes = {}
            node2hoists = {}
            for name, node in mark2nodes.iteritems():
                if name.startswith(hoist):
                    # strip the "<path>/" prefix: "default/foo" -> "foo"
                    name = name[len(hoist):]
                    hoist2nodes[name] = node
                    node2hoists.setdefault(node[0], []).append(name)
            hoistednamens = ns(
                'hoistednames',
                templatename='hoistednames',
                logname='hoistedname',
                colorname='hoistedname',
                listnames=lambda repo: hoist2nodes.keys(),
                namemap=lambda repo, name: hoist2nodes.get(name),
                nodemap=lambda repo, node: node2hoists.get(node, []))
            repo.names.addnamespace(hoistednamens)

    if ui.configbool('remotenames', 'branches', True):
        branch2nodes = repo._remotenames.get('branches')
        # a branch maps to many head nodes; each node belongs to one branch
        node2branch = {}
        for name, nodes in branch2nodes.iteritems():
            for node in nodes:
                node2branch[node] = [name]
        remotebranchns = ns(
            'remotebranches',
            templatename='remotebranches',
            logname='branch',
            colorname='remotebranch',
            listnames=lambda repo: branch2nodes.keys(),
            namemap=lambda repo, name: branch2nodes.get(name),
            nodemap=lambda repo, node: node2branch.get(node, []))
        repo.names.addnamespace(remotebranchns)
|
2014-12-17 09:23:41 +03:00
|
|
|
|
2015-03-12 00:57:32 +03:00
|
|
|
def _tracking(ui):
|
2015-03-12 22:53:08 +03:00
|
|
|
# omg default true
|
|
|
|
return ui.configbool('remotenames', 'tracking', True)
|
2015-03-12 00:57:32 +03:00
|
|
|
|
2015-03-12 01:38:19 +03:00
|
|
|
|
2015-04-06 21:11:58 +03:00
|
|
|
def exrebasecmd(orig, ui, repo, **opts):
    """Wrap rebase so a bare 'hg rebase' targets the tracked remote name."""
    dest = opts['dest']
    source = opts['source']
    revs = opts['rev']
    base = opts['base']
    cont = opts['continue']
    abort = opts['abort']
    explicit = dest or source or revs or base or cont or abort

    active = bmactive(repo)
    if active and not explicit:
        # rebase the active bookmark onto whatever it is tracking
        tracked = _readtracking(repo)
        if active in tracked:
            opts['dest'] = tracked[active]

    result = orig(ui, repo, **opts)
    precachedistance(repo)
    return result
|
2015-03-12 01:38:19 +03:00
|
|
|
|
2015-03-13 01:55:16 +03:00
|
|
|
def exstrip(orig, ui, repo, *args, **opts):
    """Wrap strip to keep the distance cache warm."""
    result = orig(ui, repo, *args, **opts)
    precachedistance(repo)
    return result
|
|
|
|
|
2015-03-13 02:50:27 +03:00
|
|
|
def exhistedit(orig, ui, repo, *args, **opts):
    """Wrap histedit to keep the distance cache warm."""
    result = orig(ui, repo, *args, **opts)
    precachedistance(repo)
    return result
|
|
|
|
|
2015-03-14 01:57:43 +03:00
|
|
|
def expaths(orig, ui, repo, *args, **opts):
    """allow adding and removing remote paths

    This is very hacky and only exists as an experimentation.

    With --delete NAME, remove the matching path line from .hg/hgrc and
    drop the stored remote names for it.  With --add NAME PATH, update the
    existing entry in place or append a new one.  Otherwise fall through
    to the normal paths command.
    """
    delete = opts.get('delete')
    add = opts.get('add')
    if delete:
        # find the first section and remote path that matches, and delete that
        foundpaths = False
        oldhgrc = repo.vfs.read('hgrc').splitlines(True)
        f = repo.vfs('hgrc', 'w')
        for line in oldhgrc:
            if '[paths]' in line:
                foundpaths = True
            # copy through every line except the one being deleted
            # (NOTE(review): this matches any line starting with the name
            # anywhere after [paths], not only inside that section)
            if not (foundpaths and line.strip().startswith(delete)):
                f.write(line)
        f.close()
        saveremotenames(repo, delete)
        precachedistance(repo)
        return

    if add:
        # find the first section that matches, then look for previous value; if
        # not found add a new entry
        foundpaths = False
        oldhgrc = repo.vfs.read('hgrc').splitlines(True)
        f = repo.vfs('hgrc', 'w')
        done = False
        for line in oldhgrc:
            if '[paths]' in line:
                foundpaths = True
            if foundpaths and line.strip().startswith(add):
                # rewrite the existing entry in place
                done = True
                line = '%s = %s\n' % (add, args[0])
            f.write(line)

        # did we not find an existing path?
        if not done:
            done = True
            f.write("%s = %s\n" % (add, args[0]))

        f.close()
        return

    # deliberately drop **opts: the wrapped paths command doesn't know
    # about --delete/--add
    return orig(ui, repo, *args)
|
|
|
|
|
2015-01-17 01:48:56 +03:00
|
|
|
def extsetup(ui):
    """Wrap the exchange/bookmark machinery and the user-facing commands.

    Function wrapping happens unconditionally; command options are added
    through afterloaded() so other extensions (eg, pushrebase, hggit) that
    also add options get a chance to run first and duplicates are skipped.
    """
    extensions.wrapfunction(exchange, 'push', expush)
    extensions.wrapfunction(exchange, 'pull', expull)
    extensions.wrapfunction(repoview, '_getdynamicblockers', blockerhook)
    extensions.wrapfunction(bookmarks, 'updatefromremote', exupdatefromremote)
    # bookmarks.setcurrent was renamed to activate in later Mercurial
    if util.safehasattr(bookmarks, 'activate'):
        extensions.wrapfunction(bookmarks, 'activate', exactivate)
    else:
        extensions.wrapfunction(bookmarks, 'setcurrent', exactivate)
    extensions.wrapfunction(hg, 'clone', exclone)
    extensions.wrapfunction(hg, 'updaterepo', exupdate)
    extensions.wrapfunction(localrepo.localrepository, 'commit', excommit)

    extensions.wrapfunction(converthg.mercurial_source, 'getbookmarks',
                            exconvertbookmarks)

    if _tracking(ui):
        try:
            rebase = extensions.find('rebase')
            extensions.wrapcommand(rebase.cmdtable, 'rebase', exrebasecmd)
        except KeyError:
            # rebase isn't on, that's fine
            pass

    entry = extensions.wrapcommand(commands.table, 'log', exlog)
    entry[1].append(('', 'remote', None, 'show remote names even if hidden'))

    entry = extensions.wrapcommand(commands.table, 'paths', expaths)
    entry[1].append(('d', 'delete', '', 'delete remote path', 'NAME'))
    entry[1].append(('a', 'add', '', 'add remote path', 'NAME PATH'))

    extensions.wrapcommand(commands.table, 'pull', expullcmd)

    entry = extensions.wrapcommand(commands.table, 'clone', exclonecmd)
    entry[1].append(('', 'mirror', None, 'sync all bookmarks'))

    exchange.pushdiscoverymapping['bookmarks'] = expushdiscoverybookmarks

    templatekw.keywords['remotenames'] = remotenameskw

    try:
        strip = extensions.find('strip')
        if strip:
            extensions.wrapcommand(strip.cmdtable, 'strip', exstrip)
    except KeyError:
        # strip isn't on
        pass

    try:
        histedit = extensions.find('histedit')
        if histedit:
            extensions.wrapcommand(histedit.cmdtable, 'histedit', exhistedit)
    except KeyError:
        # histedit isn't on
        pass

    # wrap the commands immediately so extensions loading after us (eg,
    # pushrebase) can wrap on top of our wrappers; the option additions
    # are deferred to afterload below
    bookcmd = extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks)
    branchcmd = extensions.wrapcommand(commands.table, 'branches', exbranches)
    pushcmd = extensions.wrapcommand(commands.table, 'push', expushcmd)

    if _tracking(ui):
        bookcmd[1].append(('t', 'track', '',
                           'track this bookmark or remote name', 'BOOKMARK'))
        bookcmd[1].append(('u', 'untrack', None,
                           'remove tracking for this bookmark', 'BOOKMARK'))

    newopts = [
        (bookcmd, ('a', 'all', None, 'show both remote and local bookmarks')),
        (bookcmd, ('', 'remote', None, 'show only remote bookmarks')),
        (branchcmd, ('a', 'all', None, 'show both remote and local branches')),
        (branchcmd, ('', 'remote', None, 'show only remote branches')),
        (pushcmd, ('t', 'to', '', 'push revs to this bookmark', 'BOOKMARK')),
        (pushcmd, ('d', 'delete', '', 'delete remote bookmark', 'BOOKMARK')),
    ]

    def afterload(loaded):
        if loaded:
            raise ValueError('nonexistant extension should not be loaded')

        for cmd, newopt in newopts:
            # avoid adding duplicate options (another extension, eg hggit,
            # may have added the same long name already)
            skip = False
            for opt in cmd[1]:
                if opt[1] == newopt[1]:
                    skip = True
            if not skip:
                cmd[1].append(newopt)

    # 'nonexistant' never loads, so afterload(False) runs after all real
    # extensions have been set up
    extensions.afterloaded('nonexistant', afterload)
|
2015-06-30 09:37:33 +03:00
|
|
|
|
2015-02-10 20:15:37 +03:00
|
|
|
def exlog(orig, ui, repo, *args, **opts):
    """Wrap the log command.

    When --remote is given, set the flag that turns on the dynamic
    blockerhook so hidden changesets carrying a remote name become visible
    for the duration of this log invocation.
    """
    if opts.get('remote'):
        repo._unblockhiddenremotenames = True
    try:
        res = orig(ui, repo, *args, **opts)
    finally:
        # always reset the flag, even if log raised, so later commands in
        # the same process don't unexpectedly see hidden changesets
        if opts.get('remote'):
            repo._unblockhiddenremotenames = False
    return res
|
|
|
|
|
2015-02-12 07:09:29 +03:00
|
|
|
# Module-level state used to pass information from expushcmd into the
# push discovery hook (expushdiscoverybookmarks), since the discovery API
# offers no way to thread extra arguments through exchange.push.
# _pushto: True while a 'push --to BOOKMARK' is in flight.
# _delete: the bookmark name given to 'push --delete', else None.
_pushto = False
_delete = None
|
2015-02-10 22:52:19 +03:00
|
|
|
|
|
|
|
def expushdiscoverybookmarks(pushop):
    """Replacement bookmark push-discovery step.

    Handles three cases, steered by the module globals set in expushcmd:
    - _delete: schedule deletion of the named remote bookmark;
    - plain push (_pushto false): run the stock discovery, then refuse to
      create new anonymous heads unless forced or configured otherwise;
    - push --to (_pushto true): validate that the single pushed rev is a
      fast-forward of the remote bookmark (or forced) and schedule the
      bookmark move.
    """
    repo = pushop.repo.unfiltered()
    remotemarks = pushop.remote.listkeys('bookmarks')
    force = pushop.force

    if _delete:
        if _delete not in remotemarks:
            # BUGFIX: the format argument was missing, so the message
            # printed a literal '%s' instead of the bookmark name
            raise util.Abort(_('remote bookmark %s does not exist') % _delete)
        # an empty new value tells the wire protocol to delete the bookmark
        pushop.outbookmarks.append([_delete, remotemarks[_delete], ''])
        return exchange._pushdiscoverybookmarks(pushop)

    if not _pushto:
        ret = exchange._pushdiscoverybookmarks(pushop)
        if not (repo.ui.configbool('remotenames', 'pushanonheads') or
                force):
            # check to make sure we don't push an anonymous head
            if pushop.revs:
                revs = set(pushop.revs)
            else:
                revs = set(repo.lookup(r) for r in repo.revs('head()'))
            revs -= set(pushop.remoteheads)
            # find heads that don't have a bookmark going with them
            for bookmark in pushop.bookmarks:
                rev = repo.lookup(bookmark)
                if rev in revs:
                    revs.remove(rev)
            # remove heads that advance bookmarks (old mercurial behavior)
            for bookmark, old, new in pushop.outbookmarks:
                rev = repo.lookup(new)
                if rev in revs:
                    revs.remove(rev)

            # we use known() instead of lookup() due to lookup throwing an
            # aborting error causing the connection to close
            anonheads = []
            knownlist = pushop.remote.known(revs)
            for node, known in zip(revs, knownlist):
                obs = repo[node].obsolete()
                closes = repo[node].closesbranch()
                if known or obs or closes:
                    continue
                anonheads.append(short(node))

            if anonheads:
                msg = _("push would create new anonymous heads (%s)")
                hint = _("use --force to override this warning")
                raise util.Abort(msg % ', '.join(sorted(anonheads)),
                                 hint=hint)
        return ret

    # --to push: expushcmd guarantees exactly one bookmark and one rev
    bookmark = pushop.bookmarks[0]
    rev = pushop.revs[0]

    # allow new bookmark only if force is True
    old = ''
    if bookmark in remotemarks:
        old = remotemarks[bookmark]
    elif not force:
        msg = _('not creating new bookmark')
        hint = _('use --force to create a new bookmark')
        raise util.Abort(msg, hint=hint)

    # allow non-ff only if force is True
    allownonff = repo.ui.configbool('remotenames', 'allownonfastforward')
    if not force and old != '' and not allownonff:
        if old not in repo:
            msg = _('remote bookmark revision is not in local repo')
            hint = _('pull and merge or rebase or use --force')
            raise util.Abort(msg, hint=hint)
        # fast-forward means the pushed rev is a descendant (or obsolete
        # successor) of the remote bookmark's current position
        foreground = obsolete.foreground(repo, [repo.lookup(old)])
        if repo[rev].node() not in foreground:
            msg = _('pushed rev is not in the foreground of remote bookmark')
            hint = _('use --force flag to complete non-fast-forward update')
            raise util.Abort(msg, hint=hint)
        if repo[old] == repo[rev]:
            repo.ui.warn(_('remote bookmark already points at pushed rev\n'))
            return

    pushop.outbookmarks.append((bookmark, old, hex(rev)))
|
|
|
|
|
2015-03-14 01:50:02 +03:00
|
|
|
def _pushrevs(repo, ui, rev):
|
2015-04-02 01:44:32 +03:00
|
|
|
"""Given configuration and default rev, return the revs to be pushed"""
|
2015-03-14 01:50:02 +03:00
|
|
|
pushrev = ui.config('remotenames', 'pushrev')
|
2015-03-19 19:48:50 +03:00
|
|
|
if pushrev == '!':
|
|
|
|
return []
|
|
|
|
elif pushrev:
|
2015-04-02 01:44:32 +03:00
|
|
|
return [repo[pushrev].rev()]
|
2015-03-14 01:50:02 +03:00
|
|
|
if rev:
|
2015-04-02 01:44:32 +03:00
|
|
|
return [repo[rev].rev()]
|
2015-03-14 01:50:02 +03:00
|
|
|
return []
|
2015-03-10 10:06:51 +03:00
|
|
|
|
2015-03-14 03:17:49 +03:00
|
|
|
def expullcmd(orig, ui, repo, source="default", **opts):
    """Translate a renamed path back to its configured name before pulling."""
    reverse = dict((v, k) for k, v in _getrenames(ui).iteritems())
    return orig(ui, repo, reverse.get(source, source), **opts)
|
|
|
|
|
2015-02-10 22:52:19 +03:00
|
|
|
def expushcmd(orig, ui, repo, dest=None, **opts):
    """Wrapped push command: adds --to/--delete and tracking-aware defaults.

    Resolves the destination through path renames and bookmark tracking,
    then either delegates to the stock push or, for --to, performs a
    single-rev bookmark push via exchange.push.
    """
    # needed for discovery method
    global _pushto, _delete

    _delete = opts.get('delete')
    if _delete:
        # --delete is exclusive with anything that selects what to push
        flag = None
        for f in ('to', 'bookmark', 'branch', 'rev'):
            if opts.get(f):
                flag = f
                break
        if flag:
            msg = _('do not specify --delete and '
                    '--%s at the same time') % flag
            raise util.Abort(msg)
        # we want to skip pushing any changesets while deleting a remote
        # bookmark, so we send the null revision
        opts['rev'] = ['null']
        return orig(ui, repo, dest, **opts)

    revs = opts.get('rev')
    to = opts.get('to')

    paths = dict((path, url) for path, url in ui.configitems('paths'))
    revrenames = dict((v, k) for k, v in _getrenames(ui).iteritems())

    origdest = dest
    # with no explicit destination, push the active bookmark to whatever
    # it is tracking
    if not dest and not to and not revs and _tracking(ui):
        current = bmactive(repo)
        tracking = _readtracking(repo)
        # print "tracking on %s %s" % (current, tracking)
        if current and current in tracking:
            track = tracking[current]
            path, book = splitremotename(track)
            # un-rename a path, if needed
            path = revrenames.get(path, path)
            if book and path in paths:
                dest = path
                to = book

    # un-rename passed path
    dest = revrenames.get(dest, dest)

    # if dest was renamed to default but we aren't specifically requesting
    # to push to default, change dest to default-push, if available
    if not origdest and dest == 'default' and 'default-push' in paths:
        dest = 'default-push'

    try:
        # hgsubversion and hggit do funcky things on push. Just call it
        # directly
        path = paths[dest]
        if path.startswith('svn+') or path.startswith('git+'):
            return orig(ui, repo, dest, **opts)
    except KeyError:
        # dest isn't a configured path name; let the stock push resolve it
        pass

    if not to:
        if ui.configbool('remotenames', 'forceto', False):
            msg = _('must specify --to when pushing')
            hint = _('see configuration option %s') % 'remotenames.forceto'
            raise util.Abort(msg, hint=hint)

        if not revs:
            opts['rev'] = _pushrevs(repo, ui, None)

        return orig(ui, repo, dest, **opts)

    if opts.get('bookmark'):
        msg = _('do not specify --to/-t and --bookmark/-B at the same time')
        raise util.Abort(msg)
    if opts.get('branch'):
        msg = _('do not specify --to/-t and --branch/-b at the same time')
        raise util.Abort(msg)

    # --to pushes exactly one rev; default to '.' when none was given
    if revs:
        revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
    else:
        revs = _pushrevs(repo, ui, '.')
    if len(revs) != 1:
        msg = _('--to requires exactly one rev to push')
        hint = _('use --rev BOOKMARK or omit --rev for current commit (.)')
        raise util.Abort(msg, hint=hint)
    rev = revs[0]

    # signal expushdiscoverybookmarks that this is a --to push
    _pushto = True

    # big can o' copypasta from exchange.push
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    try:
        other = hg.peer(repo, opts, dest)
    except error.RepoError:
        if dest == "default-push":
            hint = _('see the "path" section in "hg help config"')
            raise util.Abort(_("default repository not configured!"),
                             hint=hint)
        else:
            raise

    # all checks pass, go for it!
    node = repo.lookup(rev)
    ui.status(_('pushing rev %s to destination %s bookmark %s\n') % (
              short(node), dest, to))

    # TODO: subrepo stuff

    force = opts.get('force')
    # NB: despite the name, 'revs' doesn't work if it's a numeric rev
    pushop = exchange.push(repo, other, force, revs=[node], bookmarks=(to,))

    # translate the pushoperation results into a command exit status
    result = not pushop.cgresult
    if pushop.bkresult is not None:
        if pushop.bkresult == 2:
            result = 2
        elif not result and pushop.bkresult:
            result = 2

    _pushto = False
    return result
|
|
|
|
|
2015-03-14 04:02:21 +03:00
|
|
|
def exclonecmd(orig, ui, *args, **opts):
    """Wrapped clone command: --mirror syncs all bookmarks from the source.

    BUGFIX: propagate the wrapped command's return value so a failing
    clone exits with a non-zero status instead of silently returning None.
    """
    if opts['mirror']:
        ui.setconfig('remotenames', 'syncbookmarks', True, 'mirror-clone')
    return orig(ui, *args, **opts)
|
|
|
|
|
2015-01-29 02:53:49 +03:00
|
|
|
def exbranches(orig, ui, repo, *args, **opts):
    """Wrapped branches command: --all/--remote also list remote branches.

    Without --remote the stock output is printed first; then, for --all or
    --remote, the remotebranches namespace is rendered in descending rev
    order through a formatter, mimicking the stock branches layout.
    """
    if not opts.get('remote'):
        orig(ui, repo, *args, **opts)

    if opts.get('all') or opts.get('remote'):
        # exit early if namespace doesn't even exist
        namespace = 'remotebranches'
        if namespace not in repo.names:
            return

        ns = repo.names[namespace]
        label = 'log.' + ns.colorname
        fm = ui.formatter('branches', opts)

        # it seems overkill to hide displaying hidden remote branches
        repo = repo.unfiltered()

        # create a sorted by descending rev list
        revs = set()
        for name in ns.listnames(repo):
            for n in ns.nodes(repo, name):
                revs.add(repo.changelog.rev(n))

        for r in sorted(revs, reverse=True):
            ctx = repo[r]
            for name in ns.names(repo, ctx.node()):
                fm.startitem()
                # pad so the rev:node column lines up, as stock branches does
                padsize = max(31 - len(str(r)) - encoding.colwidth(name), 0)

                tmplabel = label
                if ctx.obsolete():
                    tmplabel = tmplabel + ' changeset.obsolete'
                # NOTE(review): the name is written with the plain label
                # while only the rev:node column gets the obsolete style —
                # confirm this asymmetry is intended
                fm.write(ns.colorname, '%s', name, label=label)
                fmt = ' ' * padsize + ' %d:%s'
                fm.condwrite(not ui.quiet, 'rev node', fmt, r,
                             fm.hexfunc(ctx.node()), label=tmplabel)
                fm.plain('\n')
        fm.end()
|
|
|
|
|
2015-03-12 01:16:53 +03:00
|
|
|
def _readtracking(repo):
|
|
|
|
tracking = {}
|
|
|
|
try:
|
2015-03-12 01:38:19 +03:00
|
|
|
for line in repo.vfs.read('bookmarks.tracking').strip().split('\n'):
|
|
|
|
try:
|
|
|
|
book, track = line.strip().split(' ')
|
|
|
|
tracking[book] = track
|
|
|
|
except ValueError:
|
|
|
|
# corrupt file, ignore entry
|
|
|
|
pass
|
2015-03-12 01:16:53 +03:00
|
|
|
except IOError:
|
|
|
|
pass
|
|
|
|
return tracking
|
|
|
|
|
|
|
|
def _writetracking(repo, tracking):
|
|
|
|
data = ''
|
|
|
|
for book, track in tracking.iteritems():
|
|
|
|
data += '%s %s\n' % (book, track)
|
2015-03-12 22:53:08 +03:00
|
|
|
repo.vfs.write('bookmarks.tracking', data)
|
2015-03-12 01:16:53 +03:00
|
|
|
|
2015-03-14 04:02:03 +03:00
|
|
|
def _removetracking(repo, bookmarks):
    """Drop the tracking entries for the given bookmarks.

    The tracking file is rewritten only when at least one entry was
    actually removed.
    """
    tracking = _readtracking(repo)
    dirty = False
    for mark in bookmarks:
        if mark in tracking:
            del tracking[mark]
            dirty = True
    if dirty:
        _writetracking(repo, tracking)
|
|
|
|
|
2015-01-29 02:53:49 +03:00
|
|
|
def exbookmarks(orig, ui, repo, *args, **opts):
    """Bookmark output is sorted by bookmark name.

    This has the side benefit of grouping all remote bookmarks by remote name.

    """
    delete = opts.get('delete')
    rename = opts.get('rename')
    inactive = opts.get('inactive')
    remote = opts.get('remote')
    track = opts.get('track')
    untrack = opts.get('untrack')

    # bookmark names the administrator has banned via configuration
    disallowed = set(ui.configlist('remotenames', 'disallowedbookmarks'))

    # creating/renaming to a disallowed name is refused; deleting one is fine
    if not delete:
        for name in args:
            if name in disallowed:
                msg = _("bookmark '%s' not allowed by configuration")
                raise util.Abort(msg % name)

    if untrack:
        if track:
            msg = _('do not specify --untrack and --track at the same time')
            raise util.Abort(msg)
        _removetracking(repo, args)
        return

    # any mutating invocation: defer to the wrapped command, then keep the
    # tracking file in sync with what it did
    if delete or rename or args or inactive:
        if delete and track:
            # fixed typo in the original message ('specifiy')
            msg = _('do not specify --track and --delete at the same time')
            raise util.Abort(msg)

        ret = orig(ui, repo, *args, **opts)

        oldtracking = _readtracking(repo)
        tracking = dict(oldtracking)

        if rename and not track:
            # a plain rename carries any existing tracking over to the
            # new name(s)
            if rename in tracking:
                tracked = tracking[rename]
                del tracking[rename]
                for arg in args:
                    tracking[arg] = tracked

        if track:
            for arg in args:
                tracking[arg] = track

        if delete:
            for arg in args:
                if arg in tracking:
                    del tracking[arg]

        if tracking != oldtracking:
            _writetracking(repo, tracking)
            # update the cache
            precachedistance(repo)

        return ret

    # plain listing: local bookmarks unless --remote, remote ones for
    # --remote or --all
    if not remote:
        displaylocalbookmarks(ui, repo, opts)

    if remote or opts.get('all'):
        displayremotebookmarks(ui, repo, opts)
|
2015-01-29 02:27:57 +03:00
|
|
|
|
2015-04-28 03:16:11 +03:00
|
|
|
def displaylocalbookmarks(ui, repo, opts):
    """List local bookmarks through a 'bookmarks' formatter.

    Mirrors `hg bookmarks` output and additionally, in verbose mode, shows
    what each bookmark tracks plus its ahead/behind distance.  Distances
    computed on the fly are written back to the on-disk distance cache
    before returning.
    """
    # copy pasta from commands.py; need to patch core
    fm = ui.formatter('bookmarks', opts)
    hexfn = fm.hexfunc
    marks = repo._bookmarks
    if len(marks) == 0 and not fm:
        ui.status(_("no bookmarks set\n"))

    # tracking map and cached distances feed the verbose-mode annotations
    tracking = _readtracking(repo)
    distances = readdistancecache(repo)
    nq = not ui.quiet

    for bmark, n in sorted(marks.iteritems()):
        current = bmactive(repo)
        if bmark == current:
            # active bookmark gets a '*' marker and highlight labels
            prefix, label = '*', 'bookmarks.current bookmarks.active'
        else:
            prefix, label = ' ', ''

        fm.startitem()
        if nq:
            fm.plain(' %s ' % prefix, label=label)
        fm.write('bookmark', '%s', bmark, label=label)
        # pad so the rev:node column lines up at width 25
        pad = " " * (25 - encoding.colwidth(bmark))
        rev = repo.changelog.rev(n)
        h = hexfn(n)
        fm.condwrite(nq, 'rev node', pad + ' %d:%s', rev, h, label=label)
        if ui.verbose and bmark in tracking:
            tracked = tracking[bmark]
            # prefer the cached distance; compute it only on a cache miss
            if bmark in distances:
                distance = distances[bmark]
            else:
                distance = calculatenamedistance(repo, bmark, tracked)
            if tracked:
                ab = ''
                # (0, 0) means in sync; (None, None) means not computable
                if distance != (0, 0) and distance != (None, None):
                    ab = ': %s ahead, %s behind' % distance
                pad = " " * (25 - encoding.colwidth(str(rev)) -
                             encoding.colwidth(str(h)))
                fm.write('bookmark', pad + '[%s%s]', tracked, ab, label=label)
                # remember freshly computed distances for the cache write below
                if distance != (None, None):
                    distances[bmark] = distance
        fm.data(active=(bmark == current))
        fm.plain('\n')
    fm.end()

    # write distance cache
    writedistancecache(repo, distances)
|
|
|
|
|
2015-04-28 03:12:54 +03:00
|
|
|
def displayremotebookmarks(ui, repo, opts):
    """List all remote bookmarks through a 'bookmarks' formatter."""
    nsname = 'remotebookmarks'
    if nsname not in repo.names:
        return
    ns = repo.names[nsname]
    color = ns.colorname
    label = 'log.' + color

    fm = ui.formatter('bookmarks', opts)

    # it seems overkill to hide displaying hidden remote bookmarks
    repo = repo.unfiltered()

    quiet = ui.quiet
    for name in sorted(ns.listnames(repo)):
        node = ns.nodes(repo, name)[0]
        ctx = repo[node]
        fm.startitem()

        if not quiet:
            fm.plain('   ')

        # obsolete changesets get an extra label so they can be styled
        tmplabel = label
        if ctx.obsolete():
            tmplabel += ' changeset.obsolete'

        fm.write(color, '%s', name, label=label)
        padding = ' ' * max(25 - encoding.colwidth(name), 0)
        fm.condwrite(not quiet, 'rev node', padding + ' %d:%s', ctx.rev(),
                     fm.hexfunc(node), label=tmplabel)
        fm.plain('\n')
    fm.end()
|
2015-01-29 02:27:57 +03:00
|
|
|
|
2014-03-21 22:37:03 +04:00
|
|
|
def activepath(ui, remote):
    """Map a peer or URL `remote` back to its [paths] alias.

    Returns the best-matching path name from the user's [paths]
    configuration (after applying any remotenames.rename.* aliases), or
    '' when no configured path matches.  Among multiple matches the
    result is stable: names other than 'default'/'default-push' win,
    then the shortest name, with alphabetical order as the tiebreak.
    """
    local = None
    try:
        # local peers expose the underlying repo; used below to decide
        # whether to compare filesystem paths or URLs
        local = remote.local()
    except AttributeError:
        pass

    # determine the remote path from the repo, if possible; else just
    # use the string given to us
    rpath = remote
    if local:
        rpath = getattr(remote, 'root', None)
        if rpath is None:
            # Maybe a localpeer? (hg@1ac628cd7113, 2.3)
            rpath = getattr(getattr(remote, '_repo', None),
                            'root', None)
    elif not isinstance(remote, str):
        try:
            rpath = remote._url
        except AttributeError:
            # NOTE(review): this takes the `url` attribute without calling
            # it -- on modern peers url is a method; confirm which peer
            # types reach this branch
            rpath = remote.url

    candidates = []
    for path, uri in ui.configitems('paths'):
        uri = ui.expandpath(expandscheme(ui, uri))
        if local:
            # normalize local paths so symlinked checkouts still match
            uri = os.path.realpath(uri)
        else:
            if uri.startswith('http'):
                # strip credentials from the URI before comparing
                try:
                    uri = util.url(uri).authinfo()[0]
                except AttributeError:
                    uri = url.getauthinfo(uri)[0]
        uri = uri.rstrip('/')
        # guard against hgsubversion nonsense
        if not isinstance(rpath, basestring):
            continue
        rpath = rpath.rstrip('/')
        if uri == rpath:
            candidates.append(path)

    if not candidates:
        return ''

    # be stable under different orderings of paths in config files
    # prefer any name other than 'default' and 'default-push' if available
    # prefer shortest name of remaining names, and break ties by alphabetizing
    cset = set(candidates)
    cset.discard('default')
    cset.discard('default-push')
    if cset:
        candidates = list(cset)

    candidates.sort() # alphabetical
    candidates.sort(key=len) # sort is stable so first will be the correct one
    bestpath = candidates[0]

    # apply any user-configured rename.<path> aliases
    renames = _getrenames(ui)
    realpath = renames.get(bestpath, bestpath)
    return realpath
|
2014-03-21 22:34:32 +04:00
|
|
|
|
2015-03-03 10:22:51 +03:00
|
|
|
# memoization
|
|
|
|
_renames = None
|
2015-03-12 10:06:11 +03:00
|
|
|
def _getrenames(ui):
|
2015-03-03 10:22:51 +03:00
|
|
|
global _renames
|
|
|
|
if _renames is None:
|
|
|
|
_renames = {}
|
|
|
|
for k, v in ui.configitems('remotenames'):
|
|
|
|
if k.startswith('rename.'):
|
|
|
|
_renames[k[7:]] = v
|
|
|
|
return _renames
|
|
|
|
|
2014-03-21 22:34:32 +04:00
|
|
|
def expandscheme(ui, uri):
    '''For a given uri, expand the scheme for it'''
    # schemes whose prefix matches this uri (usually zero or one)
    # NOTE(review): `schemes` is not among the imports visible at the top
    # of this file -- presumably the schemes extension module is imported
    # elsewhere; confirm
    urischemes = [s for s in schemes.schemes.iterkeys()
                  if uri.startswith('%s://' % s)]
    for s in urischemes:
        # TODO: refactor schemes so we don't
        # duplicate this logic
        ui.note(_('performing schemes expansion with '
                  'scheme %s\n') % s)
        scheme = hg.schemes[s]
        # split the remainder into at most scheme.parts template slots,
        # keeping any extra path as a tail appended after expansion
        parts = uri.split('://', 1)[1].split('/', scheme.parts)
        if len(parts) > scheme.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = ''
        # map '1'..'N' to the path components for the scheme template
        ctx = dict((str(i + 1), v) for i, v in enumerate(parts))
        uri = ''.join(scheme.templater.process(scheme.url, ctx)) + tail
    return uri
|
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def splitremotename(remote):
    """Split 'remote/name' into (remote, name).

    When there is no '/' the whole string is the remote and name is ''.
    """
    head, _sep, tail = remote.partition('/')
    return head, tail
|
2014-03-21 22:34:32 +04:00
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def joinremotename(remote, ref):
    """Join a remote path and a ref into 'remote/ref'.

    A falsy ref yields the remote unchanged.
    """
    return '%s/%s' % (remote, ref) if ref else remote
|
|
|
|
|
2015-07-14 01:34:10 +03:00
|
|
|
def shareawarevfs(repo):
    """Return the vfs where per-store files live.

    For a shared repository that is a vfs rooted at the shared path;
    otherwise it is simply repo.vfs.
    """
    return scmutil.vfs(repo.sharedpath) if repo.shared() else repo.vfs
|
|
|
|
|
2015-01-14 03:28:01 +03:00
|
|
|
def readremotenames(repo):
    """Yield (node, nametype, remote, rname) tuples from the remotenames file.

    `nametype` is 'branches' or 'bookmarks', or None when an old-format
    entry cannot be classified.  Old entries that recorded only an alias
    and no name are skipped entirely.
    """
    rfile = shareawarevfs(repo).join('remotenames')
    # exit early if there is nothing to do
    if not os.path.exists(rfile):
        return

    # needed to heuristically determine if a file is in the old format
    branches = repo.names['branches'].listnames(repo)
    bookmarks = repo.names['bookmarks'].listnames(repo)

    # use a context manager so the file is closed even when the caller
    # abandons the generator before exhausting it (the original only
    # closed after a complete iteration, leaking the handle otherwise)
    with open(rfile) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            nametype = None

            node, name = line.split(' ', 1)

            # check for nametype being written into the file format
            if ' ' in name:
                nametype, name = name.split(' ', 1)

            remote, rname = splitremotename(name)

            # skip old data that didn't write the name (only wrote the alias)
            if not rname:
                continue

            # old format didn't save the nametype, so check for the name in
            # branches and bookmarks
            if nametype is None:
                if rname in branches:
                    nametype = 'branches'
                elif rname in bookmarks:
                    nametype = 'bookmarks'

            yield node, nametype, remote, rname
|
|
|
|
|
|
|
|
def loadremotenames(repo):
    """Read the remotenames file and cache it on the repo.

    Builds {'bookmarks': {name: [nodes]}, 'branches': {name: [nodes]}}
    and stores it as repo._remotenames.
    """
    remotenames = {'bookmarks': {}, 'branches': {}}

    aliasdefault = repo.ui.configbool('remotenames', 'alias.default')

    for node, nametype, remote, rname in readremotenames(repo):
        # handle alias_default here: 'remote/default' collapses to 'remote'
        if remote != "default" and rname == "default" and aliasdefault:
            name = remote
        else:
            name = joinremotename(remote, rname)

        # if the node doesn't exist, skip it
        try:
            ctx = repo[node]
        except error.RepoLookupError:
            continue

        # only mark as remote if the head changeset isn't marked closed
        if not ctx.extra().get('close'):
            remotenames[nametype].setdefault(name, []).append(ctx.node())

    repo._remotenames = remotenames
|
2014-03-31 21:34:43 +04:00
|
|
|
|
2015-03-03 12:08:44 +03:00
|
|
|
def transition(repo, ui):
    """
    Help with transitioning to using a remotenames workflow.

    Allows deleting matching local bookmarks defined in a config file:

    [remotenames]
    transitionbookmarks = master
        stable

    Optionally prints a one-time message configured as
    remotenames.transitionmessage.
    """
    transmarks = ui.configlist('remotenames', 'transitionbookmarks')
    localmarks = repo._bookmarks
    changed = False
    for mark in transmarks:
        if mark in localmarks:
            del localmarks[mark]
            changed = True
    # only rewrite the bookmark store when something was actually removed
    # (the original wrote unconditionally)
    if changed:
        localmarks.write()

    message = ui.config('remotenames', 'transitionmessage')
    if message:
        ui.warn(message + '\n')
|
|
|
|
|
2015-03-14 02:57:19 +03:00
|
|
|
def saveremotenames(repo, remote, branches=None, bookmarks=None):
    """Rewrite the remotenames file, replacing all entries for `remote`.

    Entries belonging to other remotes are preserved.  `branches` maps a
    branch name to a list of binary nodes; `bookmarks` maps a bookmark
    name to a hex node.  The in-memory repo._remotenames cache is updated
    to drop the old entries for `remote` as well.

    Note: the original signature used mutable default arguments ({});
    None-with-fallback is equivalent for callers and avoids that trap.
    """
    if branches is None:
        branches = {}
    if bookmarks is None:
        bookmarks = {}
    wlock = repo.wlock()
    try:
        # delete old files
        try:
            repo.vfs.unlink('remotedistance')
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise

        # first write ever: offer to clean up transitional local bookmarks
        if not shareawarevfs(repo).exists('remotenames'):
            transition(repo, repo.ui)

        # while we're removing old paths, also update _remotenames
        for btype, rmap in repo._remotenames.iteritems():
            for rname in rmap.copy():
                if remote == splitremotename(rname)[0]:
                    del repo._remotenames[btype][rname]

        # read in all data first before opening file to write
        olddata = set(readremotenames(repo))

        f = shareawarevfs(repo)('remotenames', 'w')

        # only update the given 'remote'; iterate over old data and re-save it
        for node, nametype, oldremote, rname in olddata:
            if oldremote != remote:
                n = joinremotename(oldremote, rname)
                f.write('%s %s %s\n' % (node, nametype, n))

        for branch, nodes in branches.iteritems():
            for n in nodes:
                rname = joinremotename(remote, branch)
                # NOTE(review): `hex` here is presumably mercurial.node.hex,
                # imported outside this view -- confirm
                f.write('%s branches %s\n' % (hex(n), rname))
        for bookmark, n in bookmarks.iteritems():
            f.write('%s bookmarks %s\n' % (n, joinremotename(remote, bookmark)))
        f.close()

    finally:
        wlock.release()
|
2014-03-21 20:56:00 +04:00
|
|
|
|
2015-03-26 20:45:07 +03:00
|
|
|
def calculatedistance(repo, fromrev, torev):
    """
    Return the (ahead, behind) distance between `fromrev` and `torev`.
    The returned tuple will contain ints if calculated, Nones otherwise.
    """
    if not repo.ui.configbool('remotenames', 'calculatedistance', True):
        return (None, None)

    # only(a, b) is the set of revs reachable from a but not from b
    onlyspec = 'only(%d, %d)'
    ahead = len(repo.revs(onlyspec % (fromrev, torev)))
    behind = len(repo.revs(onlyspec % (torev, fromrev)))
    return (ahead, behind)
|
|
|
|
|
2015-04-28 03:26:59 +03:00
|
|
|
def calculatenamedistance(repo, fromname, toname):
    """
    Similar to calculatedistance, but accepts names such as local and remote
    bookmarks, and will return (None, None) if any of the names do not resolve
    in the given repository.
    """
    if not (fromname and fromname in repo and toname in repo):
        return (None, None)
    return calculatedistance(repo, repo[fromname].rev(), repo[toname].rev())
|
|
|
|
|
2015-04-28 00:50:06 +03:00
|
|
|
def writedistancecache(repo, distance):
    """Best-effort write of {bookmark: (ahead, behind)} to cache/distance.

    I/O failures are silently ignored: the cache is only an optimization.
    """
    try:
        f = repo.vfs('cache/distance', 'w')
        try:
            for k, v in distance.items():
                f.write('%s %d %d\n' % (k, v[0], v[1]))
        finally:
            # the original never closed the handle, leaking it on every call
            f.close()
    except (IOError, OSError):
        pass
|
|
|
|
|
2015-04-28 00:50:06 +03:00
|
|
|
def readdistancecache(repo):
    """Read {bookmark: (ahead, behind)} from cache/distance.

    Returns an empty dict when the cache is missing or unreadable;
    corrupt lines are skipped.
    """
    distances = {}
    try:
        for line in repo.vfs.read('cache/distance').splitlines():
            parts = line.rsplit(' ', 2)
            try:
                # IndexError is caught too: a line with fewer than three
                # fields previously escaped the ValueError-only handler
                # and crashed the read
                distances[parts[0]] = (int(parts[1]), int(parts[2]))
            except (ValueError, IndexError):
                # corrupt entry, ignore line
                pass
    except (IOError, OSError):
        pass

    return distances
|
2015-03-28 19:56:23 +03:00
|
|
|
|
2015-03-30 20:44:08 +03:00
|
|
|
def invalidatedistancecache(repo):
    """Try to invalidate any existing distance caches.

    Removes cache/distance (file or legacy directory) and
    cache/distance.current; a missing cache is not an error.  On any other
    failure a warning is printed instead of raising.
    """
    # renamed from `error`, which shadowed the imported mercurial.error module
    failed = False
    try:
        # older versions kept a directory here, newer ones a flat file
        if repo.vfs.isdir('cache/distance'):
            shutil.rmtree(repo.vfs.join('cache/distance'))
        else:
            repo.vfs.unlink('cache/distance')
    except (OSError, IOError) as inst:
        if inst.errno != errno.ENOENT:
            failed = True
    try:
        repo.vfs.unlink('cache/distance.current')
    except (OSError, IOError) as inst:
        if inst.errno != errno.ENOENT:
            failed = True

    if failed:
        repo.ui.warn(_('Unable to invalidate tracking cache; ' +
                       'distance displayed may be incorrect\n'))
|
2015-03-30 20:44:08 +03:00
|
|
|
|
2015-04-01 06:00:57 +03:00
|
|
|
def precachedistance(repo):
    """
    Calculate and cache the distance between bookmarks and what they
    track, plus the distance from the tipmost head on current topological
    branch. This can be an expensive operation especially in repositories
    with a high commit rate, so it can be turned off in your hgrc:

    [remotenames]
    precachedistance = False
    precachecurrent = False
    """
    # to avoid stale namespaces, let's reload
    loadremotenames(repo)

    wlock = repo.wlock()
    try:
        invalidatedistancecache(repo)

        # distance of every tracking bookmark to its tracked name
        distances = {}
        if repo.ui.configbool('remotenames', 'precachedistance', True):
            for bmark, tracked in _readtracking(repo).iteritems():
                distance = calculatenamedistance(repo, bmark, tracked)
                if distance != (None, None):
                    distances[bmark] = distance
            writedistancecache(repo, distances)

        if repo.ui.configbool('remotenames', 'precachecurrent', True):
            # are we on a 'branch' but not at the head?
            # i.e. is there a bookmark that we are heading towards?
            revs = list(repo.revs('limit(.:: and bookmark() - ., 1)'))
            if revs:
                # if we are here then we have one or more bookmarks
                # and we'll pick the first one for now
                bmark = repo[revs[0]].bookmarks()[0]
                distance = len(repo.revs('only(%d, .)' % revs[0]))
                repo.vfs.write('cache/distance.current',
                               '%s %d' % (bmark, distance))

    finally:
        wlock.release()
|
|
|
|
|
2014-03-21 20:48:38 +04:00
|
|
|
#########
|
|
|
|
# revsets
|
|
|
|
#########
|
|
|
|
|
2011-03-30 04:02:08 +04:00
|
|
|
def upstream_revs(filt, repo, subset, x):
    """Return the members of `subset` that are ancestors of remote tips
    whose remote path is accepted by `filt`."""
    tips = set()
    for remotename in repo._remotenames.keys():
        try:
            ns = repo.names['remote' + remotename]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            remotepath = splitremotename(name)[0]
            if filt(remotepath):
                tips.update(ns.nodes(repo, name))

    if not tips:
        return revset.baseset([])

    ancestors = repo.revs('::%ln', tips)
    return revset.filteredset(subset, lambda r: r in ancestors)
|
2011-03-30 04:02:08 +04:00
|
|
|
|
|
|
|
def upstream(repo, subset, x):
    '''``upstream()``
    Select changesets in an upstream repository according to remotenames.
    '''
    repo = repo.unfiltered()

    # remote paths considered "upstream": taken from the config, overridden
    # by any arguments passed to the revset on the command line
    wanted = repo.ui.configlist('remotenames', 'upstream')
    if x:
        wanted = [revset.getstring(symbol, "remote path must be a string")
                  for symbol in revset.getlist(x)]

    # fall back to the active name of the 'default' path, when one exists
    if not wanted:
        defaultpath = dict(repo.ui.configitems('paths')).get('default')
        if defaultpath:
            wanted = [activepath(repo.ui, expandscheme(repo.ui, defaultpath))]

    if wanted:
        filt = lambda name: name in wanted
    else:
        filt = lambda name: True

    return upstream_revs(filt, repo, subset, x)
|
|
|
|
|
|
|
|
def pushed(repo, subset, x):
    '''``pushed()``
    Select changesets in any remote repository according to remotenames.
    '''
    revset.getargs(x, 0, 0, "pushed takes no arguments")
    # every remote path qualifies, so the filter accepts everything
    # (the original lambda parameter shadowed the enclosing `x`)
    return upstream_revs(lambda _remotepath: True, repo, subset, x)
|
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def remotenamesrevset(repo, subset, x):
    """``remotenames()``
    All remote branches heads.
    """
    revset.getargs(x, 0, 0, "remotenames takes no arguments")

    remotenodes = set()
    for remotename in repo._remotenames.keys():
        try:
            ns = repo.names['remote' + remotename]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            remotenodes.update(ns.nodes(repo, name))

    cl = repo.changelog
    return revset.baseset(sorted(cl.rev(node) for node in remotenodes))
|
2012-06-20 13:24:55 +04:00
|
|
|
|
2014-03-18 22:46:10 +04:00
|
|
|
# register the revset predicates defined above so that e.g.
# `hg log -r "upstream()"` works
revset.symbols.update({'upstream': upstream,
                       'pushed': pushed,
                       'remotenames': remotenamesrevset})
|
2012-06-20 19:23:51 +04:00
|
|
|
|
2014-03-21 20:48:38 +04:00
|
|
|
###########
|
|
|
|
# templates
|
|
|
|
###########
|
|
|
|
|
2015-01-06 07:29:40 +03:00
|
|
|
def remotenameskw(**args):
    """:remotenames: List of strings. List of remote names associated with the
    changeset. If remotenames.suppressbranches is True then branch names will
    be hidden if there is a bookmark at the same changeset.

    """
    repo, ctx = args['repo'], args['ctx']
    node = ctx.node()

    names = []
    if 'remotebookmarks' in repo.names:
        names = repo.names['remotebookmarks'].names(repo, node)

    suppress = repo.ui.configbool('remotenames', 'suppressbranches', False)
    # branches are shown unless suppression is on and a bookmark already
    # covers this changeset
    showbranches = not names or not suppress
    if showbranches and 'remotebranches' in repo.names:
        names += repo.names['remotebranches'].names(repo, node)

    return templatekw.showlist('remotename', names,
                               plural='remotenames', **args)
|
2015-05-10 04:42:34 +03:00
|
|
|
|
|
|
|
#############################
|
|
|
|
# bookmarks api compatibility
|
|
|
|
#############################
|
|
|
|
def bmactive(repo):
    """Return the active bookmark of `repo`.

    Mercurial renamed the attribute from _bookmarkcurrent to
    _activebookmark; prefer the new name and fall back to the old one.
    """
    if hasattr(repo, '_activebookmark'):
        return repo._activebookmark
    return repo._bookmarkcurrent
|