2010-01-04 07:37:45 +03:00
|
|
|
import os
|
2015-02-10 03:08:18 +03:00
|
|
|
import errno
|
2010-01-04 07:37:45 +03:00
|
|
|
|
2015-01-17 01:48:56 +03:00
|
|
|
from mercurial import commands
|
|
|
|
from mercurial import encoding
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import error
|
|
|
|
from mercurial import exchange
|
2014-10-03 20:43:48 +04:00
|
|
|
from mercurial import extensions
|
2010-01-04 07:37:45 +03:00
|
|
|
from mercurial import hg
|
2015-02-10 20:18:13 +03:00
|
|
|
from mercurial import localrepo
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import namespaces
|
2014-04-01 06:22:23 +04:00
|
|
|
from mercurial import repoview
|
2014-03-18 22:46:10 +04:00
|
|
|
from mercurial import revset
|
|
|
|
from mercurial import templatekw
|
2015-01-06 22:18:07 +03:00
|
|
|
from mercurial import ui
|
|
|
|
from mercurial import url
|
|
|
|
from mercurial import util
|
2014-12-16 10:45:59 +03:00
|
|
|
from mercurial.node import hex
|
2010-01-11 02:24:02 +03:00
|
|
|
from hgext import schemes
|
2015-01-29 01:03:19 +03:00
|
|
|
from mercurial import bookmarks
|
2010-01-04 07:37:45 +03:00
|
|
|
|
2015-01-15 01:45:24 +03:00
|
|
|
# Module-level cache of remote names, populated by loadremotenames():
# maps a name type ('bookmarks' or 'branches') to a dict of
# joined name ('remote/name') -> list of binary nodes.
_remotenames = {
    "bookmarks": {},
    "branches": {},
}
|
2014-12-16 20:33:01 +03:00
|
|
|
|
2014-10-03 20:43:48 +04:00
|
|
|
def expush(orig, repo, remote, *args, **kwargs):
|
2015-01-07 04:19:08 +03:00
|
|
|
# hack for pushing that turns off the dynamic blockerhook
|
|
|
|
repo.__setattr__('_hackremotenamepush', True)
|
|
|
|
|
2014-10-03 20:43:48 +04:00
|
|
|
res = orig(repo, remote, *args, **kwargs)
|
|
|
|
lock = repo.lock()
|
|
|
|
try:
|
|
|
|
try:
|
2014-03-21 22:37:03 +04:00
|
|
|
path = activepath(repo.ui, remote)
|
2014-10-03 20:43:48 +04:00
|
|
|
if path:
|
2014-03-19 23:44:28 +04:00
|
|
|
# on a push, we don't want to keep obsolete heads since
|
|
|
|
# they won't show up as heads on the next pull, so we
|
|
|
|
# remove them here otherwise we would require the user
|
2014-04-01 04:27:54 +04:00
|
|
|
# to issue a pull to refresh .hg/remotenames
|
2014-03-19 23:44:28 +04:00
|
|
|
bmap = {}
|
|
|
|
repo = repo.unfiltered()
|
|
|
|
for branch, nodes in remote.branchmap().iteritems():
|
|
|
|
bmap[branch] = [n for n in nodes if not repo[n].obsolete()]
|
2014-04-01 04:27:54 +04:00
|
|
|
saveremotenames(repo, path, bmap, remote.listkeys('bookmarks'))
|
2014-10-03 20:43:48 +04:00
|
|
|
except Exception, e:
|
|
|
|
ui.debug('remote branches for path %s not saved: %s\n'
|
|
|
|
% (path, e))
|
|
|
|
finally:
|
2015-01-07 04:19:08 +03:00
|
|
|
repo.__setattr__('_hackremotenamepush', False)
|
2014-10-03 20:43:48 +04:00
|
|
|
lock.release()
|
|
|
|
return res
|
|
|
|
|
|
|
|
def expull(orig, repo, remote, *args, **kwargs):
    """Wrap exchange.pull: after a pull, record the remote's names and
    refresh the cached distance-from-remote file."""
    result = orig(repo, remote, *args, **kwargs)
    # save the remote's branches/bookmarks, then update .hg/remotedistance
    pullremotenames(repo, remote)
    writedistance(repo)
    return result
|
|
|
|
|
|
|
|
def pullremotenames(repo, remote):
|
2014-10-03 20:43:48 +04:00
|
|
|
lock = repo.lock()
|
|
|
|
try:
|
|
|
|
try:
|
2014-03-21 22:37:03 +04:00
|
|
|
path = activepath(repo.ui, remote)
|
2014-10-03 20:43:48 +04:00
|
|
|
if path:
|
2014-03-31 19:52:39 +04:00
|
|
|
saveremotenames(repo, path, remote.branchmap(),
|
|
|
|
remote.listkeys('bookmarks'))
|
2014-10-03 20:43:48 +04:00
|
|
|
except Exception, e:
|
|
|
|
ui.debug('remote branches for path %s not saved: %s\n'
|
|
|
|
% (path, e))
|
|
|
|
finally:
|
|
|
|
lock.release()
|
|
|
|
|
2014-04-01 06:22:23 +04:00
|
|
|
def blockerhook(orig, repo, *args, **kwargs):
    """Wrap repoview._getdynamicblockers so changesets pointed to by a
    remote name are kept visible (unhidden)."""
    blockers = orig(repo)

    # protect un-hiding changesets behind a config knob
    if not repo.ui.configbool('remotenames', 'unhide'):
        return blockers

    # pushing sets a temporary flag that turns this hook off
    if (util.safehasattr(repo, '_hackremotenamepush')
            and repo._hackremotenamepush):
        return blockers

    # add remotenames to blockers by looping over all names in our own cache
    cl = repo.changelog
    for suffix in _remotenames.keys():
        try:
            ns = repo.names['remote' + suffix]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            for node in ns.nodes(repo, name):
                blockers.add(cl.rev(node))

    return blockers
|
|
|
|
|
2015-01-29 01:03:19 +03:00
|
|
|
def exupdatefromremote(orig, ui, repo, remotemarks, path, trfunc, explicit=()):
    """Only sync local bookmarks from the remote when explicitly enabled.

    Delegates to the wrapped bookmarks.updatefromremote if the
    remotenames.syncbookmarks knob is set; otherwise reports the skip.
    """
    sync = ui.configbool('remotenames', 'syncbookmarks', False)
    if not sync:
        ui.status('remotenames: skipped syncing local bookmarks\n')
        return
    return orig(ui, repo, remotemarks, path, trfunc, explicit)
|
|
|
|
|
2015-01-29 04:45:48 +03:00
|
|
|
def exclone(orig, ui, *args, **opts):
|
|
|
|
"""
|
|
|
|
We may not want local bookmarks on clone... but we always want remotenames!
|
|
|
|
"""
|
|
|
|
srcpeer, dstpeer = orig(ui, *args, **opts)
|
|
|
|
|
|
|
|
pullremotenames(dstpeer.local(), srcpeer)
|
|
|
|
|
|
|
|
if not ui.configbool('remotenames', 'syncbookmarks', False):
|
|
|
|
ui.status('remotenames: removing cloned bookmarks\n')
|
2015-02-10 03:08:18 +03:00
|
|
|
repo = dstpeer.local()
|
|
|
|
wlock = repo.wlock()
|
|
|
|
try:
|
|
|
|
try:
|
|
|
|
repo.vfs.unlink('bookmarks')
|
|
|
|
except OSError, inst:
|
|
|
|
if inst.errno != errno.ENOENT:
|
|
|
|
raise
|
|
|
|
finally:
|
|
|
|
wlock.release()
|
|
|
|
|
2015-01-29 04:45:48 +03:00
|
|
|
return (srcpeer, dstpeer)
|
|
|
|
|
2015-02-10 20:18:13 +03:00
|
|
|
def excommit(orig, repo, *args, **opts):
    """Wrap localrepository.commit so the remote-distance file is
    refreshed after every commit."""
    ret = orig(repo, *args, **opts)
    writedistance(repo)
    return ret
|
|
|
|
|
2015-02-10 07:16:15 +03:00
|
|
|
def exupdate(orig, repo, *args, **opts):
    """Wrap hg.updaterepo so the remote-distance file is refreshed after
    every working-copy update."""
    ret = orig(repo, *args, **opts)
    writedistance(repo)
    return ret
|
|
|
|
|
2014-04-01 04:16:59 +04:00
|
|
|
# Wire up the wrappers: push/pull keep .hg/remotenames current, the
# blocker hook keeps remote-named changesets visible, and clone, update
# and commit refresh the bookmark/distance bookkeeping.
extensions.wrapfunction(exchange, 'push', expush)
extensions.wrapfunction(exchange, 'pull', expull)
extensions.wrapfunction(repoview, '_getdynamicblockers', blockerhook)
extensions.wrapfunction(bookmarks, 'updatefromremote', exupdatefromremote)
extensions.wrapfunction(hg, 'clone', exclone)
extensions.wrapfunction(hg, 'updaterepo', exupdate)
extensions.wrapfunction(localrepo.localrepository, 'commit', excommit)
|
2014-10-03 20:43:48 +04:00
|
|
|
|
2010-01-04 07:37:45 +03:00
|
|
|
def reposetup(ui, repo):
    # Per-repository setup: load the on-disk remotenames cache and
    # register a 'remote<name>' namespace for each cached name type.
    if not repo.local():
        return

    loadremotenames(repo)

    # cache this so we don't iterate over new values
    items = list(repo.names.iteritems())
    for nsname, ns in items:
        d = _remotenames.get(nsname)
        if not d:
            continue

        rname = 'remote' + nsname
        rtmpl = 'remote' + ns.templatename
        # NOTE: the d=d defaults bind the current dict at definition time;
        # without them every closure would see the loop's *last* d
        # (Python's late-binding closure pitfall).
        names = lambda rp, d=d: d.keys()
        namemap = lambda rp, name, d=d: d.get(name)
        # reverse lookup: all names whose node list contains this node
        nodemap = lambda rp, node, d=d: [name for name, n in d.iteritems()
                                         for n2 in n if n2 == node]

        n = namespaces.namespace(rname, templatename=rtmpl,
                                 logname=ns.templatename, colorname=rtmpl,
                                 listnames=names, namemap=namemap,
                                 nodemap=nodemap)
        repo.names.addnamespace(n)
|
2014-12-17 09:23:41 +03:00
|
|
|
|
2015-01-17 01:48:56 +03:00
|
|
|
def extsetup(ui):
    """Add --all/--remote flags to the bookmarks and branches commands."""
    bmentry = extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks)
    bmentry[1].append(('a', 'all', None,
                       'show both remote and local bookmarks'))
    bmentry[1].append(('', 'remote', None, 'show only remote bookmarks'))

    brentry = extensions.wrapcommand(commands.table, 'branches', exbranches)
    brentry[1].append(('a', 'all', None,
                       'show both remote and local branches'))
    brentry[1].append(('', 'remote', None, 'show only remote branches'))
|
2015-01-17 01:06:05 +03:00
|
|
|
|
2015-01-29 02:53:49 +03:00
|
|
|
def exbranches(orig, ui, repo, *args, **opts):
    # Wrapped 'branches' command: without --remote, show the normal local
    # listing first; with --all or --remote, append remote branch heads.
    if not opts.get('remote'):
        orig(ui, repo, *args, **opts)

    if opts.get('all') or opts.get('remote'):
        # exit early if namespace doesn't even exist
        namespace = 'remotebranches'
        if namespace not in repo.names:
            return

        ns = repo.names[namespace]
        label = 'log.' + ns.colorname
        fm = ui.formatter('branches', opts)

        # it seems overkill to hide displaying hidden remote branches
        repo = repo.unfiltered()

        # create a sorted by descending rev list
        revs = set()
        for name in ns.listnames(repo):
            for n in ns.nodes(repo, name):
                revs.add(repo.changelog.rev(n))

        for r in sorted(revs, reverse=True):
            ctx = repo[r]
            for name in ns.names(repo, ctx.node()):
                fm.startitem()
                # pad so the rev:hash column lines up with 'hg branches'
                padsize = max(31 - len(str(r)) - encoding.colwidth(name), 0)

                # obsolete heads keep the name label but get the
                # obsolete label on the rev:hash part
                tmplabel = label
                if ctx.obsolete():
                    tmplabel = tmplabel + ' changeset.obsolete'
                # NOTE(review): the name is written with the plain label
                # while rev/node uses tmplabel -- looks intentional, but
                # confirm against the local 'hg branches' styling
                fm.write(ns.colorname, '%s', name, label=label)
                fmt = ' ' * padsize + ' %d:%s'
                fm.condwrite(not ui.quiet, 'rev node', fmt, r,
                             fm.hexfunc(ctx.node()), label=tmplabel)
                fm.plain('\n')
        fm.end()
|
|
|
|
|
2015-01-29 02:53:49 +03:00
|
|
|
def exbookmarks(orig, ui, repo, *args, **opts):
    """Bookmark output is sorted by bookmark name.

    This has the side benefit of grouping all remote bookmarks by remote name.

    """
    # without --remote, show the normal local bookmark listing first
    if not opts.get('remote'):
        orig(ui, repo, *args, **opts)

    if opts.get('all') or opts.get('remote'):
        # exit early if the namespace doesn't even exist
        n = 'remotebookmarks'
        if n not in repo.names:
            return
        ns = repo.names[n]
        color = ns.colorname
        label = 'log.' + color

        fm = ui.formatter('bookmarks', opts)

        # it seems overkill to hide displaying hidden remote bookmarks
        repo = repo.unfiltered()

        for name in sorted(ns.listnames(repo)):
            # a remote bookmark points at exactly one node; take it
            node = ns.nodes(repo, name)[0]
            ctx = repo[node]
            fm.startitem()

            # indent to match the local bookmark listing's active marker
            if not ui.quiet:
                fm.plain(' ')

            padsize = max(25 - encoding.colwidth(name), 0)
            fmt = ' ' * padsize + ' %d:%s'

            # obsolete targets get the obsolete label on the rev:hash part
            tmplabel = label
            if ctx.obsolete():
                tmplabel = tmplabel + ' changeset.obsolete'
            fm.write(color, '%s', name, label=label)
            fm.condwrite(not ui.quiet, 'rev node', fmt, ctx.rev(),
                         fm.hexfunc(node), label=tmplabel)
            fm.plain('\n')
|
|
|
|
|
2014-03-21 22:37:03 +04:00
|
|
|
def activepath(ui, remote):
    """Return the [paths] alias whose URI matches ``remote``, or ''.

    ``remote`` may be a peer object or a plain string.  A non-default
    alias is preferred over 'default'/'default-push' when several paths
    point at the same location.
    """
    realpath = ''
    local = None
    try:
        local = remote.local()
    except AttributeError:
        pass

    # determine the remote path from the repo, if possible; else just
    # use the string given to us
    rpath = remote
    if local:
        rpath = getattr(remote, 'root', None)
        if rpath is None:
            # Maybe a localpeer? (hg@1ac628cd7113, 2.3)
            rpath = getattr(getattr(remote, '_repo', None),
                            'root', None)
    elif not isinstance(remote, str):
        # fix: this was a bare 'except:' that swallowed every error; only
        # a missing attribute should fall through to the legacy .url
        try:
            rpath = remote._url
        except AttributeError:
            rpath = remote.url

    for path, uri in ui.configitems('paths'):
        uri = ui.expandpath(expandscheme(ui, uri))
        if local:
            # compare filesystem paths canonically
            uri = os.path.realpath(uri)
        else:
            if uri.startswith('http'):
                # strip auth info; the API moved around across hg versions
                try:
                    uri = url.url(uri).authinfo()[0]
                except AttributeError:
                    try:
                        uri = util.url(uri).authinfo()[0]
                    except AttributeError:
                        uri = url.getauthinfo(uri)[0]
        uri = uri.rstrip('/')
        rpath = rpath.rstrip('/')
        if uri == rpath:
            realpath = path
            # prefer a non-default name to default
            if path != 'default' and path != 'default-push':
                break
    return realpath
|
2014-03-21 22:34:32 +04:00
|
|
|
|
|
|
|
def expandscheme(ui, uri):
    '''For a given uri, expand the scheme for it'''
    # schemes (from the schemes extension) whose prefix matches this uri
    urischemes = [s for s in schemes.schemes.iterkeys()
                  if uri.startswith('%s://' % s)]
    for s in urischemes:
        # TODO: refactor schemes so we don't
        # duplicate this logic
        ui.note('performing schemes expansion with '
                'scheme %s\n' % s)
        scheme = hg.schemes[s]
        # split off at most scheme.parts positional components after '://'
        parts = uri.split('://', 1)[1].split('/', scheme.parts)
        # anything beyond the scheme's positional parts is kept as a tail
        # and re-appended after template expansion
        if len(parts) > scheme.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = ''
        # templater context maps '1', '2', ... to the positional parts
        ctx = dict((str(i + 1), v) for i, v in enumerate(parts))
        uri = ''.join(scheme.templater.process(scheme.url, ctx)) + tail
    return uri
|
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def splitremotename(remote):
    """Split 'remote/ref' into (remote, ref).

    The split happens at the first '/'; when there is none, ref is ''.
    """
    if '/' not in remote:
        return remote, ''
    head, _sep, tail = remote.partition('/')
    return head, tail
|
2014-03-21 22:34:32 +04:00
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def joinremotename(remote, ref):
    """Join a remote alias and a ref into 'remote/ref'.

    An empty ref yields the alias unchanged.
    """
    if not ref:
        return remote
    return '%s/%s' % (remote, ref)
|
|
|
|
|
2015-01-14 03:28:01 +03:00
|
|
|
def readremotenames(repo):
    """Yield (node, nametype, remote, rname) tuples from .hg/remotenames.

    ``nametype`` is 'branches' or 'bookmarks' when it can be determined
    (new file format stores it; old format is guessed from the current
    branch/bookmark lists) and None otherwise.  Yields nothing when the
    file does not exist.
    """
    rfile = repo.join('remotenames')
    # exit early if there is nothing to do
    if not os.path.exists(rfile):
        return

    # needed to heuristically determine if a file is in the old format
    branches = repo.names['branches'].listnames(repo)
    bookmarks = repo.names['bookmarks'].listnames(repo)

    f = open(rfile)
    # fix: close the file even if the consumer abandons the generator or
    # a parse error is raised mid-iteration (the old unconditional
    # close() after the loop was never reached in those cases)
    try:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # (previously nametype was redundantly reset twice per line)
            nametype = None

            node, name = line.split(' ', 1)

            # check for nametype being written into the file format
            if ' ' in name:
                nametype, name = name.split(' ', 1)

            remote, rname = splitremotename(name)

            # skip old data that didn't write the name (only wrote the alias)
            if not rname:
                continue

            # old format didn't save the nametype, so check for the name in
            # branches and bookmarks
            if nametype is None:
                if rname in branches:
                    nametype = 'branches'
                elif rname in bookmarks:
                    nametype = 'bookmarks'

            yield node, nametype, remote, rname
    finally:
        f.close()
|
|
|
|
|
|
|
|
def loadremotenames(repo):
    """Fill the module-level _remotenames cache from .hg/remotenames.

    Honors remotenames.alias.default (shortening 'remote/default' to just
    'remote') and skips entries whose node is unknown or whose head is
    marked closed.
    """
    alias_default = repo.ui.configbool('remotenames', 'alias.default')

    for node, nametype, remote, rname in readremotenames(repo):
        # handle alias_default here
        if remote != "default" and rname == "default" and alias_default:
            name = remote
        else:
            name = joinremotename(remote, rname)

        # fix: entries whose type could not be determined (old file
        # format with a name no longer in branches/bookmarks) used to
        # raise KeyError on _remotenames[None]; skip them instead
        if nametype not in _remotenames:
            continue

        # if the node doesn't exist, skip it
        try:
            ctx = repo[node]
        except error.RepoLookupError:
            continue

        # only mark as remote if the head changeset isn't marked closed
        if not ctx.extra().get('close'):
            _remotenames[nametype].setdefault(name, []).append(ctx.node())
|
2014-03-31 21:34:43 +04:00
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def saveremotenames(repo, remote, branches, bookmarks):
    """Rewrite .hg/remotenames, replacing all entries for ``remote``.

    Entries belonging to other remotes are preserved.  ``branches`` maps
    branch name -> list of binary nodes (hex-encoded on write);
    ``bookmarks`` maps bookmark name -> node as returned by
    listkeys('bookmarks') (written verbatim, so presumably already hex).
    """
    # read in all data first before opening file to write
    olddata = set(readremotenames(repo))

    bfile = repo.join('remotenames')
    # fix: use a context manager so the file is closed even when a write
    # raises (the old explicit close() leaked the handle on error)
    with open(bfile, 'w') as f:
        # only update the given 'remote', so iterate over old data and
        # re-save everything that belongs to other remotes
        for node, nametype, oldremote, rname in olddata:
            if oldremote != remote:
                n = joinremotename(oldremote, rname)
                f.write('%s %s %s\n' % (node, nametype, n))

        for branch, nodes in branches.iteritems():
            # hoisted out of the inner loop: the joined name depends only
            # on the branch, not on the individual node
            rname = joinremotename(remote, branch)
            for n in nodes:
                f.write('%s branches %s\n' % (hex(n), rname))
        for bookmark, n in bookmarks.iteritems():
            f.write('%s bookmarks %s\n'
                    % (n, joinremotename(remote, bookmark)))
|
|
|
|
|
2015-02-10 06:20:25 +03:00
|
|
|
def distancefromremote(repo, remote="default"):
    """returns the signed distance between the current node and remote"""
    # the name to look up remotely: the active bookmark, if any
    # NOTE(review): _bookmarkcurrent is an old internal hg attribute --
    # confirm it still exists on the targeted hg versions
    b = repo._bookmarkcurrent

    # if no bookmark is active, fallback to the branchname
    if not b:
        b = repo.lookupbranch('.')

    # get the non-default name
    paths = dict(repo.ui.configitems('paths'))
    rpath = paths.get(remote)
    if remote == 'default':
        for path, uri in paths.iteritems():
            # prefer an alias that points at the same URI as 'default',
            # since remotenames are stored under the non-default alias
            if path != 'default' and path != 'default-push' and rpath == uri:
                remote = path

    # if we couldn't find anything for remote then return
    if not rpath:
        return 0

    remoteb = joinremotename(remote, b)
    distance = 0

    if remoteb in repo:
        rev1 = repo[remoteb].rev()
        rev2 = repo['.'].rev()
        sign = 1
        # negative distance: the working parent is behind the remote head
        if rev2 < rev1:
            sign = -1
            rev1, rev2 = rev2, rev1
        # changesets on the path between the two revs, endpoints included;
        # subtract one so identical revs give distance 0
        nodes = repo.revs('%s::%s' % (rev1, rev2))
        distance = sign * (len(nodes) - 1)

    return distance
|
|
|
|
|
2015-02-10 06:30:17 +03:00
|
|
|
def writedistance(repo, remote="default"):
|
|
|
|
distance = distancefromremote(repo, remote)
|
|
|
|
sign = '+'
|
|
|
|
if distance < 0:
|
|
|
|
sign = '-'
|
|
|
|
|
|
|
|
wlock = repo.wlock()
|
|
|
|
try:
|
|
|
|
try:
|
|
|
|
fp = repo.vfs('remotedistance', 'w', atomictemp=True)
|
|
|
|
fp.write('%s %s' % (sign, abs(distance)))
|
|
|
|
fp.close()
|
|
|
|
except OSError, inst:
|
|
|
|
if inst.errno != errno.ENOENT:
|
|
|
|
raise
|
|
|
|
finally:
|
|
|
|
wlock.release()
|
|
|
|
|
2014-03-21 20:48:38 +04:00
|
|
|
#########
|
|
|
|
# revsets
|
|
|
|
#########
|
|
|
|
|
2011-03-30 04:02:08 +04:00
|
|
|
def upstream_revs(filt, repo, subset, x):
    """Restrict ``subset`` to ancestors of remote names accepted by
    ``filt``."""
    tips = set()
    for suffix in _remotenames.keys():
        try:
            ns = repo.names['remote' + suffix]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            if not filt(name):
                continue
            tips.update(ns.nodes(repo, name))

    if not tips:
        return revset.baseset([])

    ancestors = repo.revs('::%ln', tips)
    return revset.filteredset(subset, lambda r: r in ancestors)
|
2011-03-30 04:02:08 +04:00
|
|
|
|
|
|
|
def upstream(repo, subset, x):
    '''``upstream()``
    Select changesets in an upstream repository according to remotenames.
    '''
    revset.getargs(x, 0, 0, "upstream takes no arguments")
    prefixes = tuple(s + '/' for s in
                     repo.ui.configlist('remotenames', 'upstream'))
    if prefixes:
        # accept only names under a configured upstream alias
        filt = lambda name: name.startswith(prefixes)
    else:
        # no configuration: every remote name counts as upstream
        filt = lambda name: True
    return upstream_revs(filt, repo, subset, x)
|
|
|
|
|
|
|
|
def pushed(repo, subset, x):
    '''``pushed()``
    Select changesets in any remote repository according to remotenames.
    '''
    revset.getargs(x, 0, 0, "pushed takes no arguments")
    # no filtering: every remote name counts
    return upstream_revs(lambda name: True, repo, subset, x)
|
|
|
|
|
2014-04-01 04:27:54 +04:00
|
|
|
def remotenamesrevset(repo, subset, x):
    """``remotenames()``
    All remote branches heads.
    """
    revset.getargs(x, 0, 0, "remotenames takes no arguments")
    cl = repo.changelog
    nodes = set()
    for suffix in _remotenames.keys():
        try:
            ns = repo.names['remote' + suffix]
        except KeyError:
            continue
        for name in ns.listnames(repo):
            nodes.update(ns.nodes(repo, name))

    return revset.baseset(sorted(cl.rev(n) for n in nodes))
|
2012-06-20 13:24:55 +04:00
|
|
|
|
2014-03-18 22:46:10 +04:00
|
|
|
# register the revset predicates defined above
revset.symbols.update({'upstream': upstream,
                       'pushed': pushed,
                       'remotenames': remotenamesrevset})
|
2012-06-20 19:23:51 +04:00
|
|
|
|
2014-03-21 20:48:38 +04:00
|
|
|
###########
|
|
|
|
# templates
|
|
|
|
###########
|
|
|
|
|
2015-01-06 07:29:40 +03:00
|
|
|
def remotenameskw(**args):
    """:remotenames: List of strings. List of remote names associated with the
    changeset. If remotenames.suppressbranches is True then branch names will
    be hidden if there is a bookmark at the same changeset.

    """
    repo, ctx = args['repo'], args['ctx']
    node = ctx.node()

    # remote bookmarks first, so they win when branches are suppressed
    remotenames = []
    if 'remotebookmarks' in repo.names:
        remotenames = repo.names['remotebookmarks'].names(repo, node)

    suppress = repo.ui.configbool('remotenames', 'suppressbranches', False)
    showbranches = not remotenames or not suppress
    if showbranches and 'remotebranches' in repo.names:
        remotenames += repo.names['remotebranches'].names(repo, node)

    return templatekw.showlist('remotename', remotenames,
                               plural='remotenames', **args)
|
|
|
|
|
|
|
|
# register the {remotenames} template keyword
templatekw.keywords['remotenames'] = remotenameskw
|