Mirror of https://github.com/facebook/sapling.git, synced 2024-10-06 23:07:18 +03:00
fix trivial spelling errors
parent ec8f1367b5
commit 2f4504e446
@@ -321,7 +321,7 @@ def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
 :f: filepath
 :logfunc: function used to report error
 logfunc(filename, linenumber, linecontent, errormessage)
-:maxerr: number of error to display before arborting.
+:maxerr: number of error to display before aborting.
 Set to false (default) to report all errors

 return True if no error is found, False otherwise.
@@ -365,7 +365,7 @@ def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
 p, msg = pat
 ignore = None

-# fix-up regexes for multiline searches
+# fix-up regexes for multi-line searches
 po = p
 # \s doesn't match \n
 p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)

@@ -1442,7 +1442,7 @@ Mercurial: http://mercurial.selenic.com/

 *hgcommand-mappings-override*

-The default mappings can be overriden by user-provided instead by mapping
+The default mappings can be overridden by user-provided instead by mapping
 to <Plug>CommandName. This is especially useful when these mappings
 collide with other existing mappings (vim will warn of this during plugin
 initialization, but will not clobber the existing mappings).

@@ -55,7 +55,7 @@
 " 3) Optional (but recommended for speed)
 "
 " Install patchutils ( http://cyberelk.net/tim/patchutils/ ) for your
-" OS. For windows it is availble from Cygwin
+" OS. For windows it is available from Cygwin
 "
 " http://www.cygwin.com
 "
@@ -27,7 +27,7 @@
 # On 64-bit systems, make sure it's assigned a 32-bit app pool.
 #
 # - In the application, setup a wildcard script handler mapping of type
-# IpsapiModule with the shim dll as its executable. This file MUST reside
+# IsapiModule with the shim dll as its executable. This file MUST reside
 # in the same directory as the shim. Remove all other handlers, if you wish.
 #
 # - Make sure the ISAPI and CGI restrictions (configured globally on the

@@ -516,7 +516,7 @@ class bzmysql_3_0(bzmysql_2_18):
 raise util.Abort(_('unknown database schema'))
 return ids[0][0]

-# Buzgilla via XMLRPC interface.
+# Bugzilla via XMLRPC interface.

 class cookietransportrequest(object):
 """A Transport request method that retains cookies over its lifetime.

@@ -499,5 +499,5 @@ else:
 orig(m.group(2), **opts)
 m = re.match(ansire, m.group(3))
 finally:
-# Explicity reset original attributes
+# Explicitly reset original attributes
 _kernel32.SetConsoleTextAttribute(stdout, origattr)

@@ -74,7 +74,7 @@ def convert(ui, src, dest=None, revmapfile=None, **opts):

 The authormap is a simple text file that maps each source commit
 author to a destination commit author. It is handy for source SCMs
-that use unix logins to identify authors (eg: CVS). One line per
+that use unix logins to identify authors (e.g.: CVS). One line per
 author mapping and the line format is::

 source author = destination author

@@ -246,7 +246,7 @@ class bzr_source(converter_source):
 # register the change as move
 renames[topath] = frompath

-# no futher changes, go to the next change
+# no further changes, go to the next change
 continue

 # we got unicode paths, need to convert them
@@ -167,7 +167,7 @@ class converter(object):

 def toposort(self, parents, sortmode):
 '''Return an ordering such that every uncommitted changeset is
-preceeded by all its uncommitted ancestors.'''
+preceded by all its uncommitted ancestors.'''

 def mapchildren(parents):
 """Return a (children, roots) tuple where 'children' maps parent

@@ -202,7 +202,7 @@ class convert_cvs(converter_source):
 def getfile(self, name, rev):

 def chunkedread(fp, count):
-# file-objects returned by socked.makefile() do not handle
+# file-objects returned by socket.makefile() do not handle
 # large read() requests very well.
 chunksize = 65536
 output = StringIO()

@@ -156,8 +156,8 @@ def createlog(ui, directory=None, root="", rlog=True, cache=None):
 # The cvsps cache pickle needs a uniquified name, based on the
 # repository location. The address may have all sort of nasties
 # in it, slashes, colons and such. So here we take just the
-# alphanumerics, concatenated in a way that does not mix up the
-# various components, so that
+# alphanumeric characters, concatenated in a way that does not
+# mix up the various components, so that
 # :pserver:user@server:/path
 # and
 # /pserver/user/server/path
@@ -503,7 +503,7 @@ def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):

 # Check if log entry belongs to the current changeset or not.

-# Since CVS is file centric, two different file revisions with
+# Since CVS is file-centric, two different file revisions with
 # different branchpoints should be treated as belonging to two
 # different changesets (and the ordering is important and not
 # honoured by cvsps at this point).

@@ -89,7 +89,7 @@ class gnuarch_source(converter_source, commandline):

 # Get the complete list of revisions for that tree version
 output, status = self.runlines('revisions', '-r', '-f', treeversion)
-self.checkexit(status, 'failed retrieveing revisions for %s'
+self.checkexit(status, 'failed retrieving revisions for %s'
 % treeversion)

 # No new iteration unless a revision has a continuation-of header
@@ -887,8 +887,8 @@ class svn_source(converter_source):
 io = StringIO()
 info = svn.ra.get_file(self.ra, file, revnum, io)
 data = io.getvalue()
-# ra.get_files() seems to keep a reference on the input buffer
-# preventing collection. Release it explicitely.
+# ra.get_file() seems to keep a reference on the input buffer
+# preventing collection. Release it explicitly.
 io.close()
 if isinstance(info, list):
 info = info[-1]
@@ -923,7 +923,7 @@ class svn_source(converter_source):
 # Given the repository url of this wc, say
 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
 # extract the "entry" portion (a relative path) from what
-# svn log --xml says, ie
+# svn log --xml says, i.e.
 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
 # that is to say "tests/PloneTestCase.py"
 if path.startswith(module):

@@ -109,7 +109,7 @@ def snapshot(ui, repo, files, node, tmproot):
 return dirname, fns_and_mtime

 def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
-'''Do the actuall diff:
+'''Do the actual diff:

 - copy to a temp structure if diffing 2 internal revisions
 - copy to a temp structure if diffing working revision with

@@ -22,7 +22,7 @@ configure it, set the following options in your hgrc::
 # Style to use (optional)
 #style = foo
 # The URL of the CIA notification service (optional)
-# You can use mailto: URLs to send by email, eg
+# You can use mailto: URLs to send by email, e.g.
 # mailto:cia@cia.vc
 # Make sure to set email.from if you do this.
 #url = http://cia.vc/

@@ -282,7 +282,7 @@ class autowatcher(watcher):
 callable that takes one parameter. It will be called each time
 a directory is about to be automatically watched. If it returns
 True, the directory will be watched if it still exists,
-otherwise, it will beb skipped.'''
+otherwise, it will be skipped.'''

 super(autowatcher, self).__init__()
 self.addfilter = addfilter
@@ -7,7 +7,7 @@
 #
 # $Id$
 #
-# Keyword expansion hack against the grain of a DSCM
+# Keyword expansion hack against the grain of a Distributed SCM
 #
 # There are many good reasons why this is not needed in a distributed
 # SCM, still it may be useful in very small projects based on single
@@ -168,7 +168,7 @@ def _shrinktext(text, subfunc):
 return subfunc(r'$\1$', text)

 def _preselect(wstatus, changed):
-'''Retrieves modfied and added files from a working directory state
+'''Retrieves modified and added files from a working directory state
 and returns the subset of each contained in given changed files
 retrieved from a change context.'''
 modified, added = wstatus[:2]

@@ -55,7 +55,7 @@ class basestore(object):
 def get(self, files):
 '''Get the specified largefiles from the store and write to local
 files under repo.root. files is a list of (filename, hash)
-tuples. Return (success, missing), lists of files successfuly
+tuples. Return (success, missing), lists of files successfully
 downloaded and those not found in the store. success is a list
 of (filename, hash) tuples; missing is a list of filenames that
 we could not get. (The detailed error message will already have

@@ -65,7 +65,7 @@ def lfconvert(ui, src, dest, *pats, **opts):
 dstlock = rdst.lock()

 # Get a list of all changesets in the source. The easy way to do this
-# is to simply walk the changelog, using changelog.nodesbewteen().
+# is to simply walk the changelog, using changelog.nodesbetween().
 # Take a look at mercurial/revlog.py:639 for more details.
 # Use a generator instead of a list to decrease memory usage
 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
@@ -177,7 +177,7 @@ def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
 if f not in lfiles and f not in normalfiles:
 islfile = _islfile(f, ctx, matcher, size)
 # If this file was renamed or copied then copy
-# the lfileness of its predecessor
+# the largefile-ness of its predecessor
 if f in ctx.manifest():
 fctx = ctx.filectx(f)
 renamed = fctx.renamed()
@@ -389,7 +389,7 @@ def cachelfiles(ui, repo, node, filelist=None):
 # If we are mid-merge, then we have to trust the standin that is in the
 # working copy to have the correct hashvalue. This is because the
 # original hg.merge() already updated the standin as part of the normal
-# merge process -- we just have to udpate the largefile to match.
+# merge process -- we just have to update the largefile to match.
 if (getattr(repo, "_ismerging", False) and
 os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
 expectedhash = lfutil.readstandin(repo, lfile)

@@ -18,7 +18,7 @@ import basestore

 class localstore(basestore.basestore):
 '''localstore first attempts to grab files out of the store in the remote
-Mercurial repository. Failling that, it attempts to grab the files from
+Mercurial repository. Failing that, it attempts to grab the files from
 the user cache.'''

 def __init__(self, ui, repo, remote):

@@ -155,7 +155,7 @@ def removelargefiles(ui, repo, *pats, **opts):
 ui.status(_('removing %s\n') % m.rel(f))

 # Need to lock because standin files are deleted then removed from the
-# repository and we could race inbetween.
+# repository and we could race in-between.
 wlock = repo.wlock()
 try:
 lfdirstate = lfutil.openlfdirstate(ui, repo)
@@ -251,7 +251,7 @@ def overrideverify(orig, ui, repo, *pats, **opts):

 # Override needs to refresh standins so that update's normal merge
 # will go through properly. Then the other update hook (overriding repo.update)
-# will get the new files. Filemerge is also overriden so that the merge
+# will get the new files. Filemerge is also overridden so that the merge
 # will merge standins correctly.
 def overrideupdate(orig, ui, repo, *pats, **opts):
 lfdirstate = lfutil.openlfdirstate(ui, repo)
@@ -696,7 +696,7 @@ def overridepull(orig, ui, repo, source=None, **opts):
 result = orig(ui, repo, source, **opts)
 # If we do not have the new largefiles for any new heads we pulled, we
 # will run into a problem later if we try to merge or rebase with one of
-# these heads, so cache the largefiles now direclty into the system
+# these heads, so cache the largefiles now directly into the system
 # cache.
 ui.status(_("caching new largefiles\n"))
 numcached = 0
@@ -912,7 +912,7 @@ def overrideforget(orig, ui, repo, *pats, **opts):
 ui.status(_('removing %s\n') % m.rel(f))

 # Need to lock because standin files are deleted then removed from the
-# repository and we could race inbetween.
+# repository and we could race in-between.
 wlock = repo.wlock()
 try:
 lfdirstate = lfutil.openlfdirstate(ui, repo)
@@ -156,7 +156,7 @@ def reposetup(ui, repo):
 # Create a function that we can use to override what is
 # normally the ignore matcher. We've already checked
 # for ignored files on the first dirstate walk, and
-# unecessarily re-checking here causes a huge performance
+# unnecessarily re-checking here causes a huge performance
 # hit because lfdirstate only knows about largefiles
 def _ignoreoverride(self):
 return False
@@ -192,7 +192,7 @@ def reposetup(ui, repo):
 (unsure, modified, added, removed, missing, unknown,
 ignored, clean) = s
 # Replace the list of ignored and unknown files with
-# the previously caclulated lists, and strip out the
+# the previously calculated lists, and strip out the
 # largefiles
 lfiles = set(lfdirstate._map)
 ignored = set(result[5]).difference(lfiles)

@@ -1535,7 +1535,7 @@ class queue(object):
 # if amending a patch, we start with existing
 # files plus specified files - unfiltered
 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
-# filter with inc/exl options
+# filter with include/exclude options
 matchfn = scmutil.match(repo[None], opts=opts)
 else:
 match = scmutil.matchall(repo)
@@ -3185,9 +3185,9 @@ def finish(ui, repo, *revrange, **opts):
 revs = scmutil.revrange(repo, revrange)
 if repo['.'].rev() in revs and repo[None].files():
 ui.warn(_('warning: uncommitted changes in the working directory\n'))
-# queue.finish may changes phases but leave the responsability to lock the
+# queue.finish may changes phases but leave the responsibility to lock the
 # repo to the caller to avoid deadlock with wlock. This command code is
-# responsability for this locking.
+# responsibility for this locking.
 lock = repo.lock()
 try:
 q.finish(repo, revs)

@@ -81,7 +81,7 @@ def scanpatch(fp):
 class header(object):
 """patch header

-XXX shoudn't we move this to mercurial/patch.py ?
+XXX shouldn't we move this to mercurial/patch.py ?
 """
 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
 diff_re = re.compile('diff -r .* (.*)$')
@@ -66,7 +66,7 @@
 using select() for socket reads
 tested on Debian unstable with Python 2.2.2"""

-"""0.05 update - ensure case insensitivty on domain names
+"""0.05 update - ensure case insensitivity on domain names
 support for unicast DNS queries"""

 """0.04 update - added some unit tests
@@ -335,7 +335,7 @@ class DNSRecord(DNSEntry):
 raise AbstractMethodException

 def toString(self, other):
-"""String representation with addtional information"""
+"""String representation with additional information"""
 arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
 return DNSEntry.toString(self, "record", arg)

@@ -904,7 +904,7 @@ class Listener(object):
 to cache information as it arrives.

 It requires registration with an Engine object in order to have
-the read() method called when a socket is availble for reading."""
+the read() method called when a socket is available for reading."""

 def __init__(self, zeroconf):
 self.zeroconf = zeroconf
@@ -1140,7 +1140,7 @@ class ServiceInfo(object):
 return self.port

 def getPriority(self):
-"""Pirority accessor"""
+"""Priority accessor"""
 return self.priority

 def getWeight(self):
@@ -1259,7 +1259,7 @@ class Zeroconf(object):
 # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
 # multicast UDP sockets (p 731, "TCP/IP Illustrated,
 # Volume 2"), but some BSD-derived systems require
-# SO_REUSEPORT to be specified explicity. Also, not all
+# SO_REUSEPORT to be specified explicitly. Also, not all
 # versions of Python have SO_REUSEPORT available. So
 # if you're on a BSD-based system, and haven't upgraded
 # to Python 2.3 yet, you may find this library doesn't
@@ -32,7 +32,7 @@ class HTTPRangeHandler(urllib2.BaseHandler):

 This was extremely simple. The Range header is a HTTP feature to
 begin with so all this class does is tell urllib2 that the
-"206 Partial Content" reponse from the HTTP server is what we
+"206 Partial Content" response from the HTTP server is what we
 expected.

 Example:
@@ -64,7 +64,7 @@ class HTTPRangeHandler(urllib2.BaseHandler):

 class RangeableFileObject(object):
 """File object wrapper to enable raw range handling.
-This was implemented primarilary for handling range
+This was implemented primarily for handling range
 specifications for file:// urls. This object effectively makes
 a file object look like it consists only of a range of bytes in
 the stream.
@@ -431,7 +431,7 @@ def range_tuple_normalize(range_tup):
 Return a tuple whose first element is guaranteed to be an int
 and whose second element will be '' (meaning: the last byte) or
 an int. Finally, return None if the normalized tuple == (0,'')
-as that is equivelant to retrieving the entire file.
+as that is equivalent to retrieving the entire file.
 """
 if range_tup is None:
 return None

@@ -183,7 +183,7 @@ class changelog(revlog.revlog):
 nodeid\n : manifest node in ascii
 user\n : user, no \n or \r allowed
 time tz extra\n : date (time is int or float, timezone is int)
-: extra is metadatas, encoded and separated by '\0'
+: extra is metadata, encoded and separated by '\0'
 : older versions ignore it
 files\n\n : files modified by the cset, no \n or \r allowed
 (.*) : comment (free text, ideally utf-8)
@@ -1258,7 +1258,7 @@ def _makegraphlogrevset(repo, pats, opts, revs):
 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
 # pats/include/exclude are passed to match.match() directly in
-# _matchfile() revset but walkchangerevs() builds its matcher with
+# _matchfiles() revset but walkchangerevs() builds its matcher with
 # scmutil.match(). The difference is input pats are globbed on
 # platforms without shell expansion (windows).
 pctx = repo[None]
@@ -1304,7 +1304,7 @@ def _makegraphlogrevset(repo, pats, opts, revs):
 fnopats = (('_ancestors', '_fancestors'),
 ('_descendants', '_fdescendants'))
 if pats:
-# follow() revset inteprets its file argument as a
+# follow() revset interprets its file argument as a
 # manifest entry, so use match.files(), not pats.
 opts[fpats[followfirst]] = list(match.files())
 else:

@@ -543,7 +543,7 @@ def bisect(ui, repo, rev=None, extra=None, command=None,
 hg bisect --good
 hg bisect --bad

-- mark the current revision, or a known revision, to be skipped (eg. if
+- mark the current revision, or a known revision, to be skipped (e.g. if
 that revision is not usable because of another issue)::

 hg bisect --skip
@@ -1252,7 +1252,7 @@ def commit(ui, repo, *pats, **opts):
 Returns 0 on success, 1 if nothing changed.
 """
 if opts.get('subrepos'):
-# Let --subrepos on the command line overide config setting.
+# Let --subrepos on the command line override config setting.
 ui.setconfig('ui', 'commitsubrepos', True)

 extra = {}

@@ -237,7 +237,7 @@ class changectx(object):

 def extinct(self):
 """True if the changeset is extinct"""
-# We should just compute a cache a check againts it.
+# We should just compute a cache a check against it.
 # see revset implementation for details
 #
 # But this naive implementation does not require cache
@@ -21,7 +21,7 @@ def findcommonincoming(repo, remote, heads=None, force=False):
 any longer.
 "heads" is either the supplied heads, or else the remote's heads.

-If you pass heads and they are all known locally, the reponse lists justs
+If you pass heads and they are all known locally, the response lists just
 these heads in "common" and in "heads".

 Please use findcommonoutgoing to compute the set of outgoing nodes to give
@@ -348,7 +348,7 @@ def visibleheads(repo):
 def visiblebranchmap(repo):
 """return a branchmap for the visible set"""
 # XXX Recomputing this data on the fly is very slow. We should build a
-# XXX cached version while computin the standard branchmap version.
+# XXX cached version while computing the standard branchmap version.
 sroots = repo._phasecache.phaseroots[phases.secret]
 if sroots or repo.obsstore:
 vbranchmap = {}

@@ -223,7 +223,7 @@ def toutf8b(s):

 Principles of operation:

-- ASCII and UTF-8 data sucessfully round-trips and is understood
+- ASCII and UTF-8 data successfully round-trips and is understood
 by Unicode-oriented clients
 - filenames and file contents in arbitrary other encodings can have
 be round-tripped or recovered by clueful clients

@@ -159,7 +159,7 @@ def get(repo, status):
 Return a list of revision(s) that match the given status:

 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
-- ``goods``, ``bads`` : csets topologicaly good/bad
+- ``goods``, ``bads`` : csets topologically good/bad
 - ``range`` : csets taking part in the bisection
 - ``pruned`` : csets that are goods, bads or skipped
 - ``untested`` : csets whose fate is yet unknown
@@ -170,8 +170,8 @@ def get(repo, status):
 if status in ('good', 'bad', 'skip', 'current'):
 return map(repo.changelog.rev, state[status])
 else:
-# In the floowing sets, we do *not* call 'bisect()' with more
-# than one level of recusrsion, because that can be very, very
+# In the following sets, we do *not* call 'bisect()' with more
+# than one level of recursion, because that can be very, very
 # time consuming. Instead, we always develop the expression as
 # much as possible.

@@ -200,7 +200,7 @@ def get(repo, status):

 # 'ignored' is all csets that were not used during the bisection
 # due to DAG topology, but may however have had an impact.
-# Eg., a branch merged between bads and goods, but whose branch-
+# E.g., a branch merged between bads and goods, but whose branch-
 # point is out-side of the range.
 iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors
 iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors

@@ -1431,7 +1431,7 @@ The full set of options is:
 Example: ``http://hgserver/static/``.

 ``stripes``
-How many lines a "zebra stripe" should span in multiline output.
+How many lines a "zebra stripe" should span in multi-line output.
 Default is 1; set to 0 to disable.

 ``style``

@@ -11,7 +11,7 @@ but recognizes only the following sections:
 - paths
 - collections

-The ``web`` options are thorougly described in :hg:`help config`.
+The ``web`` options are thoroughly described in :hg:`help config`.

 The ``paths`` section maps URL paths to paths of repositories in the
 filesystem. hgweb will not expose the filesystem directly - only

@@ -12,7 +12,7 @@ from mercurial.hgweb import common
 from mercurial.i18n import _

 def _splitURI(uri):
-""" Return path and query splited from uri
+""" Return path and query split from uri

 Just like CGI environment, the path is unquoted, the query is
 not.
@@ -19,7 +19,7 @@ def launch(application):
 environ = dict(os.environ.iteritems())
 environ.setdefault('PATH_INFO', '')
 if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
-# IIS includes script_name in path_info
+# IIS includes script_name in PATH_INFO
 scriptname = environ['SCRIPT_NAME']
 if environ['PATH_INFO'].startswith(scriptname):
 environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]

@@ -293,7 +293,7 @@ class HTTPConnection(object):
 host: The host to which we'll connect.
 port: Optional. The port over which we'll connect. Default 80 for
 non-ssl, 443 for ssl.
-use_ssl: Optional. Wether to use ssl. Defaults to False if port is
+use_ssl: Optional. Whether to use ssl. Defaults to False if port is
 not 443, true if port is 443.
 ssl_validator: a function(socket) to validate the ssl cert
 timeout: Optional. Connection timeout, default is TIMEOUT_DEFAULT.
@@ -374,7 +374,7 @@ class HTTPConnection(object):
 if self.ssl:
 # This is the default, but in the case of proxied SSL
 # requests the proxy logic above will have cleared
-# blocking mode, so reenable it just to be safe.
+# blocking mode, so re-enable it just to be safe.
 sock.setblocking(1)
 logger.debug('wrapping socket for ssl with options %r',
 self.ssl_opts)
@@ -414,7 +414,7 @@ class HTTPConnection(object):
 """Close the connection to the server.

 This is a no-op if the connection is already closed. The
-connection may automatically close if requessted by the server
+connection may automatically close if requested by the server
 or required by the nature of a response.
 """
 if self.sock is None:

@@ -120,7 +120,7 @@ class AbstractSimpleReader(AbstractReader):
 if data:
 assert not self._finished, (
 'tried to add data (%r) to a closed reader!' % data)
-logger.debug('%s read an addtional %d data', self.name, len(data))
+logger.debug('%s read an additional %d data', self.name, len(data))
 self._done_chunks.append(data)
@@ -162,7 +162,7 @@ class ChunkedReader(AbstractReader):

 def _load(self, data):
 assert not self._finished, 'tried to add data to a closed reader!'
-logger.debug('chunked read an addtional %d data', len(data))
+logger.debug('chunked read an additional %d data', len(data))
 position = 0
 if self._leftover_data:
 logger.debug('chunked reader trying to finish block from leftover data')
@@ -188,7 +188,7 @@ class ChunkedReader(AbstractReader):
 return
 if amt == 0:
 self._finished = True
-logger.debug('closing chunked redaer due to chunk of length 0')
+logger.debug('closing chunked reader due to chunk of length 0')
 return
 self._done_chunks.append(data[block_start:block_start + amt])
 position = block_start + amt + len(self._eol)

@@ -67,8 +67,8 @@ EXTRA ATTRIBUTES AND METHODS

 close_connection() - close the connection to the host
 readlines() - you know, readlines()
-status - the return status (ie 404)
-reason - english translation of status (ie 'File not found')
+status - the return status (i.e. 404)
+reason - english translation of status (i.e. 'File not found')

 If you want the best of both worlds, use this inside an
 AttributeError-catching try:
@@ -297,7 +297,7 @@ class KeepAliveHandler(object):
 # first. We previously got into a nasty loop
 # where an exception was uncaught, and so the
 # connection stayed open. On the next try, the
-# same exception was raised, etc. The tradeoff is
+# same exception was raised, etc. The trade-off is
 # that it's now possible this call will raise
 # a DIFFERENT exception
 if DEBUG:
@@ -370,7 +370,7 @@ class HTTPResponse(httplib.HTTPResponse):
 # so if you THEN do a normal read, you must first take stuff from
 # the buffer.

-# the read method wraps the original to accomodate buffering,
+# the read method wraps the original to accommodate buffering,
 # although read() never adds to the buffer.
 # Both readline and readlines have been stolen with almost no
 # modification from socket.py
@@ -442,7 +442,7 @@ class HTTPResponse(httplib.HTTPResponse):
 try:
 chunk_left = int(line, 16)
 except ValueError:
-# close the connection as protocol synchronisation is
+# close the connection as protocol synchronization is
 # probably lost
 self.close()
 raise httplib.IncompleteRead(value)
@@ -548,7 +548,7 @@ def safesend(self, str):
 read = getattr(str, 'read', None)
 if read is not None:
 if self.debuglevel > 0:
-print "sendIng a read()able"
+print "sending a read()able"
 data = read(blocksize)
 while data:
 self.sock.sendall(data)
@@ -737,7 +737,7 @@ def test_timeout(url):


 def test(url, N=10):
-print "checking error hander (do this on a non-200)"
+print "checking error handler (do this on a non-200)"
 try: error_handler(url)
 except IOError:
 print "exiting - exception will prevent further tests"
@@ -300,7 +300,7 @@ class localrepository(object):
 """hiddenrevs: revs that should be hidden by command and tools

 This set is carried on the repo to ease initialisation and lazy
-loading it'll probably move back to changelog for efficienty and
+loading it'll probably move back to changelog for efficiency and
 consistency reason

 Note that the hiddenrevs will needs invalidations when
@@ -712,7 +712,7 @@ class localrepository(object):
 # Remove candidate heads that no longer are in the repo (e.g., as
 # the result of a strip that just happened). Avoid using 'node in
 # self' here because that dives down into branchcache code somewhat
-# recrusively.
+# recursively.
 bheadrevs = [self.changelog.rev(node) for node in bheads
 if self.changelog.hasnode(node)]
 newheadrevs = [self.changelog.rev(node) for node in newnodes
@@ -732,7 +732,7 @@ class localrepository(object):
 iterrevs = list(bheadrevs)

 # This loop prunes out two kinds of heads - heads that are
-# superceded by a head in newheadrevs, and newheadrevs that are not
+# superseded by a head in newheadrevs, and newheadrevs that are not
 # heads because an existing head is their descendant.
 while iterrevs:
 latest = iterrevs.pop()
@@ -1479,7 +1479,7 @@ class localrepository(object):
 and you also know the set of candidate new heads that may have resulted
 from the destruction, you can set newheadnodes. This will enable the
 code to update the branchheads cache, rather than having future code
-decide it's invalid and regenrating it from scratch.
+decide it's invalid and regenerating it from scratch.
 '''
 # If we have info, newheadnodes, on how to update the branch cache, do
 # it, Otherwise, since nodes were destroyed, the cache is stale and this
@@ -1906,7 +1906,7 @@ class localrepository(object):
 ret = remote.addchangegroup(cg, 'push', self.url())

 if ret:
-# push succeed, synchonize target of the push
+# push succeed, synchronize target of the push
 cheads = outgoing.missingheads
 elif revs is None:
 # All out push fails. synchronize all common
@@ -1925,7 +1925,7 @@ class localrepository(object):
 # missing = ((commonheads::missingheads) - commonheads)
 #
 # We can pick:
-# * missingheads part of comon (::commonheads)
+# * missingheads part of common (::commonheads)
 common = set(outgoing.common)
 cheads = [node for node in revs if node in common]
 # and
@@ -2539,7 +2539,7 @@ class localrepository(object):
 # uncompressed only if compatible.

 if not stream:
-# if the server explicitely prefer to stream (for fast LANs)
+# if the server explicitly prefer to stream (for fast LANs)
 stream = remote.capable('stream-preferred')

 if stream and not heads:

@@ -122,8 +122,8 @@ class lock(object):
 def release(self):
 """release the lock and execute callback function if any

-If the lock have been aquired multiple time, the actual release is
-delayed to the last relase call."""
+If the lock have been acquired multiple time, the actual release is
+delayed to the last release call."""
 if self.held > 1:
 self.held -= 1
 elif self.held == 1:

@@ -151,7 +151,7 @@ def mimetextpatch(s, subtype='plain', display=False):

 def mimetextqp(body, subtype, charset):
 '''Return MIME message.
-Qouted-printable transfer encoding will be used if necessary.
+Quoted-printable transfer encoding will be used if necessary.
 '''
 enc = None
 for line in body.splitlines():
@@ -133,7 +133,7 @@ def splitparagraphs(blocks):
 def match(lines, i, itemre, singleline):
 """Does itemre match an item at line i?

-A list item can be followed by an idented line or another list
+A list item can be followed by an indented line or another list
 item (but only if singleline is True).
 """
 line1 = lines[i]

@@ -62,7 +62,7 @@ _enabled = False
 _pack = struct.pack
 _unpack = struct.unpack

-# the obsolete feature is not mature enought to be enabled by default.
+# the obsolete feature is not mature enough to be enabled by default.
 # you have to rely on third party extension extension to enable this.
 _enabled = False

@@ -195,7 +195,7 @@ class phasecache(object):
 return self._phaserevs

 def phase(self, repo, rev):
-# We need a repo argument here to be able to build _phaserev
+# We need a repo argument here to be able to build _phaserevs
 # if necessary. The repository instance is not stored in
 # phasecache to avoid reference cycles. The changelog instance
 # is not stored because it is a filecache() property and can
@@ -363,7 +363,7 @@ def newheads(repo, heads, roots):
 """compute new head of a subset minus another

 * `heads`: define the first subset
-* `rroots`: define the second we substract to the first"""
+* `roots`: define the second we substract to the first"""
 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
 heads, roots, roots, heads)
 return [c.node() for c in revset]

@@ -57,7 +57,7 @@ _depthbits = 24
 _depthbytes = _depthbits / 8
 _vecbytes = _bytes - _depthbytes
 _vecbits = _vecbytes * 8
-_radius = (_vecbits - 30) / 2 # high probability vecs are related
+_radius = (_vecbits - 30) / 2 # high probability vectors are related

 def _bin(bs):
 '''convert a bytestring to a long'''
@@ -12,7 +12,7 @@ from numbers import Number
 def bytesformatter(format, args):
 '''Custom implementation of a formatter for bytestrings.

-This function currently relias on the string formatter to do the
+This function currently relies on the string formatter to do the
 formatting and always returns bytes objects.

 >>> bytesformatter(20, 10)

@@ -547,7 +547,7 @@ class revlog(object):
 # Our topologically sorted list of output nodes.
 orderedout = []
 # Don't start at nullid since we don't want nullid in our output list,
-# and if nullid shows up in descedents, empty parents will look like
+# and if nullid shows up in descendants, empty parents will look like
 # they're descendants.
 for r in xrange(max(lowestrev, 0), highestrev + 1):
 n = self.node(r)
@@ -1015,7 +1015,7 @@ class revlog(object):
 see addrevision for argument descriptions.
 invariants:
 - text is optional (can be None); if not set, cachedelta must be set.
-if both are set, they must correspond to eachother.
+if both are set, they must correspond to each other.
 """
 btext = [text]
 def buildtext():

@@ -335,7 +335,7 @@ def bisect(repo, subset, x):
 Changesets marked in the specified bisect status:

 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
-- ``goods``, ``bads`` : csets topologicaly good/bad
+- ``goods``, ``bads`` : csets topologically good/bad
 - ``range`` : csets taking part in the bisection
 - ``pruned`` : csets that are goods, bads or skipped
 - ``untested`` : csets whose fate is yet unknown
@@ -594,7 +594,7 @@ def destination(repo, subset, x):

 # The visited lineage is a match if the current source is in the arg
 # set. Since every candidate dest is visited by way of iterating
-# subset, any dests futher back in the lineage will be tested by a
+# subset, any dests further back in the lineage will be tested by a
 # different iteration over subset. Likewise, if the src was already
 # selected, the current lineage can be selected without going back
 # further.
@@ -109,7 +109,8 @@ def findcommonheads(ui, local, remote,
 srvheadhashes = srvheadhashesref.value
 yesno = yesnoref.value
 else:
-# compatibitity with pre-batch, but post-known remotes during 1.9 devel
+# compatibility with pre-batch, but post-known remotes during 1.9
+# development
 srvheadhashes = remote.heads()
 sample = []

@@ -888,7 +888,7 @@ class gitsubrepo(abstractsubrepo):
 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
 """Calls the git command

-The methods tries to call the git command. versions previor to 1.6.0
+The methods tries to call the git command. versions prior to 1.6.0
 are not supported and very probably fail.
 """
 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))

@@ -124,8 +124,8 @@ def _updatetags(filetags, tagtype, alltags, tagtypes):
 continue

 # we prefer alltags[name] if:
-# it supercedes us OR
-# mutual supercedes and it has a higher rank
+# it supersedes us OR
+# mutual supersedes and it has a higher rank
 # otherwise we win because we're tip-most
 anode, ahist = nodehist
 bnode, bhist = alltags[name]

@@ -1,4 +1,4 @@
-# transaction.py - simple journalling scheme for mercurial
+# transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
@@ -713,8 +713,8 @@ class ui(object):
 With stock hg, this is simply a debug message that is hidden
 by default, but with extensions or GUI tools it may be
 visible. 'topic' is the current operation, 'item' is a
-non-numeric marker of the current position (ie the currently
-in-process file), 'pos' is the current numeric position (ie
+non-numeric marker of the current position (i.e. the currently
+in-process file), 'pos' is the current numeric position (i.e.
 revision, bytes, etc.), unit is a corresponding unit label,
 and total is the highest expected pos.

@@ -175,7 +175,7 @@ class httpconnection(keepalive.HTTPConnection):
 self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
 self.sock.connect((self.host, self.port))
 if _generic_proxytunnel(self):
-# we do not support client x509 certificates
+# we do not support client X.509 certificates
 self.sock = sslutil.ssl_wrap_socket(self.sock, None, None)
 else:
 keepalive.HTTPConnection.connect(self)

@@ -1,4 +1,4 @@
-# util.py - Mercurial utility functions and platform specfic implementations
+# util.py - Mercurial utility functions and platform specific implementations
 #
 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
@@ -7,7 +7,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

-"""Mercurial utility functions and platform specfic implementations.
+"""Mercurial utility functions and platform specific implementations.

 This contains helper routines that are independent of the SCM core and
 hide platform-specific details from the core.
@@ -799,7 +799,7 @@ def mktempcopy(name, emptyok=False, createmode=None):
 return temp

 class atomictempfile(object):
-'''writeable file object that atomically updates a file
+'''writable file object that atomically updates a file

 All writes will go to a temporary copy of the original file. Call
 close() when you are done writing, and atomictempfile will rename
@@ -1239,7 +1239,7 @@ def MBTextWrapper(**kwargs):
 so overriding is needed to use width information of each characters.

 In addition, characters classified into 'ambiguous' width are
-treated as wide in east asian area, but as narrow in other.
+treated as wide in East Asian area, but as narrow in other.

 This requires use decision to determine width of such characters.
 """
@@ -1300,7 +1300,7 @@ def MBTextWrapper(**kwargs):
 width = self.width - len(indent)

 # First chunk on line is whitespace -- drop it, unless this
-# is the very beginning of the text (ie. no lines started yet).
+# is the very beginning of the text (i.e. no lines started yet).
 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
 del chunks[-1]

@@ -152,7 +152,7 @@ def samestat(s1, s2):
 # backslash
 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
 # So, to quote a string, we must surround it in double quotes, double
-# the number of backslashes that preceed double quotes and add another
+# the number of backslashes that precede double quotes and add another
 # backslash before every double quote (being careful with the double
 # quote we've appended to the end)
 _quotere = None

@@ -516,7 +516,7 @@ def stream(repo, proto):
 it is serving. Client checks to see if it understands the format.

 The format is simple: the server writes out a line with the amount
-of files, then the total amount of bytes to be transfered (separated
+of files, then the total amount of bytes to be transferred (separated
 by a space). Then, for each file, the server first writes the filename
 and filesize (separated by the null character), then the file contents.
 '''
@@ -558,7 +558,7 @@ leave existing directory in place after clone failure
 $ test -d d/.hg
 [1]

-reenable perm to allow deletion
+re-enable perm to allow deletion

 $ chmod +rx c/.hg/store/data

@@ -60,8 +60,8 @@

 The authormap is a simple text file that maps each source commit author to
 a destination commit author. It is handy for source SCMs that use unix
-logins to identify authors (eg: CVS). One line per author mapping and the
-line format is:
+logins to identify authors (e.g.: CVS). One line per author mapping and
+the line format is:

 source author = destination author

@@ -727,7 +727,7 @@ Cat and hg cat files before custom expansion
 ignore $Id$
 a

-Write custom keyword and prepare multiline commit message
+Write custom keyword and prepare multi-line commit message

 $ echo '$Xinfo$' >> a
 $ cat <<EOF >> log
@@ -745,7 +745,7 @@ Interrupted commit should not change state
 ? c
 ? log

-Commit with multiline message and custom expansion
+Commit with multi-line message and custom expansion

 $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
 a

@@ -84,7 +84,7 @@


 pull did not updated ../alpha state.
-push from alpha to beta should update phase even if nothing is transfered
+push from alpha to beta should update phase even if nothing is transferred

 $ cd ../alpha
 $ hgph # not updated by remote pull