2011-09-24 19:35:45 +04:00
|
|
|
# Copyright 2009-2010 Gregory P. Ward
|
|
|
|
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
|
|
|
|
# Copyright 2010-2011 Fog Creek Software
|
|
|
|
# Copyright 2010-2011 Unity Technologies
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
|
|
|
'''largefiles utility code: must not import other modules in this package.'''
|
|
|
|
|
|
|
|
import os
|
|
|
|
import errno
|
2011-10-21 01:05:13 +04:00
|
|
|
import platform
|
2011-09-24 19:35:45 +04:00
|
|
|
import shutil
|
|
|
|
import stat
|
|
|
|
|
2011-10-11 16:01:24 +04:00
|
|
|
from mercurial import dirstate, httpconnection, match as match_, util, scmutil
|
2011-09-24 19:35:45 +04:00
|
|
|
from mercurial.i18n import _
|
|
|
|
|
|
|
|
# Directory (relative to the repo root) holding the standin files:
# '.hglf/<path>' mirrors the largefile '<path>'.
shortname = '.hglf'
# Precomputed with a trailing '/' for prefix tests and joins.
shortnameslash = shortname + '/'
# Name of both the store directory under .hg/ and the config section.
longname = 'largefiles'
|
|
|
|
|
|
|
|
|
|
|
|
# -- Portability wrappers ----------------------------------------------
|
|
|
|
|
2012-03-09 19:11:52 +04:00
|
|
|
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the dirstate with the given matcher; unknown and ignored
    files are excluded unless explicitly requested.'''
    return dirstate.walk(matcher, [], unknown, ignored)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2012-03-09 19:11:52 +04:00
|
|
|
def repoadd(repo, list):
    '''Schedule the given files for addition in the working context.'''
    wctx = repo[None]
    return wctx.add(list)
|
|
|
|
|
2012-03-09 19:11:52 +04:00
|
|
|
def repoforget(repo, list):
    '''Forget the given files in the working context.'''
    wctx = repo[None]
    return wctx.forget(list)
|
|
|
|
|
|
|
|
# -- Private worker functions ------------------------------------------
|
|
|
|
|
2011-10-12 05:11:01 +04:00
|
|
|
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float.

    opt takes precedence; when it is empty and assumelfiles is set, fall
    back to the [largefiles] minsize config value (or default).  Aborts
    on a non-numeric value or when no size can be determined.
    '''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
|
|
|
|
|
2011-09-24 19:35:45 +04:00
|
|
|
def link(src, dest):
    '''Hardlink src to dest; when hardlinking fails, fall back to an
    atomic copy that also replicates src's permission bits.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # fix: the source file used to be left open (leaked to GC)
            fp.close()
        dst.close()
        # hardlinks share the mode automatically; a copy does not
        os.chmod(dest, os.stat(src).st_mode)
|
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def usercachepath(ui, hash):
    '''Return the path of the largefile with the given hash in the
    per-user cache, or a falsy value when no cache location can be
    determined for this platform.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    # no explicit configuration: derive a platform-specific default
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        # honor the XDG base directory spec, then fall back to ~/.cache
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            path = os.path.join(path, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
|
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the user cache (falsy when no cache path is available).'''
    path = usercachepath(ui, hash)
    if path:
        return os.path.exists(path)
    return path
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    hardlinking it in from the user cache if needed; None when the file
    is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2012-03-09 19:11:52 +04:00
|
|
|
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for tracking largefiles.

    Every path handed to the base class is first normalized with
    unixpath() to the slash-separated form dirstate expects, and
    ignore handling is disabled (largefiles are never .hgignore'd).
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        # never consult ignore files for the largefiles dirstate
        return False
|
2011-09-24 19:35:45 +04:00
|
|
|
|
largefiles: enable islfilesrepo() prior to a commit (issue3541)
Previously, even if a file was added with --large, 'hg addremove' or 'hg ci -A'
would add all files (including the previously added large files) as normal
files. Only after a commit where a file was added with --large would subsequent
adds or 'ci -A' take into account the minsize or the pattern configuration.
This change more closely follows the help for largefiles, which mentions that
'add --large' is required to enable the configuration, but doesn't mention the
previously required commit.
Also, if 'hg add --large' was performed and then 'hg forget <file>' (both before
a largefile enabling commit), the forget command would error out saying
'.hglf/<file> not tracked'. This is also fixed.
This reports that a repo is largefiles enabled as soon as a file is added with
--large, which enables 'add', 'addremove' and 'ci -A' to honor the config
settings before the first commit. Note that prior to the next commit, if all
largefiles are forgotten, the repository goes back to reporting the repo as not
largefiles enabled.
It makes no sense to handle this by adding a --large option to 'addremove',
because then it would also be needed for 'commit', but only when '-A' is
specified. While this gets around the awkwardness of having to add a largefile,
then commit it, and then addremove the other files when importing an existing
codebase (and preserving that extra commit in permanent history), it does still
require finding and manually adding one of the files as --large. Therefore it
is probably desirable to have a --large option for init as well.
2012-07-31 04:56:41 +04:00
|
|
|
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only if the working copy still matches the
                # committed standin hash
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # the file can vanish between walk and hash; anything
                # other than "missing" is a real error
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
|
|
|
|
|
2012-03-09 19:11:52 +04:00
|
|
|
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Return status tuple (modified, added, removed, missing, unknown,
    ignored, clean) for largefiles, resolving "unsure" entries by
    comparing working-copy hashes against the standins at rev.'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed,
     missing, unknown, ignored, clean) = lfdirstate.status(
        match, [], False, False, False)
    for lfile in unsure:
        standinhash = repo[rev][standin(lfile)].data().strip()
        if standinhash != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
|
|
|
|
|
|
|
|
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
|
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def instore(repo, hash):
    '''Report whether the largefile with the given hash exists in the
    repo-local store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def storepath(repo, hash):
    '''Return the path of the largefile with the given hash in the
    repo-local store (.hg/largefiles/<hash>).'''
    return repo.join(os.path.join(longname, hash))
|
|
|
|
|
|
|
|
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    absdest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(absdest))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, absdest)
    return True
|
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile behind the standin for file at rev into the
    repo-local store, unless it is already present there.'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2012-01-08 17:33:10 +04:00
|
|
|
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for filename in ctx.files():
        if isstandin(filename) and filename in manifest:
            copytostore(repo, ctx.node(), splitstandin(filename))
|
|
|
|
|
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path `file` into the store under
    `hash`: hardlink from the user cache when possible, otherwise copy
    from the working directory (skipped entirely while converting,
    since the working directory has no largefiles then).'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # fix: the source file used to be left open (leaked to GC)
            fp.close()
        dst.close()
        linktousercache(repo, hash)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2011-10-20 21:24:09 +04:00
|
|
|
def linktousercache(repo, hash):
    '''Hardlink the store copy of the given largefile into the user
    cache; no-op when no user cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if not path:
        return
    util.makedirs(os.path.dirname(path))
    link(storepath(repo, hash), path)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # fix: avoid mutable default arguments ([] / {}) shared across calls;
    # None defaults are backward-compatible for all existing callers
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        return match_.match(repo.root, None, [], exact=True)

    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
|
|
|
|
|
|
|
|
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    origmatchfn = smatcher.matchfn

    def bothmatch(f):
        # accept only standins whose corresponding real file rmatcher accepts
        return origmatchfn(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = bothmatch
    return smatcher
|
|
|
|
|
|
|
|
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repoadd(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # standins live directly under the '.hglf/' prefix
    return filename.startswith(shortnameslash)
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def splitstandin(filename):
    '''Return the real file name for the given standin path, or None when
    filename is not a standin.

    Split on / because that's what dirstate always uses, even on Windows.
    Change local separator to / first just in case we are passed filenames
    from an external source (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
|
|
|
|
|
|
|
|
def updatestandin(repo, standin):
    '''Re-hash the working copy file behind the given standin and
    rewrite the standin; no-op when the file is absent.'''
    abspath = repo.wjoin(splitstandin(standin))
    if not os.path.exists(abspath):
        return
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
|
|
|
|
|
|
|
|
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # the standin's content is the largefile's hash plus whitespace
    return repo[node][standin(filename)].data().strip()
|
|
|
|
|
|
|
|
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>, with the executable bit set
    per `executable`'''
    writehash(hash, repo.wjoin(standin), executable)
|
|
|
|
|
|
|
|
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    update, write = hasher.update, outfile.write
    for chunk in instream:
        update(chunk)
        write(chunk)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()
    return hasher.digest()
|
|
|
|
|
|
|
|
def hashrepofile(repo, file):
    '''Return the hex SHA-1 digest of the working-copy file `file`.'''
    return hashfile(repo.wjoin(file))
|
|
|
|
|
|
|
|
def hashfile(file):
    '''Return the hex SHA-1 digest of the contents of `file`, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() has already closed fd; this second close is a
    # harmless no-op
    fd.close()
    return hasher.hexdigest()
|
|
|
|
|
|
|
|
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of f.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately leave the underlying file open
        pass
|
|
|
|
|
|
|
|
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            break
        yield block
    # same blecch as copyandhash() above: we close a file we did not open
    infile.close()
|
|
|
|
|
|
|
|
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the file mode per `executable`.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def getexecutable(filename):
    '''Return a truthy value iff filename is executable by user, group
    and other.'''
    st_mode = os.stat(filename).st_mode
    result = st_mode & stat.S_IXUSR
    if result:
        result = st_mode & stat.S_IXGRP
    if result:
        result = st_mode & stat.S_IXOTH
    return result
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def getmode(executable):
    '''Return the permission bits to give a largefile/standin on disk.'''
    if executable:
        return 0755  # rwxr-xr-x
    else:
        return 0644  # rw-r--r--
|
|
|
|
|
|
|
|
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash
    between each pair.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
|
|
|
|
|
|
|
|
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
|
|
|
|
|
|
|
|
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename, opened read-binary.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses redundant separators; pconvert forces '/'
    return util.pconvert(os.path.normpath(path))
|
2011-09-24 19:35:45 +04:00
|
|
|
|
|
|
|
def islfilesrepo(repo):
    '''Report whether repo is largefiles-enabled: either a largefile has
    been committed (store data under '.hglf/'), or largefiles are
    currently tracked in the largefiles dirstate.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    return util.any(openlfdirstate(repo.ui, repo, False))
|
2011-09-24 19:35:45 +04:00
|
|
|
|
2011-10-22 01:52:16 +04:00
|
|
|
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied;
    presumably raised when no remote store supports any of them
    (raise sites are outside this module).'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
|
2012-02-10 17:46:09 +04:00
|
|
|
|
|
|
|
def getcurrentheads(repo):
    '''Return the heads of every branch of repo as one flat list.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
|
largefiles: optimize update speed by only updating changed largefiles
Historically, during 'hg update', every largefile in the working copy was
hashed (which is a very expensive operation on big files) and any
largefiles that did not have a hash that matched their standin were
updated.
This patch optimizes 'hg update' by keeping track of what standins have
changed between the old and new revisions, and only updating the largefiles
that have changed. This saves a lot of time by avoiding the unecessary
calculation of a list of sha1 hashes for big files.
With this patch, the time 'hg update' takes to complete is a function of
how many largefiles need to be updated and what their size is.
Performance tests on a repository with about 80 largefiles ranging from
a few MB to about 97 MB are shown below. The tests show how long it takes
to run 'hg update' with no changes actually being updated.
Mercurial 2.1 release:
$ time hg update
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
getting changed largefiles
0 largefiles updated, 0 removed
real 0m10.045s
user 0m9.367s
sys 0m0.674s
With this patch:
$ time hg update
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
real 0m0.965s
user 0m0.845s
sys 0m0.115s
The same repsoitory, without the largefiles extension enabled:
$ time hg update
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
real 0m0.799s
user 0m0.684s
sys 0m0.111s
So before the patch, 'hg update' with no changes was approximately 9.25s
slower with largefiles enabled. With this patch, it is approximately 0.165s
slower.
2012-02-13 21:37:07 +04:00
|
|
|
|
|
|
|
def getstandinsstate(repo):
    '''Return a list of (lfile, standinhash) pairs for every standin
    currently tracked in the dirstate.'''
    states = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        states.append((lfile, readstandin(repo, lfile)))
    return states
|
2012-03-09 19:45:49 +04:00
|
|
|
|
|
|
|
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (lfile, hash) entry differs
    between the two standin snapshots, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        if entry[0] not in filelist:
            filelist.append(entry[0])
    return filelist
|