lfs: split prepush hook into individual functions

Summary:
This makes it possible to reuse the pieces individually, for example
uploading blobs for given revisions without going through the prepush hook
(see the sketch at the end of this summary).

`pointer.tostoreids()` was changed to `pointer.tostoreid()`, returning a
single `StoreID` instead of a one-element list, to simplify things a bit.

The unnecessary `remoterepo` assignment, along with the `lfsthreshold`
plumbing that used it, was removed.
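
As a minimal sketch of the intended reuse (the helper name
`uploadblobsfromrevs` is hypothetical; `extractpointers`, `uploadblobs`, and
`tostoreid` are the functions this diff introduces, assumed to be in scope
in the same module):

```python
# Hypothetical helper, not part of this diff: upload the lfs blobs
# referenced by an arbitrary set of revisions, bypassing the prepush hook.
def uploadblobsfromrevs(repo, revs):
    pointers = extractpointers(repo, revs)                 # collect lfs pointers
    uploadblobs(repo, [p.tostoreid() for p in pointers])   # push their blobs
```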

Test Plan: `arc unit`

Reviewers: #mercurial, davidsp, rmcelroy

Reviewed By: rmcelroy

Subscribers: rmcelroy, mjpieters

Differential Revision: https://phabricator.intern.facebook.com/D5009560

Signature: t1:5009560:1494230285:6469a2701baa8cfa4511a08149a37fc429733343
Author: Jun Wu
Date: 2017-05-08 11:20:50 -07:00
Parent: 67f97aa1d3
Commit: 4532efb04d
2 changed files with 27 additions and 35 deletions

--- changed file 1 of 2 ---

@@ -89,8 +89,8 @@ class GithubPointer(BasePointer):
             size=metadata['size'],
             extrameta=metadata)
 
-    def tostoreids(self):
-        return [StoreID(self['oid'], self['size'])]
+    def tostoreid(self):
+        return StoreID(self['oid'], self['size'])
 
 def deserialize(text):
     pointerformats = [
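
Why the singular form simplifies callers, as a standalone sketch; the
`StoreID` shape (an `oid`/`size` pair) is an assumption inferred from this
diff and from the `s.size` access in `uploadblobs` below:

```python
from collections import namedtuple

# Assumed shape of StoreID: the diff constructs it as StoreID(oid, size)
# and uploadblobs reads `.size`, so a namedtuple models it well enough.
StoreID = namedtuple('StoreID', ['oid', 'size'])

class ExamplePointer(dict):
    """Toy pointer: a dict carrying 'oid' and 'size', like the real ones."""
    def tostoreid(self):
        # Exactly one StoreID per pointer; callers that need a batch wrap
        # it in a list themselves, so the old list-or-scalar check is gone.
        return StoreID(self['oid'], self['size'])

p = ExamplePointer(oid='deadbeef', size=42)
assert p.tostoreid().size == 42
```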

--- changed file 2 of 2 ---

@@ -3,12 +3,13 @@
 from __future__ import absolute_import
 
 from mercurial import (
+    error,
     filelog,
     revlog,
     util as hgutil,
 )
 from mercurial.i18n import _
-from mercurial.node import bin, nullid
+from mercurial.node import bin, nullid, short
 
 from . import (
     blobstore,
@@ -45,10 +46,8 @@ def readfromstore(self, text):
     # still write hg filelog metadata
     if not self.opener.options['lfsbypass']:
         verifyhash = True
-        storeids = metadata.tostoreids()
+        storeids = [metadata.tostoreid()]
         store = self.opener.lfslocalblobstore
-        if not isinstance(storeids, list):
-            storeids = [storeids]
         missing = filter(lambda id: not store.has(id), storeids)
         if missing:
             self.opener.lfsremoteblobstore.readbatch(missing, store)
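
The caller side of that change: `readfromstore` now wraps the single
`StoreID` in a list, keeping the batch-oriented fetch unchanged while the
`isinstance` check disappears. A runnable sketch of the pattern, with the
blobstore API stubbed (only `has` is modeled; `readbatch` is how the diff
names the remote fetch):

```python
from collections import namedtuple

StoreID = namedtuple('StoreID', ['oid', 'size'])  # assumed shape, as above

class StubLocalStore(object):
    """Stand-in for the local blobstore; only `has` is modeled here."""
    def __init__(self, ids):
        self._ids = set(ids)
    def has(self, id):
        return id in self._ids

storeids = [StoreID('deadbeef', 42)]   # always a one-element list now
store = StubLocalStore([])             # local store is empty
missing = [i for i in storeids if not store.has(i)]  # filter(...) in the diff
if missing:
    # the real code calls self.opener.lfsremoteblobstore.readbatch(missing, store)
    print('would fetch %d blob(s) from the remote store' % len(missing))
```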
@@ -177,51 +176,44 @@ def prepush(pushop):
     deserialized into metadata so that we can block the push on their upload to
     the remote blobstore.
     """
-    repo = pushop.repo
-    ui = pushop.ui
-    remoterepo = pushop.remote.local()
-
-    # We beed to pass on the information to the remote about the threshold so
-    # that _peek_islargefile can mark the file as large file.
-    threshold = repo.svfs.options.get('lfsthreshold')
-    if threshold is not None and util.safehasattr(remoterepo, 'svfs'):
-        remoterepo.svfs.options['lfsthreshold'] = threshold
+    pointers = extractpointers(pushop.repo, pushop.outgoing.missing)
+    uploadblobs(pushop.repo, [p.tostoreid() for p in pointers])
+
+def extractpointers(repo, revs):
+    """return a list of lfs pointers added by given revs"""
+    ui = repo.ui
     if ui.verbose:
         ui.write(_('lfs: computing set of blobs to upload\n'))
-    toupload = []
-    totalsize = 0
-    for i, n in enumerate(pushop.outgoing.missing):
+    pointers = {}
+    for i, n in enumerate(revs):
         ctx = repo[n]
         files = set(ctx.files())
         for f in files:
             if f not in ctx:
                 continue
-            filectx = ctx[f]
-            flags = filectx.filelog().flags(filectx.filerev())
-            if flags & revlog.REVIDX_EXTSTORED != revlog.REVIDX_EXTSTORED:
+            fctx = ctx[f]
+            if not _islfs(fctx.filelog(), fctx.filenode()):
                 continue
             try:
-                metadata = pointer.deserialize(ctx[f].rawdata())
-                totalsize += long(metadata['size'])
-                storeids = metadata.tostoreids()
-                if isinstance(storeids, list):
-                    toupload.extend(storeids)
-                else:
-                    toupload.append(storeids)
+                metadata = pointer.deserialize(fctx.rawdata())
+                pointers[metadata['oid']] = metadata
             except pointer.PointerDeserializationError:
-                msg = _('lfs: could not deserialize pointer for file %s, '
-                        'revision %s\n')
-                ui.write(msg % (f, filectx.filerev()))
-                raise
-
-    if not toupload:
+                raise error.Abort(_('lfs: corrupted pointer (%s@%s)\n')
+                                  % (f, short(ctx.node())))
+    return pointers.values()
+
+def uploadblobs(repo, storeids):
+    """upload given storeids from local blobstore"""
+    if not storeids:
         return
+
+    totalsize = sum(s.size for s in storeids)
+    ui = repo.ui
     if ui.verbose:
         msg = _('lfs: need to upload %s objects (%s)\n')
-        ui.write(msg % (len(toupload), hgutil.bytecount(totalsize)))
+        ui.write(msg % (len(storeids), hgutil.bytecount(totalsize)))
 
     remoteblob = repo.svfs.lfsremoteblobstore
-    remoteblob.writebatch(toupload, repo.svfs.lfslocalblobstore,
+    remoteblob.writebatch(storeids, repo.svfs.lfslocalblobstore,
                           total=totalsize)
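
One detail worth noting in `extractpointers`: pointers are keyed by `oid` in
a dict, so a blob referenced by several files or revisions is collected (and
uploaded) only once. A toy illustration of the same dict-dedup technique,
with made-up data:

```python
# Two entries share an oid; keying by oid dedupes them, as in
# `pointers[metadata['oid']] = metadata` above.
pointers = {}
for metadata in [{'oid': 'aa', 'size': 1},
                 {'oid': 'aa', 'size': 1},
                 {'oid': 'bb', 'size': 2}]:
    pointers[metadata['oid']] = metadata
assert len(pointers.values()) == 2
```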