# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
2015-08-09 06:10:23 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
2014-04-02 22:41:23 +04:00
|
|
|
import errno
|
2015-08-09 06:10:23 +03:00
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from . import (
|
|
|
|
error,
|
|
|
|
util,
|
|
|
|
)
|
2009-04-24 11:56:53 +04:00
|
|
|
|
2014-11-13 14:17:36 +03:00
|
|
|
# version of the on-disk '*.backupfiles' journal format; written as the
# first line of that journal and checked by rollback()
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# generator-group selectors accepted by transaction._generatefiles()
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
2009-04-24 11:56:53 +04:00
|
|
|
def active(func):
    """Decorator for transaction methods that must only run while the
    transaction is still live.

    Once the transaction has been committed or aborted (``self.count``
    has dropped to zero), calling the wrapped method raises
    ``error.Abort`` instead of executing it.
    """
    def _active(self, *args, **kwds):
        if self.count:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
2005-05-04 01:16:10 +04:00
|
|
|
|
2014-10-18 08:04:35 +04:00
|
|
|
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    """Replay a journal to roll files back to their pre-transaction state.

    * `journal`: path of the journal file, removed on success
    * `report`: callable used to emit user-facing progress/error messages
    * `opener`: vfs for the store content
    * `vfsmap`: {location -> vfs} map used to resolve backup entries
    * `entries`: (file, offset, ignored) truncation records
    * `backupentries`: (location, file, backupfile, cache) restore records
    * `unlink`: when True, files recorded with offset 0 are removed
      instead of truncated to 0
    """
    # phase 1: truncate (or remove) every journaled append-only file
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a', checkambig=True)
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may legitimately be gone already
                if inst.errno != errno.ENOENT:
                    raise

    # phase 2: restore files from their backups (or delete temporaries)
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        # NOTE(review): unlike the analogous loops in transaction.close(),
        # there is no 'continue' after the report above, so an unknown
        # location still reaches vfsmap[l] and raises KeyError — confirm
        # whether that is intended.
        vfs = vfsmap[l]
        try:
            if f and b:
                # a backup exists: copy it back over the live file
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath, checkambig=True)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # temporary transaction file (no original) — just remove it
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            # cache entries (c truthy) are best-effort; others must succeed
            if not c:
                raise

    # phase 3: remove the journal files themselves, then the backup copies
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only pure backup files remain; it is safe to ignore any error
        pass
|
2009-05-04 17:31:57 +04:00
|
|
|
|
2005-11-19 09:48:47 +03:00
|
|
|
class transaction(object):
    """A journaled, abortable unit of repository writes.

    Registered writes are recorded in a journal file (plus a companion
    '*.backupfiles' journal) so they can be undone if the transaction is
    aborted.  Hook points (pending/finalize/postclose/abort callbacks and
    file generators) run at well-defined points of the life cycle.
    """
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        """
        # nesting depth; 0 means committed/aborted (see the 'active' decorator)
        self.count = 1
        # number of outstanding users (see nest()/release())
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        # (file, offset, data) records for journaled append-only files
        self.entries = []
        # file -> index into self.entries, for dedup and find()/replace()
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        # stack of delayed entry lists (see startgroup()/endgroup())
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        # self.journal is still set when the transaction was neither
        # closed nor explicitly aborted: roll it back before dying
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' marks this backup entry as a temporary file
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function that generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix='', group=gengroupall):
        """run the registered file generators selected by `group`

        When `suffix` is non-empty the generated files are registered as
        temporaries (pending-data variants); otherwise they are backed up
        first.  Returns True if at least one generator was registered.
        """
        # write files registered for generation
        any = False  # intentionally shadows the builtin (historical name)
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=not suffix))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        """return the journal entry or backup entry for `file`, or None"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new offset so recovery truncates correctly
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested scope; pair each nest() with release()/close()"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """drop one usage; abort if all users are gone without a close"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # close on clean exit only; always release
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

    def running(self):
        """True while the transaction has not been committed/aborted"""
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            # generators that must run before the finalizers (e.g. phases)
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            # generators that rely on finalizer output (e.g. bookmarks,
            # dirstate must not be visible before the changelog is written)
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            # still nested; the outermost close() does the real work
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        # remove the backup copies now that the transaction is committed
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                # NOTE(review): these adjacent literals concatenate without a
                # space ("...location%s\n") — confirm the wording is intended
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed for this file
                u = ''
            else:
                if l not in self._vfsmap and c:
                    # NOTE(review): adjacent literals concatenate without a
                    # space ("...location%s\n") — confirm wording is intended
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                # rename journal-prefixed backup names to undo-prefixed ones
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        """roll back all recorded writes and clean up the journal files"""
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the (empty) journals
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False)  # notify failure of transaction
|
2005-05-04 01:16:10 +04:00
|
|
|
|
2014-10-18 08:04:35 +04:00
|
|
|
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\\0offset pairs, delimited by newlines.  The corresponding
    '*.backupfiles' file should contain a list of file\\0backupfile
    pairs, delimited by \\0.
    """
    entries = []
    backupentries = []

    # read the main journal: one "file\0offset" truncation record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    # read the companion backup journal, if one exists
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the handle — it was previously leaked (the main journal
        # handle above is closed, this one was not)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): c is written as '%d' ('0' or '1') by
                        # _addbackupentry, so bool(c) is True even for '0' —
                        # confirm whether the cache flag is meant to be
                        # parsed this way.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
|