# hgsql.py
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""
CREATE TABLE revisions(
repo CHAR(64) BINARY NOT NULL,
path VARCHAR(512) BINARY NOT NULL,
chunk INT UNSIGNED NOT NULL,
chunkcount INT UNSIGNED NOT NULL,
linkrev INT UNSIGNED NOT NULL,
rev INT UNSIGNED NOT NULL,
node CHAR(40) BINARY NOT NULL,
entry BINARY(64) NOT NULL,
data0 CHAR(1) NOT NULL,
data1 LONGBLOB NOT NULL,
createdtime DATETIME NOT NULL,
INDEX linkrevs (repo, linkrev),
PRIMARY KEY (repo, path, rev, chunk)
);

CREATE TABLE revision_references(
autoid INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
repo CHAR(32) BINARY NOT NULL,
namespace CHAR(32) BINARY NOT NULL,
name VARCHAR(256) BINARY,
value char(40) BINARY NOT NULL,
UNIQUE KEY bookmarkindex (repo, namespace, name)
);
"""

from mercurial.node import bin, hex, nullid, nullrev
from mercurial.i18n import _
from mercurial.extensions import wrapfunction, wrapcommand
from mercurial import changelog, error, cmdutil, revlog, localrepo, transaction
from mercurial import wireproto, bookmarks, repair, commands, hg, mdiff, phases
from mercurial import util, changegroup, exchange, bundle2
import MySQLdb, struct, time, Queue, threading, _mysql_exceptions
from MySQLdb import cursors
import warnings
import sys

cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'

writelock = 'write_lock'

INITIAL_SYNC_NORMAL = 'normal'
INITIAL_SYNC_DISABLE = 'disabled'
INITIAL_SYNC_FORCE = 'force'

initialsync = INITIAL_SYNC_NORMAL

class CorruptionException(Exception):
    pass

def uisetup(ui):
    # Enable SQL for local commands that write to the repository.
    wrapcommand(commands.table, 'pull', pull)
    wrapcommand(commands.table, 'commit', commit)

    wrapcommand(commands.table, 'bookmark', bookmarkcommand)
    wrapfunction(exchange, '_localphasemove', _localphasemove)
    wrapfunction(exchange, 'push', push)

    # Enable SQL for remote commands that write to the repository
    wireproto.commands['unbundle'] = (wireproto.unbundle, 'heads')
    wrapfunction(exchange, 'unbundle', unbundle)

    wrapfunction(wireproto, 'pushkey', pushkey)
    wireproto.commands['pushkey'] = (wireproto.pushkey, 'namespace key old new')

    wrapfunction(bookmarks, 'updatefromremote', updatefromremote)
    wrapfunction(changegroup, 'addchangegroup', addchangegroup)

    # Record revlog writes
    def writeentry(orig, self, transaction, ifh, dfh, entry, data, link,
                   offset):
        """records each revlog write to the repo's pendingrev list"""
        if not util.safehasattr(transaction, "repo"):
            return orig(self, transaction, ifh, dfh, entry, data, link,
                        offset)
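
        # For reference (based on revlog.indexformatng, ">Qiiiiii20s12x"),
        # the unpacked entry is: (offset/flags, compressed length,
        # uncompressed length, delta base rev, linkrev, p1 rev, p2 rev,
        # 20-byte node) -- so e[7] below is the binary node hash.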
        e = struct.unpack(revlog.indexformatng, entry)
        node = hex(e[7])
        data0 = data[0] or ''
        transaction.repo.pendingrevs.append((self.indexfile, link,
            len(self) - 1, node, entry, data0, data[1]))
        return orig(self, transaction, ifh, dfh, entry, data, link, offset)
    wrapfunction(revlog.revlog, '_writeentry', writeentry)

    # Reorder incoming revs to be in linkrev order
    wrapfunction(revlog.revlog, 'addgroup', addgroup)

    # Write SQL bookmarks at the same time as local bookmarks
    wrapfunction(bookmarks.bmstore, 'write', bookmarkwrite)

def extsetup(ui):
    if ui.configbool('hgsql', 'enabled'):
        commands.globalopts.append(
            ('', 'forcesync', False,
             _('force hgsql sync even on read-only commands'),
             _('TYPE')))

        # Directly examining argv seems like a terrible idea, but it seems
        # necessary unless we refactor the mercurial dispatch code. This is
        # because the first place we have access to parsed options is in the
        # same function (dispatch.dispatch) that creates the repo, and repo
        # creation initiates the sync operation, in which the lock is elided
        # unless we set this.
        if '--forcesync' in sys.argv:
            ui.debug('forcesync enabled\n')
            global initialsync
            initialsync = INITIAL_SYNC_FORCE

def reposetup(ui, repo):
    if repo.ui.configbool("hgsql", "enabled"):
        wraprepo(repo)

        if initialsync != INITIAL_SYNC_DISABLE:
            # Use a noop to force a sync
            def noop():
                pass
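
            # A forced sync waits only for the local repo lock, not the SQL
            # write lock, so read-only commands see all committed changes
            # without blocking behind in-flight writes from other masters.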
            waitforlock = (initialsync == INITIAL_SYNC_FORCE)
            executewithsql(repo, noop, waitforlock=waitforlock)

# Incoming commits are only allowed via push and pull
def unbundle(orig, repo, cg, *args, **kwargs):
    if not repo.ui.configbool("hgsql", "enabled"):
        return orig(repo, cg, *args, **kwargs)

    isbundle2 = util.safehasattr(cg, 'params')
    islazylocking = repo.ui.configbool('experimental', 'bundle2lazylocking')
    if isbundle2 and islazylocking:
        # lazy locked bundle2
        exception = None
        oldopclass = None
        context = None
        try:
            context = sqlcontext(repo, takelock=True, waitforlock=True)

            # Temporarily replace bundleoperation so we can hook into its
            # locking mechanism.
            oldopclass = bundle2.bundleoperation
            class sqllockedoperation(bundle2.bundleoperation):
                def __init__(self, repo, transactiongetter, *args, **kwargs):
                    def sqllocktr():
                        if not context.active():
                            context.__enter__()
                        return transactiongetter()

                    super(sqllockedoperation, self).__init__(repo, sqllocktr,
                                                             *args, **kwargs)
                    # undo our temporary wrapping
                    bundle2.bundleoperation = oldopclass
            bundle2.bundleoperation = sqllockedoperation

            return orig(repo, cg, *args, **kwargs)
        except Exception as ex:
            exception = ex
            raise
        finally:
            # Be extra sure to undo our wrapping
            if oldopclass:
                bundle2.bundleoperation = oldopclass
            # release sqllock here in exit
            if context:
                type = value = traceback = None
                if exception:
                    type = exception.__class__
                    value = exception
                    traceback = []  # This isn't really important
                context.__exit__(type, value, traceback)
    else:
        # bundle1 or non-lazy locked
        return executewithsql(repo, orig, True, repo, cg, *args, **kwargs)
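
# The command wrappers below share one pattern: when hgsql is enabled, run
# the wrapped operation inside executewithsql() so the repo is synced with
# the database first and the SQL write lock is held for the duration.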
def pull(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def push(orig, *args, **kwargs):
    repo = args[0]
    if repo.ui.configbool("hgsql", "enabled"):
        # A push locks the local repo in order to update phase data, so we
        # need to take the lock for the local repo during a push.
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def commit(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def updatefromremote(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def addchangegroup(orig, *args, **kwargs):
    repo = args[0]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def _localphasemove(orig, pushop, *args, **kwargs):
    repo = pushop.repo
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, pushop, *args, **kwargs)
    else:
        return orig(pushop, *args, **kwargs)

class sqlcontext(object):
    def __init__(self, repo, takelock=False, waitforlock=False):
        self.repo = repo
        self.takelock = takelock
        self.waitforlock = waitforlock
        self._connected = False
        self._locked = False
        self._used = False
        self._startlocktime = 0
        self._active = False

    def active(self):
        return self._active

    def __enter__(self):
        if self._used:
            raise Exception("error: using sqlcontext twice")
        self._used = True
        self._active = True

        repo = self.repo
        if not repo.sqlconn:
            repo.sqlconnect()
            self._connected = True

        if self.takelock and not writelock in repo.heldlocks:
            startwait = time.time()
            repo.sqllock(writelock)
            self._locked = True
            repo.ui.log("sqllock", "waited for sql lock for %s seconds\n",
                        time.time() - startwait)
            self._startlocktime = time.time()

        if self._connected:
            repo.syncdb(waitforlock=self.waitforlock)

    def __exit__(self, type, value, traceback):
        try:
            repo = self.repo
            if self._locked:
                repo.ui.log("sqllock", "held sql lock for %s seconds\n",
                            time.time() - self._startlocktime)
                repo.sqlunlock(writelock)

            if self._connected:
                repo.sqlclose()

            self._active = False
        except (_mysql_exceptions.ProgrammingError,
                _mysql_exceptions.OperationalError):
            # Only raise sql exceptions if the wrapped code threw no exception
            if type is None:
                raise
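
# A usage sketch (hypothetical caller) -- the context connects, optionally
# takes the write lock, syncs, and always unlocks/closes on exit:
#
#     with sqlcontext(repo, takelock=True, waitforlock=True):
#         dowrite(repo)  # hypothetical write operation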

def executewithsql(repo, action, sqllock=False, *args, **kwargs):
    """Executes the given action while having a SQL connection open.
    If a lock is specified, it is held for the duration of the action.
    """
    # executewithsql can be executed in a nested scenario (ex: writing
    # bookmarks during a pull), so sqlcontext tracks whether this call
    # performed the connect.

    waitforlock = sqllock
    if 'waitforlock' in kwargs:
        if not waitforlock:
            waitforlock = kwargs['waitforlock']
        del kwargs['waitforlock']

    with sqlcontext(repo, takelock=sqllock, waitforlock=waitforlock):
        return action(*args, **kwargs)

def wraprepo(repo):
    class sqllocalrepo(repo.__class__):
        def sqlconnect(self):
            if self.sqlconn:
                raise Exception("SQL connection already open")
            if self.sqlcursor:
                raise Exception("SQL cursor already open without connection")
            retry = 3
            while True:
                try:
                    self.sqlconn = MySQLdb.connect(**self.sqlargs)
                    break
                except (_mysql_exceptions.ProgrammingError,
                        _mysql_exceptions.OperationalError):
                    # mysql can be flaky occasionally, so do some minimal
                    # retrying.
                    retry -= 1
                    if retry == 0:
                        raise
                    time.sleep(0.2)

            self.sqlconn.autocommit(False)
            waittimeout = self.ui.config('hgsql', 'waittimeout', '300')
            waittimeout = self.sqlconn.escape_string("%s" % (waittimeout,))
            self.locktimeout = self.ui.config('hgsql', 'locktimeout', '60')
            self.locktimeout = self.sqlconn.escape_string("%s" %
                                                          (self.locktimeout,))
            self.sqlconn.query("SET wait_timeout=%s" % waittimeout)
            self.sqlconn.query("SET innodb_lock_wait_timeout=%s" %
                               self.locktimeout)
            self.sqlcursor = self.sqlconn.cursor()

        def sqlclose(self):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.sqlcursor.close()
                self.sqlconn.close()
            self.sqlcursor = None
            self.sqlconn = None
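
        # The locks below are MySQL advisory locks (GET_LOCK/RELEASE_LOCK),
        # scoped to a single connection. IS_USED_LOCK returns the holding
        # connection's id, which lets hassqllock verify that this connection
        # is still the lock owner.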
        def sqllock(self, name):
            escapedname = self.sqlconn.escape_string("%s_%s" %
                                                     (name, self.sqlreponame))
            # locktimeout was escaped in sqlconnect to prevent passing bad
            # sql data
            self.sqlconn.query("SELECT GET_LOCK('%s', %s)" %
                               (escapedname, self.locktimeout))
            result = self.sqlconn.store_result().fetch_row()[0][0]
            if result != 1:
                raise util.Abort("timed out waiting for mysql repo lock (%s)"
                                 % escapedname)
            self.heldlocks.add(name)

        def hassqllock(self, name):
            if not name in self.heldlocks:
                return False

            escapedname = self.sqlconn.escape_string("%s_%s" %
                                                     (name, self.sqlreponame))
            self.sqlconn.query("SELECT IS_USED_LOCK('%s')" % (escapedname,))
            lockheldby = self.sqlconn.store_result().fetch_row()[0][0]
            if lockheldby == None:
                raise Exception("unable to check %s lock" % escapedname)

            self.sqlconn.query("SELECT CONNECTION_ID()")
            myconnectid = self.sqlconn.store_result().fetch_row()[0][0]
            if myconnectid == None:
                raise Exception("unable to read connection id")

            return lockheldby == myconnectid

        def sqlunlock(self, name):
            escapedname = self.sqlconn.escape_string("%s_%s" %
                                                     (name, self.sqlreponame))
            self.sqlconn.query("SELECT RELEASE_LOCK('%s')" % (escapedname,))
            self.sqlconn.store_result().fetch_row()
            self.heldlocks.discard(name)

        def transaction(self, *args, **kwargs):
            tr = super(sqllocalrepo, self).transaction(*args, **kwargs)
            if tr.count > 1:
                return tr

            validator = tr.validator
            def pretxnclose(tr):
                validator(tr)
                self.committodb(tr)
                del self.pendingrevs[:]
            tr.validator = pretxnclose

            def transactionabort(orig):
                del self.pendingrevs[:]
                return orig()
            wrapfunction(tr, "_abort", transactionabort)

            tr.repo = self
            return tr

        def needsync(self):
            """Returns a (outofsync, sqlheads, sqlbookmarks, tip) tuple.
            outofsync is True if the local repo is not in sync with the
            database; when it is False, the heads and bookmarks match the
            database.
            """
            self.sqlcursor.execute("""SELECT namespace, name, value
                FROM revision_references WHERE repo = %s""",
                (self.sqlreponame))
            sqlheads = set()
            sqlbookmarks = {}
            tip = -1
            for namespace, name, value in self.sqlcursor:
                if namespace == "heads":
                    sqlheads.add(bin(value))
                elif namespace == "bookmarks":
                    sqlbookmarks[name] = bin(value)
                elif namespace == "tip":
                    tip = int(value)

            heads = set(self.heads())
            bookmarks = self._bookmarks

            outofsync = (heads != sqlheads or bookmarks != sqlbookmarks
                         or tip != len(self) - 1)
            return outofsync, sqlheads, sqlbookmarks, tip

        def syncdb(self, waitforlock=False):
            ui = self.ui
            if not self.needsync()[0]:
                ui.debug("syncing not needed\n")
                return
            ui.debug("syncing with mysql\n")

            # Save a copy of the old manifest cache so we can put it back
            # afterwards.
            oldmancache = self.manifest._mancache

            try:
                lock = self.lock(wait=waitforlock)
            except error.LockHeld:
                # Oh well. Don't block this non-critical read-only operation.
                ui.debug("skipping sync for current operation\n")
                return

            configbackups = []
            try:
                # Disable all pretxnclose hooks, since these revisions are
                # technically already committed.
                for name, value in ui.configitems("hooks"):
                    if name.startswith("pretxnclose"):
                        configbackups.append(ui.backupconfig("hooks", name))
                        ui.setconfig("hooks", name, None)
                # The hg-ssh wrapper installs a hook to block all writes. We
                # need to circumvent this when we sync from the server.
                configbackups.append(ui.backupconfig("hooks",
                                                     "pretxnopen.hg-ssh"))
                self.ui.setconfig("hooks", "pretxnopen.hg-ssh", None)

                # Someone else may have synced us while we were waiting.
                # Restart the transaction so we have access to the latest rows.
                self.sqlconn.rollback()
                outofsync, sqlheads, sqlbookmarks, fetchend = self.needsync()
                if not outofsync:
                    return

                transaction = self.transaction("syncdb")

                self.hook('presyncdb', throw=True)

                try:
                    # Inspect the changelog now that we have the lock
                    fetchstart = len(self.changelog)

                    queue = Queue.Queue()
                    abort = threading.Event()

                    t = threading.Thread(target=self.fetchthread,
                                         args=(queue, abort, fetchstart,
                                               fetchend))
                    t.setDaemon(True)
                    try:
                        t.start()
                        addentries(self, queue, transaction)
                    finally:
                        abort.set()

                    phases.advanceboundary(self, transaction, phases.public,
                                           self.heads())

                    transaction.close()
                finally:
                    transaction.release()

                # We circumvent the changelog and manifest when we add entries
                # to the revlogs. So clear all the caches.
                self.invalidate()
                self._filecache.pop('changelog', None)
                self._filecache.pop('manifest', None)
                self._filecache.pop('_phasecache', None)

                # Refill the cache
                self.manifest._mancache = oldmancache

                heads = set(self.heads())
                heads.discard(nullid)
                if heads != sqlheads:
                    raise CorruptionException("heads don't match after sync")

                if len(self) - 1 != fetchend:
                    raise CorruptionException("tip doesn't match after sync")

                self.disablesync = True
                try:
                    bm = self._bookmarks
                    bm.clear()
                    self.sqlcursor.execute("""SELECT name, value
                        FROM revision_references
                        WHERE namespace = 'bookmarks' AND repo = %s""",
                        (self.sqlreponame))
                    for name, node in self.sqlcursor:
                        node = bin(node)
                        if node in self:
                            bm[name] = node
                    bm.write()
                finally:
                    self.disablesync = False

                if bm != sqlbookmarks:
                    raise CorruptionException("bookmarks don't match after "
                                              "sync")
            finally:
                for backup in configbackups:
                    ui.restoreconfig(backup)
                lock.release()
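
        # syncdb streams rows out of MySQL on a background thread
        # (fetchthread below) while the foreground thread applies them to
        # the local revlogs via addentries(), so network reads and disk
        # writes overlap.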
        def fetchthread(self, queue, abort, fetchstart, fetchend):
            """Fetches every revision from fetchstart to fetchend (inclusive)
            and places them on the queue. This function is meant to run on a
            background thread and listens to the abort event to abort early.
            """
            ui = self.ui
            clrev = fetchstart
            chunksize = 1000
            try:
                while True:
                    if abort.isSet():
                        break

                    maxrev = min(clrev + chunksize, fetchend + 1)
                    self.sqlcursor.execute("""SELECT path, chunk, chunkcount,
                        linkrev, entry, data0, data1 FROM revisions
                        WHERE repo = %s AND linkrev > %s AND linkrev < %s
                        ORDER BY linkrev ASC""",
                        (self.sqlreponame, clrev - 1, maxrev))

                    # Put split chunks back together into a single revision
                    groupedrevdata = {}
                    for revdata in self.sqlcursor:
                        name = revdata[0]
                        chunk = revdata[1]
                        linkrev = revdata[3]
                        groupedrevdata.setdefault((name, linkrev),
                                                  {})[chunk] = revdata

                    if not groupedrevdata:
                        break

                    fullrevisions = []
                    for chunks in groupedrevdata.itervalues():
                        chunkcount = chunks[0][2]
                        if chunkcount == 1:
                            fullrevisions.append(chunks[0])
                        elif chunkcount == len(chunks):
                            fullchunk = list(chunks[0])
                            data1 = ""
                            for i in range(0, chunkcount):
                                data1 += chunks[i][6]
                            fullchunk[6] = data1
                            fullrevisions.append(tuple(fullchunk))
                        else:
                            raise Exception("missing revision chunk - "
                                            "expected %s got %s" %
                                            (chunkcount, len(chunks)))

                    fullrevisions = sorted(fullrevisions,
                                           key=lambda revdata: revdata[3])
                    for revdata in fullrevisions:
                        queue.put(revdata)

                    clrev += chunksize
            except Exception as ex:
                queue.put(ex)
                return

            queue.put(False)

        def pushkey(self, namespace, key, old, new):
            def _pushkey():
                return super(sqllocalrepo, self).pushkey(namespace, key,
                                                         old, new)

            return executewithsql(self, _pushkey, namespace == 'bookmarks')

        def committodb(self, tr):
            """Commits all pending revisions to the database.
            """
            if self.sqlconn == None:
                raise util.Abort("invalid repo change - only hg push and pull"
                                 " are allowed")

            if not self.pendingrevs and not 'bookmark_moved' in tr.hookargs:
                return

            reponame = self.sqlreponame
            cursor = self.sqlcursor
            maxcommitsize = self.maxcommitsize
            maxinsertsize = self.maxinsertsize
            maxrowsize = self.maxrowsize

            if self.pendingrevs:
                self._validatependingrevs()
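
            # Revision payloads are split into rows of at most maxrowsize
            # bytes (the chunk/chunkcount columns), rows are batched into
            # multi-row INSERTs of up to maxinsertsize bytes, and the SQL
            # transaction is committed whenever maxcommitsize is exceeded.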
            def insert(args, values):
                argstring = ','.join(args)
                cursor.execute("INSERT INTO revisions(repo, path, "
                               "chunk, chunkcount, linkrev, rev, node, entry, "
                               "data0, data1, createdtime) VALUES %s" %
                               argstring, values)

            try:
                commitsize = 0
                insertsize = 0
                args = []
                values = []
                now = time.strftime('%Y-%m-%d %H:%M:%S')
                for revision in self.pendingrevs:
                    path, linkrev, rev, node, entry, data0, data1 = revision

                    start = 0
                    chunk = 0
                    datalen = len(data1)
                    chunkcount = datalen / maxrowsize
                    if datalen % maxrowsize != 0 or datalen == 0:
                        chunkcount += 1

                    # We keep row size down by breaking large revisions down
                    # into smaller chunks.
                    while chunk == 0 or start < len(data1):
                        end = min(len(data1), start + maxrowsize)
                        datachunk = data1[start:end]
                        args.append('(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
                        values.extend((reponame, path, chunk, chunkcount,
                                       linkrev, rev, node, entry, data0,
                                       datachunk, now))

                        size = len(datachunk)
                        commitsize += size
                        insertsize += size

                        chunk += 1
                        start = end

                    # Minimize roundtrips by doing bulk inserts
                    if insertsize > maxinsertsize:
                        insert(args, values)
                        del args[:]
                        del values[:]
                        insertsize = 0

                    # MySQL transactions can only reach a certain size, so we
                    # commit every so often. As long as we don't update the
                    # tip pushkey, this is ok.
                    if commitsize > maxcommitsize:
                        self.sqlconn.commit()
                        commitsize = 0

                if args:
                    insert(args, values)

                # commit at the end just to make sure we're clean
                self.sqlconn.commit()

                # Compute new heads, and delete old heads
                newheads = set(self.heads())
                oldheads = []
                cursor.execute(
                    "SELECT value FROM revision_references "
                    "WHERE repo = %s AND namespace='heads'",
                    (reponame,)
                )
                for head in cursor:
                    head = head[0]
                    if head in newheads:
                        newheads.discard(head)
                    else:
                        oldheads.append(head)

                if oldheads:
                    headargs = ','.join(['%s'] * len(oldheads))
                    cursor.execute(
                        "DELETE revision_references FROM revision_references "
                        "FORCE INDEX (bookmarkindex) "
                        "WHERE namespace = 'heads' "
                        "AND repo = %s AND value IN (" + headargs + ")",
                        (reponame,) + tuple(oldheads)
                    )

                # Compute new bookmarks, and delete old bookmarks
                newbookmarks = self._bookmarks.copy()
                oldbookmarks = []
                cursor.execute(
                    "SELECT name, value FROM revision_references "
                    "WHERE namespace = 'bookmarks' AND repo = %s",
                    (reponame,)
                )
                for k, v in cursor:
                    if newbookmarks.get(k) == v:
                        del newbookmarks[k]
                    else:
                        oldbookmarks.append(k)

                if oldbookmarks:
                    bookargs = ','.join(['%s'] * len(oldbookmarks))
                    cursor.execute(
                        "DELETE revision_references FROM revision_references "
                        "FORCE INDEX (bookmarkindex) "
                        "WHERE namespace = 'bookmarks' AND repo = %s "
                        "AND name IN (" + bookargs + ")",
                        (repo.sqlreponame,) + tuple(oldbookmarks)
                    )

                tmpl = []
                values = []
                for head in newheads:
                    tmpl.append("(%s, 'heads', NULL, %s)")
                    values.append(reponame)
                    values.append(hex(head))

                for k, v in newbookmarks.iteritems():
                    tmpl.append("(%s, 'bookmarks', %s, %s)")
                    values.append(repo.sqlreponame)
                    values.append(k)
                    values.append(hex(v))

                if tmpl:
                    cursor.execute("INSERT INTO revision_references"
                                   "(repo, namespace, name, value) "
                                   "VALUES %s" % ','.join(tmpl),
                                   tuple(values))

                # revision_references has multiple keys (primary key, and a
                # unique index), so mysql gives a warning when using ON
                # DUPLICATE KEY since it would only update one row despite
                # multiple key duplicates. This doesn't matter for us, since
                # we know there is only one row that will share the same key.
                # So suppress the warning.
                cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                               VALUES(%s, 'tip', 'tip', %s)
                               ON DUPLICATE KEY UPDATE value=%s""",
                               (reponame, len(self) - 1, len(self) - 1))

                # Just to be super sure, check the write lock before doing
                # the final commit
                if not self.hassqllock(writelock):
                    raise Exception("attempting to write to sql without "
                                    "holding %s (precommit)" % writelock)

                self.sqlconn.commit()
            except:
                self.sqlconn.rollback()
                raise
            finally:
                del self.pendingrevs[:]

        def _validatependingrevs(self):
            """Validates that the current pending revisions will be valid when
            written to the database.
            """
            reponame = self.sqlreponame
            cursor = self.sqlcursor

            # Ensure we hold the write lock
            if not self.hassqllock(writelock):
                raise Exception("attempting to write to sql without holding "
                                "%s (prevalidate)" % writelock)

            # Validate that we are appending to the correct linkrev
            cursor.execute("""SELECT value FROM revision_references
                           WHERE repo = %s AND namespace = 'tip'""", reponame)
            tipresults = cursor.fetchall()
            if len(tipresults) == 0:
                maxlinkrev = -1
            elif len(tipresults) == 1:
                maxlinkrev = int(tipresults[0][0])
            else:
                raise CorruptionException("multiple tips for %s in the "
                                          "database" % reponame)

            minlinkrev = min(self.pendingrevs, key=lambda x: x[1])[1]
            if maxlinkrev == None or maxlinkrev != minlinkrev - 1:
                raise CorruptionException("attempting to write non-sequential "
                                          "linkrev %s, expected %s" %
                                          (minlinkrev, maxlinkrev + 1))

            # Clean up excess revisions left from interrupted commits.
            # Since MySQL can only hold so much data in a transaction, we
            # allow committing across multiple db transactions. That means if
            # the commit is interrupted, the next transaction needs to clean
            # up bad revisions.
            cursor.execute("""DELETE FROM revisions WHERE repo = %s AND
                           linkrev > %s""", (reponame, maxlinkrev))

            # Validate that all rev dependencies (base, p1, p2) have the same
            # node in the database
            pending = set([(path, rev) for path, _, rev, _, _, _, _
                           in self.pendingrevs])
            expectedrevs = set()
            for revision in self.pendingrevs:
                path, linkrev, rev, node, entry, data0, data1 = revision
                e = struct.unpack(revlog.indexformatng, entry)
                _, _, _, base, _, p1r, p2r, _ = e

                if p1r != nullrev and not (path, p1r) in pending:
                    expectedrevs.add((path, p1r))
                if p2r != nullrev and not (path, p2r) in pending:
                    expectedrevs.add((path, p2r))
                if (base != nullrev and base != rev and
                    not (path, base) in pending):
                    expectedrevs.add((path, base))

            if not expectedrevs:
                return

            missingrevs = []
            expectedlist = list(expectedrevs)
            expectedcount = len(expectedrevs)
            batchsize = self.ui.configint('hgsql', 'verifybatchsize', 1000)
            i = 0
            while i < expectedcount:
                checkrevs = set(expectedlist[i:i + batchsize])
                i += batchsize

                whereclauses = []
                args = []
                args.append(reponame)
                for path, rev in checkrevs:
                    whereclauses.append("(path, rev, chunk) = (%s, %s, 0)")
                    args.append(path)
                    args.append(rev)

                whereclause = ' OR '.join(whereclauses)
                cursor.execute("""SELECT path, rev, node FROM revisions WHERE
                               repo = %s AND (""" + whereclause + ")",
                               args)

                for path, rev, node in cursor:
                    rev = int(rev)
                    checkrevs.remove((path, rev))
                    rl = None
                    if path == '00changelog.i':
                        rl = self.changelog
                    elif path == '00manifest.i':
                        rl = self.manifest
                    else:
                        rl = revlog.revlog(self.svfs, path)
                    localnode = hex(rl.node(rev))
                    if localnode != node:
                        raise CorruptionException(("expected node %s at rev "
                            "%d of %s but found %s") % (node, rev, path,
                            localnode))

                if len(checkrevs) > 0:
                    missingrevs.extend(checkrevs)

            if missingrevs:
                raise CorruptionException(("unable to verify %d dependent "
                    "revisions before adding a commit") % (len(missingrevs)))

    ui = repo.ui

    sqlargs = {}
    sqlargs['host'] = ui.config("hgsql", "host")
    sqlargs['db'] = ui.config("hgsql", "database")
    sqlargs['user'] = ui.config("hgsql", "user")
    sqlargs['port'] = ui.configint("hgsql", "port")
    password = ui.config("hgsql", "password", "")
    if password:
        sqlargs['passwd'] = password
    sqlargs['cursorclass'] = cursors.SSCursor

    repo.sqlargs = sqlargs

    repo.sqlreponame = ui.config("hgsql", "reponame")
    if not repo.sqlreponame:
        raise Exception("missing hgsql.reponame")
    repo.maxcommitsize = ui.configbytes("hgsql", "maxcommitsize",
                                        52428800)  # 50MB
    repo.maxinsertsize = ui.configbytes("hgsql", "maxinsertsize",
                                        1048576)  # 1MB
    repo.maxrowsize = ui.configbytes("hgsql", "maxrowsize", 1048576)
    repo.sqlconn = None
    repo.sqlcursor = None
    repo.disablesync = False
    repo.pendingrevs = []
    repo.heldlocks = set()

    repo.__class__ = sqllocalrepo
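
# A minimal hgrc sketch for this extension (values are illustrative, not
# defaults shipped by the extension):
#
#     [hgsql]
#     enabled = True
#     host = mysql.example.com   # hypothetical host
#     database = hgsql           # hypothetical database name
#     user = hg
#     port = 3306
#     password =
#     reponame = myrepo          # hypothetical repo name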

class bufferedopener(object):
    """Opener implementation that buffers all writes in memory until
    flush or close is called.
    """
    def __init__(self, opener, path, mode):
        self.opener = opener
        self.path = path
        self.mode = mode
        self.buffer = []
        self.closed = False

    def write(self, value):
        if self.closed:
            raise Exception("attempted to write to a closed bufferedopener")
        self.buffer.append(value)

    def flush(self):
        buffer = self.buffer
        self.buffer = []

        if buffer:
            fp = self.opener(self.path, self.mode)
            fp.write(''.join(buffer))
            fp.close()

    def close(self):
        self.flush()
        self.closed = True

def addentries(repo, queue, transaction):
    """Reads new rev entries from a queue and writes them to a buffered
    revlog. At the end it flushes all the new data to disk.
    """
    opener = repo.svfs

    revlogs = {}
    def writeentry(revdata):
        # Instantiates pseudo-revlogs for us to write data directly to
        path, chunk, chunkcount, link, entry, data0, data1 = revdata
        revlog = revlogs.get(path)
        if not revlog:
            revlog = EntryRevlog(opener, path)
            revlogs[path] = revlog

        # Replace the existing openers with buffered ones so we can
        # perform the flush to disk all at once at the end.
        if not hasattr(revlog, 'ifh') or revlog.ifh.closed:
            dfh = None
            if not revlog._inline:
                dfh = bufferedopener(opener, revlog.datafile, "a")
            ifh = bufferedopener(opener, revlog.indexfile, "a+")
            revlog.ifh = ifh
            revlog.dfh = dfh

        revlog.addentry(transaction, revlog.ifh, revlog.dfh, entry,
                        data0, data1)

    leftover = None
    exit = False

    # Read one linkrev at a time from the queue
    while not exit:
        currentlinkrev = -1

        revisions = []
        if leftover:
            revisions.append(leftover)
            leftover = None

        # Read everything from the current linkrev
        while True:
            revdata = queue.get()
            if not revdata:
                exit = True
                break

            # The background thread had an exception, rethrow from the
            # foreground thread.
            if isinstance(revdata, Exception):
                raise revdata

            linkrev = revdata[3]
            if currentlinkrev == -1:
                currentlinkrev = linkrev
            if linkrev == currentlinkrev:
                revisions.append(revdata)
            elif linkrev < currentlinkrev:
                raise CorruptionException("SQL data is not in linkrev order")
            else:
                leftover = revdata
                currentlinkrev = linkrev
                break

        if not revisions:
            continue

        for revdata in revisions:
            writeentry(revdata)

    # Flush filelogs, then manifest, then changelog
    changelog = revlogs.pop("00changelog.i", None)
    manifest = revlogs.pop("00manifest.i", None)

    def flushrevlog(revlog):
        if not revlog.ifh.closed:
            revlog.ifh.flush()
        if revlog.dfh and not revlog.dfh.closed:
            revlog.dfh.flush()

    for filelog in revlogs.itervalues():
        flushrevlog(filelog)

    if manifest:
        flushrevlog(manifest)
    if changelog:
        flushrevlog(changelog)
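
# Flushing filelogs before the manifest, and the manifest before the
# changelog, means a reader that sees a new changelog entry can rely on the
# manifest and file revisions it references already being on disk.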

class EntryRevlog(revlog.revlog):
    """Pseudo-revlog implementation that allows applying data directly to
    the end of the revlog.
    """
    def addentry(self, transaction, ifh, dfh, entry, data0, data1):
        curr = len(self)
        offset = self.end(curr - 1)

        e = struct.unpack(revlog.indexformatng, entry)
        offsettype, datalen, textlen, base, link, p1r, p2r, node = e

        # The first rev has special metadata encoded in it that should be
        # stripped before being added to the index.
        if curr == 0:
            elist = list(e)
            type = revlog.gettype(offsettype)
            offsettype = revlog.offset_type(0, type)
            elist[0] = offsettype
            e = tuple(elist)

        # Verify that the rev's parents and base appear earlier in the revlog
        if p1r >= curr or p2r >= curr:
            raise CorruptionException("parent revision is not in revlog: %s" %
                                      self.indexfile)
        if base > curr:
            raise CorruptionException("base revision is not in revlog: %s" %
                                      self.indexfile)

        expectedoffset = revlog.getoffset(offsettype)
        if expectedoffset != 0 and expectedoffset != offset:
            raise CorruptionException("revision offset doesn't match prior "
                                      "length (%s offset vs %s length): %s" %
                                      (expectedoffset, offset,
                                       self.indexfile))

        if node not in self.nodemap:
            self.index.insert(-1, e)
            self.nodemap[node] = len(self) - 1

        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data0:
                dfh.write(data0)
            dfh.write(data1)
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data0)
            ifh.write(data1)
            self.checkinlinesize(transaction, ifh)

def addgroup(orig, self, bundle, linkmapper, transaction, addrevisioncb=None):
    """Copy paste of revlog.addgroup, but we ensure that the revisions are
    added in linkrev order.
    """
    if not util.safehasattr(transaction, "repo"):
        return orig(self, bundle, linkmapper, transaction,
                    addrevisioncb=addrevisioncb)

    # track the base of the current delta log
    content = []
    node = None

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chunkdatas = []
        chunkmap = {}

        lastlinkrev = -1
        reorder = False

        # Read all of the data from the stream
        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break

            node = chunkdata['node']
            cs = chunkdata['cs']
            link = linkmapper(cs)
            if link < lastlinkrev:
                reorder = True
            lastlinkrev = link
            chunkdatas.append((link, chunkdata))
            chunkmap[node] = chunkdata
            chain = node

        # If we noticed an incoming rev was not in linkrev order,
        # we reorder all the revs appropriately.
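        # Reordering changes which rev precedes which, so a delta whose base
        # would now appear later must be recomputed: getfulltext() below
        # rebuilds full texts by walking the delta chain, and each such rev
        # is re-deltaed against the previous rev in the new order.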
        if reorder:
            chunkdatas = sorted(chunkdatas)

            fulltexts = {}
            def getfulltext(node):
                if node in fulltexts:
                    return fulltexts[node]
                if node in self.nodemap:
                    return self.revision(node)

                chunkdata = chunkmap[node]
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                deltachain = []
                currentbase = deltabase
                while True:
                    if currentbase in fulltexts:
                        deltachain.append(fulltexts[currentbase])
                        break
                    elif currentbase in self.nodemap:
                        deltachain.append(self.revision(currentbase))
                        break
                    elif currentbase == nullid:
                        break
                    else:
                        deltachunk = chunkmap[currentbase]
                        currentbase = deltachunk['deltabase']
                        deltachain.append(deltachunk['delta'])

                prevtext = deltachain.pop()
                while deltachain:
                    prevtext = mdiff.patch(prevtext, deltachain.pop())

                fulltext = mdiff.patch(prevtext, delta)
                fulltexts[node] = fulltext
                return fulltext

            visited = set()
            prevnode = self.node(len(self) - 1)
            for link, chunkdata in chunkdatas:
                node = chunkdata['node']
                deltabase = chunkdata['deltabase']
                if (not deltabase in self.nodemap and
                    not deltabase in visited):
                    fulltext = getfulltext(node)
                    ptext = getfulltext(prevnode)
                    delta = mdiff.textdiff(ptext, fulltext)

                    chunkdata['delta'] = delta
                    chunkdata['deltabase'] = prevnode

                prevnode = node
                visited.add(node)

        # Apply the reordered revs to the revlog
        for link, chunkdata in chunkdatas:
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            content.append(node)

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            chain = self._addrevision(node, None, transaction, link, p1, p2,
                                      revlog.REVIDX_DEFAULT_FLAGS,
                                      (baserev, delta), ifh, dfh)

            if addrevisioncb:
                # Data for added revision can't be read unless flushed
                # because _loadchunk always opens a new file handle and
                # there is no guarantee data was actually written yet.
                if dfh:
                    dfh.flush()
                ifh.flush()
                addrevisioncb(self, chain)

        if not dfh and not self._inline:
            # addrevision switched from inline to conventional
            # reopen the index
            ifh.close()
            dfh = self.opener(self.datafile, "a")
            ifh = self.opener(self.indexfile, "a")
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return content

def bookmarkcommand(orig, ui, repo, *names, **opts):
    if not repo.ui.configbool("hgsql", "enabled"):
        return orig(ui, repo, *names, **opts)

    write = (opts.get('delete') or opts.get('rename')
             or opts.get('inactive') or names)

    def _bookmarkcommand():
        return orig(ui, repo, *names, **opts)

    if write:
        return executewithsql(repo, _bookmarkcommand, True)
    else:
        return _bookmarkcommand()

def bookmarkwrite(orig, self):
    repo = self._repo
    if not repo.ui.configbool("hgsql", "enabled") or repo.disablesync:
        return orig(self)

    if not repo.sqlconn:
        raise util.Abort("attempted bookmark write without sql connection")
    elif not repo.hassqllock(writelock):
        raise util.Abort("attempted bookmark write without write lock")

    try:
        cursor = repo.sqlcursor
        cursor.execute("""DELETE FROM revision_references WHERE repo = %s AND
                       namespace = 'bookmarks'""", (repo.sqlreponame))

        for k, v in self.iteritems():
            cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                           VALUES(%s, 'bookmarks', %s, %s)""",
                           (repo.sqlreponame, k, hex(v)))
        repo.sqlconn.commit()
        return orig(self)
    except:
        repo.sqlconn.rollback()
        raise

def pushkey(orig, repo, proto, namespace, key, old, new):
    if repo.ui.configbool("hgsql", "enabled"):
        def commitpushkey():
            return orig(repo, proto, namespace, key, old, new)

        return executewithsql(repo, commitpushkey, namespace == 'bookmarks')
    else:
        return orig(repo, proto, namespace, key, old, new)

# recover must be a norepo command because loading the repo fails
commands.norepo += " sqlrecover sqlstrip"

@command('^sqlrecover', [
    ('f', 'force', '', _('strips as far back as necessary'), ''),
    ('', 'no-backup', '', _('does not produce backup bundles for strips'), ''),
    ], _('hg sqlrecover'))
def sqlrecover(ui, *args, **opts):
    """
    Strips commits from the local repo until it is back in sync with the SQL
    server.
    """

    global initialsync
    initialsync = INITIAL_SYNC_DISABLE
    repo = hg.repository(ui, ui.environ['PWD'])
    repo.disablesync = True

    if repo.recover():
        ui.status("recovered from incomplete transaction")

    def iscorrupt():
        repo.sqlconnect()
        try:
            repo.syncdb()
        except:
            return True
        finally:
            repo.sqlclose()

        return False

    reposize = len(repo)

    stripsize = 10
    while iscorrupt() and len(repo) > 0:
        if not opts.get('force') and reposize > len(repo) + 10000:
            ui.warn("unable to fix repo after stripping 10000 commits "
                    "(use -f to strip more)")
            return

        striprev = max(0, len(repo) - stripsize)
        nodelist = [repo[striprev].node()]
        ui.status("stripping back to %s commits" % (striprev))

        backup = "none" if opts.get("no-backup") else "all"
        repair.strip(ui, repo, nodelist, backup=backup, topic="sqlrecover")

        stripsize = min(stripsize * 5, 10000)

    if len(repo) == 0:
        ui.warn(_("unable to fix repo corruption\n"))
    elif len(repo) == reposize:
        ui.status(_("local repo was not corrupt - no action taken\n"))
    else:
        ui.status(_("local repo now matches SQL\n"))
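
# Typical invocations (illustrative):
#
#     hg sqlrecover              # strip back in growing steps until in sync
#     hg sqlrecover -f           # keep stripping past the 10000-commit guard
#     hg sqlrecover --no-backup  # skip backup bundles while stripping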

@command('^sqlstrip', [
    ('', 'i-know-what-i-am-doing', None, _('only run sqlstrip if you know ' +
        'exactly what you\'re doing')),
    ], _('hg sqlstrip [OPTIONS] REV'))
def sqlstrip(ui, rev, *args, **opts):
    """strips all revisions greater than or equal to the given revision from
    the sql database

    Deletes all revisions with linkrev >= the given revision from the local
    repo and from the sql database. This is permanent and cannot be undone.
    Once the revisions are deleted from the database, you will need to run
    this command on each master server before proceeding to write new
    revisions.
    """

    if not opts.get('i_know_what_i_am_doing'):
        raise util.Abort("You must pass --i-know-what-i-am-doing to run this "
            "command. If you have multiple servers using the database, this "
            "command will break your servers until you run it on each one. "
            "Only the Mercurial server admins should ever run this.")

    ui.warn("*** YOU ARE ABOUT TO DELETE HISTORY (MANDATORY 5 SECOND WAIT) ***\n")
    time.sleep(5)

    global initialsync
    initialsync = INITIAL_SYNC_DISABLE
    repo = hg.repository(ui, ui.environ['PWD'])
    repo.disablesync = True

    try:
        rev = int(rev)
    except ValueError:
        raise util.Abort("specified rev must be an integer: '%s'" % rev)

    lock = repo.lock()
    try:
        repo.sqlconnect()
        try:
            repo.sqllock(writelock)

            if rev not in repo:
                raise util.Abort("revision %s is not in the repo" % rev)

            reponame = repo.sqlreponame
            cursor = repo.sqlcursor
            changelog = repo.changelog

            revs = repo.revs('%s:' % rev)
            # strip locally
            ui.status("stripping locally\n")
            repair.strip(ui, repo, [changelog.node(r) for r in revs], "all")

            ui.status("stripping from the database\n")
            ui.status("deleting old references\n")
            cursor.execute("""DELETE FROM revision_references
                           WHERE repo = %s""", (reponame,))

            ui.status("adding new head references\n")
            for head in repo.heads():
                cursor.execute("""INSERT INTO revision_references(repo, namespace, value)
                               VALUES(%s, 'heads', %s)""",
                               (reponame, hex(head)))

            ui.status("adding new tip reference\n")
            cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                           VALUES(%s, 'tip', 'tip', %s)""",
                           (reponame, len(repo) - 1))

            ui.status("adding new bookmark references\n")
            for k, v in repo._bookmarks.iteritems():
                cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                               VALUES(%s, 'bookmarks', %s, %s)""",
                               (reponame, k, hex(v)))

            ui.status("deleting revision data\n")
            # Use >= so the given rev itself is removed, matching the local
            # strip above.
            cursor.execute("""DELETE FROM revisions WHERE repo = %s and
                           linkrev >= %s""", (reponame, rev))

            repo.sqlconn.commit()
        finally:
            repo.sqlunlock(writelock)
            repo.sqlclose()
    finally:
        lock.release()