# hgsql.py
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""
CREATE TABLE revisions(
repo CHAR(64) BINARY NOT NULL,
path VARCHAR(512) BINARY NOT NULL,
chunk INT UNSIGNED NOT NULL,
chunkcount INT UNSIGNED NOT NULL,
linkrev INT UNSIGNED NOT NULL,
rev INT UNSIGNED NOT NULL,
node CHAR(40) BINARY NOT NULL,
entry BINARY(64) NOT NULL,
data0 CHAR(1) NOT NULL,
data1 LONGBLOB NOT NULL,
createdtime DATETIME NOT NULL,
INDEX linkrevs (repo, linkrev),
PRIMARY KEY (repo, path, rev, chunk)
);

CREATE TABLE revision_references(
autoid INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
repo CHAR(32) BINARY NOT NULL,
namespace CHAR(32) BINARY NOT NULL,
name VARCHAR(256) BINARY,
value char(40) BINARY NOT NULL,
UNIQUE KEY bookmarkindex (repo, namespace, name)
);
"""

from mercurial.node import bin, hex, nullid, nullrev
from mercurial.i18n import _
from mercurial.extensions import wrapfunction, wrapcommand
from mercurial import changelog, error, cmdutil, revlog, localrepo, transaction
from mercurial import wireproto, bookmarks, repair, commands, hg, mdiff, phases
from mercurial import util, changegroup, exchange
import MySQLdb, struct, time, Queue, threading, _mysql_exceptions
from MySQLdb import cursors
import warnings

cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
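
# Example configuration for a repo served by this extension (a sketch only;
# the host, database, and credential values are placeholders, but the option
# names match the ui.config* calls in this file):
#
#   [hgsql]
#   enabled = True
#   host = mysql.example.com
#   port = 3306
#   database = hgsql
#   user = hg
#   password = secret
#   reponame = myrepo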

writelock = 'write_lock'

disableinitialsync = False

class CorruptionException(Exception):
    pass

def uisetup(ui):
    wrapcommand(commands.table, 'pull', pull)
    wrapcommand(commands.table, 'commit', commit)

    wrapfunction(wireproto, 'unbundle', unbundle)
    wireproto.commands['unbundle'] = (wireproto.unbundle, 'heads')

    wrapfunction(wireproto, 'pushkey', pushkey)
    wireproto.commands['pushkey'] = (wireproto.pushkey, 'namespace key old new')

    wrapcommand(commands.table, 'bookmark', bookmarkcommand)

    def writeentry(orig, self, transaction, ifh, dfh, entry, data, link, offset):
        """records each revlog write to the repo's pendingrev list"""
        if not util.safehasattr(transaction, "repo"):
            return orig(self, transaction, ifh, dfh, entry, data, link, offset)

        e = struct.unpack(revlog.indexformatng, entry)
        node = hex(e[7])
        data0 = data[0] or ''
        transaction.repo.pendingrevs.append((self.indexfile, link,
            len(self) - 1, node, entry, data0, data[1]))
        return orig(self, transaction, ifh, dfh, entry, data, link, offset)
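
    # data here is the (header, text) pair produced by revlog's compress step:
    # data[0] is a one-byte compression marker (the data0 column in SQL) and
    # data[1] is the compressed payload (the data1 column).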
    wrapfunction(revlog.revlog, '_writeentry', writeentry)

    wrapfunction(revlog.revlog, 'addgroup', addgroup)

    wrapfunction(bookmarks.bmstore, '_write', bookmarkwrite)
    wrapfunction(bookmarks, 'updatefromremote', updatefromremote)
    wrapfunction(changegroup, 'addchangegroup', addchangegroup)
    wrapfunction(exchange, '_localphasemove', _localphasemove)

def reposetup(ui, repo):
    if repo.ui.configbool("hgsql", "enabled"):
        wraprepo(repo)

        if not disableinitialsync:
            # Use a noop to force a sync
            def noop():
                pass
            executewithsql(repo, noop)

# Incoming commits are only allowed via push and pull
def unbundle(orig, *args, **kwargs):
    repo = args[0]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def pull(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def commit(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def updatefromremote(orig, *args, **kwargs):
    repo = args[1]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def addchangegroup(orig, *args, **kwargs):
    repo = args[0]
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, *args, **kwargs)
    else:
        return orig(*args, **kwargs)

def _localphasemove(orig, pushop, *args, **kwargs):
    repo = pushop.repo
    if repo.ui.configbool("hgsql", "enabled"):
        return executewithsql(repo, orig, True, pushop, *args, **kwargs)
    else:
        return orig(pushop, *args, **kwargs)

def executewithsql(repo, action, sqllock=False, *args, **kwargs):
    """Executes the given action while having a SQL connection open.
    If sqllock is True, the SQL write lock is held for the duration of
    the action.
    """
    # executewithsql can be executed in a nested scenario (ex: writing
    # bookmarks during a pull), so track whether this call performed
    # the connect.
    connected = False
    if not repo.sqlconn:
        repo.sqlconnect()
        connected = True

    locked = False
    if sqllock and not writelock in repo.heldlocks:
        repo.sqllock(writelock)
        locked = True

    result = None
    success = False
    try:
        if connected:
            repo.syncdb(readonly=not sqllock)
        result = action(*args, **kwargs)
        success = True
    finally:
        try:
            # Release the locks in the reverse order they were obtained
            if locked:
                repo.sqlunlock(writelock)
            if connected:
                repo.sqlclose()
        except _mysql_exceptions.ProgrammingError, ex:
            if success:
                raise
            # If the action caused an exception, hide sql cleanup exceptions,
            # so the real exception is propagated up.
            pass

    return result

def wraprepo(repo):
    class sqllocalrepo(repo.__class__):
        def sqlconnect(self):
            if self.sqlconn:
                raise Exception("SQL connection already open")
            if self.sqlcursor:
                raise Exception("SQL cursor already open without connection")
            self.sqlconn = MySQLdb.connect(**self.sqlargs)
            self.sqlconn.autocommit(False)
            waittimeout = self.ui.config('hgsql', 'waittimeout', '300')
            waittimeout = self.sqlconn.escape_string("%s" % (waittimeout,))
            self.sqlconn.query("SET wait_timeout=%s" % waittimeout)
            self.sqlcursor = self.sqlconn.cursor()

        def sqlclose(self):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.sqlcursor.close()
                self.sqlconn.close()
            self.sqlcursor = None
            self.sqlconn = None
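
        # The sqllock/hassqllock/sqlunlock methods below use MySQL advisory
        # locks (GET_LOCK/IS_USED_LOCK/RELEASE_LOCK). An advisory lock is held
        # by a single connection server-side, so it serializes writers across
        # every hg server that shares this database.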
        def sqllock(self, name, timeout=60):
            escapedname = self.sqlconn.escape_string("%s_%s" % (name, self.sqlreponame))
            # cast to int to prevent passing bad sql data
            timeout = int(timeout)
            self.sqlconn.query("SELECT GET_LOCK('%s', %s)" % (escapedname, timeout))
            result = self.sqlconn.store_result().fetch_row()[0][0]
            if result != 1:
                raise Exception("unable to obtain %s lock" % escapedname)
            self.heldlocks.add(name)

        def hassqllock(self, name):
            if not name in self.heldlocks:
                return False

            escapedname = self.sqlconn.escape_string("%s_%s" % (name, self.sqlreponame))
            self.sqlconn.query("SELECT IS_USED_LOCK('%s')" % (escapedname,))
            lockheldby = self.sqlconn.store_result().fetch_row()[0][0]
            if lockheldby == None:
                raise Exception("unable to check %s lock" % escapedname)

            self.sqlconn.query("SELECT CONNECTION_ID()")
            myconnectid = self.sqlconn.store_result().fetch_row()[0][0]
            if myconnectid == None:
                raise Exception("unable to read connection id")

            return lockheldby == myconnectid

        def sqlunlock(self, name):
            escapedname = self.sqlconn.escape_string("%s_%s" % (name, self.sqlreponame))
            self.sqlconn.query("SELECT RELEASE_LOCK('%s')" % (escapedname,))
            self.sqlconn.store_result().fetch_row()
            self.heldlocks.discard(name)
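
        # Mercurial nests transactions (tr.count tracks the depth), so the
        # pending revisions are only flushed to SQL when the outermost
        # transaction closes; an abort discards them instead.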
        def transaction(self, *args, **kwargs):
            tr = super(sqllocalrepo, self).transaction(*args, **kwargs)
            if tr.count > 1:
                return tr

            def transactionclose(orig):
                if tr.count == 1:
                    self.committodb()
                    del self.pendingrevs[:]
                return orig()

            def transactionabort(orig):
                del self.pendingrevs[:]
                return orig()

            wrapfunction(tr, "_abort", transactionabort)
            wrapfunction(tr, "close", transactionclose)
            tr.repo = self
            return tr

        def needsync(self):
            """Returns a (outofsync, sqlheads, sqlbookmarks, tip) tuple telling
            whether the local repo is out of sync with the database. If
            outofsync is False, the local heads, bookmarks, and tip all match
            the database.
            """
            self.sqlcursor.execute("""SELECT namespace, name, value
                FROM revision_references WHERE repo = %s""", (self.sqlreponame))
            sqlheads = set()
            sqlbookmarks = {}
            tip = -1
            for namespace, name, value in self.sqlcursor:
                if namespace == "heads":
                    sqlheads.add(bin(value))
                elif namespace == "bookmarks":
                    sqlbookmarks[name] = bin(value)
                elif namespace == "tip":
                    tip = int(value)

            heads = set(self.heads())
            bookmarks = self._bookmarks

            outofsync = (heads != sqlheads or bookmarks != sqlbookmarks
                         or tip != len(self) - 1)
            return outofsync, sqlheads, sqlbookmarks, tip

        def syncdb(self, readonly=True):
            if not self.needsync()[0]:
                return

            ui = self.ui
            ui.debug("syncing with mysql\n")

            try:
                lock = self.lock(wait=not readonly)
            except error.LockHeld:
                # Oh well. Don't block read only operations.
                ui.debug("skipping sync for readonly operation\n")
                return

            try:
                # Someone else may have synced us while we were waiting.
                # Restart the transaction so we have access to the latest rows.
                self.sqlconn.rollback()
                outofsync, sqlheads, sqlbookmarks, fetchend = self.needsync()
                if not outofsync:
                    return

                transaction = self.transaction("syncdb")

                self.hook('presyncdb', throw=True)

                try:
                    # Inspect the changelog now that we have the lock
                    fetchstart = len(self.changelog)

                    queue = Queue.Queue()
                    abort = threading.Event()

                    t = threading.Thread(target=self.fetchthread,
                        args=(queue, abort, fetchstart, fetchend))
                    t.setDaemon(True)
                    try:
                        t.start()
                        addentries(self, queue, transaction)
                    finally:
                        abort.set()

                    phases.advanceboundary(self, transaction, phases.public,
                                           self.heads())

                    transaction.close()
                finally:
                    transaction.release()

                # We circumvent the changelog and manifest when we add entries
                # to the revlogs. So clear all the caches.
                self.invalidate()

                heads = set(self.heads())
                heads.discard(nullid)
                if heads != sqlheads:
                    raise CorruptionException("heads don't match after sync")

                if len(self) - 1 != fetchend:
                    raise CorruptionException("tip doesn't match after sync")

                self.disablesync = True
                try:
                    bm = self._bookmarks
                    bm.clear()
                    self.sqlcursor.execute("""SELECT name, value FROM revision_references
                        WHERE namespace = 'bookmarks' AND repo = %s""",
                        (self.sqlreponame))
                    for name, node in self.sqlcursor:
                        node = bin(node)
                        if node in self:
                            bm[name] = node
                    bm.write()
                finally:
                    self.disablesync = False

                if bm != sqlbookmarks:
                    raise CorruptionException("bookmarks don't match after sync")
            finally:
                lock.release()
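
        # fetchthread is the producer half of a producer/consumer pair: it
        # streams revision rows out of MySQL in chunks and feeds them to
        # addentries (the consumer), which applies them to the local revlogs.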
        def fetchthread(self, queue, abort, fetchstart, fetchend):
            """Fetches every revision from fetchstart to fetchend (inclusive)
            and places them on the queue. This function is meant to run on a
            background thread and listens to the abort event to abort early.
            """
            ui = self.ui
            clrev = fetchstart
            chunksize = 1000
            while True:
                if abort.isSet():
                    break

                maxrev = min(clrev + chunksize, fetchend + 1)
                self.sqlcursor.execute("""SELECT path, chunk, chunkcount,
                    linkrev, entry, data0, data1 FROM revisions WHERE repo = %s
                    AND linkrev > %s AND linkrev < %s ORDER BY linkrev ASC""",
                    (self.sqlreponame, clrev - 1, maxrev))

                # Put split chunks back together into a single revision
                groupedrevdata = {}
                for revdata in self.sqlcursor:
                    name = revdata[0]
                    chunk = revdata[1]
                    linkrev = revdata[3]
                    groupedrevdata.setdefault((name, linkrev), {})[chunk] = revdata

                if not groupedrevdata:
                    break

                fullrevisions = []
                for chunks in groupedrevdata.itervalues():
                    chunkcount = chunks[0][2]
                    if chunkcount == 1:
                        fullrevisions.append(chunks[0])
                    elif chunkcount == len(chunks):
                        fullchunk = list(chunks[0])
                        data1 = ""
                        for i in range(0, chunkcount):
                            data1 += chunks[i][6]
                        fullchunk[6] = data1
                        fullrevisions.append(tuple(fullchunk))
                    else:
                        raise Exception("missing revision chunk - expected %s got %s" %
                            (chunkcount, len(chunks)))

                fullrevisions = sorted(fullrevisions, key=lambda revdata: revdata[3])
                for revdata in fullrevisions:
                    queue.put(revdata)

                clrev += chunksize

            queue.put(False)

        def pushkey(self, namespace, key, old, new):
            def _pushkey():
                return super(sqllocalrepo, self).pushkey(namespace, key, old, new)

            return executewithsql(self, _pushkey, namespace == 'bookmarks')
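
        # Each entry in self.pendingrevs is a
        # (path, linkrev, rev, node, entry, data0, data1) tuple, appended by
        # the _writeentry wrapper in uisetup. committodb below splits data1
        # into maxrowsize pieces so individual rows stay small.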
        def committodb(self):
            """Commits all pending revisions to the database."""
            if self.sqlconn == None:
                raise util.Abort("invalid repo change - only hg push and pull" +
                    " are allowed")

            if not self.pendingrevs:
                return

            reponame = self.sqlreponame
            cursor = self.sqlcursor
            maxcommitsize = self.maxcommitsize
            maxrowsize = self.maxrowsize

            self._validatependingrevs()

            try:
                datasize = 0
                for revision in self.pendingrevs:
                    path, linkrev, rev, node, entry, data0, data1 = revision

                    start = 0
                    chunk = 0
                    datalen = len(data1)
                    chunkcount = datalen / maxrowsize
                    if datalen % maxrowsize != 0 or datalen == 0:
                        chunkcount += 1

                    # We keep row size down by breaking large revisions down
                    # into smaller chunks.
                    while chunk == 0 or start < len(data1):
                        end = min(len(data1), start + maxrowsize)
                        datachunk = data1[start:end]
                        cursor.execute("""INSERT INTO revisions(repo, path, chunk,
                            chunkcount, linkrev, rev, node, entry, data0, data1, createdtime)
                            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                            (reponame, path, chunk, chunkcount, linkrev, rev,
                             node, entry, data0, datachunk,
                             time.strftime('%Y-%m-%d %H:%M:%S')))
                        chunk += 1
                        start = end

                        # MySQL transactions can only reach a certain size, so
                        # we commit every so often. As long as we don't update
                        # the tip pushkey, this is ok.
                        datasize += len(datachunk)
                        if datasize > maxcommitsize:
                            self.sqlconn.commit()
                            datasize = 0

                cursor.execute("""DELETE FROM revision_references WHERE repo = %s
                    AND namespace = 'heads'""", (reponame,))

                for head in self.heads():
                    cursor.execute("""INSERT INTO revision_references(repo, namespace, value)
                        VALUES(%s, 'heads', %s)""",
                        (reponame, hex(head)))

                # revision_references has multiple keys (primary key, and a
                # unique index), so mysql gives a warning when using
                # ON DUPLICATE KEY since it would only update one row despite
                # multiple key duplicates. This doesn't matter for us, since we
                # know there is only one row that will share the same key. So
                # suppress the warning.
                cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                    VALUES(%s, 'tip', 'tip', %s) ON DUPLICATE KEY UPDATE value=%s""",
                    (reponame, len(self) - 1, len(self) - 1))

                # Just to be super sure, check the write lock before doing the
                # final commit
                if not self.hassqllock(writelock):
                    raise Exception("attempting to write to sql without holding %s (precommit)"
                        % writelock)

                self.sqlconn.commit()
            except:
                self.sqlconn.rollback()
                raise
            finally:
                del self.pendingrevs[:]

        def _validatependingrevs(self):
            """Validates that the current pending revisions will be valid when
            written to the database.
            """
            reponame = self.sqlreponame
            cursor = self.sqlcursor

            # Ensure we hold the write lock
            if not self.hassqllock(writelock):
                raise Exception("attempting to write to sql without holding %s (prevalidate)"
                    % writelock)

            # Validate that we are appending to the correct linkrev
            cursor.execute("""SELECT value FROM revision_references WHERE repo = %s AND
                namespace = 'tip'""", reponame)
            tipresults = cursor.fetchall()
            if len(tipresults) == 0:
                maxlinkrev = -1
            elif len(tipresults) == 1:
                maxlinkrev = int(tipresults[0][0])
            else:
                raise CorruptionException(("multiple tips for %s in " +
                    " the database") % reponame)

            minlinkrev = min(self.pendingrevs, key=lambda x: x[1])[1]
            if maxlinkrev == None or maxlinkrev != minlinkrev - 1:
                raise CorruptionException("attempting to write non-sequential " +
                    "linkrev %s, expected %s" % (minlinkrev, maxlinkrev + 1))

            # Clean up excess revisions left from interrupted commits.
            # Since MySQL can only hold so much data in a transaction, we allow
            # committing across multiple db transactions. That means if
            # the commit is interrupted, the next transaction needs to clean
            # up bad revisions.
            cursor.execute("""DELETE FROM revisions WHERE repo = %s AND
                linkrev > %s""", (reponame, maxlinkrev))

            # Validate that all rev dependencies (base, p1, p2) have the same
            # node in the database
            pending = set([(path, rev) for path, _, rev, _, _, _, _ in self.pendingrevs])
            expectedrevs = set()
            for revision in self.pendingrevs:
                path, linkrev, rev, node, entry, data0, data1 = revision
                e = struct.unpack(revlog.indexformatng, entry)
                _, _, _, base, _, p1r, p2r, _ = e

                if p1r != nullrev and not (path, p1r) in pending:
                    expectedrevs.add((path, p1r))
                if p2r != nullrev and not (path, p2r) in pending:
                    expectedrevs.add((path, p2r))
                if (base != nullrev and base != rev and
                    not (path, base) in pending):
                    expectedrevs.add((path, base))

            if not expectedrevs:
                return

            # mysql doesn't optimize '(a,b,c) IN ((x,y,z),(m,n,o),...)'
            # queries (they don't use an index), so build an OR'd list of
            # equality clauses instead.
            whereclauses = []
            args = []
            args.append(reponame)
            for path, rev in expectedrevs:
                whereclauses.append("(path, rev, chunk) = (%s, %s, 0)")
                args.append(path)
                args.append(rev)

            whereclause = ' OR '.join(whereclauses)
            cursor.execute("""SELECT path, rev, node FROM revisions WHERE
                repo = %s AND (""" + whereclause + ")",
                args)

            for path, rev, node in cursor:
                rev = int(rev)
                expectedrevs.remove((path, rev))
                rl = None
                if path == '00changelog.i':
                    rl = self.changelog
                elif path == '00manifest.i':
                    rl = self.manifest
                else:
                    rl = revlog.revlog(self.sopener, path)
                localnode = hex(rl.node(rev))
                if localnode != node:
                    raise CorruptionException(("expected node %s at rev %d of " +
                        "%s but found %s") % (node, rev, path, localnode))

            if len(expectedrevs) > 0:
                raise CorruptionException(("unable to verify %d dependent " +
                    "revisions before adding a commit") % (len(expectedrevs)))
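
    # Read the connection settings once at repo setup time; sqlconnect()
    # passes sqlargs straight through to MySQLdb.connect. SSCursor streams
    # results from the server instead of buffering them client-side.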
    ui = repo.ui

    sqlargs = {}
    sqlargs['host'] = ui.config("hgsql", "host")
    sqlargs['db'] = ui.config("hgsql", "database")
    sqlargs['user'] = ui.config("hgsql", "user")
    sqlargs['port'] = ui.configint("hgsql", "port")
    password = ui.config("hgsql", "password", "")
    if password:
        sqlargs['passwd'] = password
    sqlargs['cursorclass'] = cursors.SSCursor

    repo.sqlargs = sqlargs

    repo.sqlreponame = ui.config("hgsql", "reponame")
    if not repo.sqlreponame:
        raise Exception("missing hgsql.reponame")
    repo.maxcommitsize = ui.configbytes("hgsql", "maxcommitsize", 52428800)
    repo.maxrowsize = ui.configbytes("hgsql", "maxrowsize", 1048576)
    repo.sqlconn = None
    repo.sqlcursor = None
    repo.disablesync = False
    repo.pendingrevs = []
    repo.heldlocks = set()

    repo.__class__ = sqllocalrepo

class bufferedopener(object):
    """Opener implementation that buffers all writes in memory until
    flush or close is called.
    """
    def __init__(self, opener, path, mode):
        self.opener = opener
        self.path = path
        self.mode = mode
        self.buffer = []
        self.closed = False

    def write(self, value):
        if self.closed:
            raise Exception("attempted to write to a closed bufferedopener")
        self.buffer.append(value)

    def flush(self):
        buffer = self.buffer
        self.buffer = []

        if buffer:
            fp = self.opener(self.path, self.mode)
            fp.write(''.join(buffer))
            fp.close()

    def close(self):
        self.flush()
        self.closed = True

def addentries(repo, queue, transaction):
    """Reads new rev entries from a queue and writes them to a buffered
    revlog. At the end it flushes all the new data to disk.
    """
    opener = repo.sopener

    revlogs = {}
    def writeentry(revdata):
        # Instantiates pseudo-revlogs for us to write data directly to
        path, chunk, chunkcount, link, entry, data0, data1 = revdata
        revlog = revlogs.get(path)
        if not revlog:
            revlog = EntryRevlog(opener, path)
            revlogs[path] = revlog

        # Replace the existing openers with buffered ones so we can
        # perform the flush to disk all at once at the end.
        if not hasattr(revlog, 'ifh') or revlog.ifh.closed:
            dfh = None
            if not revlog._inline:
                dfh = bufferedopener(opener, revlog.datafile, "a")
            ifh = bufferedopener(opener, revlog.indexfile, "a+")
            revlog.ifh = ifh
            revlog.dfh = dfh

        revlog.addentry(transaction, revlog.ifh, revlog.dfh, entry,
                        data0, data1)

    leftover = None
    exit = False

    # Read one linkrev at a time from the queue
    while not exit:
        currentlinkrev = -1

        revisions = []
        if leftover:
            revisions.append(leftover)
            leftover = None

        # Read everything from the current linkrev
        while True:
            revdata = queue.get()
            if not revdata:
                exit = True
                break

            linkrev = revdata[3]
            if currentlinkrev == -1:
                currentlinkrev = linkrev
            if linkrev == currentlinkrev:
                revisions.append(revdata)
            elif linkrev < currentlinkrev:
                raise CorruptionException("SQL data is not in linkrev order")
            else:
                leftover = revdata
                currentlinkrev = linkrev
                break

        if not revisions:
            continue

        for revdata in revisions:
            writeentry(revdata)
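
    # Flushing the changelog last matters for consistency: a revision only
    # becomes visible once it is reachable from the changelog, so filelog and
    # manifest data must be on disk before the changelog entry that links
    # to it.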
    # Flush filelogs, then manifest, then changelog
    changelog = revlogs.pop("00changelog.i", None)
    manifest = revlogs.pop("00manifest.i", None)

    def flushrevlog(revlog):
        if not revlog.ifh.closed:
            revlog.ifh.flush()
        if revlog.dfh and not revlog.dfh.closed:
            revlog.dfh.flush()

    for filelog in revlogs.itervalues():
        flushrevlog(filelog)

    if manifest:
        flushrevlog(manifest)
    if changelog:
        flushrevlog(changelog)

class EntryRevlog(revlog.revlog):
    """Pseudo-revlog implementation that allows applying data directly to
    the end of the revlog.
    """
    def addentry(self, transaction, ifh, dfh, entry, data0, data1):
        curr = len(self)
        offset = self.end(curr - 1)

        e = struct.unpack(revlog.indexformatng, entry)
        offsettype, datalen, textlen, base, link, p1r, p2r, node = e

        # The first rev has special metadata encoded in it that should be
        # stripped before being added to the index.
        if curr == 0:
            elist = list(e)
            type = revlog.gettype(offsettype)
            offsettype = revlog.offset_type(0, type)
            elist[0] = offsettype
            e = tuple(elist)

        # Verify that the rev's parents and base appear earlier in the revlog
        if p1r >= curr or p2r >= curr:
            raise CorruptionException("parent revision is not in revlog: %s" %
                self.indexfile)
        if base > curr:
            raise CorruptionException("base revision is not in revlog: %s" %
                self.indexfile)

        expectedoffset = revlog.getoffset(offsettype)
        if expectedoffset != 0 and expectedoffset != offset:
            raise CorruptionException("revision offset doesn't match prior length " +
                "(%s offset vs %s length): %s" %
                (expectedoffset, offset, self.indexfile))

        if node not in self.nodemap:
            self.index.insert(-1, e)
            self.nodemap[node] = len(self) - 1

        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data0:
                dfh.write(data0)
            dfh.write(data1)
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data0)
            ifh.write(data1)
            self.checkinlinesize(transaction, ifh)

def addgroup(orig, self, bundle, linkmapper, transaction):
    """Copy paste of revlog.addgroup, but we ensure that the revisions are
    added in linkrev order.
    """
    if not util.safehasattr(transaction, "repo"):
        return orig(self, bundle, linkmapper, transaction)

    # track the base of the current delta log
    content = []
    node = None

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chunkdatas = []
        chunkmap = {}

        lastlinkrev = -1
        reorder = False

        # Read all of the data from the stream
        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break

            node = chunkdata['node']
            cs = chunkdata['cs']
            link = linkmapper(cs)
            if link < lastlinkrev:
                reorder = True
            lastlinkrev = link
            chunkdatas.append((link, chunkdata))
            chunkmap[node] = chunkdata
            chain = node

        # If we noticed an incoming rev was not in linkrev order
        # we reorder all the revs appropriately.
        if reorder:
            chunkdatas = sorted(chunkdatas)
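
            # After sorting, a delta may be based on a revision that now comes
            # later in the stream. getfulltext below reconstructs full texts
            # by walking each delta chain back to something already known, so
            # such deltas can be rebased onto the previous node in the new
            # order.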
            fulltexts = {}
            def getfulltext(node):
                if node in fulltexts:
                    return fulltexts[node]
                if node in self.nodemap:
                    return self.revision(node)

                chunkdata = chunkmap[node]
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                deltachain = []
                currentbase = deltabase
                while True:
                    if currentbase in fulltexts:
                        deltachain.append(fulltexts[currentbase])
                        break
                    elif currentbase in self.nodemap:
                        deltachain.append(self.revision(currentbase))
                        break
                    elif currentbase == nullid:
                        break
                    else:
                        deltachunk = chunkmap[currentbase]
                        currentbase = deltachunk['deltabase']
                        deltachain.append(deltachunk['delta'])

                prevtext = deltachain.pop()
                while deltachain:
                    prevtext = mdiff.patch(prevtext, deltachain.pop())

                fulltext = mdiff.patch(prevtext, delta)
                fulltexts[node] = fulltext
                return fulltext

            reorders = 0
            visited = set()
            prevnode = self.node(len(self) - 1)
            for link, chunkdata in chunkdatas:
                node = chunkdata['node']
                deltabase = chunkdata['deltabase']
                if (not deltabase in self.nodemap and
                    not deltabase in visited):
                    fulltext = getfulltext(node)
                    ptext = getfulltext(prevnode)
                    delta = mdiff.textdiff(ptext, fulltext)

                    chunkdata['delta'] = delta
                    chunkdata['deltabase'] = prevnode
                    reorders += 1

                prevnode = node
                visited.add(node)

        # Apply the reordered revs to the revlog
        for link, chunkdata in chunkdatas:
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            content.append(node)

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            self._addrevision(node, None, transaction, link,
                              p1, p2, (baserev, delta), ifh, dfh)
            if not dfh and not self._inline:
                # addrevision switched from inline to conventional
                # reopen the index
                ifh.close()
                dfh = self.opener(self.datafile, "a")
                ifh = self.opener(self.indexfile, "a")
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return content

def bookmarkcommand(orig, ui, repo, *names, **opts):
    if not repo.ui.configbool("hgsql", "enabled"):
        return orig(ui, repo, *names, **opts)

    write = (opts.get('delete') or opts.get('rename')
             or opts.get('inactive') or names)

    def _bookmarkcommand():
        return orig(ui, repo, *names, **opts)

    if write:
        return executewithsql(repo, _bookmarkcommand, True)
    else:
        return _bookmarkcommand()

def bookmarkwrite(orig, self, fp):
    repo = self._repo
    if not repo.ui.configbool("hgsql", "enabled") or repo.disablesync:
        return orig(self, fp)

    if not repo.sqlconn:
        raise util.Abort("attempted bookmark write without sql connection")
    elif not repo.hassqllock(writelock):
        raise util.Abort("attempted bookmark write without write lock")

    try:
        cursor = repo.sqlcursor
        cursor.execute("""DELETE FROM revision_references WHERE repo = %s AND
            namespace = 'bookmarks'""", (repo.sqlreponame))

        for k, v in self.iteritems():
            cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                VALUES(%s, 'bookmarks', %s, %s)""",
                (repo.sqlreponame, k, hex(v)))
        repo.sqlconn.commit()
        return orig(self, fp)
    except:
        repo.sqlconn.rollback()
        raise
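
# Only bookmark pushkeys take the SQL write lock, since bookmarks are the only
# pushkey namespace mirrored into the revision_references table.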
def pushkey(orig, repo, proto, namespace, key, old, new):
    if repo.ui.configbool("hgsql", "enabled"):
        def commitpushkey():
            return orig(repo, proto, namespace, key, old, new)

        return executewithsql(repo, commitpushkey, namespace == 'bookmarks')
    else:
        return orig(repo, proto, namespace, key, old, new)

# recover must be a norepo command because loading the repo fails
commands.norepo += " sqlrecover sqlstrip"

@command('^sqlrecover', [
    ('f', 'force', '', _('strips as far back as necessary'), ''),
    ('', 'no-backup', '', _('does not produce backup bundles for strips'), ''),
    ], _('hg sqlrecover'))
def sqlrecover(ui, *args, **opts):
    """
    Strips commits from the local repo until it is back in sync with the SQL
    server.
    """

    global disableinitialsync
    disableinitialsync = True
    repo = hg.repository(ui, ui.environ['PWD'])
    repo.disablesync = True

    if repo.recover():
        ui.status("recovered from incomplete transaction\n")

    def iscorrupt():
        repo.sqlconnect()
        try:
            repo.syncdb()
        except:
            return True
        finally:
            repo.sqlclose()

        return False

    reposize = len(repo)

    stripsize = 10
    while iscorrupt() and len(repo) > 0:
        if not opts.get('force') and reposize > len(repo) + 10000:
            ui.warn("unable to fix repo after stripping 10000 commits " +
                    "(use -f to strip more)\n")
            return

        striprev = max(0, len(repo) - stripsize)
        nodelist = [repo[striprev].node()]
        ui.status("stripping back to %s commits\n" % (striprev))

        # option names are normalized to underscores in opts
        backup = "none" if opts.get("no_backup") else "all"
        repair.strip(ui, repo, nodelist, backup=backup, topic="sqlrecover")

        stripsize = min(stripsize * 5, 10000)

    if len(repo) == 0:
        ui.warn(_("unable to fix repo corruption\n"))
    elif len(repo) == reposize:
        ui.status(_("local repo was not corrupt - no action taken\n"))
    else:
        ui.status(_("local repo now matches SQL\n"))

@command('^sqlstrip', [
    ('', 'i-know-what-i-am-doing', None, _('only run sqlstrip if you know ' +
        'exactly what you\'re doing')),
    ], _('hg sqlstrip [OPTIONS] REV'))
def sqlstrip(ui, rev, *args, **opts):
    """strips all revisions greater than or equal to the given revision from the sql database

    Deletes all revisions with linkrev >= the given revision from the local
    repo and from the sql database. This is permanent and cannot be undone.
    Once the revisions are deleted from the database, you will need to run
    this command on each master server before proceeding to write new revisions.
    """

    if not opts.get('i_know_what_i_am_doing'):
        raise util.Abort("You must pass --i-know-what-i-am-doing to run this " +
            "command. If you have multiple servers using the database, this " +
            "command will break your servers until you run it on each one. " +
            "Only the Mercurial server admins should ever run this.")

    global disableinitialsync
    disableinitialsync = True
    repo = hg.repository(ui, ui.environ['PWD'])
    repo.disablesync = True

    try:
        rev = int(rev)
    except ValueError:
        raise util.Abort("specified rev must be an integer: '%s'" % rev)

    lock = repo.lock()
    try:
        repo.sqlconnect()
        try:
            repo.sqllock(writelock)

            if rev not in repo:
                raise util.Abort("revision %s is not in the repo" % rev)

            reponame = repo.sqlreponame
            cursor = repo.sqlcursor
            changelog = repo.changelog

            revs = repo.revs('%s:' % rev)
            # strip locally
            ui.status("stripping locally\n")
            repair.strip(ui, repo, [changelog.node(r) for r in revs], "all")

            ui.status("stripping from the database\n")
            ui.status("deleting old references\n")
            cursor.execute("""DELETE FROM revision_references WHERE repo = %s""", (reponame,))

            ui.status("adding new head references\n")
            for head in repo.heads():
                cursor.execute("""INSERT INTO revision_references(repo, namespace, value)
                    VALUES(%s, 'heads', %s)""",
                    (reponame, hex(head)))

            ui.status("adding new tip reference\n")
            cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                VALUES(%s, 'tip', 'tip', %s)""",
                (reponame, len(repo) - 1))

            ui.status("adding new bookmark references\n")
            for k, v in repo._bookmarks.iteritems():
                cursor.execute("""INSERT INTO revision_references(repo, namespace, name, value)
                    VALUES(%s, 'bookmarks', %s, %s)""",
                    (reponame, k, hex(v)))

            ui.status("deleting revision data\n")
            # delete rev itself as well, matching the local strip above and
            # the "greater than or equal" promise in the docstring
            cursor.execute("""DELETE FROM revisions WHERE repo = %s and linkrev >= %s""",
                (reponame, rev))

            repo.sqlconn.commit()
        finally:
            repo.sqlunlock(writelock)
            repo.sqlclose()
    finally:
        lock.release()