# hgsql.py
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""
|
|
|
|
CREATE TABLE revisions(
|
2014-01-22 04:05:27 +04:00
|
|
|
repo CHAR(32) BINARY NOT NULL,
|
|
|
|
path VARCHAR(256) BINARY NOT NULL,
|
2014-01-02 23:06:45 +04:00
|
|
|
chunk INT UNSIGNED NOT NULL,
|
|
|
|
chunkcount INT UNSIGNED NOT NULL,
|
|
|
|
linkrev INT UNSIGNED NOT NULL,
|
2014-01-22 04:05:27 +04:00
|
|
|
rev INT UNSIGNED NOT NULL,
|
|
|
|
node CHAR(40) BINARY NOT NULL,
|
2014-01-02 23:06:45 +04:00
|
|
|
entry BINARY(64) NOT NULL,
|
|
|
|
data0 CHAR(1) NOT NULL,
|
|
|
|
data1 LONGBLOB NOT NULL,
|
|
|
|
createdtime DATETIME NOT NULL,
|
2014-01-22 04:05:27 +04:00
|
|
|
INDEX linkrevs (repo, linkrev),
|
|
|
|
PRIMARY KEY (repo, path, rev, chunk)
|
2014-01-02 23:06:45 +04:00
|
|
|
);
|
|
|
|
|
|
|
|
CREATE TABLE pushkeys(
|
|
|
|
autoid INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
|
2014-01-22 04:05:27 +04:00
|
|
|
repo CHAR(32) BINARY NOT NULL,
|
|
|
|
namespace CHAR(32) BINARY NOT NULL,
|
|
|
|
name VARCHAR(256) BINARY,
|
|
|
|
value char(40) BINARY NOT NULL,
|
2014-01-02 23:06:45 +04:00
|
|
|
UNIQUE KEY bookmarkindex (repo, namespace, name)
|
|
|
|
);
|
|
|
|
"""
|
2013-10-29 03:09:47 +04:00
|
|
|
|
2013-10-18 03:46:12 +04:00
|
|
|
from mercurial.node import bin, hex, nullid, nullrev
|
|
|
|
from mercurial.i18n import _
|
2013-10-29 23:55:14 +04:00
|
|
|
from mercurial.extensions import wrapfunction, wrapcommand
|
2013-10-18 03:47:34 +04:00
|
|
|
from mercurial import changelog, error, cmdutil, revlog, localrepo, transaction
|
2013-10-31 21:37:49 +04:00
|
|
|
from mercurial import wireproto, bookmarks, repair, commands, hg, mdiff, phases
|
2013-12-11 07:00:55 +04:00
|
|
|
import MySQLdb, struct, time, Queue, threading, _mysql_exceptions
|
2013-10-18 03:46:12 +04:00
|
|
|
from MySQLdb import cursors
|
|
|
|
|
|
|
|
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'

# Names of the MySQL advisory locks (GET_LOCK); sqllock suffixes them with
# the repo name at runtime.
bookmarklock = 'bookmark_lock'
commitlock = 'commit_lock'

# Maximum size of a single revisions.data1 value; committodb splits larger
# revision data across multiple chunk rows.
maxrecordsize = 1024 * 1024

# Set to True by sqlrecover so reposetup skips the automatic initial sync.
disableinitialsync = False
|
2013-10-18 03:46:12 +04:00
|
|
|
|
2013-10-29 05:57:18 +04:00
|
|
|
class CorruptionException(Exception):
    """Raised when the local revlogs and the SQL database disagree."""
    pass
|
|
|
|
|
2013-10-18 03:46:12 +04:00
|
|
|
def uisetup(ui):
    """Wrap the entry points through which new commits and bookmarks can
    reach the repository, so they go through the sql layer."""
    # pull and the wireproto unbundle are wrapped to run under the sql
    # commit lock (see pull/unbundle below).
    wrapcommand(commands.table, 'pull', pull)
    wrapfunction(wireproto, 'unbundle', unbundle)
    wireproto.commands['unbundle'] = (wireproto.unbundle, 'heads')

    def writeentry(orig, self, transaction, ifh, dfh, entry, data, link, offset):
        """records each revlog write to the repo's pendingrev list"""
        # Decode the packed index entry; field 7 of the revlogng format is
        # the node.
        e = struct.unpack(revlog.indexformatng, entry)
        node = hex(e[7])
        data0 = data[0] or ''
        transaction.repo.pendingrevs.append((self.indexfile, link,
            len(self) - 1, node, entry, data0, data[1]))
        return orig(self, transaction, ifh, dfh, entry, data, link, offset)
    wrapfunction(revlog.revlog, '_writeentry', writeentry)

    # Ensure incoming revisions are added in linkrev order (see addgroup).
    wrapfunction(revlog.revlog, 'addgroup', addgroup)
    # Mirror bookmark writes into the pushkeys table (see bookmarkwrite).
    wrapfunction(bookmarks.bmstore, 'write', bookmarkwrite)
|
|
|
|
|
2013-10-18 03:46:12 +04:00
|
|
|
def reposetup(ui, repo):
    """Enable sql mirroring for repos with hgsql.enabled set, and perform
    an initial sync with the database."""
    if not repo.ui.configbool("hgsql", "enabled"):
        return

    wraprepo(repo)

    if disableinitialsync:
        return

    # Running a no-op action under executewithsql forces a sync.
    executewithsql(repo, lambda: None)
|
2013-10-29 03:09:47 +04:00
|
|
|
|
2013-11-22 00:32:05 +04:00
|
|
|
# Handle incoming commits
def unbundle(orig, *args, **kwargs):
    # In the wireproto calling convention the repo is the first positional
    # argument; run the real unbundle under the sql commit lock.
    return executewithsql(args[0], orig, commitlock, *args, **kwargs)
|
|
|
|
|
|
|
|
def pull(orig, *args, **kwargs):
    # For wrapped commands the repo is the second argument (after ui); run
    # the real pull under the sql commit lock.
    return executewithsql(args[1], orig, commitlock, *args, **kwargs)
|
2013-10-29 05:57:18 +04:00
|
|
|
|
2014-01-03 00:19:50 +04:00
|
|
|
def executewithsql(repo, action, lock=None, *args, **kwargs):
    """Run ``action(*args, **kwargs)`` with a sql connection open.

    Opens a connection (and syncs with the database) if one is not already
    open — so nested calls reuse the outer connection — and optionally
    holds the named advisory lock for the duration. Returns whatever
    ``action`` returns.
    """
    connected = False
    if not repo.sqlconn:
        repo.sqlconnect()
        connected = True
    if lock:
        repo.sqllock(lock)

    result = None
    success = False
    try:
        # A fresh connection means we may be behind the database; sync first.
        if connected:
            repo.syncdb()
        result = action(*args, **kwargs)
        success = True
    finally:
        try:
            if lock:
                repo.sqlunlock(lock)
            if connected:
                repo.sqlclose()
        except _mysql_exceptions.ProgrammingError, ex:
            if success:
                raise
            # if the action caused an exception, hide sql cleanup exceptions,
            # so the real exception is propagated up
            pass

    return result
|
|
|
|
|
2013-11-22 00:32:05 +04:00
|
|
|
def wraprepo(repo):
    """Swap the repo's class for one that mirrors commits, heads and
    bookmarks to the sql database, and attach the sql connection state
    (sqlargs, sqlconn, sqlcursor, pendingrevs, ...) to the repo."""
    class sqllocalrepo(repo.__class__):
        def sqlconnect(self):
            """Open the MySQL connection and cursor for this repo."""
            if self.sqlconn:
                raise Exception("SQL connection already open")
            if self.sqlcursor:
                raise Exception("SQL cursor already open without connection")
            self.sqlconn = MySQLdb.connect(**self.sqlargs)
            # Commits are explicit so all writes can be rolled back as a unit.
            self.sqlconn.autocommit(False)
            self.sqlconn.query("SET SESSION wait_timeout=300;")
            self.sqlcursor = self.sqlconn.cursor()

        def sqlclose(self):
            """Close the cursor and connection and reset them to None."""
            self.sqlcursor.close()
            self.sqlconn.close()
            self.sqlcursor = None
            self.sqlconn = None

        def sqllock(self, name, timeout=60):
            """Acquire a repo-scoped MySQL advisory lock (GET_LOCK).

            Raises if the lock cannot be obtained within ``timeout`` seconds.
            """
            name = self.sqlconn.escape_string("%s_%s" % (name, self.sqlreponame))
            # cast to int to prevent passing bad sql data
            timeout = int(timeout)
            self.sqlconn.query("SELECT GET_LOCK('%s', %s)" % (name, timeout))
            # GET_LOCK returns 1 on success; anything else is a failure.
            result = self.sqlconn.store_result().fetch_row()[0][0]
            if result != 1:
                raise Exception("unable to obtain %s lock" % name)

        def sqlunlock(self, name):
            """Release an advisory lock previously taken by sqllock."""
            name = self.sqlconn.escape_string("%s_%s" % (name, self.sqlreponame))
            self.sqlconn.query("SELECT RELEASE_LOCK('%s')" % (name,))
            self.sqlconn.store_result().fetch_row()

        def transaction(self, *args, **kwargs):
            """Wrap the transaction so a successful close writes pendingrevs
            to the database and an abort discards them."""
            tr = super(sqllocalrepo, self).transaction(*args, **kwargs)
            # Only hook the outermost transaction.
            if tr.count > 1:
                return tr

            def transactionclose(orig):
                if tr.count == 1:
                    self.committodb()
                    del self.pendingrevs[:]
                return orig()

            def transactionabort(orig):
                del self.pendingrevs[:]
                return orig()

            wrapfunction(tr, "_abort", transactionabort)
            wrapfunction(tr, "close", transactionclose)
            # The _writeentry wrapper reads transaction.repo.pendingrevs.
            tr.repo = self
            return tr

        def needsync(self):
            """Returns an (outofsync, sqlheads, sqlbookmarks) tuple.

            outofsync is True if the local repo's heads or bookmarks differ
            from what the database's pushkeys table records.
            """
            self.sqlcursor.execute("""SELECT namespace, name, value
                FROM pushkeys WHERE repo = %s""", (self.sqlreponame))
            sqlheads = set()
            sqlbookmarks = {}
            for namespace, name, node in self.sqlcursor:
                if namespace == "heads":
                    sqlheads.add(bin(node))
                elif namespace == "bookmarks":
                    sqlbookmarks[name] = bin(node)

            heads = set(self.heads())
            bookmarks = self._bookmarks

            outofsync = heads != sqlheads or bookmarks != sqlbookmarks
            return outofsync, sqlheads, sqlbookmarks

        def syncdb(self):
            """Pull any revisions, heads and bookmarks the local repo is
            missing from the database, verifying the result."""
            if not self.needsync()[0]:
                return

            ui = self.ui
            ui.debug("syncing with mysql\n")

            lock = self.lock()
            try:
                # someone else may have synced us while we were waiting
                outofsync, sqlheads, sqlbookmarks = self.needsync()
                if not outofsync:
                    return

                transaction = self.transaction("syncdb")

                try:
                    # Inspect the changelog now that we have the lock
                    fetchstart = len(self.changelog)

                    queue = Queue.Queue()
                    abort = threading.Event()

                    # fetchthread reads revision rows from sql on a daemon
                    # thread while addentries applies them here.
                    t = threading.Thread(target=self.fetchthread,
                        args=(queue, abort, fetchstart))
                    t.setDaemon(True)
                    try:
                        t.start()
                        addentries(self, queue, transaction)
                    finally:
                        abort.set()

                    phases.advanceboundary(self, phases.public, self.heads())

                    transaction.close()
                finally:
                    transaction.release()

                # We circumvent the changelog and manifest when we add entries
                # to the revlogs. So clear all the caches.
                self.invalidate()

                heads = set(self.heads())
                heads.discard(nullid)
                if heads != sqlheads:
                    raise CorruptionException("heads don't match after sync")

                # Replace local bookmarks with the database's; disablesync
                # keeps bookmarkwrite from pushing them straight back.
                self.disablesync = True
                try:
                    bm = self._bookmarks
                    bm.clear()
                    self.sqlcursor.execute("""SELECT name, value FROM pushkeys
                        WHERE namespace = 'bookmarks' AND repo = %s""",
                        (self.sqlreponame))
                    for name, node in self.sqlcursor:
                        node = bin(node)
                        if node in self:
                            bm[name] = node
                    bm.write()
                finally:
                    self.disablesync = False

                if bm != sqlbookmarks:
                    raise CorruptionException("bookmarks don't match after sync")
            finally:
                lock.release()

        def fetchthread(self, queue, abort, fetchstart):
            """Fetch all revisions with linkrev >= fetchstart from the
            database and put them on ``queue`` in linkrev order, ending with
            a False sentinel. Runs on a background thread until done or
            ``abort`` is set."""
            ui = self.ui
            clrev = fetchstart
            chunksize = 1000
            while True:
                if abort.isSet():
                    break

                self.sqlcursor.execute("""SELECT path, chunk, chunkcount,
                    linkrev, entry, data0, data1 FROM revisions WHERE repo = %s
                    AND linkrev > %s AND linkrev < %s ORDER BY linkrev ASC""",
                    (self.sqlreponame, clrev - 1, clrev + chunksize))

                # put split chunks back together
                groupedrevdata = {}
                for revdata in self.sqlcursor:
                    name = revdata[0]
                    chunk = revdata[1]
                    linkrev = revdata[3]
                    groupedrevdata.setdefault((name, linkrev), {})[chunk] = revdata

                if not groupedrevdata:
                    break

                fullrevisions = []
                for chunks in groupedrevdata.itervalues():
                    chunkcount = chunks[0][2]
                    if chunkcount == 1:
                        fullrevisions.append(chunks[0])
                    elif chunkcount == len(chunks):
                        # Reassemble data1 from the ordered chunk rows.
                        fullchunk = list(chunks[0])
                        data1 = ""
                        for i in range(0, chunkcount):
                            data1 += chunks[i][6]
                        fullchunk[6] = data1
                        fullrevisions.append(tuple(fullchunk))
                    else:
                        raise Exception("missing revision chunk - expected %s got %s" %
                            (chunkcount, len(chunks)))

                fullrevisions = sorted(fullrevisions, key=lambda revdata: revdata[3])
                for revdata in fullrevisions:
                    queue.put(revdata)

                clrev += chunksize
                if (clrev - fetchstart) % 5000 == 0:
                    ui.debug("Queued %s\n" % (clrev))

            # False sentinel tells the consumer (addentries) we are done.
            queue.put(False)

        def committodb(self):
            """Write the accumulated pendingrevs and the current heads to the
            database as one sql transaction."""
            if self.sqlconn == None:
                raise Exception("invalid repo change - only hg push and pull are allowed")

            if not self.pendingrevs:
                return

            reponame = self.sqlreponame
            cursor = self.sqlcursor

            # Linkrevs must be written sequentially; verify we pick up
            # exactly where the database left off.
            cursor.execute("""SELECT max(linkrev) FROM revisions WHERE repo = %s""",
                reponame)
            maxlinkrev = cursor.fetchall()[0][0]
            minlinkrev = min(self.pendingrevs, key= lambda x: x[2])
            if maxlinkrev != None and maxlinkrev != minlinkrev[2] - 1:
                raise CorruptionException("attempting to write non-sequential " +
                    "linkrev %s, expected %s" % (minlinkrev[2], maxlinkrev + 1))

            # Commit to db
            try:
                for revision in self.pendingrevs:
                    path, linkrev, rev, node, entry, data0, data1 = revision

                    # Split data1 into maxrecordsize-sized rows; an empty
                    # data1 still gets one (empty) row.
                    start = 0
                    chunk = 0
                    datalen = len(data1)
                    chunkcount = datalen / maxrecordsize
                    if datalen % maxrecordsize != 0 or datalen == 0:
                        chunkcount += 1
                    while chunk == 0 or start < len(data1):
                        end = min(len(data1), start + maxrecordsize)
                        datachunk = data1[start:end]
                        cursor.execute("""INSERT INTO revisions(repo, path, chunk,
                            chunkcount, linkrev, rev, node, entry, data0, data1, createdtime)
                            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                            (reponame, path, chunk, chunkcount, linkrev, rev,
                             node, entry, data0, datachunk,
                             time.strftime('%Y-%m-%d %H:%M:%S')))
                        chunk += 1
                        start = end

                # Replace the recorded heads with the repo's current ones.
                cursor.execute("""DELETE FROM pushkeys WHERE repo = %s
                    AND namespace = 'heads'""", (reponame))

                for head in self.heads():
                    cursor.execute("""INSERT INTO pushkeys(repo, namespace, value)
                        VALUES(%s, 'heads', %s)""",
                        (reponame, hex(head)))

                self.sqlconn.commit()
            except:
                self.sqlconn.rollback()
                raise
            finally:
                del self.pendingrevs[:]

    # Build the MySQLdb connection arguments from the hgsql config section.
    ui = repo.ui
    sqlargs = {}
    sqlargs['host'] = ui.config("hgsql", "host")
    sqlargs['db'] = ui.config("hgsql", "database")
    sqlargs['user'] = ui.config("hgsql", "user")
    sqlargs['port'] = ui.configint("hgsql", "port")
    password = ui.config("hgsql", "password", "")
    if password:
        sqlargs['passwd'] = password
    # SSCursor streams result rows instead of fetching them all at once.
    sqlargs['cursorclass'] = cursors.SSCursor

    repo.sqlreponame = ui.config("hgsql", "reponame")
    if not repo.sqlreponame:
        raise Exception("missing hgsql.reponame")
    repo.sqlargs = sqlargs
    repo.sqlconn = None
    repo.sqlcursor = None
    repo.disablesync = False
    repo.pendingrevs = []
    repo.__class__ = sqllocalrepo
|
2013-10-31 21:37:49 +04:00
|
|
|
|
|
|
|
class bufferedopener(object):
    """Opener wrapper that buffers writes in memory and flushes them lazily.

    The underlying file is only opened (via ``opener(path, mode)``) when
    flush() is called with pending data, so many small writes become one
    large write and no file handle is held in between.
    """
    def __init__(self, opener, path, mode):
        self.opener = opener  # callable: opener(path, mode) -> file object
        self.path = path
        self.mode = mode
        self.buffer = []      # pending, not-yet-flushed chunks
        self.closed = False

    def write(self, value):
        """Append ``value`` to the in-memory buffer.

        Raises Exception if the opener has already been closed.
        """
        if self.closed:
            # Previously raised with an empty message, which made failures
            # impossible to diagnose; include what and where.
            raise Exception("attempted to write to closed bufferedopener: %s"
                            % self.path)
        self.buffer.append(value)

    def flush(self):
        """Write any buffered data to the underlying file."""
        # Swap the buffer out first (avoid shadowing the `buffer` builtin).
        pending = self.buffer
        self.buffer = []

        if pending:
            fp = self.opener(self.path, self.mode)
            fp.write(''.join(pending))
            fp.close()

    def close(self):
        """Flush pending data and reject any further writes."""
        self.flush()
        self.closed = True
|
|
|
|
|
|
|
|
def addentries(repo, queue, transaction):
    """Read revision data off ``queue`` (as produced by fetchthread and
    terminated by a False sentinel) and append it to the matching revlogs,
    one linkrev at a time. Writes are buffered and flushed in
    filelog -> manifest -> changelog order (changelog last)."""
    opener = repo.sopener

    # One EntryRevlog per revlog path, created lazily on first write.
    revlogs = {}
    def writeentry(revdata):
        path, chunk, chunkcount, link, entry, data0, data1 = revdata
        revlog = revlogs.get(path)
        if not revlog:
            revlog = EntryRevlog(opener, path)
            revlogs[path] = revlog

        # Lazily open buffered handles for the index file (and the data
        # file, when the revlog is not inline).
        if not hasattr(revlog, 'ifh') or revlog.ifh.closed:
            dfh = None
            if not revlog._inline:
                dfh = bufferedopener(opener, revlog.datafile, "a")
            ifh = bufferedopener(opener, revlog.indexfile, "a+")
            revlog.ifh = ifh
            revlog.dfh = dfh

        revlog.addentry(transaction, revlog.ifh, revlog.dfh, entry,
                        data0, data1)
        revlog.dirty = True

    clrev = len(repo)
    leftover = None
    exit = False

    # Read one linkrev at a time from the queue
    while not exit:
        currentlinkrev = -1

        revisions = []
        if leftover:
            revisions.append(leftover)
            leftover = None

        # Read everything from the current linkrev
        while True:
            revdata = queue.get()
            if not revdata:
                # False sentinel: the producer thread is done.
                exit = True
                break

            linkrev = revdata[3]
            if currentlinkrev == -1:
                currentlinkrev = linkrev
            if linkrev == currentlinkrev:
                revisions.append(revdata)
            elif linkrev < currentlinkrev:
                raise Exception("SQL data is not in linkrev order")
            else:
                # First entry of the next linkrev; keep it for the next pass.
                leftover = revdata
                currentlinkrev = linkrev
                break

        if not revisions:
            continue

        for revdata in revisions:
            writeentry(revdata)

        clrev += 1

    # Flush filelogs, then manifest, then changelog
    changelog = revlogs.pop("00changelog.i", None)
    manifest = revlogs.pop("00manifest.i", None)

    def flushrevlog(revlog):
        if not revlog.ifh.closed:
            revlog.ifh.flush()
        if revlog.dfh and not revlog.dfh.closed:
            revlog.dfh.flush()

    for filelog in revlogs.itervalues():
        flushrevlog(filelog)

    if manifest:
        flushrevlog(manifest)
    if changelog:
        flushrevlog(changelog)
|
2013-10-18 03:46:12 +04:00
|
|
|
|
|
|
|
class EntryRevlog(revlog.revlog):
    """Revlog that can append a pre-built index entry plus its raw data
    chunks directly, validating the entry against the revlog's current
    state first."""
    def addentry(self, transaction, ifh, dfh, entry, data0, data1):
        curr = len(self)
        offset = self.end(curr - 1)

        e = struct.unpack(revlog.indexformatng, entry)
        offsettype, datalen, textlen, base, link, p1r, p2r, node = e
        # For the very first revision, rebuild the offset/type field with a
        # zero offset, keeping only the type flags (the on-disk field of
        # rev 0 is special-cased by the revlog format).
        if curr == 0:
            elist = list(e)
            type = revlog.gettype(offsettype)
            offsettype = revlog.offset_type(0, type)
            elist[0] = offsettype
            e = tuple(elist)

        # Verify that the revlog is in a good state
        if p1r >= curr or p2r >= curr:
            raise CorruptionException("parent revision is not in revlog: %s" % self.indexfile)
        if base > curr:
            raise CorruptionException("base revision is not in revlog: %s" % self.indexfile)

        expectedoffset = revlog.getoffset(offsettype)
        if expectedoffset != 0 and expectedoffset != offset:
            raise CorruptionException("revision offset doesn't match prior length " +
                "(%s offset vs %s length): %s" %
                (expectedoffset, offset, self.indexfile))

        # Update the in-memory index/nodemap (skip if we already have it).
        if node not in self.nodemap:
            self.index.insert(-1, e)
            self.nodemap[node] = len(self) - 1

        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data0:
                dfh.write(data0)
            dfh.write(data1)
            ifh.write(entry)
        else:
            # Inline revlogs interleave entries and data in the index file.
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data0)
            ifh.write(data1)
            self.checkinlinesize(transaction, ifh)
|
2013-10-18 03:46:12 +04:00
|
|
|
|
2013-10-30 02:55:43 +04:00
|
|
|
def addgroup(orig, self, bundle, linkmapper, transaction):
    """
    copy paste of revlog.addgroup, but we ensure that the revisions are added
    in linkrev order.
    """
    # track the base of the current delta log
    content = []
    node = None

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chunkdatas = []
        chunkmap = {}

        # Read all the chunks up front, noting whether any arrive out of
        # linkrev order.
        lastlinkrev = -1
        reorder = False

        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break

            node = chunkdata['node']
            cs = chunkdata['cs']
            link = linkmapper(cs)
            if link < lastlinkrev:
                reorder = True
            lastlinkrev = link
            chunkdatas.append((link, chunkdata))
            chunkmap[node] = chunkdata
            chain = node

        if reorder:
            # Sorting by linkrev can break delta chains (a delta may now
            # precede its base), so recompute those deltas against the
            # previous node in the new order.
            chunkdatas = sorted(chunkdatas)

            fulltexts = {}
            def getfulltext(node):
                # Resolve a node's full text, patching together its delta
                # chain from already-known texts and the incoming chunks.
                if node in fulltexts:
                    return fulltexts[node]
                if node in self.nodemap:
                    return self.revision(node)

                chunkdata = chunkmap[node]
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                deltachain = []
                currentbase = deltabase
                while True:
                    if currentbase in fulltexts:
                        deltachain.append(fulltexts[currentbase])
                        break
                    elif currentbase in self.nodemap:
                        deltachain.append(self.revision(currentbase))
                        break
                    elif currentbase == nullid:
                        break
                    else:
                        deltachunk = chunkmap[currentbase]
                        currentbase = deltachunk['deltabase']
                        deltachain.append(deltachunk['delta'])

                prevtext = deltachain.pop()
                while deltachain:
                    prevtext = mdiff.patch(prevtext, deltachain.pop())

                fulltext = mdiff.patch(prevtext, delta)
                fulltexts[node] = fulltext
                return fulltext

            reorders = 0
            visited = set()
            prevnode = self.node(len(self) - 1)
            for link, chunkdata in chunkdatas:
                node = chunkdata['node']
                deltabase = chunkdata['deltabase']
                # If the delta base is neither stored nor earlier in the
                # (sorted) incoming set, re-delta against the previous node.
                if (not deltabase in self.nodemap and
                    not deltabase in visited):
                    fulltext = getfulltext(node)
                    ptext = getfulltext(prevnode)
                    delta = mdiff.textdiff(ptext, fulltext)

                    chunkdata['delta'] = delta
                    chunkdata['deltabase'] = prevnode
                    reorders += 1

                prevnode = node
                visited.add(node)

        # Apply the (possibly reordered and re-deltaed) chunks.
        for link, chunkdata in chunkdatas:
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            content.append(node)

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            self._addrevision(node, None, transaction, link,
                              p1, p2, (baserev, delta), ifh, dfh)
            if not dfh and not self._inline:
                # addrevision switched from inline to conventional
                # reopen the index
                ifh.close()
                dfh = self.opener(self.datafile, "a")
                ifh = self.opener(self.indexfile, "a")
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return content
|
|
|
|
|
2013-10-29 03:09:47 +04:00
|
|
|
def bookmarkwrite(orig, self):
    """Wraps bmstore.write: mirrors the bookmark state into the pushkeys
    table (under the bookmark lock) before writing it locally."""
    repo = self._repo
    # disablesync is set while syncdb itself rewrites bookmarks from the
    # database; in that case only write locally.
    if repo.disablesync:
        return orig(self)

    def commitbookmarks():
        try:
            # Replace all recorded bookmarks for this repo in one sql
            # transaction; roll back on any failure.
            cursor = repo.sqlcursor
            cursor.execute("""DELETE FROM pushkeys WHERE repo = %s AND
                namespace = 'bookmarks'""", (repo.sqlreponame))

            for k, v in self.iteritems():
                cursor.execute("""INSERT INTO pushkeys(repo, namespace, name, value)
                    VALUES(%s, 'bookmarks', %s, %s)""",
                    (repo.sqlreponame, k, hex(v)))
            repo.sqlconn.commit()
            return orig(self)
        except:
            repo.sqlconn.rollback()
            raise

    executewithsql(repo, commitbookmarks, bookmarklock)
|
2013-10-29 23:55:14 +04:00
|
|
|
|
|
|
|
# recover must be a norepo command because loading the repo fails
commands.norepo += " sqlrecover"

@command('^sqlrecover', [
    ('f', 'force', '', _('strips as far back as necessary'), ''),
    ], _('hg sqlrecover'))
def sqlrecover(ui, *args, **opts):
    """
    Strips commits from the local repo until it is back in sync with the SQL
    server.
    """
    # Skip the automatic sync (and sql mirroring) while we repair the repo.
    global disableinitialsync
    disableinitialsync = True
    repo = hg.repository(ui, ui.environ['PWD'])
    repo.disablesync = True

    if repo.recover():
        ui.status("recovered from incomplete transaction")

    def iscorrupt():
        # The repo is considered corrupt if a sync attempt raises.
        repo.sqlconnect()
        try:
            repo.syncdb()
        except:
            return True
        finally:
            repo.sqlclose()

        return False

    reposize = len(repo)

    # Strip progressively larger chunks off the tip until a sync succeeds.
    stripsize = 10
    while iscorrupt():
        if reposize > len(repo) + 10000:
            ui.warn("unable to fix repo after stripping 10000 commits (use -f to strip more)")
        striprev = max(0, len(repo) - stripsize)
        nodelist = [repo[striprev].node()]
        repair.strip(ui, repo, nodelist, backup="none", topic="sqlrecover")
        stripsize *= 5

    if len(repo) == 0:
        ui.warn(_("unable to fix repo corruption\n"))
    elif len(repo) == reposize:
        ui.status(_("local repo was not corrupt - no action taken\n"))
    else:
        ui.status(_("local repo now matches SQL\n"))