remove old code

Summary:
The infinitepush server is nowadays used almost exclusively for unit tests, and only with "indextype=disk" and "storetype=disk".

This diff removes the code that is no longer used.

As a follow-up, we could then separate the server-side code into ext/infinitepushtestserver, away from the client-side code for bundle exchange.

Reviewed By: RajivTS

Differential Revision: D40798209

fbshipit-source-id: d400eaae7fdc52db202f987a74ae53b458bf178d
Author: Liubov Dmitrieva, 2022-10-31 09:27:35 -07:00 (committed by Facebook GitHub Bot)
Parent: ca321c8836
Commit: 07ac140324
7 changed files with 5 additions and 791 deletions


@@ -2,22 +2,10 @@
This extension adds ability to save certain pushes to a remote blob store
as bundles and to serve commits from remote blob store.
-The revisions are stored on disk or in everstore.
-The metadata are stored in sql or on disk.
+The revisions are stored on disk.
+The metadata are stored on disk.
## Config options
infinitepush.branchpattern: pattern to detect a scratchbranch, example
're:scratch/.+'
infinitepush.indextype: disk or sql for the metadata
infinitepush.reponame: only relevant for sql metadata backend, reponame to put in
sql
infinitepush.indexpath: only relevant for ondisk metadata backend, the path to
store the index on disk. If not set will be under .hg
in a folder named filebundlestore
infinitepush.storepath: only relevant for ondisk metadata backend, the path to
store the bundles. If not set, it will be
.hg/filebundlestore
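To make the on-disk defaults above concrete, here is a minimal sketch of how the two path options can be resolved. It assumes a Mercurial-style repo object exposing ui.config() and vfs.join(); it is an illustration, not the extension's actual helper.

def _resolve_disk_paths(repo):
    # Illustrative only: fall back to a folder under .hg, per the docs above.
    indexpath = repo.ui.config("infinitepush", "indexpath")
    if not indexpath:
        indexpath = repo.vfs.join("filebundlestore")  # .hg/filebundlestore
    storepath = repo.ui.config("infinitepush", "storepath")
    if not storepath:
        storepath = repo.vfs.join("filebundlestore")  # .hg/filebundlestore
    return indexpath, storepath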


@@ -15,62 +15,12 @@ Configs::
# Server or client
server = False
# Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
indextype = disk
# Server-side option. Used only if indextype=sql.
# Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
# Server-side option. Used only if indextype=disk.
# Filesystem path to the index store
indexpath = PATH
# Server-side option. Possible values: 'disk' or 'external'
# Fails if not set
storetype = disk
# Server-side option.
# Path to the binary that will save bundle to the bundlestore
# Formatted cmd line will be passed to it (see `put_args`)
put_binary = put
# Server-side option. Used only if storetype=external.
# Format cmd-line string for put binary. Placeholder: {filename}
put_args = {filename}
# Server-side option.
# Path to the binary that get bundle from the bundlestore.
# Formatted cmd line will be passed to it (see `get_args`)
get_binary = get
# Server-side option. Used only if storetype=external.
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
get_args = {filename} {handle}
# Server-side option
logfile = FILE
# Server-side option
loglevel = DEBUG
# Server-side option. Used only if indextype=sql.
# Sets mysql wait_timeout option.
waittimeout = 300
# Server-side option. Used only if indextype=sql.
# Sets mysql innodb_lock_wait_timeout option.
locktimeout = 120
# Server-side option. Used only if indextype=sql.
# limit number of days to generate warning on trying to
# fetch too old commit for hg up / hg pull with short hash rev
shorthasholdrevthreshold = 31
# Server-side option. Used only if indextype=sql.
# Name of the repository
reponame = ''
# Client-side option. Used by --list-remote option. List of remote scratch
# patterns to list if no patterns are specified.
defaultremotepatterns = ['*']
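The {filename} and {handle} placeholders in put_args/get_args above are plain str.format replacement fields. A rough, self-contained sketch of how the external-store command lines were assembled (all values here are hypothetical; the real logic is in the externalbundlestore class further down, which this diff removes):

put_binary = "put"
put_args = ["{filename}"]
get_binary = "get"
get_args = ["{filename}", "{handle}"]

bundle_file = "/tmp/bundle.hg"   # hypothetical temp file holding the bundle
handle = "0123abcd"              # hypothetical key returned by the put binary

put_cmd = [put_binary] + [a.format(filename=bundle_file) for a in put_args]
get_cmd = [get_binary] + [a.format(filename=bundle_file, handle=handle) for a in get_args]

print(put_cmd)  # ['put', '/tmp/bundle.hg']
print(get_cmd)  # ['get', '/tmp/bundle.hg', '0123abcd']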


@@ -17,31 +17,10 @@ from edenscm.i18n import _
class bundlestore(object):
def __init__(self, repo):
storetype = repo.ui.config("infinitepush", "storetype", "")
if storetype == "disk":
self.store = filebundlestore(repo)
elif storetype == "external":
self.store = externalbundlestore(repo)
else:
raise error.Abort(
_("unknown infinitepush store type specified %s") % storetype
)
self.store = filebundlestore(repo)
from . import fileindex
indextype = repo.ui.config("infinitepush", "indextype", "")
if indextype == "disk":
from . import fileindex
self.index = fileindex.fileindex(repo)
elif indextype == "sql":
# Delayed import of sqlindex to avoid including unnecessary
# dependencies on mysql.connector.
from . import sqlindex
self.index = sqlindex.sqlindex(repo)
else:
raise error.Abort(
_("unknown infinitepush index type specified %s") % indextype
)
self.index = fileindex.fileindex(repo)
class filebundlestore(object):
@@ -85,85 +64,3 @@ class filebundlestore(object):
return None
return f.read()
class externalbundlestore(object):
def __init__(self, repo):
"""
`put_binary` - path to binary file which uploads bundle to external
storage and prints key to stdout
`put_args` - format string with additional args to `put_binary`
{filename} replacement field can be used.
`get_binary` - path to binary file which accepts filename and key
(in that order), downloads bundle from store and saves it to file
`get_args` - format string with additional args to `get_binary`.
{filename} and {handle} replacement field can be used.
"""
ui = repo.ui
# path to the binary which uploads a bundle to the external store
# and prints the key to stdout.
self.put_binary = ui.config("infinitepush", "put_binary")
if not self.put_binary:
raise error.Abort("put binary is not specified")
# Additional args to ``put_binary``. The '{filename}' replacement field
# can be used to get the filename.
self.put_args = ui.configlist("infinitepush", "put_args", [])
# path to the binary which accepts a file and key (in that order) and
# downloads the bundle from the store and saves it to the file.
self.get_binary = ui.config("infinitepush", "get_binary")
if not self.get_binary:
raise error.Abort("get binary is not specified")
# Additional args to ``get_binary``. The '{filename}' and '{handle}'
# replacement fields can be used to get the filename and key.
self.get_args = ui.configlist("infinitepush", "get_args", [])
def _call_binary(self, args):
p = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
stdout, stderr = p.communicate()
returncode = p.returncode
return returncode, stdout, stderr
def write(self, data):
# Won't work on windows because you can't open file second time without
# closing it
with NamedTemporaryFile() as temp:
temp.write(data)
temp.flush()
temp.seek(0)
formatted_args = [arg.format(filename=temp.name) for arg in self.put_args]
returncode, stdout, stderr = self._call_binary(
[self.put_binary] + formatted_args
)
if returncode != 0:
raise error.Abort(
"Infinitepush failed to upload bundle to external store: %s"
% stderr
)
stdout_lines = stdout.splitlines()
if len(stdout_lines) == 1:
return stdout_lines[0]
else:
raise error.Abort(
"Infinitepush received bad output from %s: %s"
% (self.put_binary, stdout)
)
def read(self, handle):
# Won't work on windows because you can't open file second time without
# closing it
with NamedTemporaryFile() as temp:
formatted_args = [
arg.format(filename=temp.name, handle=handle) for arg in self.get_args
]
returncode, stdout, stderr = self._call_binary(
[self.get_binary] + formatted_args
)
if returncode != 0:
raise error.Abort("Failed to download from external store: %s" % stderr)
return temp.read()
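Net effect of the bundlestore change above: the storetype/indextype dispatch (and the entire externalbundlestore class) is gone, and the constructor unconditionally wires up the on-disk backends. A condensed sketch of the resulting constructor, reassembled from the added lines of the diff (indentation restored by hand):

class bundlestore(object):
    def __init__(self, repo):
        # Only the on-disk store and index remain after this diff.
        from . import fileindex

        self.store = filebundlestore(repo)
        self.index = fileindex.fileindex(repo)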


@@ -1,431 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import logging
import os
import time
import warnings
# pyre-fixme[21]: Could not find module `mysql.connector`.
import mysql.connector
from edenscm import error, pycompat, util
from edenscm.i18n import _
def _getloglevel(ui):
loglevel = ui.config("infinitepush", "loglevel", "DEBUG")
numeric_loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_loglevel, int):
raise error.Abort(_("invalid log level %s") % loglevel)
return numeric_loglevel
def _convertbookmarkpattern(pattern):
# To search for \, specify it as \\
# To search for _, specify it as \_
# To search for %, specify it as \%
pattern = pattern.replace("\\", "\\\\")
pattern = pattern.replace("_", "\\_")
pattern = pattern.replace("%", "\\%")
if pattern.endswith("*"):
pattern = pattern[:-1] + "%"
return pattern
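# Illustrative examples (not in the original source) of what the conversion
# above produces, turning a bookmark glob into a MySQL LIKE pattern:
#   "scratch/*"   -> "scratch/%"      (trailing * becomes the LIKE wildcard)
#   "my_bookmark" -> "my\_bookmark"   (literal _ escaped so LIKE treats it literally)
#   "50%done"     -> "50\%done"       (literal % escaped likewise)
#   "a\b"         -> "a\\b"           (backslashes doubled first)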
SEC_IN_DAY = 24 * 60 * 60
class sqlindex(object):
"""SQL-based backend for infinitepush index.
See schema.sql for the SQL schema.
This is a context manager. All write operations should use:
with index:
index.addbookmark(...)
...
"""
def __init__(self, repo):
ui = repo.ui
sqlhost = ui.config("infinitepush", "sqlhost")
if not sqlhost:
raise error.Abort(_("please set infinitepush.sqlhost"))
reponame = ui.config("infinitepush", "reponame")
if not reponame:
raise error.Abort(_("please set infinitepush.reponame"))
self.reponame = reponame
host, port, database, user, password = sqlhost.split(":")
self.sqlargs = {
"host": host,
"port": port,
"database": database,
"user": user,
"password": password,
}
self.sqlconn = None
self.sqlcursor = None
logfile = ui.config("infinitepush", "logfile", os.devnull)
logging.basicConfig(filename=logfile)
self.log = logging.getLogger()
self.log.setLevel(_getloglevel(ui))
self._connected = False
self._waittimeout = ui.configint("infinitepush", "waittimeout", 300)
self._locktimeout = ui.configint("infinitepush", "locktimeout", 120)
self.shorthasholdrevthreshold = ui.configint(
"infinitepush", "shorthasholdrevthreshold", 60
)
def sqlconnect(self):
if self.sqlconn:
raise error.Abort("SQL connection already open")
if self.sqlcursor:
raise error.Abort("SQL cursor already open without connection")
retry = 3
while True:
try:
try:
self.sqlconn = mysql.connector.connect(
force_ipv6=True, ssl_disabled=True, **self.sqlargs
)
except AttributeError:
# For older versions of mysql-connector
self.sqlconn = mysql.connector.connect(
force_ipv6=True, **self.sqlargs
)
# Code is copy-pasted from hgsql. Bug fixes need to be
# back-ported!
# The default behavior is to return byte arrays, when we
# need strings. This custom convert returns strings.
self.sqlconn.set_converter_class(CustomConverter)
self.sqlconn.autocommit = False
break
except mysql.connector.errors.Error:
# mysql can be flakey occasionally, so do some minimal
# retrying.
retry -= 1
if retry == 0:
raise
time.sleep(0.2)
waittimeout = self.sqlconn.converter.escape("%s" % self._waittimeout)
self.sqlcursor = self.sqlconn.cursor()
self.sqlcursor.execute("SET wait_timeout=%s" % waittimeout)
self.sqlcursor.execute("SET innodb_lock_wait_timeout=%s" % self._locktimeout)
self._connected = True
def close(self):
"""Cleans up the metadata store connection."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.sqlcursor.close()
self.sqlconn.close()
self.sqlcursor = None
self.sqlconn = None
def __enter__(self):
if not self._connected:
self.sqlconnect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.sqlconn.commit()
else:
self.sqlconn.rollback()
def addbundle(self, bundleid, nodesctx, iscrossbackendsync=False):
"""Record a bundleid containing the given nodes."""
if not self._connected:
self.sqlconnect()
# insert bundle
self.log.info("ADD BUNDLE %r %r" % (self.reponame, bundleid))
self.sqlcursor.execute(
"INSERT INTO bundles(bundle, reponame) VALUES " "(%s, %s)",
params=(bundleid, self.reponame),
)
# insert nodes to bundle mapping
self.sqlcursor.executemany(
"INSERT INTO nodestobundle(node, bundle, reponame) "
"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
"bundle=VALUES(bundle)",
[(ctx.hex(), bundleid, self.reponame) for ctx in nodesctx],
)
# insert metadata
data = [
(
ctx.hex(), # node
ctx.description(), # message
ctx.p1().hex(), # p1
ctx.p2().hex(), # p2
ctx.user(), # author
ctx.extra().get("committer", ctx.user()), # committer
int(ctx.date()[0]), # author_date
int(
ctx.extra().get("committer_date", int(ctx.date()[0]))
), # committer_date
self.reponame, # reponame
)
for ctx in nodesctx
]
self.sqlcursor.executemany(
"INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
"author, committer, author_date, committer_date, "
"reponame) VALUES "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
data,
)
def addbookmark(self, bookmark, node, isbackup):
"""Record a bookmark pointing to a particular node."""
if not self._connected:
self.sqlconnect()
self.log.info(
"ADD BOOKMARKS %r bookmark: %r node: %r" % (self.reponame, bookmark, node)
)
self.sqlcursor.execute(
"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
params=(bookmark, node, self.reponame),
)
def addmanybookmarks(self, bookmarks, isbackup):
"""Record the contents of the ``bookmarks`` dict as bookmarks."""
if not self._connected:
self.sqlconnect()
data = [
(bookmark, node, self.reponame)
for bookmark, node in pycompat.iteritems(bookmarks)
]
self.sqlcursor.executemany(
"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
data,
)
def deletebookmarks(self, patterns):
"""Delete all bookmarks that match any of the patterns in ``patterns``."""
if not self._connected:
self.sqlconnect()
# build and execute delete query
self.log.info("DELETE BOOKMARKS: %s" % patterns)
patterns = [_convertbookmarkpattern(pattern) for pattern in patterns]
condition1 = "reponame = %s"
condition2 = " OR ".join(("bookmark LIKE (%s)",) * len(patterns))
query = "DELETE FROM bookmarkstonode WHERE (%s) AND (%s)" % (
condition1,
condition2,
)
self.sqlcursor.execute(query, params=[self.reponame] + patterns)
def getbundle(self, node):
"""Get the bundleid for a bundle that contains the given node."""
if not self._connected:
self.sqlconnect()
self.log.info("GET BUNDLE %r %r" % (self.reponame, node))
self.sqlcursor.execute(
"SELECT bundle from nodestobundle " "WHERE node = %s AND reponame = %s",
params=(node, self.reponame),
)
result = self.sqlcursor.fetchall()
if len(result) != 1 or len(result[0]) != 1:
self.log.info("No matching node")
return None
bundle = result[0][0]
self.log.info("Found bundle %r" % bundle)
return bundle
def getnodebyprefix(self, prefix):
"""Get the node that matches the given hash prefix.
If there is no match, returns None.
If there are multiple matches, raises an exception."""
if not self._connected:
self.sqlconnect()
self.log.info("GET NODE BY PREFIX %r %r" % (self.reponame, prefix))
nodeprefixpattern = prefix + "%"
result = None
if len(prefix) >= 6 and len(prefix) < 20:
# With longer hashes we can make more complex QUERY
# in order to return some suggestions with the matched PREFIX
# so user can pick up the desired one easily
# there is no need to go this path for prefixes longer than 20
# because to find several commits is highly unlikely
# Order suggestions by date to show the recent ones first
cmd = (
"SELECT t1.node, t2.message, t2.author, t2.committer_date "
"FROM nodestobundle t1 JOIN nodesmetadata t2 "
"ON t1.node = t2.node AND t1.reponame = t2.reponame "
"WHERE t1.node LIKE %s AND t1.reponame = %s "
"ORDER BY t2.committer_date DESC LIMIT 5"
)
params = (nodeprefixpattern, self.reponame)
self.sqlcursor.execute(cmd, params)
result = self.sqlcursor.fetchall()
def gettitle(s):
return s.splitlines()[0]
# format time from timestamp
def formattime(s):
_timeformat = r"%d %b %Y %H:%M"
return time.strftime(_timeformat, time.localtime(int(s)))
# format metadata output from query rows
def formatdata(arr):
return "\n".join(
[
" changeset: {c}\n"
" author: {a}\n"
" date: {d}\n"
" summary: {m}\n".format(
c=c, m=gettitle(m), a=a, d=formattime(d)
)
for c, m, a, d in result
]
)
if len(result) > 1:
raise error.Abort(
("ambiguous identifier '%s'\n" % prefix)
+ "#commitcloud suggestions are:\n"
+ formatdata(result)
)
if len(result) == 1:
revdate = result[0][3]
threshold = self.shorthasholdrevthreshold * SEC_IN_DAY
if time.time() - revdate > threshold:
raise error.Abort(
"commit '%s' is more than %d days old\n"
"description:\n%s"
"#commitcloud hint: if you would like to fetch this "
"commit, please provide the full hash"
% (prefix, self.shorthasholdrevthreshold, formatdata(result))
)
else:
self.sqlcursor.execute(
"SELECT node from nodestobundle "
"WHERE node LIKE %s "
"AND reponame = %s "
"LIMIT 2",
params=(nodeprefixpattern, self.reponame),
)
result = self.sqlcursor.fetchall()
if len(result) > 1:
raise error.Abort(
"ambiguous identifier '%s'\n"
"suggestion: provide longer commithash prefix" % prefix
)
# result not found
if len(result) != 1 or len(result[0]) == 0:
self.log.info("No matching node")
return None
node = result[0][0]
# Log found result. It is unique.
self.log.info("Found node %r" % node)
return node
def getnode(self, bookmark):
"""Get the node for the given bookmark."""
if not self._connected:
self.sqlconnect()
self.log.info("GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark))
self.sqlcursor.execute(
"SELECT node from bookmarkstonode WHERE " "bookmark = %s AND reponame = %s",
params=(bookmark, self.reponame),
)
result = self.sqlcursor.fetchall()
if len(result) != 1 or len(result[0]) != 1:
self.log.info("No matching bookmark")
return None
node = result[0][0]
self.log.info("Found node %r" % node)
return node
def getbookmarks(self, query):
"""Get all bookmarks that match the pattern."""
if not self._connected:
self.sqlconnect()
self.log.info("QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query))
query = _convertbookmarkpattern(query)
self.sqlcursor.execute(
"SELECT bookmark, node from bookmarkstonode WHERE "
"reponame = %s AND bookmark LIKE %s "
# Bookmarks have to be restored in the same order of creation
# See T24417531
"ORDER BY time ASC",
params=(self.reponame, query),
)
result = self.sqlcursor.fetchall()
bookmarks = util.sortdict()
for row in result:
if len(row) != 2:
self.log.info("Bad row returned: %s" % row)
continue
bookmarks[row[0]] = row[1]
return bookmarks
def saveoptionaljsonmetadata(self, node, jsonmetadata):
"""Save optional metadata for the given node."""
if not self._connected:
self.sqlconnect()
self.log.info(
(
"INSERT METADATA, QUERY BOOKMARKS reponame: %r "
+ "node: %r, jsonmetadata: %s"
)
% (self.reponame, node, jsonmetadata)
)
self.sqlcursor.execute(
"UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
"reponame=%s AND node=%s",
params=(jsonmetadata, self.reponame, node),
)
# pyre-fixme[11]: Annotation `MySQLConverter` is not defined as a type.
class CustomConverter(mysql.connector.conversion.MySQLConverter):
"""Ensure that all values being returned are returned as python string
(versus the default byte arrays)."""
def _STRING_to_python(self, value, dsc=None):
if isinstance(value, bytearray):
value = bytes(value)
return pycompat.decodeutf8(value)
def _VAR_STRING_to_python(self, value, dsc=None):
if isinstance(value, bytearray):
value = bytes(value)
return pycompat.decodeutf8(value)
def _BLOB_to_python(self, value, dsc=None):
if isinstance(value, bytearray):
value = bytes(value)
return pycompat.decodeutf8(value)
# localstr is Mercurial-specific. See encoding.py
def _localstr_to_mysql(self, value):
return bytes(value)


@@ -39,69 +39,9 @@ setupserver() {
cat >> .hg/hgrc << EOF
[infinitepush]
server=yes
indextype=disk
storetype=disk
reponame=babar
EOF
}
setupsqlclienthgrc() {
cat << EOF >> .hg/hgrc
[ui]
ssh=$(dummysshcmd)
[extensions]
infinitepush=
[infinitepush]
branchpattern=re:scratch/.+
server=False
[paths]
default = ssh://user@dummy/server
EOF
}
setupsqlserverhgrc() {
cat << EOF >> .hg/hgrc
[ui]
ssh=$(dummysshcmd)
[extensions]
infinitepush=
[infinitepush]
branchpattern=re:scratch/.+
server=True
indextype=sql
storetype=disk
reponame=$1
EOF
}
createinfinitepushtablessql() {
cat <<EOF
DROP TABLE IF EXISTS nodestobundle;
DROP TABLE IF EXISTS bookmarkstonode;
DROP TABLE IF EXISTS bundles;
DROP TABLE IF EXISTS nodesmetadata;
DROP TABLE IF EXISTS forwardfillerqueue;
DROP TABLE IF EXISTS replaybookmarksqueue;
$(cat $INFINITEPUSH_TESTDIR/infinitepush/schema.sql)
EOF
}
createdb() {
mysql -h $DBHOST -P $DBPORT -u $DBUSER $DBPASSOPT -e "CREATE DATABASE IF NOT EXISTS $DBNAME;" 2>/dev/null
createinfinitepushtablessql | mysql -h $DBHOST -P $DBPORT -D $DBNAME -u $DBUSER $DBPASSOPT
}
querysqlindex() {
mysql -h "$DBHOST" -P "$DBPORT" -u "$DBUSER" -D "$DBNAME" "$DBPASSOPT" -e "$1"
}
setupdb() {
source "$INFINITEPUSH_TESTDIR/hgsql/library.sh"
echo "sqlhost=$DBHOST:$DBPORT:$DBNAME:$DBUSER:$DBPASS" >> .hg/hgrc
createdb
}
waitbgbackup() {
sleep 1
hg debugwaitbackup


@@ -1,66 +0,0 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
CREATE TABLE `bookmarkstonode` (
`node` varbinary(64) NOT NULL,
`bookmark` varbinary(512) NOT NULL,
`reponame` varbinary(255) NOT NULL,
`time` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`reponame`,`bookmark`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `bundles` (
`bundle` varbinary(512) NOT NULL,
`reponame` varbinary(255) NOT NULL,
PRIMARY KEY (`bundle`,`reponame`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `nodestobundle` (
`node` varbinary(64) NOT NULL,
`bundle` varbinary(512) NOT NULL,
`reponame` varbinary(255) NOT NULL,
PRIMARY KEY (`node`,`reponame`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `nodesmetadata` (
`node` varbinary(64) NOT NULL,
`message` mediumblob NOT NULL,
`p1` varbinary(64) NOT NULL,
`p2` varbinary(64) DEFAULT NULL,
`author` varbinary(255) NOT NULL,
`committer` varbinary(255) DEFAULT NULL,
`author_date` bigint(20) NOT NULL,
`committer_date` bigint(20) DEFAULT NULL,
`reponame` varbinary(255) NOT NULL,
`optional_json_metadata` mediumblob,
PRIMARY KEY (`reponame`,`node`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `forwardfillerqueue` (
`id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT,
`bundle` VARBINARY(64) NOT NULL,
`reponame` VARBINARY(255) NOT NULL,
`slice` TINYINT(3) UNSIGNED DEFAULT 0,
`created_at` DATETIME DEFAULT CURRENT_TIMESTAMP,
`claimed_by` VARCHAR(255) NULL,
`claimed_at` DATETIME NULL,
PRIMARY KEY (`id`),
KEY `queue_order` (`reponame`, `slice`, `id`),
KEY `claim_review` (`claimed_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE TABLE `replaybookmarksqueue` (
`id` BIGINT(20) UNSIGNED NOT NULL AUTO_INCREMENT,
`reponame` varbinary(255) NOT NULL,
`bookmark` varbinary(512) NOT NULL,
`node` varbinary(64) NOT NULL,
`bookmark_hash` varbinary(64) NOT NULL,
`created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
`synced` TINYINT(1) NOT NULL DEFAULT 0,
PRIMARY KEY (`id`),
KEY `sync_queue` (`synced`, `reponame`, `bookmark_hash`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8


@@ -1,64 +0,0 @@
#chg-compatible
#debugruntest-compatible
$ setconfig experimental.allowfilepeer=True
$ . "$TESTDIR/library.sh"
$ . "$TESTDIR/infinitepush/library.sh"
$ setupcommon
$ hg init repo
$ cd repo
$ setupserver
$ cd ..
$ hg clone ssh://user@dummy/repo client -q
$ cat >> $TESTTMP/uilog.py <<EOF
> from edenscm import extensions
> from edenscm import ui as uimod
> def uisetup(ui):
> extensions.wrapfunction(uimod.ui, 'log', mylog)
> def mylog(orig, self, service, *msg, **opts):
> if service in ['infinitepush']:
> kwstr = ", ".join("%s=%s" % (k, v) for k, v in
> sorted(opts.items()))
> msgstr = msg[0] % msg[1:]
> self.warn('%s: %s (%s)\n' % (service, msgstr, kwstr))
> return orig(self, service, *msg, **opts)
> EOF
$ cat >> repo/.hg/hgrc <<EOF
> [extensions]
> uilog=$TESTTMP/uilog.py
> EOF
$ cd client
$ mkcommit commit
$ hg push -r . --to scratch/scratchpush --create
pushing to ssh://user@dummy/repo
searching for changes
remote: infinitepush: b2x:infinitepush \(eventtype=start, hostname=.+, requestid=\d+, user=\w+\) (re)
remote: pushing 1 commit:
remote: 7e6a6fd9c7c8 commit
remote: infinitepush: bundlestore \(bundlesize=654, eventtype=start, hostname=.+, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: infinitepush: bundlestore \(bundlesize=654, elapsedms=.+, eventtype=success, hostname=.+, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: infinitepush: index \(eventtype=start, hostname=.+, newheadscount=1, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: infinitepush: index \(elapsedms=.+, eventtype=success, hostname=.+, newheadscount=1, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: infinitepush: b2x:infinitepush \(elapsedms=.+, eventtype=success, hostname=.+, reponame=babar, requestid=\d+, user=\w+\) (re)
$ cd ..
Make upload to bundlestore fail
$ cat >> repo/.hg/hgrc <<EOF
> [scratchbranch]
> storepath=/dev/null
> EOF
$ cd client
$ mkcommit failpushcommit
$ hg push -r . --to scratch/scratchpush 2>err
[255]
$ grep '^remote: ' err
remote: infinitepush: b2x:infinitepush \(eventtype=start, hostname=.+, requestid=\d+, user=\w+\) (re)
remote: pushing 2 commits:
remote: 7e6a6fd9c7c8 commit
remote: bba29d9d577a failpushcommit
remote: infinitepush: bundlestore \(bundlesize=1247, eventtype=start, hostname=.+, requestid=\d+, user=\w+\) (re)
remote: infinitepush: bundlestore \(bundlesize=1247, elapsedms=[-+0-9.e]+, errormsg=\[Errno 20\] \$ENOTDIR\$: '/dev/null/[0-9a-f]+', eventtype=failure, hostname=.+, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: infinitepush: b2x:infinitepush \(elapsedms=[-+0-9.e]+, errormsg=\[Errno 20\] \$ENOTDIR\$: '/dev/null/[0-9a-f]+', eventtype=failure, hostname=.+, reponame=babar, requestid=\d+, user=\w+\) (re)
remote: abort: \$ENOTDIR\$: /dev/null/[0-9a-f]+ (re)