2011-06-03 18:27:41 +04:00
|
|
|
# commandserver.py - communicate with Mercurial's API over a pipe
|
|
|
|
#
|
|
|
|
# Copyright Matt Mackall <mpm@selenic.com>
|
|
|
|
#
|
|
|
|
# This software may be used and distributed according to the terms of the
|
|
|
|
# GNU General Public License version 2 or any later version.
|
|
|
|
|
2015-11-24 16:58:40 +03:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
|
|
|
import errno
|
2016-05-21 09:23:21 +03:00
|
|
|
import gc
|
2015-11-24 16:58:40 +03:00
|
|
|
import os
|
2016-05-21 09:23:21 +03:00
|
|
|
import random
|
2016-05-22 05:43:18 +03:00
|
|
|
import signal
|
|
|
|
import socket
|
2011-06-03 18:27:41 +04:00
|
|
|
import struct
|
2015-11-24 16:58:40 +03:00
|
|
|
import traceback
|
|
|
|
|
|
|
|
from .i18n import _
|
|
|
|
from . import (
|
|
|
|
encoding,
|
|
|
|
error,
|
2016-11-22 21:33:11 +03:00
|
|
|
pycompat,
|
2017-07-15 06:26:21 +03:00
|
|
|
selectors2,
|
2015-11-24 16:58:40 +03:00
|
|
|
util,
|
|
|
|
)
|
2011-06-03 18:27:41 +04:00
|
|
|
|
|
|
|
logfile = None


def log(*args):
    """Append the stringified arguments to the command server log.

    This is a no-op until the module-level ``logfile`` has been set up
    (done by server.__init__ when the cmdserver.log config is set). The
    file is flushed after every call so the log stays current even if
    the server process dies unexpectedly.
    """
    if not logfile:
        return
    for arg in args:
        logfile.write(str(arg))
    logfile.flush()
|
|
|
|
|
|
|
|
class channeledoutput(object):
    """File-like object that frames each write for the channel protocol.

    Write data to out in the following format:

        channel identifier (1 byte)
        data length (unsigned int),
        data
    """
    def __init__(self, out, channel):
        self.out = out
        self.channel = channel

    @property
    def name(self):
        return '<%c-channel>' % self.channel

    def write(self, data):
        if not data:
            return
        # single write() to guarantee the same atomicity as the underlying file
        frame = struct.pack('>cI', self.channel, len(data)) + data
        self.out.write(frame)
        self.out.flush()

    def __getattr__(self, attr):
        # refuse to look like a seekable tty-capable file; everything else
        # is delegated to the wrapped stream
        if attr in ('isatty', 'fileno', 'tell', 'seek'):
            raise AttributeError(attr)
        return getattr(self.out, attr)
|
2011-06-03 18:27:41 +04:00
|
|
|
|
|
|
|
class channeledinput(object):
    """
    Read data from in_.

    Requests for input are written to out in the following format:
        channel identifier - 'I' for plain input, 'L' line based (1 byte)
        how many bytes to send at most (unsigned int),

    The client replies with:
        data length (unsigned int), 0 meaning EOF
        data
    """

    # request at most 4k per round trip so the pipe can't fill up and
    # deadlock both ends
    maxchunksize = 4 * 1024

    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    @property
    def name(self):
        return '<%c-channel>' % self.channel

    def read(self, size=-1):
        if size >= 0:
            return self._read(size, self.channel)
        # if we need to consume all the clients input, ask for 4k chunks
        # so the pipe doesn't fill up risking a deadlock
        size = self.maxchunksize
        chunk = self._read(size, self.channel)
        buf = chunk
        while chunk:
            chunk = self._read(size, self.channel)
            buf += chunk
        return buf

    def _read(self, size, channel):
        """One request/reply round trip: ask for up to size bytes."""
        if not size:
            return ''
        assert size > 0

        # tell the client we need at most size bytes
        self.out.write(struct.pack('>cI', channel, size))
        self.out.flush()

        length = struct.unpack('>I', self.in_.read(4))[0]
        if not length:
            return ''
        return self.in_.read(length)

    def readline(self, size=-1):
        if size >= 0:
            return self._read(size, 'L')
        size = self.maxchunksize
        chunk = self._read(size, 'L')
        buf = chunk
        # keep asking for more until there's either no more or
        # we got a full line
        while chunk and not chunk.endswith('\n'):
            chunk = self._read(size, 'L')
            buf += chunk
        return buf

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def __getattr__(self, attr):
        # refuse to look like a seekable tty-capable file; everything else
        # is delegated to the wrapped stream
        if attr in ('isatty', 'fileno', 'tell', 'seek'):
            raise AttributeError(attr)
        return getattr(self.in_, attr)
|
|
|
|
|
|
|
|
class server(object):
    """
    Listens for commands on fin, runs them and writes the output on a channel
    based stream to fout.
    """
    def __init__(self, ui, repo, fin, fout):
        """Set up channels over fin/fout and optionally open the debug log.

        repo may be None for a repository-less server; in that case only
        the passed-in ui is used.
        """
        # remembered so runcommand() can restore it after a --cwd request
        self.cwd = pycompat.getcwd()

        # developer config: cmdserver.log
        logpath = ui.config("cmdserver", "log")
        if logpath:
            global logfile
            if logpath == '-':
                # write log on a special 'd' (debug) channel
                logfile = channeledoutput(fout, 'd')
            else:
                logfile = open(logpath, 'a')

        if repo:
            # the ui here is really the repo ui so take its baseui so we don't
            # end up with its local configuration
            self.ui = repo.baseui
            self.repo = repo
            self.repoui = repo.ui
        else:
            self.ui = ui
            self.repo = self.repoui = None

        # protocol channels: 'e'rror, 'o'utput, 'I'nput, 'r'esult
        self.cerr = channeledoutput(fout, 'e')
        self.cout = channeledoutput(fout, 'o')
        self.cin = channeledinput(fin, fout, 'I')
        self.cresult = channeledoutput(fout, 'r')

        # raw stream used for reading command requests
        self.client = fin

    def cleanup(self):
        """release and restore resources taken during server session"""

    def _read(self, size):
        """Read exactly up to size raw bytes; raise EOFError on disconnect."""
        if not size:
            return ''

        data = self.client.read(size)

        # is the other end closed?
        if not data:
            raise EOFError

        return data

    def _readstr(self):
        """read a string from the channel

        format:
        data length (uint32), data
        """
        length = struct.unpack('>I', self._read(4))[0]
        if not length:
            return ''
        return self._read(length)

    def _readlist(self):
        """read a list of NULL separated strings from the channel"""
        s = self._readstr()
        if s:
            return s.split('\0')
        else:
            return []

    def runcommand(self):
        """ reads a list of \0 terminated arguments, executes
        and writes the return code to the result channel """
        from . import dispatch  # avoid cycle

        args = self._readlist()

        # copy the uis so changes (e.g. --config or --verbose) don't
        # persist between requests
        copiedui = self.ui.copy()
        uis = [copiedui]
        if self.repo:
            self.repo.baseui = copiedui
            # clone ui without using ui.copy because this is protected
            repoui = self.repoui.__class__(self.repoui)
            repoui.copy = copiedui.copy  # redo copy protection
            uis.append(repoui)
            self.repo.ui = self.repo.dirstate._ui = repoui
            self.repo.invalidateall()

        for ui in uis:
            ui.resetstate()
            # any kind of interaction must use server channels, but chg may
            # replace channels by fully functional tty files. so nontty is
            # enforced only if cin is a channel.
            if not util.safehasattr(self.cin, 'fileno'):
                ui.setconfig('ui', 'nontty', 'true', 'commandserver')

        req = dispatch.request(args[:], copiedui, self.repo, self.cin,
                               self.cout, self.cerr)

        ret = (dispatch.dispatch(req) or 0) & 255  # might return None

        # restore old cwd
        if '--cwd' in args:
            os.chdir(self.cwd)

        self.cresult.write(struct.pack('>i', int(ret)))

    def getencoding(self):
        """ writes the current encoding to the result channel """
        self.cresult.write(encoding.encoding)

    def serveone(self):
        """Read one command name and dispatch it; return False at EOF.

        An empty line (bare newline) signals the end of the session.
        """
        cmd = self.client.readline()[:-1]
        if cmd:
            handler = self.capabilities.get(cmd)
            if handler:
                handler(self)
            else:
                # clients are expected to check what commands are supported by
                # looking at the servers capabilities
                raise error.Abort(_('unknown command %s') % cmd)

        return cmd != ''

    # command name -> unbound method; advertised in the hello message
    capabilities = {'runcommand': runcommand,
                    'getencoding': getencoding}

    def serve(self):
        """Send the hello message, then serve requests until EOF.

        Returns 0 on clean shutdown, 1 if the client disconnected
        mid-request.
        """
        hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
        hellomsg += '\n'
        hellomsg += 'encoding: ' + encoding.encoding
        hellomsg += '\n'
        hellomsg += 'pid: %d' % util.getpid()
        if util.safehasattr(os, 'getpgid'):
            hellomsg += '\n'
            hellomsg += 'pgid: %d' % os.getpgid(0)

        # write the hello msg in -one- chunk
        self.cout.write(hellomsg)

        try:
            while self.serveone():
                pass
        except EOFError:
            # we'll get here if the client disconnected while we were reading
            # its request
            return 1

        return 0
|
2014-09-27 09:52:09 +04:00
|
|
|
|
2014-11-15 07:50:43 +03:00
|
|
|
def _protectio(ui):
    """ duplicates streams and redirect original to null if ui uses stdio """
    ui.flush()
    nullfd = os.open(os.devnull, os.O_RDWR)
    protected = []
    streams = [(ui.fin, util.stdin, pycompat.sysstr('rb')),
               (ui.fout, util.stdout, pycompat.sysstr('wb'))]
    for stream, sysstream, mode in streams:
        if stream is sysstream:
            # keep a private duplicate of the real stdio descriptor, then
            # point the original descriptor at the null device so stray
            # writes can't corrupt the channel protocol
            dupfd = os.dup(stream.fileno())
            os.dup2(nullfd, stream.fileno())
            stream = os.fdopen(dupfd, mode)
        protected.append(stream)
    os.close(nullfd)
    return tuple(protected)
|
|
|
|
|
|
|
|
def _restoreio(ui, fin, fout):
    """ restores streams from duplicated ones """
    ui.flush()
    for duped, current in ((fin, ui.fin), (fout, ui.fout)):
        if duped is not current:
            # copy the preserved descriptor back over the null-redirected
            # one, then drop our duplicate
            os.dup2(duped.fileno(), current.fileno())
            duped.close()
|
|
|
|
|
2014-09-27 09:52:09 +04:00
|
|
|
class pipeservice(object):
    """Command server service speaking the channel protocol over stdio."""

    def __init__(self, ui, repo, opts):
        self.ui = ui
        self.repo = repo

    def init(self):
        # nothing to set up for pipe mode; kept for service interface parity
        pass

    def run(self):
        """Serve one session over protected stdio; returns serve()'s code."""
        ui = self.ui
        # redirect stdio to null device so that broken extensions or in-process
        # hooks will never cause corruption of channel protocol.
        fin, fout = _protectio(ui)
        # guard against server() itself failing: the original code referenced
        # sv unconditionally in the finally clause, turning a constructor
        # error into a NameError that masked the real exception
        sv = None
        try:
            sv = server(ui, self.repo, fin, fout)
            return sv.serve()
        finally:
            if sv is not None:
                sv.cleanup()
            _restoreio(ui, fin, fout)
|
2014-09-27 10:04:46 +04:00
|
|
|
|
2016-05-22 07:53:32 +03:00
|
|
|
def _initworkerprocess():
    """Prepare a freshly forked worker process to serve one request."""
    # use a different process group from the master process, in order to:
    # 1. make the current process group no longer "orphaned" (because the
    #    parent of this process is in a different process group while
    #    remains in a same session)
    #    according to POSIX 2.2.2.52, orphaned process group will ignore
    #    terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
    #    cause trouble for things like ncurses.
    # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
    #    SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
    #    processes like ssh will be killed properly, without affecting
    #    unrelated processes.
    os.setpgid(0, 0)
    # change random state otherwise forked request handlers would have a
    # same state inherited from parent.
    random.seed()
|
|
|
|
|
2016-05-22 07:53:32 +03:00
|
|
|
def _serverequest(ui, repo, conn, createcmdserver):
    """Serve a single client connection, then close its streams.

    createcmdserver(repo, conn, fin, fout) builds the command server for
    this connection. Any unexpected exception is mirrored to the client's
    'e' channel before being re-raised so the client can see it.
    """
    fin = conn.makefile('rb')
    fout = conn.makefile('wb')
    sv = None
    try:
        sv = createcmdserver(repo, conn, fin, fout)
        try:
            sv.serve()
        # handle exceptions that may be raised by command server. most of
        # known exceptions are caught by dispatch.
        except error.Abort as inst:
            ui.warn(_('abort: %s\n') % inst)
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
        except KeyboardInterrupt:
            pass
        finally:
            sv.cleanup()
    except:  # re-raises
        # also write traceback to error channel. otherwise client cannot
        # see it because it is written to server's stderr by default.
        if sv:
            cerr = sv.cerr
        else:
            # server construction failed; build a bare error channel
            cerr = channeledoutput(fout, 'e')
        traceback.print_exc(file=cerr)
        raise
    finally:
        fin.close()
        try:
            fout.close()  # implicit flush() may cause another EPIPE
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
|
2014-10-04 11:46:50 +04:00
|
|
|
|
2016-05-22 05:43:18 +03:00
|
|
|
class unixservicehandler(object):
    """Set of pluggable operations for unix-mode services

    Almost all methods except for createcmdserver() are called in the main
    process. You can't pass mutable resource back from createcmdserver().
    """

    # seconds between shouldexit() polls; None blocks indefinitely
    pollinterval = None

    def __init__(self, ui):
        self.ui = ui

    def bindsocket(self, sock, address):
        """Bind sock to the unix socket at address and start listening."""
        util.bindunixsocket(sock, address)
        sock.listen(socket.SOMAXCONN)
        self.ui.status(_('listening at %s\n') % address)
        self.ui.flush()  # avoid buffering of status message

    def unlinksocket(self, address):
        """Remove the socket file; called once at shutdown."""
        os.unlink(address)

    def shouldexit(self):
        """True if server should shut down; checked per pollinterval"""
        return False

    def newconnection(self):
        """Called when main process notices new connection"""

    def createcmdserver(self, repo, conn, fin, fout):
        """Create new command server instance; called in the process that
        serves for the current connection"""
        return server(self.ui, repo, fin, fout)
|
|
|
|
|
2016-05-22 07:45:09 +03:00
|
|
|
class unixforkingservice(object):
    """
    Listens on unix domain socket and forks server per connection
    """

    def __init__(self, ui, repo, opts, handler=None):
        self.ui = ui
        self.repo = repo
        self.address = opts['address']
        if not util.safehasattr(socket, 'AF_UNIX'):
            raise error.Abort(_('unsupported platform'))
        if not self.address:
            raise error.Abort(_('no socket path specified with --address'))
        self._servicehandler = handler or unixservicehandler(ui)
        self._sock = None
        self._oldsigchldhandler = None
        self._workerpids = set()  # updated by signal handler; do not iterate
        self._socketunlinked = None

    def init(self):
        """Bind the listening socket and install the SIGCHLD reaper."""
        self._sock = socket.socket(socket.AF_UNIX)
        self._servicehandler.bindsocket(self._sock, self.address)
        o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
        self._oldsigchldhandler = o
        self._socketunlinked = False

    def _unlinksocket(self):
        # idempotent: may be called from both the exit path and _cleanup()
        if not self._socketunlinked:
            self._servicehandler.unlinksocket(self.address)
            self._socketunlinked = True

    def _cleanup(self):
        signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
        self._sock.close()
        self._unlinksocket()
        # don't kill child processes as they have active clients, just wait
        self._reapworkers(0)

    def run(self):
        try:
            self._mainloop()
        finally:
            self._cleanup()

    def _mainloop(self):
        """Accept connections and fork one worker per connection."""
        exiting = False
        h = self._servicehandler
        selector = selectors2.DefaultSelector()
        selector.register(self._sock, selectors2.EVENT_READ)
        while True:
            if not exiting and h.shouldexit():
                # clients can no longer connect() to the domain socket, so
                # we stop queuing new requests.
                # for requests that are queued (connect()-ed, but haven't been
                # accept()-ed), handle them before exit. otherwise, clients
                # waiting for recv() will receive ECONNRESET.
                self._unlinksocket()
                exiting = True
            ready = selector.select(timeout=h.pollinterval)
            if not ready:
                # only exit if we completed all queued requests
                if exiting:
                    break
                continue
            try:
                conn, _addr = self._sock.accept()
            except socket.error as inst:
                # interrupted accept (e.g. by SIGCHLD): just retry
                if inst.args[0] == errno.EINTR:
                    continue
                raise

            pid = os.fork()
            if pid:
                # parent: record the worker and release our copy of the
                # connection so only the child holds it
                try:
                    self.ui.debug('forked worker process (pid=%d)\n' % pid)
                    self._workerpids.add(pid)
                    h.newconnection()
                finally:
                    conn.close()  # release handle in parent process
            else:
                # child: serve the request and _exit; must never return to
                # the accept loop
                try:
                    self._runworker(conn)
                    conn.close()
                    os._exit(0)
                except:  # never return, hence no re-raises
                    try:
                        self.ui.traceback(force=True)
                    finally:
                        os._exit(255)
        selector.close()

    def _sigchldhandler(self, signal, frame):
        # non-blocking reap so the handler returns quickly
        self._reapworkers(os.WNOHANG)

    def _reapworkers(self, options):
        """Reap exited workers; options is 0 (blocking) or os.WNOHANG."""
        while self._workerpids:
            try:
                pid, _status = os.waitpid(-1, options)
            except OSError as inst:
                if inst.errno == errno.EINTR:
                    continue
                if inst.errno != errno.ECHILD:
                    raise
                # no child processes at all (reaped by other waitpid()?)
                self._workerpids.clear()
                return
            if pid == 0:
                # no waitable child processes
                return
            self.ui.debug('worker process exited (pid=%d)\n' % pid)
            self._workerpids.discard(pid)

    def _runworker(self, conn):
        """Run in the forked child: restore SIGCHLD and serve the request."""
        signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
        _initworkerprocess()
        h = self._servicehandler
        try:
            _serverequest(self.ui, self.repo, conn, h.createcmdserver)
        finally:
            gc.collect()  # trigger __del__ since worker process uses os._exit
|