Nominatim/nominatim/nominatim.py

362 lines
12 KiB
Python
Raw Normal View History

2020-01-21 23:57:07 +03:00
#! /usr/bin/env python3
2020-01-19 15:37:45 +03:00
#-----------------------------------------------------------------------------
# nominatim - [description]
#-----------------------------------------------------------------------------
#
# Indexing tool for the Nominatim database.
#
# Based on C version by Brian Quinion
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#-----------------------------------------------------------------------------
from argparse import ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError
import logging
import sys
import re
import getpass
from datetime import datetime
import psycopg2
from psycopg2.extras import wait_select
import select
2020-01-19 15:37:45 +03:00
log = logging.getLogger()


def make_connection(options, asynchronous=False):
    """ Open a psycopg2 connection to the database described by the
        command-line `options` (dbname, user, password, host, port).

        When `asynchronous` is True, the connection is opened in
        psycopg2's non-blocking mode.
    """
    # 'async' is a reserved word in Python 3.7+, so it cannot be written
    # as a plain keyword argument; pass it through a dict instead.
    connection_args = dict(dbname=options.dbname,
                           user=options.user,
                           password=options.password,
                           host=options.host,
                           port=options.port)
    connection_args['async'] = asynchronous

    return psycopg2.connect(**connection_args)
2020-01-19 15:37:45 +03:00
2020-01-21 00:19:33 +03:00
class RankRunner(object):
    """ Returns SQL commands for indexing one rank within the placex table.
    """

    def __init__(self, rank):
        # Search rank (0..30) this runner is responsible for.
        self.rank = rank

    def name(self):
        """ Human-readable label for progress messages. """
        return "rank {}".format(self.rank)

    def sql_index_sectors(self):
        """ SQL returning (geometry_sector, count) pairs of pending rows
            for this rank, ordered by sector.
        """
        return """SELECT geometry_sector, count(*) FROM placex
                  WHERE rank_search = {} and indexed_status > 0
                  GROUP BY geometry_sector
                  ORDER BY geometry_sector""".format(self.rank)

    def sql_nosector_places(self):
        """ SQL selecting all pending place_ids of this rank regardless
            of sector.
        """
        return """SELECT place_id FROM placex
                  WHERE indexed_status > 0 and rank_search = {}
                  ORDER BY geometry_sector""".format(self.rank)

    def sql_sector_places(self):
        """ SQL selecting pending place_ids of this rank for one sector
            (sector supplied as a query parameter).
        """
        return """SELECT place_id FROM placex
                  WHERE indexed_status > 0 and rank_search = {}
                  and geometry_sector = %s""".format(self.rank)

    def sql_index_place(self):
        """ SQL that marks a single place as indexed; triggers on placex
            do the actual indexing work.
        """
        return "UPDATE placex SET indexed_status = 0 WHERE place_id = %s"
class InterpolationRunner(object):
    """ Returns SQL commands for indexing the address interpolation table
        location_property_osmline.
    """

    def name(self):
        """ Human-readable label for progress messages. """
        return "interpolation lines (location_property_osmline)"

    def sql_index_sectors(self):
        """ SQL returning (geometry_sector, count) pairs of pending
            interpolation lines, ordered by sector.
        """
        return """SELECT geometry_sector, count(*) FROM location_property_osmline
                  WHERE indexed_status > 0
                  GROUP BY geometry_sector
                  ORDER BY geometry_sector"""

    def sql_nosector_places(self):
        """ SQL selecting all pending place_ids regardless of sector. """
        return """SELECT place_id FROM location_property_osmline
                  WHERE indexed_status > 0
                  ORDER BY geometry_sector"""

    def sql_sector_places(self):
        """ SQL selecting pending place_ids for one sector (sector is a
            query parameter).
        """
        return """SELECT place_id FROM location_property_osmline
                  WHERE indexed_status > 0 and geometry_sector = %s
                  ORDER BY geometry_sector"""

    def sql_index_place(self):
        """ SQL that marks a single interpolation line as indexed. """
        return """UPDATE location_property_osmline
                  SET indexed_status = 0 WHERE place_id = %s"""
class DBConnection(object):
    """ A single non-blocking database connection.
    """

    def __init__(self, options):
        # Query/params currently in flight (None when idle); kept so a
        # deadlocked query can be re-sent.
        self.current_query = None
        self.current_params = None
        # BUG FIX: store the connection options on the instance. The
        # original connect() read the module-level global 'options',
        # which only exists when this file is run as a script.
        self.options = options

        self.conn = None
        self.connect()

    def connect(self):
        """ (Re)establish the asynchronous database connection, closing
            any previous connection and cursor first.
        """
        if self.conn is not None:
            self.cursor.close()
            self.conn.close()

        self.conn = make_connection(self.options, asynchronous=True)
        self.wait()

        self.cursor = self.conn.cursor()

    def wait(self):
        """ Block until any pending operation is done.

            Deadlocks (SQLSTATE 40P01) are resolved by re-sending the
            current query; any other rollback error is re-raised.
        """
        while True:
            try:
                wait_select(self.conn)
                self.current_query = None
                return
            except psycopg2.extensions.TransactionRollbackError as e:
                if e.pgcode == '40P01':
                    log.info("Deadlock detected (params = {}), retry."
                             .format(self.current_params))
                    self.cursor.execute(self.current_query, self.current_params)
                else:
                    raise
            except psycopg2.errors.DeadlockDetected:
                # psycopg2 >= 2.8 raises this dedicated subclass instead.
                self.cursor.execute(self.current_query, self.current_params)

    def perform(self, sql, args=None):
        """ Send SQL query to the server. Returns immediately without
            blocking.
        """
        self.current_query = sql
        self.current_params = args
        self.cursor.execute(sql, args)

    def fileno(self):
        """ File descriptor to wait for. (Makes this class select()able.)
        """
        return self.conn.fileno()

    def is_done(self):
        """ Check if the connection is available for a new query.

            Also checks if the previous query has run into a deadlock.
            If so, then the previous query is repeated and the connection
            stays busy.
        """
        if self.current_query is None:
            return True

        try:
            if self.conn.poll() == psycopg2.extensions.POLL_OK:
                self.current_query = None
                return True
        except psycopg2.extensions.TransactionRollbackError as e:
            if e.pgcode == '40P01':
                log.info("Deadlock detected (params = {}), retry.".format(self.current_params))
                self.cursor.execute(self.current_query, self.current_params)
            else:
                raise
        except psycopg2.errors.DeadlockDetected:
            self.cursor.execute(self.current_query, self.current_params)

        return False
2020-01-19 15:37:45 +03:00
class Indexer(object):
    """ Main indexing routine.
    """

    def __init__(self, options):
        # Clamp the requested range to the ranks that exist (0..30).
        self.minrank = max(0, options.minrank)
        self.maxrank = min(30, options.maxrank)
        # Blocking connection used for the (server-side) select cursors.
        self.conn = make_connection(options)
        # Pool of asynchronous connections that execute the updates.
        self.threads = [DBConnection(options) for _ in range(options.threads)]

    def run(self):
        """ Run indexing over the entire database.
        """
        log.warning("Starting indexing rank ({} to {}) using {} threads".format(
            self.minrank, self.maxrank, len(self.threads)))

        for rank in range(self.minrank, self.maxrank):
            self.index(RankRunner(rank))

        # Interpolation lines are interleaved with rank 30 objects.
        if self.maxrank == 30:
            self.index(InterpolationRunner())

        # range() above excludes maxrank, so index the final rank here.
        self.index(RankRunner(self.maxrank))

    def index(self, obj):
        """ Index a single rank or table. `obj` describes the SQL to use
            for indexing.
        """
        log.warning("Starting {}".format(obj.name()))

        # Server-side cursor so the full sector list is streamed.
        cur = self.conn.cursor(name='main')
        cur.execute(obj.sql_index_sectors())

        total_tuples = 0
        for r in cur:
            total_tuples += r[1]
        log.debug("Total number of rows; {}".format(total_tuples))

        # Rewind so the sector rows can be iterated again below.
        cur.scroll(0, mode='absolute')

        next_thread = self.find_free_thread()
        done_tuples = 0
        rank_start_time = datetime.now()

        # Hoist SQL generation out of the per-place loop.
        sector_sql = obj.sql_sector_places()
        index_sql = obj.sql_index_place()
        # Once fewer than ~1000 tuples per thread remain, process the
        # rest in one batch rather than sector by sector.
        min_grouped_tuples = total_tuples - len(self.threads) * 1000

        # Progress reporting threshold; effectively disabled when INFO
        # logging is off.
        next_info = 100 if log.isEnabledFor(logging.INFO) else total_tuples + 1

        for r in cur:
            sector = r[0]

            # Should we do the remaining ones together?
            do_all = done_tuples > min_grouped_tuples

            pcur = self.conn.cursor(name='places')

            if do_all:
                pcur.execute(obj.sql_nosector_places())
            else:
                pcur.execute(sector_sql, (sector, ))

            for place in pcur:
                place_id = place[0]
                log.debug("Processing place {}".format(place_id))

                thread = next(next_thread)
                thread.perform(index_sql, (place_id,))
                done_tuples += 1

                if done_tuples >= next_info:
                    now = datetime.now()
                    done_time = (now - rank_start_time).total_seconds()
                    tuples_per_sec = done_tuples / done_time
                    log.info("Done {} in {} @ {:.3f} per second - {} ETA (seconds): {:.2f}"
                             .format(done_tuples, int(done_time),
                                     tuples_per_sec, obj.name(),
                                     (total_tuples - done_tuples)/tuples_per_sec))
                    next_info += int(tuples_per_sec)

            pcur.close()

            if do_all:
                break

        cur.close()

        # Wait for all in-flight updates to finish before reporting.
        for t in self.threads:
            t.wait()

        rank_end_time = datetime.now()
        diff_seconds = (rank_end_time-rank_start_time).total_seconds()

        log.warning("Done {}/{} in {} @ {:.3f} per second - FINISHED {}\n".format(
            done_tuples, total_tuples, int(diff_seconds),
            done_tuples/diff_seconds, obj.name()))

    def find_free_thread(self):
        """ Generator that returns the next connection that is free for
            sending a query.
        """
        ready = self.threads
        command_stat = 0

        while True:
            for thread in ready:
                if thread.is_done():
                    command_stat += 1
                    yield thread

            # refresh the connections occasionaly to avoid potential
            # memory leaks in Postgresql.
            if command_stat > 100000:
                for t in self.threads:
                    while not t.is_done():
                        wait_select(t.conn)
                    t.connect()
                command_stat = 0
                ready = self.threads
            else:
                # Blocks until at least one connection is readable;
                # DBConnection.fileno() makes it select()able.
                ready, _, _ = select.select(self.threads, [], [])

        # BUG FIX: the original wrote assert(False, "..."), which asserts
        # a non-empty tuple and therefore never fires.
        assert False, "Unreachable code"
2020-01-19 15:37:45 +03:00
def nominatim_arg_parser():
    """ Setup the command-line parser for the tool.

        Returns an ArgumentParser; parsed options carry dbname, user,
        password_prompt, host, port, minrank, maxrank, threads and
        loglevel.
    """
    # NOTE: the original defined a helper h() collapsing whitespace via
    # re.sub("\s\s+", " ", s) but never applied it to any help string;
    # the dead helper (and its invalid '\s' escape sequence) is removed.
    p = ArgumentParser(description="Indexing tool for Nominatim.",
                       formatter_class=RawDescriptionHelpFormatter)

    p.add_argument('-d', '--database',
                   dest='dbname', action='store', default='nominatim',
                   help='Name of the PostgreSQL database to connect to.')
    p.add_argument('-U', '--username',
                   dest='user', action='store',
                   help='PostgreSQL user name.')
    p.add_argument('-W', '--password',
                   dest='password_prompt', action='store_true',
                   help='Force password prompt.')
    p.add_argument('-H', '--host',
                   dest='host', action='store',
                   help='PostgreSQL server hostname or socket location.')
    p.add_argument('-P', '--port',
                   dest='port', action='store',
                   help='PostgreSQL server port')
    p.add_argument('-r', '--minrank',
                   dest='minrank', type=int, metavar='RANK', default=0,
                   help='Minimum/starting rank.')
    p.add_argument('-R', '--maxrank',
                   dest='maxrank', type=int, metavar='RANK', default=30,
                   help='Maximum/finishing rank.')
    p.add_argument('-t', '--threads',
                   dest='threads', type=int, metavar='NUM', default=1,
                   help='Number of threads to create for indexing.')
    p.add_argument('-v', '--verbose',
                   dest='loglevel', action='count', default=0,
                   help='Increase verbosity')

    return p
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, format='%(levelname)s: %(message)s')

    options = nominatim_arg_parser().parse_args(sys.argv[1:])

    # Map the -v count onto log levels: 0 -> WARNING(30), 1 -> INFO(20),
    # 2 -> DEBUG(10), 3 or more -> NOTSET(0).
    log.setLevel(max(3 - options.loglevel, 0) * 10)

    # Only ask for a password when explicitly requested with -W.
    options.password = None
    if options.password_prompt:
        options.password = getpass.getpass("Database password: ")

    Indexer(options).run()