2020-01-19 15:37:45 +03:00
|
|
|
#! /usr/bin/env python
|
|
|
|
#-----------------------------------------------------------------------------
|
|
|
|
# nominatim - Indexing tool for the Nominatim database.
|
|
|
|
#-----------------------------------------------------------------------------
|
|
|
|
#
|
|
|
|
# Indexing tool for the Nominatim database.
|
|
|
|
#
|
|
|
|
# Based on C version by Brian Quinion
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or
|
|
|
|
# modify it under the terms of the GNU General Public License
|
|
|
|
# as published by the Free Software Foundation; either version 2
|
|
|
|
# of the License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program; if not, write to the Free Software
|
|
|
|
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
#-----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
from argparse import ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError
|
|
|
|
import logging
|
|
|
|
import sys
|
|
|
|
import re
|
|
|
|
import getpass
|
|
|
|
from datetime import datetime
|
|
|
|
import psycopg2
|
|
|
|
from psycopg2.extras import wait_select
|
2020-01-19 23:56:37 +03:00
|
|
|
import select
|
2020-01-19 15:37:45 +03:00
|
|
|
|
|
|
|
log = logging.getLogger()
|
|
|
|
|
|
|
|
def make_connection(options, asynchronous=False):
    """Open a PostgreSQL connection using the parsed CLI options.

    When *asynchronous* is True, the connection is opened in psycopg2's
    asynchronous mode so queries can be polled via select().
    """
    connection_params = dict(dbname=options.dbname,
                             user=options.user,
                             password=options.password,
                             host=options.host,
                             port=options.port,
                             async_=asynchronous)
    return psycopg2.connect(**connection_params)
|
|
|
|
|
2020-01-19 23:56:37 +03:00
|
|
|
class IndexingThread(object):
    """A single indexing worker backed by an asynchronous DB connection.

    Despite the name this is not an OS thread: it wraps one psycopg2
    connection opened in asynchronous mode. Progress is driven by
    polling; fileno() exposes the connection socket so instances can be
    passed directly to select.select().
    """

    def __init__(self, thread_num, options):
        log.debug("Creating thread {}".format(thread_num))
        self.thread_num = thread_num
        self.conn = make_connection(options, asynchronous=True)
        # An async connection must be polled to completion even for the
        # connection setup itself.
        self.wait()

        self.cursor = self.conn.cursor()
        self.perform("SET lc_messages TO 'C'")
        self.wait()
        # Pre-create the server-side prepared statements used by the
        # runners' sql_index_place() queries.
        self.perform(InterpolationRunner.prepare())
        self.wait()
        self.perform(RankRunner.prepare())
        self.wait()

        # Currently executing statement (kept for deadlock retry).
        self.current_query = None
        self.current_params = None

    def wait(self):
        """Block until the currently executing query has finished."""
        wait_select(self.conn)
        self.current_query = None

    def perform(self, sql, args=None):
        """Start executing *sql* asynchronously and return immediately.

        The statement and its parameters are remembered so that
        is_done() can re-issue them after a deadlock rollback.
        """
        self.current_query = sql
        self.current_params = args
        self.cursor.execute(sql, args)

    def fileno(self):
        """File descriptor of the connection socket (for select())."""
        return self.conn.fileno()

    def is_done(self):
        """Poll the connection; return True when no query is pending.

        A deadlock rollback (SQLSTATE 40P01) is handled transparently
        by re-executing the failed statement; any other rollback error
        is propagated.
        """
        if self.current_query is None:
            return True

        try:
            if self.conn.poll() == psycopg2.extensions.POLL_OK:
                self.current_query = None
                return True
        except psycopg2.extensions.TransactionRollbackError as e:
            if e.pgcode is None:
                # Chain explicitly so the original DB error is preserved.
                raise RuntimeError("Postgres exception has no error code") from e
            if e.pgcode == '40P01':
                log.info("Deadlock detected, retry.")
                self.cursor.execute(self.current_query, self.current_params)
            else:
                raise

        # Query still in flight (or just re-issued after a deadlock).
        return False
|
2020-01-19 15:37:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Indexer(object):
    """Drives indexing of the database over a pool of async connections."""

    def __init__(self, options):
        self.options = options
        # Synchronous connection, used only for iterating the work lists.
        self.conn = make_connection(options)

        self.threads = []
        for i in range(options.threads):
            t = IndexingThread(i, options)
            self.threads.append(t)

    def run(self):
        """Index all entries in the configured rank range."""
        log.info("Starting indexing rank ({} to {}) using {} threads".format(
                 self.options.minrank, self.options.maxrank,
                 self.options.threads))

        # NOTE(review): ranks run up to but excluding min(maxrank, 30),
        # so when maxrank < 30 the maximum rank itself is never indexed.
        # Verify this matches the intended semantics.
        for rank in range(self.options.minrank, min(self.options.maxrank, 30)):
            self.index(RankRunner(rank))

        # Rank 30 is special: interpolation lines are indexed first.
        if self.options.maxrank >= 30:
            self.index(InterpolationRunner())
            self.index(RankRunner(30))

    def index(self, obj):
        """Index everything produced by runner *obj* (a RankRunner or
        InterpolationRunner), distributing work over the thread pool.
        """
        log.info("Starting {}".format(obj.name()))

        # Named (server-side) cursor so results are streamed.
        cur = self.conn.cursor(name='main')
        cur.execute(obj.sql_index_sectors())

        # First pass over the sector list only counts rows for progress
        # reporting; the cursor is then rewound.
        total_tuples = 0
        for r in cur:
            total_tuples += r[1]
        log.debug("Total number of rows: {}".format(total_tuples))

        cur.scroll(0, mode='absolute')

        next_thread = self.find_free_thread()
        done_tuples = 0
        rank_start_time = datetime.now()

        sector_sql = obj.sql_sector_places()
        index_sql = obj.sql_index_place()
        # Once fewer than ~1000 rows per thread remain, drop the
        # per-sector grouping and process the rest in one go.
        min_grouped_tuples = total_tuples - len(self.threads) * 1000

        for r in cur:
            sector = r[0]

            # Should we do the remaining ones together?
            do_all = done_tuples > min_grouped_tuples

            pcur = self.conn.cursor(name='places')

            if do_all:
                pcur.execute(obj.sql_nosector_places())
            else:
                pcur.execute(sector_sql, (sector, ))

            for place in pcur:
                place_id = place[0]
                log.debug("Processing place {}".format(place_id))
                # Blocks until some worker connection is free.
                thread = next(next_thread)

                thread.perform(index_sql, (place_id,))
                done_tuples += 1

            pcur.close()

            if do_all:
                break

        cur.close()

        # Drain all in-flight queries before reporting.
        for t in self.threads:
            t.wait()

        rank_end_time = datetime.now()
        diff_seconds = (rank_end_time - rank_start_time).total_seconds()
        # Guard against a zero-length interval on coarse clocks.
        rate = done_tuples / diff_seconds if diff_seconds > 0 else 0

        log.info("Done {}/{} in {} @ {} per second - FINISHED {}\n".format(
                 done_tuples, total_tuples, int(diff_seconds),
                 rate, obj.name()))

    def find_free_thread(self):
        """Generator yielding worker threads that are ready for a query.

        Never terminates; blocks in select() on the connection sockets
        until at least one becomes readable.
        """
        ready = self.threads

        while True:
            for thread in ready:
                if thread.is_done():
                    yield thread

            ready, _, _ = select.select(self.threads, [], [])

        # The original code ended with `assert(False, "Unreachable code")`,
        # which asserts a non-empty tuple and is therefore always true
        # (and unreachable anyway, since the loop above never exits).
        # It has been removed.
|
2020-01-19 15:37:45 +03:00
|
|
|
|
|
|
|
class RankRunner(object):
    """Returns SQL commands for indexing one rank in the placex table."""

    def __init__(self, rank):
        # Search rank handled by this runner.
        self.rank = rank

    def name(self):
        """Human-readable name for progress logging."""
        return "rank {}".format(self.rank)

    @classmethod
    def prepare(cls):
        """SQL preparing the server-side statement used by sql_index_place().

        Setting indexed_status to 0 marks the row as indexed; the actual
        indexing work happens in database triggers fired by the update.
        """
        return """PREPARE rnk_index AS
                  UPDATE placex
                  SET indexed_status = 0 WHERE place_id = $1"""

    def sql_index_sectors(self):
        """SQL listing (geometry_sector, row count) pairs still to index."""
        return """SELECT geometry_sector, count(*) FROM placex
                  WHERE rank_search = {} and indexed_status > 0
                  GROUP BY geometry_sector
                  ORDER BY geometry_sector""".format(self.rank)

    def sql_nosector_places(self):
        """SQL selecting all remaining place ids regardless of sector."""
        return """SELECT place_id FROM placex
                  WHERE indexed_status > 0 and rank_search = {}
                  ORDER BY geometry_sector""".format(self.rank)

    def sql_sector_places(self):
        """SQL selecting place ids of one sector (sector bound via %s)."""
        return """SELECT place_id FROM placex
                  WHERE indexed_status > 0 and rank_search = {}
                  and geometry_sector = %s""".format(self.rank)

    def sql_index_place(self):
        """SQL indexing one place via the prepared statement from prepare()."""
        return "EXECUTE rnk_index(%s)"
|
2020-01-19 15:37:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
class InterpolationRunner(object):
    """Returns SQL commands for indexing the address interpolation table
    (location_property_osmline)."""

    def name(self):
        """Human-readable name for progress logging."""
        return "interpolation lines (location_property_osmline)"

    @classmethod
    def prepare(cls):
        """SQL preparing the server-side statement used by sql_index_place().

        Setting indexed_status to 0 marks the row as indexed; the actual
        indexing work happens in database triggers fired by the update.
        """
        return """PREPARE ipl_index AS
                  UPDATE location_property_osmline
                  SET indexed_status = 0 WHERE place_id = $1"""

    def sql_index_sectors(self):
        """SQL listing (geometry_sector, row count) pairs still to index."""
        return """SELECT geometry_sector, count(*) FROM location_property_osmline
                  WHERE indexed_status > 0
                  GROUP BY geometry_sector
                  ORDER BY geometry_sector"""

    def sql_nosector_places(self):
        """SQL selecting all remaining place ids regardless of sector."""
        return """SELECT place_id FROM location_property_osmline
                  WHERE indexed_status > 0
                  ORDER BY geometry_sector"""

    def sql_sector_places(self):
        """SQL selecting place ids of one sector (sector bound via %s)."""
        return """SELECT place_id FROM location_property_osmline
                  WHERE indexed_status > 0 and geometry_sector = %s
                  ORDER BY geometry_sector"""

    def sql_index_place(self):
        """SQL indexing one place via the prepared statement from prepare()."""
        return "EXECUTE ipl_index(%s)"
|
2020-01-19 15:37:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
def nominatim_arg_parser():
    """Set up and return the command-line parser for the tool.

    Returns an argparse.ArgumentParser with database connection options
    (-d/-U/-W/-H/-P), rank range (-r/-R), thread count (-t) and
    verbosity (-v).
    """
    # The original defined a local whitespace-squashing helper `h()`
    # here, but it was never applied to any help string; it has been
    # removed as dead code.

    p = ArgumentParser(description=__doc__,
                       formatter_class=RawDescriptionHelpFormatter)

    p.add_argument('-d', '--database',
                   dest='dbname', action='store', default='nominatim',
                   help='Name of the PostgreSQL database to connect to.')
    p.add_argument('-U', '--username',
                   dest='user', action='store',
                   help='PostgreSQL user name.')
    p.add_argument('-W', '--password',
                   dest='password_prompt', action='store_true',
                   help='Force password prompt.')
    p.add_argument('-H', '--host',
                   dest='host', action='store',
                   help='PostgreSQL server hostname or socket location.')
    p.add_argument('-P', '--port',
                   dest='port', action='store',
                   help='PostgreSQL server port')
    p.add_argument('-r', '--minrank',
                   dest='minrank', type=int, metavar='RANK', default=0,
                   help='Minimum/starting rank.')
    p.add_argument('-R', '--maxrank',
                   dest='maxrank', type=int, metavar='RANK', default=30,
                   help='Maximum/finishing rank.')
    p.add_argument('-t', '--threads',
                   dest='threads', type=int, metavar='NUM', default=1,
                   help='Number of threads to create for indexing.')
    p.add_argument('-v', '--verbose',
                   dest='loglevel', action='count', default=0,
                   help='Increase verbosity')

    return p
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, format='%(levelname)s: %(message)s')

    options = nominatim_arg_parser().parse_args(sys.argv[1:])

    # Map repeated -v flags onto logging levels: 0 -> ERROR(30*? no:
    # 3*10=ERROR), each -v lowers the threshold by 10 down to DEBUG.
    log.setLevel(max(3 - options.loglevel, 0) * 10)

    # Only prompt for a password when explicitly requested with -W.
    if options.password_prompt:
        options.password = getpass.getpass("Database password: ")
    else:
        options.password = None

    Indexer(options).run()
|