more formatting fixes

Found by flake8.
Sarah Hoffmann 2021-07-12 17:45:42 +02:00
parent b4fec57b6d
commit cf98cff2a1
21 changed files with 62 additions and 53 deletions

View File

@@ -103,7 +103,7 @@ class CommandlineParser:
         return 1


-##### Subcommand classes
+# Subcommand classes
 #
 # Each class needs to implement two functions: add_args() adds the CLI parameters
 # for the subfunction, run() executes the subcommand.
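
The comment block fixed above documents the subcommand contract. A minimal sketch of a conforming class (the class name and option are illustrative, not part of this commit):

    import argparse

    class ExampleCommand:
        """ Hypothetical subcommand following the contract described above.
        """
        @staticmethod
        def add_args(parser):
            # add_args() declares the CLI parameters of the subcommand
            parser.add_argument('--limit', type=int, default=10)

        @staticmethod
        def run(args):
            # run() executes the subcommand and returns an exit code
            print('limit is', args.limit)
            return 0

    parser = argparse.ArgumentParser()
    ExampleCommand.add_args(parser)
    ExampleCommand.run(parser.parse_args(['--limit', '5']))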

View File

@@ -90,7 +90,7 @@ class APISearch:
         if args.query:
             params = dict(q=args.query)
         else:
-            params = {k : getattr(args, k) for k, _ in STRUCTURED_QUERY if getattr(args, k)}
+            params = {k: getattr(args, k) for k, _ in STRUCTURED_QUERY if getattr(args, k)}

         for param, _ in EXTRADATA_PARAMS:
             if getattr(args, param):

View File

@@ -24,4 +24,4 @@ class NominatimArgs:
                                main_data=self.config.TABLESPACE_PLACE_DATA,
                                main_index=self.config.TABLESPACE_PLACE_INDEX
                                )
-                   )
+                              )

View File

@@ -61,7 +61,7 @@ class UpdateRefresh:
                                       args.threads or 1)
                 indexer.index_postcodes()
             else:
-                LOG.error("The place table doesn\'t exist. " \
+                LOG.error("The place table doesn't exist. "
                           "Postcode updates on a frozen database is not possible.")

         if args.word_counts:
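
The dropped backslash is safe because adjacent string literals inside parentheses are concatenated at compile time, so no explicit line continuation is needed. A quick demonstration:

    msg = ("The place table doesn't exist. "
           "Postcode updates on a frozen database is not possible.")
    assert 'exist. Postcode' in msg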

View File

@@ -93,7 +93,7 @@ class UpdateReplication:
                       indexed_only=not args.once)

         # Sanity check to not overwhelm the Geofabrik servers.
-        if 'download.geofabrik.de'in params['base_url']\
+        if 'download.geofabrik.de' in params['base_url']\
            and params['update_interval'] < 86400:
             LOG.fatal("Update interval too low for download.geofabrik.de.\n"
                       "Please check install documentation "

View File

@@ -85,7 +85,7 @@ class DBConnection:
         # Use a dict to hand in the parameters because async is a reserved
         # word in Python3.
-        self.conn = psycopg2.connect(**{'dsn' : self.dsn, 'async' : True})
+        self.conn = psycopg2.connect(**{'dsn': self.dsn, 'async': True})
         self.wait()

         self.cursor = self.conn.cursor(cursor_factory=cursor_factory)
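
For context, a sketch of why the dict is unpacked here: 'async' is a reserved word, so it cannot appear literally as a keyword argument, while ** unpacking delivers it to connect() without going through the parser. The DSN below is a placeholder; newer psycopg2 releases also accept the alias 'async_'.

    import psycopg2

    # psycopg2.connect(dsn=..., async=True) would be a SyntaxError
    conn = psycopg2.connect(**{'dsn': 'dbname=nominatim', 'async': True})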

View File

@@ -61,7 +61,7 @@ def _setup_postgresql_features(conn):
     """
     pg_version = conn.server_version_tuple()
     return {
-        'has_index_non_key_column' : pg_version >= (11, 0, 0)
+        'has_index_non_key_column': pg_version >= (11, 0, 0)
     }


 class SQLPreprocessor:
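
The flag gates on the server version: PostgreSQL 11 introduced covering indexes (non-key INCLUDE columns), hence the (11, 0, 0) comparison. A standalone sketch with a hypothetical version tuple:

    pg_version = (11, 2, 0)  # what server_version_tuple() might return
    features = {'has_index_non_key_column': pg_version >= (11, 0, 0)}
    assert features['has_index_non_key_column'] is True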

View File

@@ -61,9 +61,9 @@ def execute_file(dsn, fname, ignore_errors=False, pre_code=None, post_code=None)

 # List of characters that need to be quoted for the copy command.
-_SQL_TRANSLATION = {ord(u'\\') : u'\\\\',
-                    ord(u'\t') : u'\\t',
-                    ord(u'\n') : u'\\n'}
+_SQL_TRANSLATION = {ord(u'\\'): u'\\\\',
+                    ord(u'\t'): u'\\t',
+                    ord(u'\n'): u'\\n'}


 class CopyBuffer:
     """ Data collector for the copy_from command.

View File

@@ -203,7 +203,7 @@ class Indexer:

                 # And insert the curent batch
                 for idx in range(0, len(places), batch):
-                    part = places[idx:idx+batch]
+                    part = places[idx:idx + batch]
                     LOG.debug("Processing places: %s", str(part))
                     runner.index_places(pool.next_free_worker(), part)
                     progress.add(len(part))
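
The slice places[idx:idx + batch] walks the list in fixed-size chunks, with the final chunk simply shorter when the total is not a multiple of the batch size. A self-contained example:

    places = list(range(7))
    batch = 3
    parts = [places[idx:idx + batch] for idx in range(0, len(places), batch)]
    assert parts == [[0, 1, 2], [3, 4, 5], [6]]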

View File

@@ -63,7 +63,7 @@ class ProgressLogger:
             places_per_sec = self.done_places
         else:
             diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
-            places_per_sec = self.done_places/diff_seconds
+            places_per_sec = self.done_places / diff_seconds

         LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
                     self.done_places, self.total_places, int(diff_seconds),

View File

@@ -25,7 +25,7 @@ class AbstractPlacexRunner:
                    SET indexed_status = 0, address = v.addr, token_info = v.ti
                    FROM (VALUES {}) as v(id, addr, ti)
                    WHERE place_id = v.id
-                """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))
+                   """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))

     @staticmethod
@@ -124,7 +124,7 @@ class InterpolationRunner:
                    SET indexed_status = 0, address = v.addr, token_info = v.ti
                    FROM (VALUES {}) as v(id, addr, ti)
                    WHERE place_id = v.id
-                """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))
+                   """.format(','.join(["(%s, %s::hstore, %s::jsonb)"] * num_places))

     def index_places(self, worker, places):

View File

@@ -341,7 +341,7 @@ class LegacyICUNameAnalyzer:
                 term = self.name_processor.get_search_normalized(word)
                 if term:
                     copystr.add(word, ' ' + term, cls, typ,
-                            oper if oper in ('in', 'near') else None, 0)
+                                oper if oper in ('in', 'near') else None, 0)
                     added += 1

         copystr.copy_out(cursor, 'word',

View File

@@ -582,7 +582,7 @@ class _TokenCache:
         with conn.cursor() as cur:
             cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
                            FROM generate_series(1, 100) as i""")
-            self._cached_housenumbers = {str(r[0]) : r[1] for r in cur}
+            self._cached_housenumbers = {str(r[0]): r[1] for r in cur}

         # For postcodes remember the ones that have already been added
         self.postcodes = set()

View File

@@ -24,6 +24,7 @@ def _check(hint=None):
     """
     def decorator(func):
         title = func.__doc__.split('\n', 1)[0].strip()
+
         def run_check(conn, config):
             print(title, end=' ... ')
             ret = func(conn, config)
@@ -98,13 +99,12 @@ def _get_indexes(conn):
     if conn.table_exists('place'):
         indexes.extend(('idx_placex_pendingsector',
                         'idx_location_area_country_place_id',
-                        'idx_place_osm_unique'
-                        ))
+                        'idx_place_osm_unique'))

     return indexes


-### CHECK FUNCTIONS
+# CHECK FUNCTIONS
 #
 # Functions are exectured in the order they appear here.
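
For context, _check() wraps every check function so that the first line of its docstring becomes the printed title. A simplified sketch of the pattern (the real decorator is a factory that also takes a hint and interprets the returned status):

    def _check(func):
        title = func.__doc__.split('\n', 1)[0].strip()

        def run_check(conn, config):
            print(title, end=' ... ')
            return func(conn, config)
        return run_check

    @_check
    def dummy_check(conn, config):
        """Dummy check used only for illustration.
        """
        return True

    assert dummy_check(None, None) is True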

View File

@@ -184,8 +184,10 @@ def truncate_data_tables(conn):
     conn.commit()

+
 _COPY_COLUMNS = 'osm_type, osm_id, class, type, name, admin_level, address, extratags, geometry'

+
 def load_data(dsn, threads):
     """ Copy data into the word and placex table.
     """
@@ -250,6 +252,7 @@ def create_search_indices(conn, config, drop=False):
     sql.run_sql_file(conn, 'indices.sql', drop=drop)

+
 def create_country_names(conn, tokenizer, languages=None):
     """ Add default country names to search index. `languages` is a comma-
         separated list of language codes as used in OSM. If `languages` is not
@@ -261,8 +264,7 @@ def create_country_names(conn, tokenizer, languages=None):
     def _include_key(key):
         return key == 'name' or \
-               (key.startswith('name:') \
-                and (not languages or key[5:] in languages))
+               (key.startswith('name:') and (not languages or key[5:] in languages))

     with conn.cursor() as cur:
         psycopg2.extras.register_hstore(cur)
@@ -271,7 +273,7 @@ def create_country_names(conn, tokenizer, languages=None):
     with tokenizer.name_analyzer() as analyzer:
         for code, name in cur:
-            names = {'countrycode' : code}
+            names = {'countrycode': code}
             if code == 'gb':
                 names['short_name'] = 'UK'
             if code == 'us':
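
Restated as a standalone predicate, the rewritten _include_key() keeps the plain name tag plus any name:xx tag whose language suffix passes the filter. In this sketch languages is a parameter; the original closes over it:

    def _include_key(key, languages=None):
        return key == 'name' or \
               (key.startswith('name:') and (not languages or key[5:] in languages))

    assert _include_key('name')
    assert _include_key('name:de', ['de', 'en'])
    assert not _include_key('name:fr', ['de', 'en'])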

View File

@@ -136,7 +136,7 @@ def run_osm2pgsql(options):
 def get_url(url):
     """ Get the contents from the given URL and return it as a UTF-8 string.
     """
-    headers = {"User-Agent" : "Nominatim/{0[0]}.{0[1]}.{0[2]}-{0[3]}".format(NOMINATIM_VERSION)}
+    headers = {"User-Agent": "Nominatim/{0[0]}.{0[1]}.{0[2]}-{0[3]}".format(NOMINATIM_VERSION)}

     try:
         with urlrequest.urlopen(urlrequest.Request(url, headers=headers)) as response:
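
get_url() boils down to a urllib request with a custom User-Agent header; a self-contained sketch with the version string hard-coded for illustration:

    from urllib import request as urlrequest

    def get_url(url):
        headers = {"User-Agent": "Nominatim/3.7.0-0"}
        request = urlrequest.Request(url, headers=headers)
        with urlrequest.urlopen(request) as response:
            return response.read().decode('utf-8')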

View File

@@ -142,7 +142,8 @@ def change_housenumber_transliteration(conn, **_):
               BEGIN
                 SELECT array_to_string(array_agg(trans), ';')
                   INTO normtext
-                  FROM (SELECT lookup_word as trans, getorcreate_housenumber_id(lookup_word)
+                  FROM (SELECT lookup_word as trans,
+                               getorcreate_housenumber_id(lookup_word)
                         FROM (SELECT make_standard_name(h) as lookup_word
                               FROM regexp_split_to_table(housenumber, '[,;]') h) x) y;
                 return normtext;

View File

@@ -165,11 +165,14 @@ def update_postcodes(dsn, project_dir, tokenizer):
         with conn.cursor(name="placex_postcodes") as cur:
             cur.execute("""
                SELECT cc as country_code, pc, ST_X(centroid), ST_Y(centroid)
-               FROM (SELECT 
-                       COALESCE(plx.country_code, get_country_code(ST_Centroid(pl.geometry))) as cc,
+               FROM (SELECT
+                       COALESCE(plx.country_code,
+                                get_country_code(ST_Centroid(pl.geometry))) as cc,
                        token_normalized_postcode(pl.address->'postcode') as pc,
-                       ST_Centroid(ST_Collect(COALESCE(plx.centroid, ST_Centroid(pl.geometry)))) as centroid
-                     FROM place AS pl LEFT OUTER JOIN placex AS plx ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type
+                       ST_Centroid(ST_Collect(COALESCE(plx.centroid,
+                                                       ST_Centroid(pl.geometry)))) as centroid
+                     FROM place AS pl LEFT OUTER JOIN placex AS plx
+                          ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type
                WHERE pl.address ? 'postcode' AND pl.geometry IS NOT null
                GROUP BY cc, pc) xx
                WHERE pc IS NOT null AND cc IS NOT null
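
The name= argument in the hunk above makes this a psycopg2 server-side cursor: rows are streamed from PostgreSQL instead of materialised in client memory, which matters when scanning a whole-planet place table. A hedged sketch (DSN and query are placeholders):

    import psycopg2

    conn = psycopg2.connect('dbname=nominatim')
    with conn.cursor(name='placex_postcodes') as cur:
        cur.itersize = 1000  # rows fetched per round trip
        cur.execute("SELECT address->'postcode' FROM place")
        for (postcode,) in cur:
            pass  # process rows without holding the full result set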

View File

@@ -44,8 +44,8 @@ class SPImporter():
         # This set will contain all existing phrases to be added.
         # It contains tuples with the following format: (lable, class, type, operator)
         self.word_phrases = set()
-        #This set will contain all existing place_classtype tables which doesn't match any
-        #special phrases class/type on the wiki.
+        # This set will contain all existing place_classtype tables which doesn't match any
+        # special phrases class/type on the wiki.
         self.table_phrases_to_delete = set()

     def import_phrases(self, tokenizer, should_replace):
@@ -60,7 +60,7 @@ class SPImporter():
         LOG.warning('Special phrases importation starting')
         self._fetch_existing_place_classtype_tables()

-        #Store pairs of class/type for further processing
+        # Store pairs of class/type for further processing
         class_type_pairs = set()

         for loaded_phrases in self.sp_loader:
@@ -131,17 +131,17 @@ class SPImporter():
             Return the class/type pair corresponding to the phrase.
         """
-        #blacklisting: disallow certain class/type combinations
+        # blacklisting: disallow certain class/type combinations
         if phrase.p_class in self.black_list.keys() \
            and phrase.p_type in self.black_list[phrase.p_class]:
             return None

-        #whitelisting: if class is in whitelist, allow only tags in the list
+        # whitelisting: if class is in whitelist, allow only tags in the list
         if phrase.p_class in self.white_list.keys() \
            and phrase.p_type not in self.white_list[phrase.p_class]:
             return None

-        #sanity check, in case somebody added garbage in the wiki
+        # sanity check, in case somebody added garbage in the wiki
         if not self._check_sanity(phrase):
             self.statistics_handler.notify_one_phrase_invalid()
             return None
@@ -161,7 +161,7 @@ class SPImporter():
         sql_tablespace = self.config.TABLESPACE_AUX_DATA
         if sql_tablespace:
-            sql_tablespace = ' TABLESPACE '+sql_tablespace
+            sql_tablespace = ' TABLESPACE ' + sql_tablespace

         with self.db_connection.cursor() as db_cursor:
             db_cursor.execute("CREATE INDEX idx_placex_classtype ON placex (class, type)")
@@ -174,19 +174,19 @@ class SPImporter():
             if table_name in self.table_phrases_to_delete:
                 self.statistics_handler.notify_one_table_ignored()
-                #Remove this table from the ones to delete as it match a class/type
-                #still existing on the special phrases of the wiki.
+                # Remove this table from the ones to delete as it match a
+                # class/type still existing on the special phrases of the wiki.
                 self.table_phrases_to_delete.remove(table_name)
-                #So dont need to create the table and indexes.
+                # So don't need to create the table and indexes.
                 continue

-            #Table creation
+            # Table creation
             self._create_place_classtype_table(sql_tablespace, phrase_class, phrase_type)

-            #Indexes creation
+            # Indexes creation
             self._create_place_classtype_indexes(sql_tablespace, phrase_class, phrase_type)

-            #Grant access on read to the web user.
+            # Grant access on read to the web user.
             self._grant_access_to_webuser(phrase_class, phrase_type)

             self.statistics_handler.notify_one_table_created()
@@ -202,8 +202,8 @@ class SPImporter():
         table_name = _classtype_table(phrase_class, phrase_type)
         with self.db_connection.cursor() as db_cursor:
             db_cursor.execute(SQL("""
-                CREATE TABLE IF NOT EXISTS {{}} {}
-                AS SELECT place_id AS place_id,st_centroid(geometry) AS centroid FROM placex
+                    CREATE TABLE IF NOT EXISTS {{}} {}
+                    AS SELECT place_id AS place_id,st_centroid(geometry) AS centroid FROM placex
                     WHERE class = {{}} AND type = {{}}""".format(sql_tablespace))
                               .format(Identifier(table_name), Literal(phrase_class),
                                       Literal(phrase_type)))
@@ -215,7 +215,7 @@ class SPImporter():
         """
         index_prefix = 'idx_place_classtype_{}_{}_'.format(phrase_class, phrase_type)
         base_table = _classtype_table(phrase_class, phrase_type)
-        #Index on centroid
+        # Index on centroid
         if not self.db_connection.index_exists(index_prefix + 'centroid'):
             with self.db_connection.cursor() as db_cursor:
                 db_cursor.execute(SQL("""
@@ -223,7 +223,7 @@ class SPImporter():
                               .format(Identifier(index_prefix + 'centroid'),
                                       Identifier(base_table)), sql_tablespace)

-        #Index on place_id
+        # Index on place_id
         if not self.db_connection.index_exists(index_prefix + 'place_id'):
             with self.db_connection.cursor() as db_cursor:
                 db_cursor.execute(SQL(
@@ -248,10 +248,12 @@ class SPImporter():
             Delete the place_classtype tables.
         """
         LOG.warning('Cleaning database...')
-        #Array containing all queries to execute. Contain tuples of format (query, parameters)
+        # Array containing all queries to execute.
+        # Contains tuples of format (query, parameters)
         queries_parameters = []

-        #Delete place_classtype tables corresponding to class/type which are not on the wiki anymore
+        # Delete place_classtype tables corresponding to class/type which
+        # are not on the wiki anymore.
         for table in self.table_phrases_to_delete:
             self.statistics_handler.notify_one_table_deleted()
             query = SQL('DROP TABLE IF EXISTS {}').format(Identifier(table))
@@ -271,7 +273,7 @@ class SPImporter():
         file, extension = os.path.splitext(file_path)
         json_file_path = Path(file + '.json').resolve()

-        if extension not in('.php', '.json'):
+        if extension not in ('.php', '.json'):
             raise UsageError('The custom NOMINATIM_PHRASE_CONFIG file has not a valid extension.')

         if extension == '.php' and not isfile(json_file_path):
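
A quick illustration of the corrected membership test: os.path.splitext() returns the extension with its leading dot, which is then checked against the allowed pair (the path is made up):

    import os

    file, extension = os.path.splitext('/settings/phrase-settings.php')
    assert extension == '.php'
    assert extension in ('.php', '.json')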

View File

@@ -15,7 +15,7 @@ class SPWikiLoader(Iterator):
     def __init__(self, config, languages=None):
         super().__init__()
         self.config = config
-        #Compile the regex here to increase performances.
+        # Compile the regex here to increase performances.
         self.occurence_pattern = re.compile(
             r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])'
         )
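
The pattern above matches one row of the wiki's special-phrases table markup; the captured groups are stripped later when building a SpecialPhrase. A short demonstration on a made-up row:

    import re

    occurence_pattern = re.compile(
        r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])')

    row = '| Restaurant || amenity || restaurant || - || Y'
    label, p_class, p_type, operator, plural = occurence_pattern.findall(row)[0]
    assert (label.strip(), p_class.strip(), plural) == ('Restaurant', 'amenity', 'Y')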
@@ -35,7 +35,7 @@ class SPWikiLoader(Iterator):
             Parses XML content and extracts special phrases from it.
             Return a list of SpecialPhrase.
         """
-        #One match will be of format [label, class, type, operator, plural]
+        # One match will be of format [label, class, type, operator, plural]
         matches = self.occurence_pattern.findall(xml)
         returned_phrases = set()
         for match in matches:
@@ -65,5 +65,6 @@ class SPWikiLoader(Iterator):
            Requested URL Example :
                https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/EN
         """
-        url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' + lang.upper()  # pylint: disable=line-too-long
+        url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' \
+              + lang.upper()
         return get_url(url)

View File

@@ -13,7 +13,7 @@ class SpecialPhrase():
     def __init__(self, p_label, p_class, p_type, p_operator):
         self.p_label = p_label.strip()
         self.p_class = p_class.strip()
-        #Hack around a bug where building=yes was imported with quotes into the wiki
+        # Hack around a bug where building=yes was imported with quotes into the wiki
         self.p_type = re.sub(r'\"|&quot;', '', p_type.strip())
-        #Needed if some operator in the wiki are not written in english
+        # Needed if some operator in the wiki are not written in english
         self.p_operator = '-' if p_operator not in ('near', 'in') else p_operator
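
Both touched comments sit on small normalisations that are easy to check in isolation: quotes, plain or HTML-escaped, are stripped from the type, and any operator other than 'near'/'in' collapses to '-':

    import re

    p_type = re.sub(r'\"|&quot;', '', '"yes"')
    assert p_type == 'yes'

    p_operator = 'nearby'
    assert ('-' if p_operator not in ('near', 'in') else p_operator) == '-'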