From 85797acf1ef6995c4c35b9ac3c2373a972fb48e1 Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Mon, 25 Oct 2021 21:33:27 +0200
Subject: [PATCH] ICU: add an index over word_ids

Needed for keyword lookup in the details response.
---
 lib-sql/tokenizer/legacy_tokenizer_indices.sql | 1 +
 nominatim/tokenizer/icu_tokenizer.py           | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/lib-sql/tokenizer/legacy_tokenizer_indices.sql b/lib-sql/tokenizer/legacy_tokenizer_indices.sql
index b21f29d7..d653a26d 100644
--- a/lib-sql/tokenizer/legacy_tokenizer_indices.sql
+++ b/lib-sql/tokenizer/legacy_tokenizer_indices.sql
@@ -1,2 +1,3 @@
+-- Required for details lookup.
 CREATE INDEX IF NOT EXISTS idx_word_word_id ON word
   USING BTREE (word_id) {{db.tablespace.search_index}};
diff --git a/nominatim/tokenizer/icu_tokenizer.py b/nominatim/tokenizer/icu_tokenizer.py
index 2af0bcb2..e7ee57ad 100644
--- a/nominatim/tokenizer/icu_tokenizer.py
+++ b/nominatim/tokenizer/icu_tokenizer.py
@@ -67,10 +67,13 @@ class LegacyICUTokenizer(AbstractTokenizer):
             self.term_normalization = get_property(conn, DBCFG_TERM_NORMALIZATION)


-    def finalize_import(self, _):
+    def finalize_import(self, config):
         """ Do any required postprocessing to make the tokenizer data ready
             for use.
         """
+        with connect(self.dsn) as conn:
+            sqlp = SQLPreprocessor(conn, config)
+            sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')


     def update_sql_functions(self, config):
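
Note on usage: the new BTREE index exists so that the details response can
resolve the word_ids stored with a place back into word tokens without a
sequential scan of the word table. A minimal sketch of that kind of lookup
(the exact statement Nominatim issues may differ, and the word_id values
below are placeholders):

    -- Fetch the tokens for a handful of word_ids; with idx_word_word_id in
    -- place this becomes a few cheap index lookups instead of a full scan.
    SELECT word_id, word_token
      FROM word
     WHERE word_id = ANY(ARRAY[1054, 2387, 9912]);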