remove support for unindexed tokens

This was a special feature of the legacy tokenizer, which would not
index very frequent tokens.
Sarah Hoffmann 2024-09-22 10:39:10 +02:00
parent 290c22a153
commit a690605a96
6 changed files with 23 additions and 30 deletions
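
For context, a minimal sketch of what the removed flag meant for consumers of the query module. The dataclass mirrors the Token fields visible in the query.py diff below; the helper function is illustrative only and not part of the project's API. With the legacy tokenizer, very frequent tokens were marked is_indexed=False and every lookup had to filter them out; the ICU tokenizer indexes every token, so the flag and all checks on it are dead code.

    from dataclasses import dataclass

    @dataclass
    class Token:                # trimmed sketch of the Token base class
        penalty: float
        token: int
        count: int
        addr_count: int
        lookup_word: str
        # is_indexed: bool      # removed by this commit; effectively always True

    def usable_partials(tokens):
        # Before this commit: [t for t in tokens if t.is_indexed]
        # After: every token participates in the search.
        return list(tokens)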

View File

@@ -167,8 +167,7 @@ class SearchBuilder:
         expected_count = sum(t.count for t in hnrs)
 
         partials = {t.token: t.addr_count for trange in address
-                       for t in self.query.get_partials_list(trange)
-                       if t.is_indexed}
+                    for t in self.query.get_partials_list(trange)}
 
         if not partials:
             # can happen when none of the partials is indexed
@@ -219,11 +218,9 @@ class SearchBuilder:
         addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
         addr_tokens = list({t.token for t in addr_partials})
 
-        partials_indexed = all(t.is_indexed for t in name_partials.values()) \
-            and all(t.is_indexed for t in addr_partials)
         exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
 
-        if (len(name_partials) > 3 or exp_count < 8000) and partials_indexed:
+        if (len(name_partials) > 3 or exp_count < 8000):
             yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
             return
 
@@ -232,8 +229,6 @@ class SearchBuilder:
         name_fulls = self.query.get_tokens(name, TokenType.WORD)
         if name_fulls:
             fulls_count = sum(t.count for t in name_fulls)
-            if partials_indexed:
-                penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
 
             if fulls_count < 50000 or addr_count < 30000:
                 yield penalty,fulls_count / (2**len(addr_tokens)), \
@@ -243,8 +238,7 @@ class SearchBuilder:
         # To catch remaining results, lookup by name and address
         # We only do this if there is a reasonable number of results expected.
         exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
-        if exp_count < 10000 and addr_count < 20000\
-                and all(t.is_indexed for t in name_partials.values()):
+        if exp_count < 10000 and addr_count < 20000:
             penalty += 0.35 * max(1 if name_fulls else 0.1,
                                   5 - len(name_partials) - len(addr_tokens))
             yield penalty, exp_count,\
@@ -260,11 +254,10 @@ class SearchBuilder:
             addr_restrict_tokens = []
             addr_lookup_tokens = []
             for t in addr_partials:
-                if t.is_indexed:
-                    if t.addr_count > 20000:
-                        addr_restrict_tokens.append(t.token)
-                    else:
-                        addr_lookup_tokens.append(t.token)
+                if t.addr_count > 20000:
+                    addr_restrict_tokens.append(t.token)
+                else:
+                    addr_lookup_tokens.append(t.token)
 
             if addr_restrict_tokens:
                 lookup.append(dbf.FieldLookup('nameaddress_vector',
@@ -289,13 +282,12 @@ class SearchBuilder:
            addr_restrict_tokens = []
            addr_lookup_tokens = []
            for t in addr_partials:
-                if t.is_indexed:
-                    if t.addr_count > 20000:
-                        addr_restrict_tokens.append(t.token)
-                    else:
-                        addr_lookup_tokens.append(t.token)
+                if t.addr_count > 20000:
+                    addr_restrict_tokens.append(t.token)
+                else:
+                    addr_lookup_tokens.append(t.token)
         else:
-            addr_restrict_tokens = [t.token for t in addr_partials if t.is_indexed]
+            addr_restrict_tokens = [t.token for t in addr_partials]
             addr_lookup_tokens = []
 
         return dbf.lookup_by_any_name([t.token for t in name_fulls],
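
The two hunks above keep the frequency heuristic and merely drop the is_indexed guard around it. A hedged sketch of that heuristic as a standalone function (the threshold comes from the diff; the function itself is illustrative, not the project's API): frequent address tokens only restrict results found via other terms, while rare tokens are cheap enough to drive the index lookup directly.

    def split_address_tokens(addr_partials, threshold=20000):
        # Frequent tokens (addr_count above the threshold) go into the
        # 'restrict' bucket; rare ones are used for the actual index lookup.
        restrict, lookup = [], []
        for t in addr_partials:
            if t.addr_count > threshold:
                restrict.append(t.token)
            else:
                lookup.append(t.token)
        return restrict, lookup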

View File

@@ -123,7 +123,7 @@ class ICUToken(qmod.Token):
             lookup_word = row.word_token
 
         return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
-                        lookup_word=lookup_word, is_indexed=True,
+                        lookup_word=lookup_word,
                         word_token=row.word_token, info=row.info,
                         addr_count=max(1, addr_count))
 
@@ -259,7 +259,9 @@ class ICUQueryAnalyzer(AbstractQueryAnalyzer):
             if len(part.token) <= 4 and part[0].isdigit()\
                and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
                 query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
-                                ICUToken(0.5, 0, 1, 1, part.token, True, part.token, None))
+                                ICUToken(penalty=0.5, token=0,
+                                         count=1, addr_count=1, lookup_word=part.token,
+                                         word_token=part.token, info=None))
 
     def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:

View File

@@ -101,7 +101,6 @@ class Token(ABC):
     count: int
     addr_count: int
     lookup_word: str
-    is_indexed: bool
 
     @abstractmethod

View File

@@ -19,7 +19,7 @@ class MyToken(query.Token):
 
 def mktoken(tid: int):
     return MyToken(penalty=3.0, token=tid, count=1, addr_count=1,
-                   lookup_word='foo', is_indexed=True)
+                   lookup_word='foo')
 
 
 @pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),

View File

@@ -33,7 +33,7 @@ def make_query(*args):
             q.add_token(TokenRange(start, end), ttype,
                         MyToken(penalty=0.5 if ttype == TokenType.PARTIAL else 0.0,
                                 token=tid, count=1, addr_count=1,
-                                lookup_word=word, is_indexed=True))
+                                lookup_word=word))
 
     return q
 
@@ -397,14 +397,14 @@ def make_counted_searches(name_part, name_full, address_part, address_full,
     q.add_node(BreakType.END, PhraseType.NONE)
 
     q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
-                MyToken(0.5, 1, name_part, 1, 'name_part', True))
+                MyToken(0.5, 1, name_part, 1, 'name_part'))
     q.add_token(TokenRange(0, 1), TokenType.WORD,
-                MyToken(0, 101, name_full, 1, 'name_full', True))
+                MyToken(0, 101, name_full, 1, 'name_full'))
     for i in range(num_address_parts):
         q.add_token(TokenRange(i + 1, i + 2), TokenType.PARTIAL,
-                    MyToken(0.5, 2, address_part, 1, 'address_part', True))
+                    MyToken(0.5, 2, address_part, 1, 'address_part'))
         q.add_token(TokenRange(i + 1, i + 2), TokenType.WORD,
-                    MyToken(0, 102, address_full, 1, 'address_full', True))
+                    MyToken(0, 102, address_full, 1, 'address_full'))
 
     builder = SearchBuilder(q, SearchDetails())
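
For readers decoding the positional MyToken calls above: the arguments follow the Token field order (penalty, token, count, addr_count, lookup_word), so the dropped trailing True was the is_indexed flag. The first call, spelled with keywords for clarity (illustrative only):

    MyToken(penalty=0.5, token=1, count=name_part, addr_count=1,
            lookup_word='name_part')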

View File

@@ -20,7 +20,7 @@ class MyToken(Token):
 def make_query(*args):
     q = QueryStruct([Phrase(args[0][1], '')])
     dummy = MyToken(penalty=3.0, token=45, count=1, addr_count=1,
-                    lookup_word='foo', is_indexed=True)
+                    lookup_word='foo')
 
     for btype, ptype, _ in args[1:]:
         q.add_node(btype, ptype)