mirror of
https://github.com/khoj-ai/khoj.git
synced 2025-02-17 16:14:21 +00:00
Make bi-encoder return fewer results to reduce cross-encoder latency
This commit is contained in:
parent
9ab3edf6d6
commit
09727ac3be
1 changed files with 1 additions and 1 deletions
@@ -20,7 +20,7 @@ def initialize_model(search_config: TextSearchConfig):
     torch.set_num_threads(4)

     # Number of entries we want to retrieve with the bi-encoder
-    top_k = 30
+    top_k = 15

     # The bi-encoder encodes all entries to use for semantic search
     bi_encoder = load_model(
|
Loading…
Add table
Reference in a new issue