From 4da0499cd70a8f0a071e1d796c419a6695c2337c Mon Sep 17 00:00:00 2001
From: Debanjum
Date: Tue, 19 Nov 2024 21:07:17 -0800
Subject: [PATCH] Stream responses from OpenAI's o1 model series, as the
 API now supports it

Previously, o1 models did not support streaming responses via the API.
Now they do.
---
 src/khoj/processor/conversation/openai/utils.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py
index 36ebc679..a8a94a2e 100644
--- a/src/khoj/processor/conversation/openai/utils.py
+++ b/src/khoj/processor/conversation/openai/utils.py
@@ -56,7 +56,6 @@ def completion_with_backoff(
     # Update request parameters for compatability with o1 model series
     # Refer: https://platform.openai.com/docs/guides/reasoning/beta-limitations
     if model.startswith("o1"):
-        stream = False
         temperature = 1
         model_kwargs.pop("stop", None)
         model_kwargs.pop("response_format", None)
@@ -156,7 +155,6 @@ def llm_thread(
     # Update request parameters for compatability with o1 model series
     # Refer: https://platform.openai.com/docs/guides/reasoning/beta-limitations
     if model_name.startswith("o1"):
-        stream = False
         temperature = 1
         model_kwargs.pop("stop", None)
         model_kwargs.pop("response_format", None)
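
Note for reviewers: below is a minimal, self-contained sketch of what the
adjusted o1 branch enables, written against the official openai Python
client. It is illustrative only, not khoj's actual wrapper code: the client
setup, the example model_kwargs, and the prompt are assumptions; only the
if-branch mirrors the patched lines above.

# Minimal sketch (assumed names, not khoj's code): with the stream = False
# override removed, o1 models flow through the same streaming path as any
# other chat model.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

model = "o1-mini"
temperature = 0.2
model_kwargs = {"stop": ["\n\n"], "response_format": {"type": "json_object"}}

# Mirrors the patched branch: o1 models still pin temperature to 1 and
# reject stop/response_format, but streaming no longer has to be disabled.
if model.startswith("o1"):
    temperature = 1
    model_kwargs.pop("stop", None)
    model_kwargs.pop("response_format", None)

chunks = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "Summarize exponential backoff in one line."}],
    temperature=temperature,
    stream=True,
    **model_kwargs,
)

# Consume the stream, printing each content delta as it arrives.
for chunk in chunks:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)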