Update handling of the Gemini response for new API changes. Per the documentation:

finish_reason (google.ai.generativelanguage_v1beta.types.Candidate.FinishReason):
    Optional. Output only. The reason why the model stopped generating
    tokens. If empty, the model has not stopped generating the tokens.
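
As a quick illustration (a sketch only, not part of this commit; the import path follows the type named in the documentation above), an empty finish_reason can be read as "generation still in progress":

# Sketch, not from this commit: treat an empty/unset finish_reason as
# "the model has not stopped generating tokens", per the docs quoted above.
from google.ai.generativelanguage_v1beta.types import Candidate

FinishReason = Candidate.FinishReason

def is_still_generating(candidate) -> bool:
    # An unset finish_reason is falsy (FINISH_REASON_UNSPECIFIED == 0).
    return not candidate.finish_reason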
sabaimran 2024-10-17 09:00:01 -07:00
parent 27835628e6
commit 07ab8ab931

@@ -148,6 +148,10 @@ def handle_gemini_response(candidates, prompt_feedback=None):
     elif candidates[0].finish_reason == FinishReason.SAFETY:
         message = generate_safety_response(candidates[0].safety_ratings)
         stopped = True
+    # Check if finish reason is empty, therefore generation is in progress
+    elif not candidates[0].finish_reason:
+        message = None
+        stopped = False
     # Check if the response was stopped due to reaching maximum token limit or other reasons
     elif candidates[0].finish_reason != FinishReason.STOP:
         message = f"\nI can't talk further about that because of **{candidates[0].finish_reason.name} issue.**"