# Chat with an intelligent assistant in your terminal
import json
from openai import OpenAI

# Point to the local server
client = OpenAI(base_url="http://localhost:6789/v1", api_key="not-needed")
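
# Conversation history for the session; its system prompt scopes the first
# model call to identifying the user's intent rather than answering it directly.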
history = [
    {"role": "system", "content": "You are a helpful AI assistant. Your role is to identify what exactly the user is asking for. Your response will be passed along to other AI assistants that will prepare a response to the user, but it is important that you focus on identifying what they actually want."},
]

print()
history.append({"role": "user", "content": input("> ")})
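
# Each pass through the loop makes three model calls: identify the intent,
# propose steps to fulfill it, then generate a reply to the conversation.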
while True:
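    # Stage 1: ask the model to restate what the user is actually asking for.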
    completion = client.chat.completions.create(
        model="local-model",  # this field is currently unused
        messages=history,
        temperature=0.7,
        stream=True,
    )
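
    # Scratch conversation for stage 2; it is rebuilt every turn and never
    # feeds back into history.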
    historyTemp = [
        {"role": "system", "content": "You are a helpful AI assistant. Your role is to identify what is required in order to fulfill requests. Your response will be passed along to an AI assistant responsible for carrying out the steps you identify as being necessary."},
    ]

    historyTempMsg = {"role": "user", "content": "Propose steps to fulfill this request: "}
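
    # Stream stage 1's answer to the terminal while capturing it as the
    # stage 2 prompt.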
    for chunk in completion:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
            historyTempMsg["content"] += chunk.choices[0].delta.content

    historyTemp.append(historyTempMsg)
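
    # Stage 2: turn the identified request into a proposed list of steps.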
    completion = client.chat.completions.create(
        model="local-model",  # this field is currently unused
        messages=historyTemp,
        temperature=0.7,
        stream=True,
    )

    # Accumulate stage 2's streamed reply; new_message must be initialized
    # before the += below, or the first turn raises a NameError.
    new_message = {"role": "assistant", "content": ""}
    for chunk in completion:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
            new_message["content"] += chunk.choices[0].delta.content
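
    # Dump the raw conversation history in gray for debugging.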
    gray_color = "\033[90m"
    reset_color = "\033[0m"
    print(f"{gray_color}\n{'-'*20} History dump {'-'*20}\n")
    print(json.dumps(history, indent=2))
    print(f"\n{'-'*55}\n{reset_color}")

    print()
    history.append({"role": "user", "content": input("> ")})

    #####
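
    # Stage 3: generate an assistant reply to the accumulated history and
    # keep it in the conversation.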
    completion = client.chat.completions.create(
        model="local-model",
        messages=history,
        temperature=0.7,
        stream=True,
    )

    new_message = {"role": "assistant", "content": ""}

    for chunk in completion:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
            new_message["content"] += chunk.choices[0].delta.content

    history.append(new_message)

    print()
    history.append({"role": "user", "content": input("> ")})