[Fork] Additions on franzbischoff resolution on ()

* Related to Issue , implemented custom prompt in workspace settings.

* run linter

* Remove code duplication for chat prompt injection (see the prompt-flow sketch below)

---------

Co-authored-by: Francisco Bischoff <franzbischoff@gmail.com>
Timothy Carambat 2023-07-20 11:14:23 -07:00 committed by GitHub
parent fdce3e99ea
commit 5a7d8add6f
10 changed files with 93 additions and 28 deletions
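
At a glance, the change threads a per-workspace prompt from the settings form into the system message that each vector DB provider builds, with the old hard-coded instruction kept as the fallback. A minimal sketch of that flow, using the chatPrompt helper added below (the workspace and contextTexts values here are illustrative, not from the diff):

// chatPrompt falls back to the stock instruction when no custom prompt is saved
// (mirrors the helper added in frontend/src/utils/chat and server/utils/chats).
function chatPrompt(workspace) {
  return (
    workspace?.openAiPrompt ??
    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
  );
}

// Illustrative inputs: a workspace with a custom prompt and two retrieved chunks.
const workspace = { openAiPrompt: "Answer as a terse support agent." };
const contextTexts = ["First retrieved chunk", "Second retrieved chunk"];

// Each vector DB provider now builds its system message from the helper
// instead of repeating the hard-coded string.
const prompt = {
  role: "system",
  content: `${chatPrompt(workspace)}
Context:
${contextTexts
  .map((text, i) => `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`)
  .join("")}`,
};

console.log(prompt.content);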
Changed paths:
frontend/.eslintrc.cjs
frontend/src/components/Modals/MangeWorkspace/Settings
frontend/src/components/Modals/Settings/ExportImport
frontend/src/components/Sidebar
frontend/src/utils/chat
server/models
server/utils/chats
server/utils/vectorDbProviders/chroma
server/utils/vectorDbProviders/lance
server/utils/vectorDbProviders/pinecone

frontend/.eslintrc.cjs

@@ -1,15 +1,20 @@
 module.exports = {
-env: { browser: true, es2020: true },
-extends: [
-'eslint:recommended',
-'plugin:react/recommended',
-'plugin:react/jsx-runtime',
-'plugin:react-hooks/recommended',
+"env": { "browser": true, "es2020": true },
+"extends": [
+"eslint:recommended",
+"plugin:react/recommended",
+"plugin:react/jsx-runtime",
+"plugin:react-hooks/recommended"
 ],
-parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
-settings: { react: { version: '18.2' } },
-plugins: ['react-refresh'],
-rules: {
-'react-refresh/only-export-components': 'warn',
-},
+"files": ["**/*.js", "**/*.jsx"],
+"linterOptions": { "reportUnusedDisableDirectives": true },
+"parserOptions": { "ecmaVersion": "latest", "sourceType": "module", "ecmaFeatures": { "jsx": true } },
+"settings": { "react": { "version": '18.2' } },
+"plugins": [
+"react-refresh",
+"react-hooks"
+],
+"rules": {
+"react-refresh/only-export-components": "warn"
+}
 }

frontend/src/components/Modals/MangeWorkspace/Settings

@@ -1,6 +1,7 @@
 import React, { useState, useRef, useEffect } from "react";
 import Workspace from "../../../../models/workspace";
 import paths from "../../../../utils/paths";
+import { chatPrompt } from "../../../../utils/chat";
 
 export default function WorkspaceSettings({ workspace }) {
 const formEl = useRef(null);
@@ -141,6 +142,35 @@ export default function WorkspaceSettings({ workspace }) {
 />
 </div>
 <div>
+<div className="flex flex-col gap-y-1 mb-4">
+<label
+htmlFor="name"
+className="block text-sm font-medium text-gray-900 dark:text-white"
+>
+Prompt
+</label>
+<p className="text-xs text-gray-600 dark:text-stone-400">
+The prompt that will be used on this workspace. Define the
+context and instructions for the AI to generate a response.
+You should to provide a carefully crafted prompt so the AI can
+generate a relevant and accurate response.
+</p>
+</div>
+<textarea
+name="openAiPrompt"
+maxLength={500}
+rows={5}
+defaultValue={chatPrompt(workspace)}
+className="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-stone-600 dark:border-stone-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500"
+placeholder="Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+required={true}
+wrap="soft"
+autoComplete="off"
+onChange={() => setHasChanges(true)}
+/>
+</div>
 <div>
 <div className="flex flex-col gap-y-1 mb-4">
 <label

frontend/src/components/Modals/Settings/ExportImport

@@ -181,7 +181,9 @@ function ImportData() {
 Import was completed successfully
 </p>
 </div>
-<p className="text-green-800 text-xs italic">please reload the page to see the results of the import.</p>
+<p className="text-green-800 text-xs italic">
+please reload the page to see the results of the import.
+</p>
 </div>
 );
 }

frontend/src/components/Sidebar

@@ -185,10 +185,11 @@ export function SidebarMobileHeader() {
 className={`z-99 fixed top-0 left-0 transition-all duration-500 w-[100vw] h-[100vh]`}
 >
 <div
-className={`${showBgOverlay
+className={`${
+showBgOverlay
 ? "transition-all opacity-1"
 : "transition-none opacity-0"
-} duration-500 fixed top-0 left-0 bg-black-900 bg-opacity-75 w-screen h-screen`}
+} duration-500 fixed top-0 left-0 bg-black-900 bg-opacity-75 w-screen h-screen`}
 onClick={() => setShowSidebar(false)}
 />
 <div

frontend/src/utils/chat

@@ -56,3 +56,10 @@ export default function handleChat(
 });
 }
 }
+
+export function chatPrompt(workspace) {
+return (
+workspace?.openAiPrompt ??
+"Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+);
+}

server/models

@@ -12,6 +12,7 @@ const Workspace = {
 "openAiTemp",
 "openAiHistory",
 "lastUpdatedAt",
+"openAiPrompt",
 ],
 colsInit: `
 id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -21,7 +22,8 @@
 createdAt TEXT DEFAULT CURRENT_TIMESTAMP,
 openAiTemp REAL DEFAULT NULL,
 openAiHistory INTEGER DEFAULT 20,
-lastUpdatedAt TEXT DEFAULT CURRENT_TIMESTAMP
+lastUpdatedAt TEXT DEFAULT CURRENT_TIMESTAMP,
+openAiPrompt TEXT DEFAULT NULL
 `,
 migrateTable: async function () {
 console.log(`\x1b[34m[MIGRATING]\x1b[0m Checking for Workspace migrations`);
@@ -35,6 +37,11 @@
 execCmd: `ALTER TABLE ${this.tablename} ADD COLUMN openAiTemp REAL DEFAULT NULL`,
 doif: false,
 },
+{
+colName: "openAiPrompt",
+execCmd: `ALTER TABLE ${this.tablename} ADD COLUMN openAiPrompt TEXT DEFAULT NULL`,
+doif: false,
+},
 {
 colName: "id",
 execCmd: `CREATE TRIGGER IF NOT EXISTS Trg_LastUpdated AFTER UPDATE ON ${this.tablename}
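
The new migrations entry only declares the column change; the model's migrateTable routine, whose body sits mostly outside this hunk, is what applies it. A rough sketch of the idea, with a hypothetical db handle standing in for the project's actual SQLite helper:

// Hypothetical sketch only -- not the repository's real migrateTable implementation.
// `db` stands in for whatever SQLite handle the server uses.
async function addMissingColumns(db, tablename, migrations) {
  // Columns currently present on the table.
  const existing = (await db.all(`PRAGMA table_info(${tablename})`)).map(
    (col) => col.name
  );
  for (const { colName, execCmd } of migrations) {
    if (existing.includes(colName)) continue; // already migrated
    // e.g. ALTER TABLE <tablename> ADD COLUMN openAiPrompt TEXT DEFAULT NULL
    await db.exec(execCmd);
  }
}

(The doif flag on each entry is omitted from this sketch.)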

server/utils/chats

@@ -148,7 +148,16 @@ async function chatWithWorkspace(workspace, message, chatMode = "chat") {
 };
 }
 }
+
+function chatPrompt(workspace) {
+return (
+workspace?.openAiPrompt ??
+"Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+);
+}
+
 module.exports = {
 convertToChatHistory,
 chatWithWorkspace,
+chatPrompt,
 };

server/utils/vectorDbProviders/chroma

@@ -8,6 +8,7 @@ const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
 const { toChunks, curateSources } = require("../../helpers");
+const { chatPrompt } = require("../../chats");
 
 const Chroma = {
 name: "Chroma",
@@ -303,7 +304,7 @@
 { collectionName: namespace, url: process.env.CHROMA_ENDPOINT }
 );
 const model = this.llm({
-temperature: workspace?.openAiTemp,
+temperature: workspace?.openAiTemp ?? 0.7,
 });
 const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
@@ -347,7 +348,7 @@
 );
 const prompt = {
 role: "system",
-content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+content: `${chatPrompt(workspace)}
 Context:
 ${contextTexts
 .map((text, i) => {

server/utils/vectorDbProviders/lance

@@ -5,6 +5,7 @@ const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter");
 const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
+const { chatPrompt } = require("../../chats");
 
 // Since we roll our own results for prompting we
 // have to manually curate sources as well.
@@ -260,7 +261,7 @@
 );
 const prompt = {
 role: "system",
-content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+content: `${chatPrompt(workspace)}
 Context:
 ${contextTexts
 .map((text, i) => {
@@ -309,7 +310,7 @@
 );
 const prompt = {
 role: "system",
-content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+content: `${chatPrompt(workspace)}
 Context:
 ${contextTexts
 .map((text, i) => {

server/utils/vectorDbProviders/pinecone

@@ -10,6 +10,7 @@ const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
 const { toChunks, curateSources } = require("../../helpers");
+const { chatPrompt } = require("../../chats");
 
 const Pinecone = {
 name: "Pinecone",
@@ -278,7 +279,7 @@
 });
 const model = this.llm({
-temperature: workspace?.openAiTemp,
+temperature: workspace?.openAiTemp ?? 0.7,
 });
 const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
 k: 5,
@@ -318,14 +319,15 @@
 );
 const prompt = {
 role: "system",
-content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
-Context:
-${contextTexts
-.map((text, i) => {
-return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-})
-.join("")}`,
+content: `${chatPrompt(workspace)}
+Context:
+${contextTexts
+.map((text, i) => {
+return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+})
+.join("")}`,
 };
 const memory = [prompt, ...chatHistory, { role: "user", content: input }];
 const responseText = await this.getChatCompletion(this.openai(), memory, {