Compare commits

...

2 commits

Author SHA1 Message Date
Timothy Carambat
741ba8f2d0
Agent UI animations ()
* wip agent ui animation

* WIP agent ui revision

* linting

* simplify css

* memoize agent responses

* patch hook memo issue

* dev build

---------

Co-authored-by: shatfield4 <seanhatfield5@gmail.com>
2025-01-28 13:46:59 -08:00
timothycarambat
6192080635 Update NVIDIA branding 2025-01-28 10:27:31 -08:00
17 changed files with 327 additions and 108 deletions
.github/workflows
frontend
  src
    components
      LLMSelection/NvidiaNimOptions
      WorkspaceChat/ChatContainer/ChatHistory
        StatusResponse
          index.jsx
    index.css
    media/llmprovider
    pages
      GeneralSettings/LLMPreference
      OnboardingFlow/Steps
        DataHandling
        LLMPreference
    utils/chat
  tailwind.config.js
server
  models
  utils
    AiProviders/nvidiaNim
    agents
      aibitat/providers
      index.js
    helpers

View file

@@ -6,7 +6,7 @@ concurrency:
on:
push:
branches: ['disable-default-agent-skills'] # put your current branch to create a build. Core team only.
branches: ['agent-ui-animations'] # put your current branch to create a build. Core team only.
paths-ignore:
- '**.md'
- 'cloud-deployments/*'

View file

@@ -5,7 +5,7 @@ import { NVIDIA_NIM_COMMON_URLS } from "@/utils/constants";
import { useState, useEffect } from "react";
/**
* This component is used to select a remote Nvidia NIM model endpoint
* This component is used to select a remote NVIDIA NIM model endpoint
* This is the default component and way to connect to NVIDIA NIM
* as the "managed" provider can only work in the Desktop context.
*/
@@ -26,7 +26,7 @@ export default function RemoteNvidiaNimOptions({ settings }) {
<div className="flex flex-col w-60">
<div className="flex justify-between items-center mb-2">
<label className="text-white text-sm font-semibold">
Nvidia Nim Base URL
NVIDIA Nim Base URL
</label>
{loading ? (
<PreLoader size="6" />
@@ -56,7 +56,7 @@
onBlur={basePath.onBlur}
/>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Enter the URL where Nvidia NIM is running.
Enter the URL where NVIDIA NIM is running.
</p>
</div>
{!settings?.credentialsOnly && (

View file

@@ -0,0 +1,113 @@
import React, { useState } from "react";
import {
CaretDown,
CircleNotch,
Check,
CheckCircle,
} from "@phosphor-icons/react";
export default function StatusResponse({
messages = [],
isThinking = false,
showCheckmark = false,
}) {
const [isExpanded, setIsExpanded] = useState(false);
const currentThought = messages[messages.length - 1];
const previousThoughts = messages.slice(0, -1);
function handleExpandClick() {
if (previousThoughts.length === 0) return;
setIsExpanded(!isExpanded);
}
return (
<div className="flex justify-center items-end w-full">
<div className="py-2 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col relative">
<div
onClick={handleExpandClick}
className={`${!previousThoughts?.length ? "cursor-text" : "cursor-pointer hover:bg-theme-sidebar-item-hover transition-all duration-200"} bg-theme-bg-chat-input rounded-full py-2 px-4 flex items-center gap-x-2 border border-theme-sidebar-border`}
>
{isThinking ? (
<CircleNotch
className="w-4 h-4 text-theme-text-secondary animate-spin"
aria-label="Agent is thinking..."
/>
) : showCheckmark ? (
<CheckCircle
className="w-4 h-4 text-green-400 transition-all duration-300"
aria-label="Thought complete"
/>
) : null}
<div className="flex-1 overflow-hidden">
<span
key={currentThought.content}
className="text-xs text-theme-text-secondary font-mono inline-block w-full animate-thoughtTransition"
>
{currentThought.content}
</span>
</div>
<div className="flex items-center gap-x-2">
{previousThoughts?.length > 0 && (
<div
data-tooltip-id="expand-cot"
data-tooltip-content={
isExpanded ? "Hide thought chain" : "Show thought chain"
}
className="border-none text-theme-text-secondary hover:text-theme-text-primary transition-colors p-1 rounded-full hover:bg-theme-sidebar-item-hover"
aria-label={
isExpanded ? "Hide thought chain" : "Show thought chain"
}
>
<CaretDown
className={`w-4 h-4 transform transition-transform duration-200 ${isExpanded ? "rotate-180" : ""}`}
/>
</div>
)}
</div>
</div>
{/* Previous thoughts dropdown */}
{previousThoughts?.length > 0 && (
<div
key={`cot-list-${currentThought.uuid}`}
className={`mt-2 bg-theme-bg-chat-input backdrop-blur-sm rounded-lg overflow-hidden transition-all duration-300 border border-theme-sidebar-border ${
isExpanded ? "max-h-[300px] opacity-100" : "max-h-0 opacity-0"
}`}
>
<div className="p-2">
{previousThoughts.map((thought, index) => (
<div
key={`cot-${thought.uuid || index}`}
className="flex gap-x-2"
>
<p className="text-xs text-theme-text-secondary font-mono">
{index + 1}/{previousThoughts.length}
</p>
<div
className="flex items-center gap-x-3 p-2 animate-fadeUpIn"
style={{ animationDelay: `${index * 50}ms` }}
>
<span className="text-xs text-theme-text-secondary font-mono">
{thought.content}
</span>
</div>
</div>
))}
{/* Append current thought to the end */}
<div key={`cot-${currentThought.uuid}`} className="flex gap-x-2">
<p className="text-xs text-theme-text-secondary font-mono">
{previousThoughts.length + 1}/{previousThoughts.length + 1}
</p>
<div className="flex items-center gap-x-3 p-2 animate-fadeUpIn">
<span className="text-xs text-theme-text-secondary font-mono">
{currentThought.content}
</span>
</div>
</div>
</div>
</div>
)}
</div>
</div>
);
}
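
For illustration, a minimal sketch of how this component might be mounted. The message objects are invented, but they mirror the statusResponse shape ({ uuid, content }) that the ChatHistory diff below groups into arrays:

// Hypothetical usage — contents and flags are placeholders.
<StatusResponse
  messages={[
    { uuid: "s1", content: "Searching attached documents..." },
    { uuid: "s2", content: "Summarizing 3 matching snippets..." },
  ]}
  isThinking={true}      // spinner while the agent is still working
  showCheckmark={false}  // becomes a green check once a reply lands
/>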

View file

@@ -1,6 +1,7 @@
import React, { useEffect, useRef, useState } from "react";
import { useEffect, useRef, useState, useMemo, useCallback } from "react";
import HistoricalMessage from "./HistoricalMessage";
import PromptReply from "./PromptReply";
import StatusResponse from "./StatusResponse";
import { useManageWorkspaceModal } from "../../../Modals/ManageWorkspace";
import ManageWorkspace from "../../../Modals/ManageWorkspace";
import { ArrowDown } from "@phosphor-icons/react";
@@ -12,6 +13,7 @@ import { useParams } from "react-router-dom";
import paths from "@/utils/paths";
import Appearance from "@/models/appearance";
import useTextSize from "@/hooks/useTextSize";
import { v4 } from "uuid";
export default function ChatHistory({
history = [],
@@ -136,6 +138,42 @@ export default function ChatHistory({
);
};
const compiledHistory = useMemo(
() =>
buildMessages({
workspace,
history,
regenerateAssistantMessage,
saveEditedMessage,
forkThread,
}),
[
workspace,
history,
regenerateAssistantMessage,
saveEditedMessage,
forkThread,
]
);
const lastMessageInfo = useMemo(() => getLastMessageInfo(history), [history]);
const renderStatusResponse = useCallback(
(item, index) => {
const hasSubsequentMessages = index < compiledHistory.length - 1;
return (
<StatusResponse
key={`status-group-${index}`}
messages={item}
isThinking={!hasSubsequentMessages && lastMessageInfo.isAnimating}
showCheckmark={
hasSubsequentMessages ||
(!lastMessageInfo.isAnimating && !lastMessageInfo.isStatusResponse)
}
/>
);
},
[compiledHistory.length, lastMessageInfo]
);
if (history.length === 0 && !hasAttachments) {
return (
<div className="flex flex-col h-full md:mt-0 pb-44 md:pb-40 w-full justify-end items-center">
@@ -176,61 +214,14 @@ export default function ChatHistory({
return (
<div
className={`markdown text-white/80 light:text-theme-text-primary font-light ${textSizeClass} h-full md:h-[83%] pb-[100px] pt-6 md:pt-0 md:pb-20 md:mx-0 overflow-y-scroll flex flex-col justify-start ${
showScrollbar ? "show-scrollbar" : "no-scroll"
}`}
className={`markdown text-white/80 light:text-theme-text-primary font-light ${textSizeClass} h-full md:h-[83%] pb-[100px] pt-6 md:pt-0 md:pb-20 md:mx-0 overflow-y-scroll flex flex-col justify-start ${showScrollbar ? "show-scrollbar" : "no-scroll"}`}
id="chat-history"
ref={chatHistoryRef}
onScroll={handleScroll}
>
{history.map((props, index) => {
const isLastBotReply =
index === history.length - 1 && props.role === "assistant";
if (props?.type === "statusResponse" && !!props.content) {
return <StatusResponse key={props.uuid} props={props} />;
}
if (props.type === "rechartVisualize" && !!props.content) {
return (
<Chartable key={props.uuid} workspace={workspace} props={props} />
);
}
if (isLastBotReply && props.animate) {
return (
<PromptReply
key={props.uuid}
uuid={props.uuid}
reply={props.content}
pending={props.pending}
sources={props.sources}
error={props.error}
workspace={workspace}
closed={props.closed}
/>
);
}
return (
<HistoricalMessage
key={index}
message={props.content}
role={props.role}
workspace={workspace}
sources={props.sources}
feedbackScore={props.feedbackScore}
chatId={props.chatId}
error={props.error}
attachments={props.attachments}
regenerateMessage={regenerateAssistantMessage}
isLastMessage={isLastBotReply}
saveEditedMessage={saveEditedMessage}
forkThread={forkThread}
metrics={props.metrics}
/>
);
})}
{compiledHistory.map((item, index) =>
Array.isArray(item) ? renderStatusResponse(item, index) : item
)}
{showing && (
<ManageWorkspace hideModal={hideModal} providedSlug={workspace.slug} />
)}
@@ -253,21 +244,13 @@ export default function ChatHistory({
);
}
function StatusResponse({ props }) {
return (
<div className="flex justify-center items-end w-full">
<div className="py-2 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col">
<div className="flex gap-x-5">
<span
className={`text-xs inline-block p-2 rounded-lg text-white/60 font-mono whitespace-pre-line`}
>
{props.content}
</span>
</div>
</div>
</div>
);
}
const getLastMessageInfo = (history) => {
const lastMessage = history?.[history.length - 1] || {};
return {
isAnimating: lastMessage?.animate,
isStatusResponse: lastMessage?.type === "statusResponse",
};
};
function WorkspaceChatSuggestions({ suggestions = [], sendSuggestion }) {
if (suggestions.length === 0) return null;
@@ -286,3 +269,78 @@ function WorkspaceChatSuggestions({ suggestions = [], sendSuggestion }) {
</div>
);
}
/**
* Builds the history of messages for the chat.
* This is mostly useful for rendering the history in a way that is easy to understand,
* as well as compensating for agent thinking and other messages that are not part of the
* history but are still part of the chat.
*
* @param {Object} param0 - The parameters for building the messages.
* @param {Array} param0.history - The history of messages.
* @param {Object} param0.workspace - The workspace object.
* @param {Function} param0.regenerateAssistantMessage - The function to regenerate the assistant message.
* @param {Function} param0.saveEditedMessage - The function to save the edited message.
* @param {Function} param0.forkThread - The function to fork the thread.
* @returns {Array} The compiled history of messages.
*/
function buildMessages({
history,
workspace,
regenerateAssistantMessage,
saveEditedMessage,
forkThread,
}) {
return history.reduce((acc, props, index) => {
const isLastBotReply =
index === history.length - 1 && props.role === "assistant";
if (props?.type === "statusResponse" && !!props.content) {
if (acc.length > 0 && Array.isArray(acc[acc.length - 1])) {
acc[acc.length - 1].push(props);
} else {
acc.push([props]);
}
return acc;
}
if (props.type === "rechartVisualize" && !!props.content) {
acc.push(
<Chartable key={props.uuid} workspace={workspace} props={props} />
);
} else if (isLastBotReply && props.animate) {
acc.push(
<PromptReply
key={props.uuid || v4()}
uuid={props.uuid}
reply={props.content}
pending={props.pending}
sources={props.sources}
error={props.error}
workspace={workspace}
closed={props.closed}
/>
);
} else {
acc.push(
<HistoricalMessage
key={index}
message={props.content}
role={props.role}
workspace={workspace}
sources={props.sources}
feedbackScore={props.feedbackScore}
chatId={props.chatId}
error={props.error}
attachments={props.attachments}
regenerateMessage={regenerateAssistantMessage}
isLastMessage={isLastBotReply}
saveEditedMessage={saveEditedMessage}
forkThread={forkThread}
metrics={props.metrics}
/>
);
}
return acc;
}, []);
}
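
A hedged illustration of the reducer's output, with invented history entries; the grouping follows the code above — consecutive statusResponse messages collapse into a single array (later handed to renderStatusResponse), while every other message becomes a React element directly:

// Hypothetical input/output — uuids and contents are placeholders.
const history = [
  { type: "statusResponse", content: "Thinking...", uuid: "s1" },
  { type: "statusResponse", content: "Calling a tool...", uuid: "s2" },
  { role: "assistant", content: "Done!", uuid: "m1" },
];
// buildMessages({ history, workspace, ... }) ≈ [
//   [s1, s2],                  // grouped thoughts -> one <StatusResponse />
//   <HistoricalMessage ... />, // ordinary message -> rendered as-is
// ];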

View file

@@ -583,22 +583,6 @@ dialog::backdrop {
animation: slideDown 0.3s ease-out forwards;
}
@keyframes slideUp {
from {
max-height: 400px;
opacity: 1;
}
to {
max-height: 0;
opacity: 0;
}
}
.slide-up {
animation: slideUp 0.3s ease-out forwards;
}
.input-label {
@apply text-[14px] font-bold text-white;
}
@@ -946,3 +930,51 @@ does not extend the close button beyond the viewport. */
.rti--container {
@apply !bg-theme-settings-input-bg !text-white !placeholder-white !placeholder-opacity-60 !text-sm !rounded-lg !p-2.5;
}
@keyframes fadeUpIn {
0% {
opacity: 0;
transform: translateY(5px);
}
100% {
opacity: 1;
transform: translateY(0);
}
}
.animate-fadeUpIn {
animation: fadeUpIn 0.3s ease-out forwards;
}
@keyframes bounce-subtle {
0%,
100% {
transform: translateY(0);
}
50% {
transform: translateY(-2px);
}
}
@keyframes thoughtTransition {
0% {
opacity: 0;
transform: translateY(10px);
}
30% {
opacity: 1;
transform: translateY(0);
}
100% {
opacity: 1;
transform: translateY(0);
}
}
.animate-thoughtTransition {
animation: thoughtTransition 0.5s ease-out forwards;
}

Binary file not shown.

Before: 64 KiB
After: 25 KiB

View file

@@ -96,12 +96,12 @@ export const AVAILABLE_LLM_PROVIDERS = [
requiredConfig: ["GeminiLLMApiKey"],
},
{
name: "Nvidia NIM",
name: "NVIDIA NIM",
value: "nvidia-nim",
logo: NvidiaNimLogo,
options: (settings) => <NvidiaNimOptions settings={settings} />,
description:
"Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
"Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
requiredConfig: ["NvidiaNimLLMBasePath"],
},
{

View file

@@ -78,9 +78,9 @@ export const LLM_SELECTION_PRIVACY = {
logo: GeminiLogo,
},
"nvidia-nim": {
name: "Nvidia NIM",
name: "NVIDIA NIM",
description: [
"Your model and chats are only accessible on the machine running the Nvidia NIM service",
"Your model and chats are only accessible on the machine running the NVIDIA NIM",
],
logo: NvidiaNimLogo,
},

View file

@@ -92,12 +92,12 @@ const LLMS = [
description: "Google's largest and most capable AI model",
},
{
name: "Nvidia NIM",
name: "NVIDIA NIM",
value: "nvidia-nim",
logo: NvidiaNimLogo,
options: (settings) => <NvidiaNimOptions settings={settings} />,
description:
"Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
"Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
},
{
name: "HuggingFace",

View file

@@ -99,7 +99,7 @@ export default function handleSocketResponse(event, setChatHistory) {
sources: [],
closed: true,
error: null,
animate: false,
animate: data?.animate || false,
pending: false,
},
];
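
A hedged sketch of the parsed websocket payload this handler now honors; every field except animate is an assumption based on the object literal above:

// Hypothetical parsed payload — field names other than `animate`
// are assumptions; `animate` is read as data?.animate above.
const data = {
  uuid: "m1",
  textResponse: "Here is the final answer.",
  animate: true, // previously hardcoded to false when closing the message
};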

View file

@@ -17,6 +17,7 @@ export default function handleChat(
sources = [],
error,
close,
animate = false,
chatId = null,
action = null,
metrics = {},
@@ -34,7 +35,7 @@
sources,
closed: true,
error,
animate: false,
animate,
pending: false,
metrics,
},
@@ -47,7 +48,7 @@
sources,
closed: true,
error,
animate: false,
animate,
pending: false,
metrics,
});

View file

@@ -141,7 +141,10 @@ export default {
},
animation: {
sweep: "sweep 0.5s ease-in-out",
"pulse-glow": "pulse-glow 1.5s infinite"
"pulse-glow": "pulse-glow 1.5s infinite",
'fade-in': 'fade-in 0.3s ease-out',
'slide-up': 'slide-up 0.4s ease-out forwards',
'bounce-subtle': 'bounce-subtle 2s ease-in-out infinite'
},
keyframes: {
sweep: {
@@ -175,6 +178,18 @@
boxShadow: "0 0 0 rgba(255, 255, 255, 0.0)",
backgroundColor: "rgba(255, 255, 255, 0.0)"
}
},
'fade-in': {
'0%': { opacity: '0' },
'100%': { opacity: '1' }
},
'slide-up': {
'0%': { transform: 'translateY(10px)', opacity: '0' },
'100%': { transform: 'translateY(0)', opacity: '1' }
},
'bounce-subtle': {
'0%, 100%': { transform: 'translateY(0)' },
'50%': { transform: 'translateY(-2px)' }
}
}
}
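
These keyframes register as Tailwind utility classes (animate-fade-in, animate-slide-up, animate-bounce-subtle); a minimal, invented usage example:

// Hypothetical JSX — element and copy are placeholders.
<div className="animate-fade-in">
  <span className="animate-bounce-subtle">●</span> Agent is working...
</div>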

View file

@@ -554,7 +554,7 @@ const SystemSettings = {
XAIApiKey: !!process.env.XAI_LLM_API_KEY,
XAIModelPref: process.env.XAI_LLM_MODEL_PREF,
// Nvidia NIM Keys
// NVIDIA NIM Keys
NvidiaNimLLMBasePath: process.env.NVIDIA_NIM_LLM_BASE_PATH,
NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,

View file

@@ -10,7 +10,7 @@ const {
class NvidiaNimLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
throw new Error("No Nvidia NIM API Base Path was set.");
throw new Error("No NVIDIA NIM API Base Path was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.nvidiaNim = new OpenAIApi({
@@ -85,7 +85,7 @@ class NvidiaNimLLM {
static promptWindowLimit(_modelName) {
const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No Nvidia NIM token context limit was set.");
throw new Error("No NVIDIA NIM token context limit was set.");
return Number(limit);
}
@@ -94,7 +94,7 @@
promptWindowLimit() {
const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No Nvidia NIM token context limit was set.");
throw new Error("No NVIDIA NIM token context limit was set.");
return Number(limit);
}
@@ -154,7 +154,7 @@ class NvidiaNimLLM {
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
`NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
@@ -190,7 +190,7 @@ class NvidiaNimLLM {
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
`NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(

View file

@@ -37,9 +37,9 @@ class NvidiaNimProvider extends InheritMultiple([Provider, UnTooled]) {
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Nvidia NIM chat: No results!");
throw new Error("NVIDIA NIM chat: No results!");
if (result.choices.length === 0)
throw new Error("Nvidia NIM chat: No results length!");
throw new Error("NVIDIA NIM chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {

View file

@@ -180,7 +180,7 @@ class AgentHandler {
case "nvidia-nim":
if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
throw new Error(
"Nvidia NIM base path must be provided to use agents."
"NVIDIA NIM base path must be provided to use agents."
);
break;

View file

@@ -550,8 +550,8 @@ async function getNvidiaNimModels(basePath = null) {
return { models, error: null };
} catch (e) {
console.error(`Nvidia NIM:getNvidiaNimModels`, e.message);
return { models: [], error: "Could not fetch Nvidia NIM Models" };
console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
return { models: [], error: "Could not fetch NVIDIA NIM Models" };
}
}
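
A hedged usage sketch of the helper above — the signature comes from the hunk header and the return shape from the visible return statements; the base path value is invented:

// Hypothetical caller of getNvidiaNimModels.
const { models, error } = await getNvidiaNimModels("http://localhost:8000/v1");
if (error) console.error(error); // e.g. "Could not fetch NVIDIA NIM Models"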