import api from "./api"
|
|
|
|
/** A single chat message as rendered in the conversation view. */
export interface MessageResponse {
  id: string                  // client-generated for streamed messages (e.g. `user-<ts>`)
  role: "user" | "assistant"
  content: string             // message text; grown incrementally while streaming
  created_at: string          // ISO-8601 timestamp
  plots?: string[] // base64 encoded plots
  steps?: string[] // reasoning steps
}
|
|
|
|
/**
 * One server-sent event emitted by the streaming chat backend.
 * Observed `type` values in this file: "on_chat_model_stream",
 * "on_chain_start", "on_chain_end", "done", "error".
 */
export interface ChatEvent {
  type: string
  name?: string // emitting node name on chain start/end events
  node?: string // emitting node name on model-stream events
  // Payload whose shape varies per event type (token chunk, node output,
  // plots, error message). NOTE(review): kept as `any` because call sites
  // drill into it optionally; confirm exact schema against the backend.
  data?: any
}
|
|
|
|
/** Callbacks invoked while streamChat consumes the SSE stream. */
export interface StreamCallbacks {
  // Called with the full, updated message list after every applied event.
  onMessageUpdate: (messages: MessageResponse[]) => void
  // Called when the backend emits a "done" event.
  onDone?: () => void
  // Called with a human-readable message on stream or connection errors.
  onError?: (error: string) => void
}
|
|
|
|
export const ChatService = {
|
|
/**
|
|
* Parse a raw SSE chunk into one or more ChatEvent objects.
|
|
* Handles partial lines by returning the processed events and any remaining buffer.
|
|
*/
|
|
parseSSEBuffer(buffer: string): { events: ChatEvent[], remaining: string } {
|
|
const events: ChatEvent[] = []
|
|
const lines = buffer.split("\n")
|
|
|
|
// The last element might be a partial line if it doesn't end with \n
|
|
const remaining = buffer.endsWith("\n") ? "" : lines.pop() || ""
|
|
|
|
for (const line of lines) {
|
|
if (line.startsWith("data: ")) {
|
|
const dataStr = line.slice(6).trim()
|
|
if (!dataStr) continue
|
|
|
|
try {
|
|
const event = JSON.parse(dataStr)
|
|
events.push(event)
|
|
} catch (err) {
|
|
console.error("Failed to parse SSE event JSON:", err, dataStr)
|
|
}
|
|
}
|
|
}
|
|
|
|
return { events, remaining }
|
|
},
|
|
|
|
/**
|
|
* Legacy method for backward compatibility in tests
|
|
*/
|
|
parseSSEChunk(chunk: string): ChatEvent[] {
|
|
return this.parseSSEBuffer(chunk).events
|
|
},
|
|
|
|
/**
|
|
* Update a list of messages based on a new ChatEvent.
|
|
* This is a pure function designed for use with React state updates.
|
|
*/
|
|
updateMessagesWithEvent(messages: MessageResponse[], event: ChatEvent): MessageResponse[] {
|
|
const { type, name, node, data } = event
|
|
|
|
// 1. Handle incremental LLM chunks for terminal nodes
|
|
if (type === "on_chat_model_stream" && (node === "summarizer" || node === "researcher" || node === "clarification")) {
|
|
const chunk = data?.chunk?.content || ""
|
|
if (!chunk) return messages
|
|
|
|
const newMessages = [...messages]
|
|
const lastMsgIndex = newMessages.length - 1
|
|
const lastMsg = { ...newMessages[lastMsgIndex] }
|
|
|
|
if (lastMsg && lastMsg.role === "assistant") {
|
|
lastMsg.content = (lastMsg.content || "") + chunk
|
|
newMessages[lastMsgIndex] = lastMsg
|
|
}
|
|
return newMessages
|
|
}
|
|
|
|
// 2. Handle final node outputs
|
|
if (type === "on_chain_end") {
|
|
const newMessages = [...messages]
|
|
const lastMsgIndex = newMessages.length - 1
|
|
const lastMsg = { ...newMessages[lastMsgIndex] }
|
|
|
|
if (!lastMsg || lastMsg.role !== "assistant") return messages
|
|
|
|
// Terminal nodes final text
|
|
if (name === "summarizer" || name === "researcher" || name === "clarification") {
|
|
const messages_list = data?.output?.messages
|
|
const msg = messages_list ? messages_list[messages_list.length - 1]?.content : null
|
|
|
|
if (msg) {
|
|
lastMsg.content = msg
|
|
newMessages[lastMsgIndex] = lastMsg
|
|
return newMessages
|
|
}
|
|
}
|
|
|
|
// Plots from executor
|
|
if (name === "executor" && data?.encoded_plots) {
|
|
lastMsg.plots = [...(lastMsg.plots || []), ...data.encoded_plots]
|
|
// Filter out the 'active' step and replace with 'complete'
|
|
const filteredSteps = (lastMsg.steps || []).filter(s => s !== "Performing data analysis...");
|
|
lastMsg.steps = [...filteredSteps, "Data analysis and visualization complete."]
|
|
newMessages[lastMsgIndex] = lastMsg
|
|
return newMessages
|
|
}
|
|
|
|
// Status for intermediate nodes (completion)
|
|
const statusMap: Record<string, string> = {
|
|
"query_analyzer": "Query analysis complete.",
|
|
"planner": "Strategic plan generated.",
|
|
"coder": "Analysis code generated."
|
|
}
|
|
|
|
if (name && statusMap[name]) {
|
|
// Find and replace the active status if it exists
|
|
const activeStatus = name === "query_analyzer" ? "Analyzing query..." :
|
|
name === "planner" ? "Generating strategic plan..." :
|
|
name === "coder" ? "Writing analysis code..." : null;
|
|
|
|
let filteredSteps = lastMsg.steps || [];
|
|
if (activeStatus) {
|
|
filteredSteps = filteredSteps.filter(s => s !== activeStatus);
|
|
}
|
|
|
|
lastMsg.steps = [...filteredSteps, statusMap[name]]
|
|
newMessages[lastMsgIndex] = lastMsg
|
|
return newMessages
|
|
}
|
|
}
|
|
|
|
// 3. Handle node start events for progress feedback
|
|
if (type === "on_chain_start") {
|
|
const startStatusMap: Record<string, string> = {
|
|
"query_analyzer": "Analyzing query...",
|
|
"planner": "Generating strategic plan...",
|
|
"coder": "Writing analysis code...",
|
|
"executor": "Performing data analysis..."
|
|
}
|
|
|
|
if (name && startStatusMap[name]) {
|
|
const newMessages = [...messages]
|
|
const lastMsgIndex = newMessages.length - 1
|
|
const lastMsg = { ...newMessages[lastMsgIndex] }
|
|
|
|
if (lastMsg && lastMsg.role === "assistant") {
|
|
// Avoid duplicate start messages
|
|
if (!(lastMsg.steps || []).includes(startStatusMap[name])) {
|
|
lastMsg.steps = [...(lastMsg.steps || []), startStatusMap[name]]
|
|
newMessages[lastMsgIndex] = lastMsg
|
|
return newMessages
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return messages
|
|
},
|
|
|
|
/**
|
|
* Stream agent execution events via SSE.
|
|
* Uses fetch + ReadableStream because backend uses POST.
|
|
*/
|
|
async streamChat(
|
|
message: string,
|
|
threadId: string,
|
|
currentMessages: MessageResponse[],
|
|
callbacks: StreamCallbacks
|
|
) {
|
|
const { onMessageUpdate, onDone, onError } = callbacks
|
|
|
|
// Add user message and a placeholder assistant message
|
|
let activeMessages: MessageResponse[] = [
|
|
...currentMessages,
|
|
{
|
|
id: `user-${Date.now()}`,
|
|
role: "user",
|
|
content: message,
|
|
created_at: new Date().toISOString()
|
|
},
|
|
{
|
|
id: `assistant-${Date.now()}`,
|
|
role: "assistant",
|
|
content: "",
|
|
created_at: new Date().toISOString(),
|
|
plots: []
|
|
}
|
|
]
|
|
onMessageUpdate(activeMessages)
|
|
|
|
let buffer = ""
|
|
try {
|
|
const API_URL = import.meta.env.VITE_API_URL || ""
|
|
const response = await fetch(`${API_URL}/api/v1/chat/stream`, {
|
|
method: "POST",
|
|
headers: {
|
|
"Content-Type": "application/json",
|
|
},
|
|
body: JSON.stringify({
|
|
message,
|
|
thread_id: threadId
|
|
}),
|
|
credentials: "include"
|
|
})
|
|
|
|
if (!response.ok) {
|
|
throw new Error(`Streaming failed: ${response.statusText}`)
|
|
}
|
|
|
|
const reader = response.body?.getReader()
|
|
if (!reader) throw new Error("No readable stream in response body")
|
|
|
|
const decoder = new TextDecoder()
|
|
|
|
while (true) {
|
|
const { done, value } = await reader.read()
|
|
if (done) break
|
|
|
|
buffer += decoder.decode(value, { stream: true })
|
|
const { events, remaining } = this.parseSSEBuffer(buffer)
|
|
buffer = remaining
|
|
|
|
for (const event of events) {
|
|
if (event.type === "done") {
|
|
if (onDone) onDone()
|
|
continue
|
|
}
|
|
if (event.type === "error") {
|
|
if (onError) onError(event.data?.message || "Unknown error")
|
|
continue
|
|
}
|
|
|
|
activeMessages = this.updateMessagesWithEvent(activeMessages, event)
|
|
onMessageUpdate(activeMessages)
|
|
}
|
|
}
|
|
} catch (err: any) {
|
|
console.error("Streaming error:", err)
|
|
if (onError) onError(err.message || "Connection failed")
|
|
}
|
|
},
|
|
|
|
async listConversations() {
|
|
const response = await api.get("/conversations")
|
|
return response.data
|
|
},
|
|
|
|
async createConversation(name: string = "New Conversation") {
|
|
const response = await api.post("/conversations", { name })
|
|
return response.data
|
|
},
|
|
|
|
async getMessages(conversationId: string) {
|
|
const response = await api.get(`/conversations/${conversationId}/messages`)
|
|
return response.data
|
|
}
|
|
}
|