Refactor: Move backend files to backend/ directory and split .gitignore

This commit is contained in:
Yunxiao Xu
2026-02-11 17:40:44 -08:00
parent 48924affa0
commit 7a69133e26
96 changed files with 144 additions and 176 deletions

View File

@@ -0,0 +1,73 @@
from typing import List, Literal
from pydantic import BaseModel, Field
from ea_chatbot.graph.state import AgentState
from ea_chatbot.config import Settings
from ea_chatbot.utils.llm_factory import get_llm_model
from ea_chatbot.utils.logging import get_logger, LangChainLoggingHandler
from ea_chatbot.graph.prompts.query_analyzer import QUERY_ANALYZER_PROMPT
class QueryAnalysis(BaseModel):
    """Analysis of the user's query.

    Structured-output schema bound to the LLM via ``with_structured_output``
    in ``query_analyzer_node``. NOTE: the ``Field`` descriptions below are
    surfaced to the model as part of the output schema, so they steer what
    the LLM puts in each slot — treat them as prompt text, not mere docs.
    """
    # Concrete data points / entities the question references.
    data_required: List[str] = Field(description="List of data points or entities mentioned (e.g., ['2024 results', 'Florida']).")
    # The target information the user ultimately wants answered.
    unknowns: List[str] = Field(description="List of target information the user wants to know or needed for final answer (e.g., 'who won', 'total votes').")
    # Only hard blockers belong here; anything defaultable must be omitted
    # so the graph does not over-route to 'clarify'.
    ambiguities: List[str] = Field(description="List of CRITICAL missing details that prevent ANY analysis. Do NOT include database names or plot types if defaults can be used.")
    # Filters/constraints, including those resolved from conversation history.
    conditions: List[str] = Field(description="List of any filters or constraints (e.g., ['year=2024', 'state=Florida']). Include context resolved from history.")
    # Routing decision consumed by the graph; popped out of the analysis
    # payload in query_analyzer_node and returned separately.
    next_action: Literal["plan", "clarify", "research"] = Field(description="The next action to take. 'plan' for data analysis (even with defaults), 'research' for general knowledge, or 'clarify' ONLY for critical ambiguities.")
def query_analyzer_node(state: AgentState) -> dict:
    """Analyze the user's question and determine the next course of action.

    Reads the question, the recent message history, and the running summary
    from the graph state, asks the configured LLM for a structured
    ``QueryAnalysis``, and returns a state update with the analysis fields
    plus the routing decision. On any failure the node degrades to the
    ``clarify`` action so the graph asks the user instead of crashing.

    Args:
        state: Current agent state. Must contain ``"question"``; may contain
            ``"messages"`` and ``"summary"``.

    Returns:
        dict with keys ``"analysis"`` (analysis fields minus ``next_action``),
        ``"next_action"`` (routing decision), and ``"iterations"`` (reset to 0).
    """
    question = state["question"]
    summary = state.get("summary", "")
    # Keep last 3 turns (6 messages)
    recent_history = state.get("messages", [])[-6:]

    settings = Settings()
    logger = get_logger("query_analyzer")
    logger.info(f"Analyzing question: [italic]\"{question}\"[/italic]")

    # Build the model via the factory and attach the logging callback so
    # LLM usage for this node is tracked.
    model = get_llm_model(
        settings.query_analyzer_llm,
        callbacks=[LangChainLoggingHandler(logger=logger)],
    )
    analyzer = model.with_structured_output(QueryAnalysis)

    # Render the prompt template into a message list for the model.
    prompt_messages = QUERY_ANALYZER_PROMPT.format_messages(
        question=question,
        history=recent_history,
        summary=summary,
    )

    try:
        # Invoke the structured LLM with the rendered messages; re-validate
        # the result in case the provider hands back a plain dict.
        raw_result = analyzer.invoke(prompt_messages)
        analysis = QueryAnalysis.model_validate(raw_result)
        next_action = analysis.next_action
        # next_action is routed separately, so strip it from the payload.
        analysis_dict = analysis.model_dump()
        analysis_dict.pop("next_action")
        logger.info(f"Analysis complete. Next action: [bold magenta]{next_action}[/bold magenta]")
    except Exception as e:
        # Deliberate broad catch: any failure (bad LLM output, validation
        # error, transport error) routes to 'clarify' instead of crashing.
        logger.error(f"Error during query analysis: {str(e)}")
        analysis_dict = {
            "data_required": [],
            "unknowns": [],
            "ambiguities": [f"Error during analysis: {str(e)}"],
            "conditions": [],
        }
        next_action = "clarify"

    return {
        "analysis": analysis_dict,
        "next_action": next_action,
        "iterations": 0,
    }