from typing import List, Literal

from ea_chatbot.config import Settings
from ea_chatbot.graph.prompts.query_analyzer import QUERY_ANALYZER_PROMPT
from ea_chatbot.graph.state import AgentState
from ea_chatbot.schemas import QueryAnalysis
from ea_chatbot.utils.llm_factory import get_llm_model
from ea_chatbot.utils.logging import get_logger, LangChainLoggingHandler


def query_analyzer_node(state: AgentState) -> dict:
    """Analyze the user's question and determine the next course of action.

    Runs the query-analyzer LLM (configured via ``Settings.query_analyzer_llm``)
    with structured output bound to the ``QueryAnalysis`` schema, then returns
    a partial state update for the graph.

    Args:
        state: Graph state; reads ``state["question"]`` and optionally
            ``state["messages"]`` (conversation history) and ``state["summary"]``.

    Returns:
        A dict with:
            - ``"analysis"``: the ``QueryAnalysis`` fields as a plain dict,
              with ``"next_action"`` removed (it is surfaced separately);
            - ``"next_action"``: the routing decision from the analysis, or
              ``"clarify"`` if analysis failed;
            - ``"iterations"``: reset to 0.
    """
    question = state["question"]
    history = state.get("messages", [])
    summary = state.get("summary", "")

    # Keep last 3 turns (6 messages) to bound prompt size.
    history = history[-6:]

    settings = Settings()
    logger = get_logger("query_analyzer")
    # NOTE(review): markup like [italic]...[/italic] suggests a rich-style
    # logger — confirm against ea_chatbot.utils.logging.
    logger.info(f"Analyzing question: [italic]\"{question}\"[/italic]")

    # Initialize the LLM with structured output using the factory.
    # The logging callback tracks LLM usage (tokens/calls) via the logger.
    llm = get_llm_model(
        settings.query_analyzer_llm,
        callbacks=[LangChainLoggingHandler(logger=logger)]
    )
    structured_llm = llm.with_structured_output(QueryAnalysis)

    # Prepare messages using the prompt template.
    messages = QUERY_ANALYZER_PROMPT.format_messages(
        question=question,
        history=history,
        summary=summary
    )

    try:
        # Invoke the structured LLM directly with the list of messages.
        analysis_result = structured_llm.invoke(messages)
        # Re-validate defensively: with_structured_output should already
        # return a QueryAnalysis, but some providers return raw dicts.
        analysis_result = QueryAnalysis.model_validate(analysis_result)

        analysis_dict = analysis_result.model_dump()
        # next_action is routed separately in the returned state, not kept
        # inside the analysis payload.
        analysis_dict.pop("next_action")
        next_action = analysis_result.next_action

        logger.info(f"Analysis complete. Next action: [bold magenta]{next_action}[/bold magenta]")
    except Exception as e:
        # Node-boundary fallback: never crash the graph — log, then route
        # to "clarify" with an error placeholder in the analysis payload.
        logger.error(f"Error during query analysis: {str(e)}")
        analysis_dict = {
            "data_required": [],
            "unknowns": [],
            "ambiguities": [f"Error during analysis: {str(e)}"],
            "conditions": []
        }
        next_action = "clarify"

    return {
        "analysis": analysis_dict,
        "next_action": next_action,
        "iterations": 0
    }