chore(graph): Relocate QueryAnalysis schema and update existing tests for Orchestrator architecture

This commit is contained in:
Yunxiao Xu
2026-02-23 05:58:58 -08:00
parent ad7845cc6a
commit f4d09c07c4
7 changed files with 199 additions and 227 deletions

View File

@@ -1,24 +1,27 @@
import pytest
from unittest.mock import MagicMock, patch
from langchain_core.messages import HumanMessage, AIMessage
from ea_chatbot.graph.nodes.planner import planner_node
from ea_chatbot.graph.nodes.researcher import researcher_node
from ea_chatbot.graph.nodes.summarizer import summarizer_node
from ea_chatbot.schemas import TaskPlanResponse
from ea_chatbot.schemas import ChecklistResponse, ChecklistTask
from langchain_core.messages import HumanMessage, AIMessage
@pytest.fixture
def mock_state_with_history():
    """Graph state with two prior conversational turns, using the
    Orchestrator-era state schema (``checklist`` / ``current_step``
    instead of the old ``plan`` / ``code_output`` fields).

    NOTE(review): reconstructed from a diff whose +/- markers were
    stripped; the interleaved old/new lines were syntactically invalid
    (missing commas, duplicate dict keys). Field values follow the new
    assertions (summary mentions NJ 2024, question asks for a county
    breakdown, history has exactly 2 messages) — confirm against the
    committed file.
    """
    return {
        # Exactly two messages: downstream tests assert len(history) == 2.
        "messages": [
            HumanMessage(content="What about NJ?"),
            AIMessage(content="NJ has 9 million voters."),
        ],
        "question": "Show me the breakdown by county for 2024",
        # QueryAnalysis-shaped dict (schema relocated by this commit).
        "analysis": {
            "data_required": ["2024 results", "New Jersey"],
            "unknowns": [],
            "ambiguities": [],
            "conditions": [],
        },
        "next_action": "plan",
        "summary": "The user is asking about NJ 2024 results.",
        # Orchestrator fields: empty checklist, execution not yet started.
        "checklist": [],
        "current_step": 0,
    }
@patch("ea_chatbot.graph.nodes.planner.get_llm_model")
@@ -31,49 +34,17 @@ def test_planner_uses_history_and_summary(mock_prompt, mock_get_summary, mock_ge
mock_structured_llm = MagicMock()
mock_llm_instance.with_structured_output.return_value = mock_structured_llm
mock_structured_llm.invoke.return_value = TaskPlanResponse(
mock_structured_llm.invoke.return_value = ChecklistResponse(
goal="goal",
reflection="reflection",
context={
"initial_context": "context",
"assumptions": [],
"constraints": []
},
steps=["Step 1: test"]
checklist=[ChecklistTask(task="Step 1: test", worker="data_analyst")]
)
planner_node(mock_state_with_history)
mock_prompt.format_messages.assert_called_once()
kwargs = mock_prompt.format_messages.call_args[1]
assert kwargs["question"] == "What about in New Jersey?"
assert kwargs["summary"] == mock_state_with_history["summary"]
assert len(kwargs["history"]) == 2
@patch("ea_chatbot.graph.nodes.researcher.get_llm_model")
@patch("ea_chatbot.graph.nodes.researcher.RESEARCHER_PROMPT")
def test_researcher_uses_history_and_summary(mock_prompt, mock_get_llm, mock_state_with_history):
    """The researcher node must forward the question, the running summary,
    and the full message history to its prompt template."""
    # The node fetches its LLM via the factory; hand it a stand-in.
    mock_get_llm.return_value = MagicMock()

    researcher_node(mock_state_with_history)

    # The prompt should be formatted exactly once, with state-derived kwargs.
    mock_prompt.format_messages.assert_called_once()
    passed = mock_prompt.format_messages.call_args.kwargs
    assert passed["question"] == "What about in New Jersey?"
    assert passed["summary"] == mock_state_with_history["summary"]
    assert len(passed["history"]) == 2
@patch("ea_chatbot.graph.nodes.summarizer.get_llm_model")
@patch("ea_chatbot.graph.nodes.summarizer.SUMMARIZER_PROMPT")
def test_summarizer_uses_history_and_summary(mock_prompt, mock_get_llm, mock_state_with_history):
    """The summarizer node must forward the question, the running summary,
    and the full message history to its prompt template."""
    # Substitute a dummy LLM so the node never touches a real model.
    mock_get_llm.return_value = MagicMock()

    summarizer_node(mock_state_with_history)

    # Exactly one prompt-format call, fed from the graph state.
    mock_prompt.format_messages.assert_called_once()
    passed = mock_prompt.format_messages.call_args.kwargs
    assert passed["question"] == "What about in New Jersey?"
    assert passed["summary"] == mock_state_with_history["summary"]
    assert len(passed["history"]) == 2
# Verify history and summary were passed to prompt format
# We check the arguments passed to the mock_prompt.format_messages
call_args = mock_prompt.format_messages.call_args[1]
assert call_args["summary"] == "The user is asking about NJ 2024 results."
assert len(call_args["history"]) == 2
assert "breakdown by county" in call_args["question"]