# File: ea-chatbot-lg/backend/tests/test_multi_turn_planner_researcher.py
# (51 lines, 2.0 KiB, Python)
import pytest
from unittest.mock import MagicMock, patch
from ea_chatbot.graph.nodes.planner import planner_node
from ea_chatbot.schemas import ChecklistResponse, ChecklistTask
from langchain_core.messages import HumanMessage, AIMessage
@pytest.fixture
def mock_state_with_history():
return {
"messages": [
HumanMessage(content="What about NJ?"),
AIMessage(content="NJ has 9 million voters.")
],
"question": "Show me the breakdown by county for 2024",
"analysis": {
"data_required": ["2024 results", "New Jersey"],
"unknowns": [],
"ambiguities": [],
"conditions": []
},
"next_action": "plan",
"summary": "The user is asking about NJ 2024 results.",
"checklist": [],
"current_step": 0
}
@patch("ea_chatbot.graph.nodes.planner.get_llm_model")
@patch("ea_chatbot.utils.database_inspection.get_data_summary")
@patch("ea_chatbot.graph.nodes.planner.PLANNER_PROMPT")
def test_planner_uses_history_and_summary(mock_prompt, mock_get_summary, mock_get_llm, mock_state_with_history):
mock_get_summary.return_value = "Data summary"
mock_llm_instance = MagicMock()
mock_get_llm.return_value = mock_llm_instance
mock_structured_llm = MagicMock()
mock_llm_instance.with_structured_output.return_value = mock_structured_llm
mock_structured_llm.invoke.return_value = ChecklistResponse(
goal="goal",
reflection="reflection",
checklist=[ChecklistTask(task="Step 1: test", worker="data_analyst")]
)
planner_node(mock_state_with_history)
# Verify history and summary were passed to prompt format
# We check the arguments passed to the mock_prompt.format_messages
call_args = mock_prompt.format_messages.call_args[1]
assert call_args["summary"] == "The user is asking about NJ 2024 results."
assert len(call_args["history"]) == 2
assert "breakdown by county" in call_args["question"]