feat: implement mvp with email-first login flow and langgraph architecture

This commit is contained in:
Yunxiao Xu
2026-02-09 23:22:30 -08:00
parent af227d40e6
commit 5a943b902a
79 changed files with 8200 additions and 1 deletions

View File

@@ -0,0 +1,79 @@
import pytest
from unittest.mock import MagicMock, patch
from langchain_core.messages import HumanMessage, AIMessage
from ea_chatbot.graph.nodes.planner import planner_node
from ea_chatbot.graph.nodes.researcher import researcher_node
from ea_chatbot.graph.nodes.summarizer import summarizer_node
from ea_chatbot.schemas import TaskPlanResponse
@pytest.fixture
def mock_state_with_history():
    """Graph state seeded with a two-turn conversation and a follow-up question."""
    prior_turns = [
        HumanMessage(content="Show me the 2024 results for Florida"),
        AIMessage(content="Here are the results for Florida in 2024..."),
    ]
    analysis = {
        "data_required": ["2024 results", "New Jersey"],
        "unknowns": [],
        "ambiguities": [],
        "conditions": [],
    }
    return {
        "messages": prior_turns,
        "question": "What about in New Jersey?",
        "analysis": analysis,
        "next_action": "plan",
        "summary": "The user is asking about 2024 election results.",
        "plan": "Plan steps...",
        "code_output": "Code output...",
    }
@patch("ea_chatbot.graph.nodes.planner.get_llm_model")
@patch("ea_chatbot.utils.database_inspection.get_data_summary")
@patch("ea_chatbot.graph.nodes.planner.PLANNER_PROMPT")
def test_planner_uses_history_and_summary(mock_prompt, mock_get_summary, mock_get_llm, mock_state_with_history):
    """planner_node must forward the follow-up question, running summary, and
    the full message history to PLANNER_PROMPT.format_messages."""
    # NOTE(review): get_data_summary is patched at its defining module
    # (ea_chatbot.utils.database_inspection). If planner.py imports it via
    # `from ... import get_data_summary`, this patch will NOT intercept the
    # call — confirm and patch it in the planner namespace instead.
    mock_get_summary.return_value = "Data summary"
    mock_llm_instance = MagicMock()
    mock_get_llm.return_value = mock_llm_instance
    mock_structured_llm = MagicMock()
    mock_llm_instance.with_structured_output.return_value = mock_structured_llm
    # The planner expects a structured TaskPlanResponse back from the LLM.
    mock_structured_llm.invoke.return_value = TaskPlanResponse(
        goal="goal",
        reflection="reflection",
        context={
            "initial_context": "context",
            "assumptions": [],
            "constraints": [],
        },
        steps=["Step 1: test"],
    )

    planner_node(mock_state_with_history)

    mock_prompt.format_messages.assert_called_once()
    # call_args.kwargs (Python 3.8+) is clearer than the positional call_args[1].
    kwargs = mock_prompt.format_messages.call_args.kwargs
    assert kwargs["question"] == "What about in New Jersey?"
    assert kwargs["summary"] == mock_state_with_history["summary"]
    assert len(kwargs["history"]) == 2
@patch("ea_chatbot.graph.nodes.researcher.get_llm_model")
@patch("ea_chatbot.graph.nodes.researcher.RESEARCHER_PROMPT")
def test_researcher_uses_history_and_summary(mock_prompt, mock_get_llm, mock_state_with_history):
    """researcher_node must pass the follow-up question, the running summary,
    and both prior messages to RESEARCHER_PROMPT.format_messages."""
    mock_get_llm.return_value = MagicMock()

    researcher_node(mock_state_with_history)

    mock_prompt.format_messages.assert_called_once()
    _, passed_kwargs = mock_prompt.format_messages.call_args
    assert passed_kwargs["question"] == "What about in New Jersey?"
    assert passed_kwargs["summary"] == mock_state_with_history["summary"]
    assert len(passed_kwargs["history"]) == 2
@patch("ea_chatbot.graph.nodes.summarizer.get_llm_model")
@patch("ea_chatbot.graph.nodes.summarizer.SUMMARIZER_PROMPT")
def test_summarizer_uses_history_and_summary(mock_prompt, mock_get_llm, mock_state_with_history):
    """summarizer_node must receive the question, summary, and the two prior
    conversation messages via SUMMARIZER_PROMPT.format_messages."""
    fake_llm = MagicMock()
    mock_get_llm.return_value = fake_llm

    summarizer_node(mock_state_with_history)

    mock_prompt.format_messages.assert_called_once()
    sent = mock_prompt.format_messages.call_args.kwargs
    assert len(sent["history"]) == 2
    assert sent["summary"] == mock_state_with_history["summary"]
    assert sent["question"] == "What about in New Jersey?"