- Refactored the OIDC flow to implement PKCE, state/nonce validation, and the BFF pattern.
- Centralized configuration in the Settings class (DEV_MODE, FRONTEND_URL, OIDC_REDIRECT_URI).
- Updated the auth routers to use conditional secure-cookie flags based on DEV_MODE.
- Modernized and cleaned up the test suite by removing legacy Streamlit tests.
- Fixed linting errors and unused imports across the backend.
76 lines
3.0 KiB
Python
76 lines
3.0 KiB
Python
import pytest
|
|
from unittest.mock import MagicMock, patch
|
|
from langchain_core.messages import HumanMessage, AIMessage
|
|
from ea_chatbot.graph.nodes.query_analyzer import query_analyzer_node, QueryAnalysis
|
|
|
|
@pytest.fixture
def mock_state_with_history():
    """Graph state containing a one-turn conversation plus a running summary."""
    conversation = [
        HumanMessage(content="Show me the 2024 results for Florida"),
        AIMessage(content="Here are the results for Florida in 2024..."),
    ]
    return {
        "messages": conversation,
        "question": "What about in New Jersey?",
        "analysis": None,
        "next_action": "",
        "summary": "The user is asking about 2024 election results.",
    }
|
|
|
|
@patch("ea_chatbot.graph.nodes.query_analyzer.get_llm_model")
@patch("ea_chatbot.graph.nodes.query_analyzer.QUERY_ANALYZER_PROMPT")
def test_query_analyzer_uses_history_and_summary(mock_prompt, mock_get_llm, mock_state_with_history):
    """Test that query_analyzer_node passes history and summary to the prompt."""
    # Wire up a fake LLM whose structured-output wrapper yields a fixed analysis.
    structured = MagicMock()
    structured.invoke.return_value = QueryAnalysis(
        data_required=["2024 results", "New Jersey"],
        unknowns=[],
        ambiguities=[],
        conditions=[],
        next_action="plan",
    )
    llm = MagicMock()
    llm.with_structured_output.return_value = structured
    mock_get_llm.return_value = llm

    query_analyzer_node(mock_state_with_history)

    # The prompt must be formatted exactly once, with the expected variables.
    mock_prompt.format_messages.assert_called_once()
    kwargs = mock_prompt.format_messages.call_args[1]

    assert kwargs["question"] == "What about in New Jersey?"
    assert "summary" in kwargs
    assert kwargs["summary"] == mock_state_with_history["summary"]
    assert "history" in kwargs
    # Both conversation messages from the state should be forwarded as history.
    assert len(kwargs["history"]) == 2
    assert kwargs["history"][0].content == "Show me the 2024 results for Florida"
|
|
|
|
@patch("ea_chatbot.graph.nodes.query_analyzer.get_llm_model")
def test_query_analyzer_context_window(mock_get_llm):
    """Test that query_analyzer_node only uses the last 6 messages (3 turns)."""
    ten_messages = [HumanMessage(content=f"Msg {i}") for i in range(10)]
    state = {
        "messages": ten_messages,
        "question": "Latest question",
        "analysis": None,
        "next_action": "",
        "summary": "Summary",
    }

    # Fake LLM: the structured-output wrapper returns an empty "plan" analysis.
    structured = MagicMock()
    structured.invoke.return_value = QueryAnalysis(
        data_required=[], unknowns=[], ambiguities=[], conditions=[], next_action="plan"
    )
    llm = MagicMock()
    llm.with_structured_output.return_value = structured
    mock_get_llm.return_value = llm

    with patch("ea_chatbot.graph.nodes.query_analyzer.QUERY_ANALYZER_PROMPT") as mock_prompt:
        query_analyzer_node(state)
        kwargs = mock_prompt.format_messages.call_args[1]
        # Only the trailing window survives: "Msg 4" .. "Msg 9" (6 messages).
        assert len(kwargs["history"]) == 6
        assert kwargs["history"][0].content == "Msg 4"
|