- Refactored OIDC flow to implement PKCE, state/nonce validation, and BFF pattern. - Centralized configuration in Settings class (DEV_MODE, FRONTEND_URL, OIDC_REDIRECT_URI). - Updated auth routers to use conditional secure cookie flags based on DEV_MODE. - Modernized and cleaned up test suite by removing legacy Streamlit tests. - Fixed linting errors and unused imports across the backend.
79 lines
3.0 KiB
Python
import pytest
|
|
from unittest.mock import MagicMock, patch
|
|
from ea_chatbot.graph.nodes.query_analyzer import query_analyzer_node, QueryAnalysis
|
|
|
|
@pytest.fixture
def mock_state():
    """Provide a baseline graph state carrying a concrete data question."""
    state = {
        "messages": [],
        "question": "Show me the 2024 results for Florida",
        "analysis": None,
        "next_action": "",
    }
    return state
|
|
|
|
@patch("ea_chatbot.graph.nodes.query_analyzer.get_llm_model")
def test_query_analyzer_data_analysis(mock_get_llm, mock_state):
    """Test that a clear data analysis query is routed to the planner."""
    # Wire up get_llm_model -> LLM -> structured-output runnable as mocks.
    structured_runnable = MagicMock()
    llm = MagicMock()
    llm.with_structured_output.return_value = structured_runnable
    mock_get_llm.return_value = llm

    # The structured runnable hands back a ready-made analysis routing to "plan".
    structured_runnable.invoke.return_value = QueryAnalysis(
        data_required=["2024 results", "Florida"],
        unknowns=[],
        ambiguities=[],
        conditions=[],
        next_action="plan",
    )

    result = query_analyzer_node(mock_state)

    assert result["next_action"] == "plan"
    assert "2024 results" in result["analysis"]["data_required"]
|
|
|
|
@patch("ea_chatbot.graph.nodes.query_analyzer.get_llm_model")
def test_query_analyzer_ambiguous(mock_get_llm, mock_state):
    """Test that an ambiguous query is routed to clarification."""
    mock_state["question"] = "What happened?"

    # Mock chain: get_llm_model -> LLM -> structured-output runnable.
    structured_runnable = MagicMock()
    llm = MagicMock()
    llm.with_structured_output.return_value = structured_runnable
    mock_get_llm.return_value = llm

    # An analysis with an open unknown should route to "clarify".
    structured_runnable.invoke.return_value = QueryAnalysis(
        data_required=[],
        unknowns=["What event?"],
        ambiguities=[],
        conditions=[],
        next_action="clarify",
    )

    result = query_analyzer_node(mock_state)

    assert result["next_action"] == "clarify"
    assert len(result["analysis"]["unknowns"]) > 0
|
|
|
|
@patch("ea_chatbot.graph.nodes.query_analyzer.get_llm_model")
def test_query_analyzer_uses_config(mock_get_llm, mock_state, monkeypatch):
    """Test that the node uses the configured LLM settings."""
    # Override the model via the environment before the node runs.
    monkeypatch.setenv("QUERY_ANALYZER_LLM__MODEL", "gpt-3.5-turbo")

    # Mock chain: get_llm_model -> LLM -> structured-output runnable.
    structured_runnable = MagicMock()
    llm = MagicMock()
    llm.with_structured_output.return_value = structured_runnable
    mock_get_llm.return_value = llm
    structured_runnable.invoke.return_value = QueryAnalysis(
        data_required=[],
        unknowns=[],
        ambiguities=[],
        conditions=[],
        next_action="plan",
    )

    query_analyzer_node(mock_state)

    # The first positional argument to get_llm_model is the resolved config;
    # it must reflect the environment override.
    (called_config,) = mock_get_llm.call_args[0]
    assert called_config.model == "gpt-3.5-turbo"
|
assert called_config.model == "gpt-3.5-turbo" |