- Refactored OIDC flow to implement PKCE, state/nonce validation, and BFF pattern. - Centralized configuration in Settings class (DEV_MODE, FRONTEND_URL, OIDC_REDIRECT_URI). - Updated auth routers to use conditional secure cookie flags based on DEV_MODE. - Modernized and cleaned up test suite by removing legacy Streamlit tests. - Fixed linting errors and unused imports across the backend.
83 lines
3.0 KiB
Python
83 lines
3.0 KiB
Python
import json
|
|
import pytest
|
|
import logging
|
|
from unittest.mock import patch
|
|
from ea_chatbot.graph.workflow import app
|
|
from ea_chatbot.graph.state import AgentState
|
|
from ea_chatbot.utils.logging import get_logger
|
|
from langchain_community.chat_models import FakeListChatModel
|
|
from langchain_core.runnables import RunnableLambda
|
|
|
|
@pytest.fixture(autouse=True)
def reset_logging():
    """Detach every handler from the ea_chatbot root logger around each test.

    Runs automatically (autouse) so no test inherits handlers installed by a
    previous test's call to get_logger().
    """
    def _clear_handlers():
        # Iterate over a snapshot: removeHandler mutates logger.handlers.
        logger = logging.getLogger("ea_chatbot")
        for h in list(logger.handlers):
            logger.removeHandler(h)

    _clear_handlers()
    yield
    _clear_handlers()
|
|
|
|
class FakeStructuredModel(FakeListChatModel):
    """FakeListChatModel variant that supports the structured-output API.

    with_structured_output returns a runnable that parses the first canned
    response as JSON and, when the schema is a Pydantic v2 model, validates
    it into a schema instance.
    """

    def with_structured_output(self, schema, **kwargs):
        # Return a runnable that yields a parsed object instead of raw text.
        def _invoke(input, config=None, **kwargs):
            # Always replay the first canned response, regardless of input.
            content = self.responses[0]
            # NOTE: the redundant function-local `import json` was removed;
            # the module already imports json at the top of the file.
            data = json.loads(content)
            # Pydantic v2 models expose model_validate; plain dict otherwise.
            if hasattr(schema, "model_validate"):
                return schema.model_validate(data)
            return data

        return RunnableLambda(_invoke)
|
|
|
|
def test_logging_e2e_json_output(tmp_path):
    """Test that a full graph run produces structured JSON logs from multiple nodes."""
    log_file = tmp_path / "e2e_test.jsonl"

    # Point the root ea_chatbot logger at a temp JSON-lines file.
    get_logger("ea_chatbot", log_file=str(log_file))

    initial_state: AgentState = {
        "messages": [],
        "question": "Who won in 2024?",
        "analysis": None,
        "next_action": "",
        "plan": None,
        "code": None,
        "code_output": None,
        "error": None,
        "plots": [],
        "dfs": {},
    }

    # Canned model outputs: the analyzer asks for clarification, and the
    # clarification node replies with a fixed prompt.
    fake_analyzer_response = """{"data_required": [], "unknowns": [], "ambiguities": ["Which year?"], "conditions": [], "next_action": "clarify"}"""
    analyzer_model = FakeStructuredModel(responses=[fake_analyzer_response])
    clarify_model = FakeListChatModel(responses=["Please specify."])

    with patch(
        "ea_chatbot.graph.nodes.query_analyzer.get_llm_model"
    ) as analyzer_factory, patch(
        "ea_chatbot.graph.nodes.clarification.get_llm_model"
    ) as clarify_factory:
        analyzer_factory.return_value = analyzer_model
        clarify_factory.return_value = clarify_model

        # Drain the stream so every node in the graph executes.
        list(app.stream(initial_state))

        # The log file must exist and hold at least one record.
        assert log_file.exists()
        lines = log_file.read_text().splitlines()
        assert len(lines) > 0

        # Parse each JSON line once and inspect the records.
        records = [json.loads(line) for line in lines]

        # Records from both graph nodes should be present.
        node_names = [record["name"] for record in records]
        assert "ea_chatbot.query_analyzer" in node_names
        assert "ea_chatbot.clarification" in node_names

        # Key lifecycle events were logged.
        messages = [record["message"] for record in records]
        assert any("Analyzing question" in m for m in messages)
        assert any("Clarification generated" in m for m in messages)