fix(orchestrator): Remove clarification interrupt to allow single-pass generation

Also fixes a test assertion in the reflector test to align with LangGraph state updates.
This commit is contained in:
Yunxiao Xu
2026-02-23 16:14:12 -08:00
parent 557b553c59
commit 4d92c9aedb
4 changed files with 62 additions and 8 deletions

View File

@@ -36,8 +36,7 @@ Please ask the user for the necessary details."""
         response = llm.invoke(messages)
         logger.info("[bold green]Clarification generated.[/bold green]")
         return {
-            "messages": [response],
-            "next_action": "end"  # To indicate we are done for now
+            "messages": [response]
         }
     except Exception as e:
         logger.error(f"Failed to generate clarification: {str(e)}")

View File

@@ -106,10 +106,8 @@ def create_workflow(
     workflow.add_edge("summarize_conversation", END)
     workflow.add_edge("clarification", END)
-    # Compile the graph with human-in-the-loop interrupt
-    app = workflow.compile(
-        interrupt_before=["clarification"]
-    )
+    # Compile the graph
+    app = workflow.compile()
     return app

View File

@@ -0,0 +1,55 @@
import pytest
from unittest.mock import MagicMock, patch
from ea_chatbot.graph.workflow import create_workflow
from ea_chatbot.graph.state import AgentState
from langchain_core.messages import HumanMessage, AIMessage
def test_clarification_flow_immediate_execution():
    """Verify that an ambiguous query immediately executes the clarification node without interruption."""
    analyzer_stub = MagicMock()
    clarification_stub = MagicMock()

    # The analyzer routes to 'clarify'; the clarification node then
    # responds with a follow-up question for the user.
    analyzer_stub.return_value = {"next_action": "clarify"}
    clarification_stub.return_value = {"messages": [AIMessage(content="What year?")]}

    # The remaining nodes are never reached on the clarification path,
    # but create_workflow still requires them, so plain mocks suffice.
    app = create_workflow(
        query_analyzer=analyzer_stub,
        clarification=clarification_stub,
        planner=MagicMock(),
        delegate=MagicMock(),
        data_analyst_worker=MagicMock(),
        researcher_worker=MagicMock(),
        reflector=MagicMock(),
        synthesizer=MagicMock(),
        summarize_conversation=MagicMock(),
    )

    starting_state = AgentState(
        messages=[HumanMessage(content="Who won?")],
        question="Who won?",
        analysis={},
        next_action="",
        iterations=0,
        checklist=[],
        current_step=0,
        vfs={},
        plots=[],
        dfs={},
    )

    # With the interrupt removed, a single invoke should run analyzer
    # and clarification back to back without pausing.
    result = app.invoke(starting_state)

    assert analyzer_stub.called
    assert clarification_stub.called
    # The clarification question must appear in the final message history.
    assert len(result["messages"]) > 0
    assert "What year?" in [msg.content for msg in result["messages"]]

View File

@@ -27,7 +27,9 @@ def test_reflector_does_not_advance_on_failure():
     result = reflector_node(state)
-    # Should NOT increment
-    assert result["current_step"] == 0
+    # Should NOT increment (therefore not in the updates dict)
+    assert "current_step" not in result
+    # Should increment iterations
+    assert result["iterations"] == 1
     # Should probably route to planner or retry
     assert result["next_action"] == "delegate"  # Or 'planner' if we want re-planning