From 11c14fb8a8617bce3a3251e3c013bc86fffe7296 Mon Sep 17 00:00:00 2001
From: Yunxiao Xu
Date: Mon, 23 Feb 2026 14:37:15 -0800
Subject: [PATCH] feat(config): Implement asymmetric model configuration for
 Orchestrator and Workers

---
 backend/src/ea_chatbot/config.py                  | 2 ++
 backend/src/ea_chatbot/graph/nodes/reflector.py   | 2 +-
 backend/src/ea_chatbot/graph/nodes/synthesizer.py | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/backend/src/ea_chatbot/config.py b/backend/src/ea_chatbot/config.py
index 05c85eb..d231cd4 100644
--- a/backend/src/ea_chatbot/config.py
+++ b/backend/src/ea_chatbot/config.py
@@ -52,6 +52,8 @@ class Settings(BaseSettings):
     coder_llm: LLMConfig = Field(default_factory=lambda: LLMConfig(model="gpt-5-mini", temperature=0.0))
     summarizer_llm: LLMConfig = Field(default_factory=lambda: LLMConfig(model="gpt-5-mini", temperature=0.0))
     researcher_llm: LLMConfig = Field(default_factory=lambda: LLMConfig(model="gpt-5-mini", temperature=0.0))
+    reflector_llm: LLMConfig = Field(default_factory=lambda: LLMConfig(model="gpt-5-mini", temperature=0.0))
+    synthesizer_llm: LLMConfig = Field(default_factory=lambda: LLMConfig(model="gpt-5-mini", temperature=0.0))

     # Allow nested env vars like QUERY_ANALYZER_LLM__MODEL
     model_config = SettingsConfigDict(env_nested_delimiter='__', env_prefix='')
diff --git a/backend/src/ea_chatbot/graph/nodes/reflector.py b/backend/src/ea_chatbot/graph/nodes/reflector.py
index eee8f4a..0ce77ff 100644
--- a/backend/src/ea_chatbot/graph/nodes/reflector.py
+++ b/backend/src/ea_chatbot/graph/nodes/reflector.py
@@ -22,7 +22,7 @@ def reflector_node(state: AgentState) -> dict:
     logger.info(f"Evaluating worker output for task: {task_desc[:50]}...")

     llm = get_llm_model(
-        settings.planner_llm,  # Using planner model for evaluation
+        settings.reflector_llm,
         callbacks=[LangChainLoggingHandler(logger=logger)]
     )
     structured_llm = llm.with_structured_output(ReflectorResponse)
diff --git a/backend/src/ea_chatbot/graph/nodes/synthesizer.py b/backend/src/ea_chatbot/graph/nodes/synthesizer.py
index e1b6f6b..728396f 100644
--- a/backend/src/ea_chatbot/graph/nodes/synthesizer.py
+++ b/backend/src/ea_chatbot/graph/nodes/synthesizer.py
@@ -19,7 +19,7 @@ def synthesizer_node(state: AgentState) -> dict:
     logger.info("Synthesizing final answer from worker results...")

     llm = get_llm_model(
-        settings.summarizer_llm,
+        settings.synthesizer_llm,
         callbacks=[LangChainLoggingHandler(logger=logger)]
     )