unified tool dispatch: remove memory_ special case

All tools go through tools::dispatch() — no more separate
dispatch path for memory tools in the runner. The only
remaining special case is tagging memory_render results as
ConversationEntry::Memory for context deduplication, which
is a result-handling concern, not dispatch.

Co-Developed-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Authored by ProofOfConcept on 2026-04-03 21:50:56 -04:00; committed by Kent Overstreet
parent 61deb7d488
commit 39d6ca3fe0

View file

@ -422,6 +422,7 @@ impl Agent {
if let Some(ref tool_calls) = msg.tool_calls { if let Some(ref tool_calls) = msg.tool_calls {
if !tool_calls.is_empty() { if !tool_calls.is_empty() {
self.push_message(msg.clone()); self.push_message(msg.clone());
for call in tool_calls { for call in tool_calls {
self.dispatch_tool_call(call, None, ui_tx, &mut ds) self.dispatch_tool_call(call, None, ui_tx, &mut ds)
.await; .await;
@ -499,53 +500,7 @@ impl Agent {
return; return;
} }
// Handle memory tools — needs &mut self for node tracking // Dispatch through unified path
if call.function.name.starts_with("memory_") {
let result = tools::memory::dispatch(&call.function.name, &args, None);
let text = match &result {
Ok(s) => s.clone(),
Err(e) => format!("Error: {:#}", e),
};
// Disambiguate memory renders from other tool results
let memory_key = if result.is_ok() {
match call.function.name.as_str() {
"memory_render" =>
args.get("key").and_then(|v| v.as_str()).map(String::from),
_ => None,
}
} else {
None
};
let output = tools::ToolOutput {
text,
is_yield: false,
images: Vec::new(),
model_switch: None,
dmn_pause: false,
};
let _ = ui_tx.send(UiMessage::ToolResult {
name: call.function.name.clone(),
result: output.text.clone(),
});
let _ = ui_tx.send(UiMessage::ToolFinished { id: call.id.clone() });
let mut msg = Message::tool_result(&call.id, &output.text);
msg.stamp();
if let Some(key) = memory_key {
self.push_entry(ConversationEntry::Memory { key, message: msg });
} else {
self.push_entry(ConversationEntry::Message(msg));
}
ds.had_tool_calls = true;
if output.text.starts_with("Error:") {
ds.tool_errors += 1;
}
self.publish_context_state();
return;
}
let output = let output =
tools::dispatch(&call.function.name, &args, &self.process_tracker).await; tools::dispatch(&call.function.name, &args, &self.process_tracker).await;
@ -555,7 +510,7 @@ impl Agent {
ds.had_tool_calls = true; ds.had_tool_calls = true;
} }
if output.model_switch.is_some() { if output.model_switch.is_some() {
ds.model_switch = output.model_switch; ds.model_switch = output.model_switch.clone();
} }
if output.dmn_pause { if output.dmn_pause {
ds.dmn_pause = true; ds.dmn_pause = true;
@ -570,11 +525,20 @@ impl Agent {
}); });
let _ = ui_tx.send(UiMessage::ToolFinished { id: call.id.clone() }); let _ = ui_tx.send(UiMessage::ToolFinished { id: call.id.clone() });
// Tag memory_render results for context deduplication
if call.function.name == "memory_render" && !output.text.starts_with("Error:") {
if let Some(key) = args.get("key").and_then(|v| v.as_str()) {
let mut msg = Message::tool_result(&call.id, &output.text);
msg.stamp();
self.push_entry(ConversationEntry::Memory { key: key.to_string(), message: msg });
self.publish_context_state();
return;
}
}
self.push_message(Message::tool_result(&call.id, &output.text)); self.push_message(Message::tool_result(&call.id, &output.text));
if !output.images.is_empty() { if !output.images.is_empty() {
// Only one live image in context at a time — age out any
// previous ones to avoid accumulating ~90KB+ per image.
self.age_out_images(); self.age_out_images();
self.push_message(Message::user_with_images( self.push_message(Message::user_with_images(
"Here is the image you requested:", "Here is the image you requested:",