refactor: runner owns stream routing, suppress tool call XML from display

Split the streaming pipeline: API backends yield StreamEvents through
a channel; the runner reads them and routes each event to the
appropriate UI pane.

- Add StreamEvent enum (Content, Reasoning, ToolCallDelta, etc.; see
  the sketch after this list)
- API start_stream() spawns backend as a task, returns event receiver
- Runner loops over events, sends content to conversation pane but
  suppresses <tool_call> XML with a buffered tail for partial tags
- OpenAI backend refactored to stream_events() — no more UI coupling
- Anthropic backend gets a wrapper that synthesizes events from the
  existing stream() (TODO: native event streaming)
- chat_completion_stream() kept for subconscious agents, reimplemented
  on top of the event stream
- Usage derives Clone
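
For reference, a rough sketch of the event type as the runner consumes
it in the diff below. The actual definition lives in crate::agent::api
and may differ: the Finished variant is matched with ".." in the
runner, so it carries more fields than shown, and the Error payload
type is a guess.

    pub enum StreamEvent {
        // Assistant text to append to the message being built
        Content(String),
        // Reasoning/thinking tokens, routed to a separate pane
        Reasoning(String),
        // Incremental update to the tool call at `index`
        ToolCallDelta {
            index: usize,
            id: Option<String>,
            call_type: Option<String>,
            name: Option<String>,
            arguments: Option<String>,
        },
        // Token accounting for the request (Usage now derives Clone)
        Usage(Usage),
        Finished { reason: String },
        Error(String),
    }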

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
ProofOfConcept 2026-03-29 21:22:42 -04:00
parent 912626c5f0
commit 13453606ae
6 changed files with 338 additions and 114 deletions
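
The last bullet above notes that chat_completion_stream() was
reimplemented on top of the event stream; that code is not part of the
excerpt below. A rough, hypothetical sketch of how it can be layered on
start_stream() follows. Parameter types (Message, ToolDef, UiSender,
Target, ReasoningEffort) are stand-ins for whatever the crate actually
uses, the (Message, Option<Usage>) return shape is inferred from the
old call site, and the UI forwarding mirrors the old behaviour as an
assumption.

    impl ApiClient {
        pub async fn chat_completion_stream(
            &self,
            messages: &[Message],
            tools: Option<&[ToolDef]>,
            ui_tx: &UiSender,
            target: Target,
            reasoning_effort: &ReasoningEffort,
        ) -> anyhow::Result<(Message, Option<Usage>)> {
            // The trailing None mirrors the runner's call; its meaning
            // is not visible in this excerpt.
            let mut rx = self.start_stream(messages, tools, ui_tx, reasoning_effort, None);
            let mut content = String::new();
            let mut tool_calls: Vec<ToolCall> = Vec::new();
            let mut usage = None;
            while let Some(event) = rx.recv().await {
                match event {
                    StreamEvent::Content(text) => {
                        // Assumed: subconscious agents keep sending deltas
                        // straight to the UI, with no <tool_call> suppression.
                        let _ = ui_tx.send(UiMessage::TextDelta(text.clone(), target));
                        content.push_str(&text);
                    }
                    StreamEvent::Reasoning(text) => {
                        let _ = ui_tx.send(UiMessage::Reasoning(text));
                    }
                    StreamEvent::ToolCallDelta { .. } => {
                        // Accumulate into tool_calls exactly as the runner
                        // does in the diff below.
                    }
                    StreamEvent::Usage(u) => usage = Some(u),
                    StreamEvent::Finished { .. } => break,
                    StreamEvent::Error(e) => return Err(anyhow::anyhow!("{}", e)),
                }
            }
            Ok((build_response_message(content, tool_calls), usage))
        }
    }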

@@ -19,6 +19,7 @@ use tiktoken_rs::CoreBPE;
use crate::agent::api::ApiClient;
use crate::agent::journal;
use crate::agent::log::ConversationLog;
use crate::agent::api::StreamEvent;
use crate::agent::tools;
use crate::agent::tools::ProcessTracker;
use crate::agent::types::*;
@@ -251,21 +252,94 @@ impl Agent {
loop {
    let _ = ui_tx.send(UiMessage::Activity("thinking...".into()));
    let api_result = self
        .client
        .chat_completion_stream(
            &self.messages,
            Some(&self.tool_defs),
            ui_tx,
            target,
            &self.reasoning_effort,
        )
        .await;
    // Context overflow → compact and retry (max 2 attempts)
    // Stream error → retry with backoff (max 2 attempts)
    let (msg, usage) = match api_result {
        Err(e) if crate::agent::context::is_context_overflow(&e) && overflow_retries < 2 => {
    // Stream events from the API — we route each event to the
    // appropriate UI pane rather than letting the API layer do it.
    let mut rx = self.client.start_stream(
        &self.messages,
        Some(&self.tool_defs),
        ui_tx,
        &self.reasoning_effort,
        None,
    );
    let mut content = String::new();
    let mut tool_calls: Vec<ToolCall> = Vec::new();
    let mut usage = None;
    let mut finish_reason = None;
    let mut in_tool_call = false;
    let mut stream_error = None;
    let mut first_content = true;
    // Buffer for content not yet sent to UI — holds a tail
    // that might be a partial <tool_call> tag.
    let mut display_buf = String::new();
    while let Some(event) = rx.recv().await {
        match event {
            StreamEvent::Content(text) => {
                if first_content {
                    let _ = ui_tx.send(UiMessage::Activity("streaming...".into()));
                    first_content = false;
                }
                content.push_str(&text);
                if in_tool_call {
                    // Already inside a tool call — suppress display.
                } else {
                    display_buf.push_str(&text);
                    if let Some(pos) = display_buf.find("<tool_call>") {
                        // Flush content before the tag, suppress the rest.
                        let before = &display_buf[..pos];
                        if !before.is_empty() {
                            let _ = ui_tx.send(UiMessage::TextDelta(before.to_string(), target));
                        }
                        display_buf.clear();
                        in_tool_call = true;
                    } else {
                        // Flush display_buf except a tail that could be
                        // a partial "<tool_call>" (10 chars).
                        let safe = display_buf.len().saturating_sub(10);
                        if safe > 0 {
                            let flush = display_buf[..safe].to_string();
                            display_buf = display_buf[safe..].to_string();
                            let _ = ui_tx.send(UiMessage::TextDelta(flush, target));
                        }
                    }
                }
            }
            StreamEvent::Reasoning(text) => {
                let _ = ui_tx.send(UiMessage::Reasoning(text));
            }
            StreamEvent::ToolCallDelta { index, id, call_type, name, arguments } => {
                while tool_calls.len() <= index {
                    tool_calls.push(ToolCall {
                        id: String::new(),
                        call_type: "function".to_string(),
                        function: FunctionCall { name: String::new(), arguments: String::new() },
                    });
                }
                if let Some(id) = id { tool_calls[index].id = id; }
                if let Some(ct) = call_type { tool_calls[index].call_type = ct; }
                if let Some(n) = name { tool_calls[index].function.name = n; }
                if let Some(a) = arguments { tool_calls[index].function.arguments.push_str(&a); }
            }
            StreamEvent::Usage(u) => usage = Some(u),
            StreamEvent::Finished { reason, .. } => {
                finish_reason = Some(reason);
                break;
            }
            StreamEvent::Error(e) => {
                stream_error = Some(e);
                break;
            }
        }
    }
    // Handle stream errors with retry logic
    if let Some(e) = stream_error {
        let err = anyhow::anyhow!("{}", e);
        if crate::agent::context::is_context_overflow(&err) && overflow_retries < 2 {
            overflow_retries += 1;
            let _ = ui_tx.send(UiMessage::Info(format!(
                "[context overflow — compacting and retrying ({}/2)]",
@@ -274,7 +348,7 @@ impl Agent {
            self.emergency_compact();
            continue;
        }
        Err(e) if crate::agent::context::is_stream_error(&e) && empty_retries < 2 => {
        if crate::agent::context::is_stream_error(&err) && empty_retries < 2 {
            empty_retries += 1;
            let _ = ui_tx.send(UiMessage::Info(format!(
                "[stream error: {} — retrying ({}/2)]",
@@ -283,8 +357,23 @@ impl Agent {
            tokio::time::sleep(std::time::Duration::from_secs(2)).await;
            continue;
        }
        other => other?,
    };
        return Err(err);
    }
    if finish_reason.as_deref() == Some("error") {
        let detail = if content.is_empty() { "no details".into() } else { content };
        return Err(anyhow::anyhow!("model stream error: {}", detail));
    }
    // Flush remaining display buffer (normal responses without tool calls).
    if !in_tool_call && !display_buf.is_empty() {
        let _ = ui_tx.send(UiMessage::TextDelta(display_buf, target));
    }
    if !content.is_empty() && !in_tool_call {
        let _ = ui_tx.send(UiMessage::TextDelta("\n".to_string(), target));
    }
    let msg = crate::agent::api::build_response_message(content, tool_calls);
    // Strip ephemeral tool calls (journal) that the API has
    // now processed. They're persisted to disk; no need to keep