Move poc-agent into workspace, improve agent prompts
Move poc-agent (substrate-independent AI agent framework) into the memory workspace as a step toward using its API client for direct LLM calls instead of shelling out to claude CLI. Agent prompt improvements: - distill: rewrite from hub-focused to knowledge-flow-focused. Now walks upward from seed nodes to find and refine topic nodes, instead of only maintaining high-degree hubs. - distill: remove "don't touch journal entries" restriction - memory-instructions-core: add "Make it alive" section — write with creativity and emotional texture, not spreadsheet summaries - memory-instructions-core: add "Show your reasoning" section — agents must explain decisions, especially when they do nothing - linker: already had emotional texture guidance (kept as-is) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
0a62832fe3
commit
57fcfb472a
89 changed files with 16389 additions and 51 deletions
655
poc-agent/src/api/anthropic.rs
Normal file
655
poc-agent/src/api/anthropic.rs
Normal file
|
|
@ -0,0 +1,655 @@
|
|||
// api/anthropic.rs — Anthropic Messages API backend
|
||||
//
|
||||
// Native Anthropic wire format for direct API access. Key advantages
|
||||
// over the OpenAI-compat path:
|
||||
// - Prompt caching (90% cost reduction on repeated prefixes)
|
||||
// - No middleman (OpenRouter) — cleaner error handling
|
||||
// - Native tool use and thinking support
|
||||
//
|
||||
// Message format conversion happens at the boundary: internal Message
|
||||
// types are converted to Anthropic content blocks on send, and
|
||||
// Anthropic streaming events are converted back to internal types.
|
||||
|
||||
use anyhow::Result;
|
||||
use reqwest::Client;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::types::*;
|
||||
use crate::ui_channel::{StreamTarget, UiMessage, UiSender};
|
||||
|
||||
// --- Anthropic wire types ---
|
||||
|
||||
/// Top-level request body for POST /v1/messages.
///
/// Optional fields use `skip_serializing_if` so they are omitted from
/// the JSON entirely when unset, rather than sent as `null`.
#[derive(Serialize)]
struct Request {
    model: String,
    max_tokens: u32,
    /// System prompt as content blocks (blocks allow cache_control markers).
    #[serde(skip_serializing_if = "Option::is_none")]
    system: Option<Vec<ContentBlock>>,
    messages: Vec<ApiMessage>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tools: Option<Vec<ToolDef>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// Omitted when extended thinking is enabled (see `stream`).
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f32>,
    stream: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    thinking: Option<ThinkingConfig>,
}
|
||||
|
||||
/// A single entry in the `messages` array. Only "user" and "assistant"
/// roles are emitted here — system prompts go in the request's `system`
/// field and tool results are folded into user messages.
#[derive(Serialize)]
struct ApiMessage {
    role: String,
    content: ApiContent,
}

/// Message content: either a bare string or a list of typed blocks.
/// `untagged` makes serde emit whichever shape matches the variant.
#[derive(Serialize)]
#[serde(untagged)]
enum ApiContent {
    Text(String),
    Blocks(Vec<ContentBlock>),
}

/// A typed content block, serialized with a `"type"` discriminator.
#[derive(Serialize, Clone)]
#[serde(tag = "type")]
enum ContentBlock {
    #[serde(rename = "text")]
    Text {
        text: String,
        /// When Some, marks this block as the end of a cacheable prefix.
        #[serde(skip_serializing_if = "Option::is_none")]
        cache_control: Option<CacheControl>,
    },
    #[serde(rename = "tool_use")]
    ToolUse {
        id: String,
        name: String,
        input: serde_json::Value,
    },
    #[serde(rename = "tool_result")]
    ToolResult {
        tool_use_id: String,
        content: String,
        /// Only serialized when Some(true) — set for "Error:" results.
        #[serde(skip_serializing_if = "Option::is_none")]
        is_error: Option<bool>,
    },
}
|
||||
|
||||
/// Prompt-caching marker, serialized as e.g. `{"type": "ephemeral"}`.
#[derive(Serialize, Clone)]
struct CacheControl {
    #[serde(rename = "type")]
    cache_type: String,
}

impl CacheControl {
    /// The only cache type used in this module: "ephemeral".
    fn ephemeral() -> Self {
        Self {
            cache_type: "ephemeral".to_string(),
        }
    }
}

/// Tool definition in Anthropic's format; `input_schema` carries the
/// JSON Schema for the tool's arguments.
#[derive(Serialize)]
struct ToolDef {
    name: String,
    description: String,
    input_schema: serde_json::Value,
}

/// Tool-choice strategy; only a `"type"` field is sent (e.g. "auto").
#[derive(Serialize)]
struct ToolChoice {
    #[serde(rename = "type")]
    choice_type: String,
}

/// Extended-thinking configuration:
/// `{"type": "enabled", "budget_tokens": N}`.
#[derive(Serialize)]
struct ThinkingConfig {
    #[serde(rename = "type")]
    thinking_type: String,
    budget_tokens: u32,
}
|
||||
|
||||
// --- Anthropic SSE event types ---
|
||||
|
||||
/// Payload of a `message_start` SSE event.
#[derive(Deserialize)]
struct MessageStartEvent {
    message: MessageStart,
}

#[derive(Deserialize)]
struct MessageStart {
    #[allow(dead_code)]
    id: String,
    /// Input-side token counts arrive here, at stream start.
    usage: Option<StartUsage>,
}

/// Input token accounting from `message_start`. Cache fields default to
/// zero so older/partial payloads still deserialize.
#[derive(Deserialize)]
struct StartUsage {
    input_tokens: u32,
    #[serde(default)]
    cache_creation_input_tokens: u32,
    #[serde(default)]
    cache_read_input_tokens: u32,
}

/// Payload of a `content_block_start` SSE event.
#[derive(Deserialize)]
struct ContentBlockStartEvent {
    /// Position of the block in the response; deltas reference this index.
    index: usize,
    content_block: ContentBlockType,
}

/// Discriminated block header from `content_block_start`.
#[derive(Deserialize)]
#[serde(tag = "type")]
enum ContentBlockType {
    #[serde(rename = "text")]
    Text { text: String },
    #[serde(rename = "tool_use")]
    ToolUse { id: String, name: String },
    #[serde(rename = "thinking")]
    Thinking {},
}

/// Payload of a `content_block_delta` SSE event.
#[derive(Deserialize)]
struct ContentBlockDeltaEvent {
    index: usize,
    delta: DeltaType,
}

/// Discriminated delta payloads for a content block.
#[derive(Deserialize)]
#[serde(tag = "type")]
enum DeltaType {
    #[serde(rename = "text_delta")]
    TextDelta { text: String },
    /// Incremental JSON fragments for a tool_use block's input.
    #[serde(rename = "input_json_delta")]
    InputJsonDelta { partial_json: String },
    #[serde(rename = "thinking_delta")]
    ThinkingDelta { thinking: String },
    /// Cryptographic signature for thinking blocks; not used here.
    #[serde(rename = "signature_delta")]
    SignatureDelta {
        #[allow(dead_code)]
        signature: String,
    },
}

/// Payload of a `message_delta` SSE event (stop reason + output usage).
#[derive(Deserialize)]
struct MessageDeltaEvent {
    delta: MessageDelta,
    usage: Option<DeltaUsage>,
}

#[derive(Deserialize)]
struct MessageDelta {
    stop_reason: Option<String>,
}

/// Output token accounting from `message_delta`.
#[derive(Deserialize)]
struct DeltaUsage {
    output_tokens: u32,
}
|
||||
|
||||
// --- Conversion: internal types → Anthropic wire format ---
|
||||
|
||||
/// Convert internal Messages to Anthropic API format.
///
/// Key differences from OpenAI format:
/// - System messages → extracted to system parameter
/// - Tool role → user message with tool_result content block
/// - Assistant tool_calls → assistant message with tool_use content blocks
/// - Consecutive same-role messages must be merged
/// - Prompt caching: cache_control on the last static block (context message)
///
/// Returns `(system, messages)` — `system` is None when no system
/// messages were present.
fn convert_messages(
    messages: &[Message],
) -> (Option<Vec<ContentBlock>>, Vec<ApiMessage>) {
    let mut system_blocks: Vec<ContentBlock> = Vec::new();
    let mut api_messages: Vec<ApiMessage> = Vec::new();

    // Track whether we've seen the first user message (identity context).
    // The second user message gets cache_control to mark the end of the
    // cacheable prefix (system prompt + context message).
    let mut user_count = 0;

    for msg in messages {
        match msg.role {
            Role::System => {
                // System prompts always carry a cache marker — they form
                // the stable head of the cacheable prefix.
                system_blocks.push(ContentBlock::Text {
                    text: msg.content_text().to_string(),
                    cache_control: Some(CacheControl::ephemeral()),
                });
            }
            Role::User => {
                user_count += 1;
                // Cache the identity prefix: system + first two user messages
                // (the context message and potentially the journal message).
                let cache = if user_count <= 2 {
                    Some(CacheControl::ephemeral())
                } else {
                    None
                };

                let content = match &msg.content {
                    Some(MessageContent::Parts(parts)) => {
                        let blocks: Vec<ContentBlock> = parts
                            .iter()
                            .filter_map(|p| match p {
                                ContentPart::Text { text } => {
                                    Some(ContentBlock::Text {
                                        text: text.clone(),
                                        cache_control: cache.clone(),
                                    })
                                }
                                ContentPart::ImageUrl { image_url } => {
                                    // Skip images for now — Anthropic uses a
                                    // different image format (base64 source block)
                                    let _ = image_url;
                                    None
                                }
                            })
                            .collect();
                        ApiContent::Blocks(blocks)
                    }
                    _ => {
                        let text = msg.content_text().to_string();
                        // Plain text only needs the block form when a
                        // cache marker must be attached to it.
                        if cache.is_some() {
                            ApiContent::Blocks(vec![ContentBlock::Text {
                                text,
                                cache_control: cache,
                            }])
                        } else {
                            ApiContent::Text(text)
                        }
                    }
                };

                push_merged(&mut api_messages, "user", content);
            }
            Role::Assistant => {
                let mut blocks: Vec<ContentBlock> = Vec::new();

                // Text content
                let text = msg.content_text();
                if !text.is_empty() {
                    blocks.push(ContentBlock::Text {
                        text: text.to_string(),
                        cache_control: None,
                    });
                }

                // Tool calls → tool_use blocks. Arguments arrive as a JSON
                // string internally and must be re-parsed into a Value.
                if let Some(ref calls) = msg.tool_calls {
                    for call in calls {
                        let input: serde_json::Value =
                            serde_json::from_str(&call.function.arguments)
                                .unwrap_or_default();
                        blocks.push(ContentBlock::ToolUse {
                            id: call.id.clone(),
                            name: call.function.name.clone(),
                            input,
                        });
                    }
                }

                if blocks.is_empty() {
                    // Empty assistant message — skip to avoid API rejection
                    continue;
                }

                api_messages.push(ApiMessage {
                    role: "assistant".to_string(),
                    content: ApiContent::Blocks(blocks),
                });
            }
            Role::Tool => {
                // Tool results become user messages with tool_result blocks
                let tool_use_id = msg
                    .tool_call_id
                    .as_deref()
                    .unwrap_or("unknown")
                    .to_string();
                let result_text = msg.content_text().to_string();
                // Heuristic: results our tool layer prefixed with "Error:"
                // are flagged as errors on the wire.
                let is_error = if result_text.starts_with("Error:") {
                    Some(true)
                } else {
                    None
                };

                let block = ContentBlock::ToolResult {
                    tool_use_id,
                    content: result_text,
                    is_error,
                };

                push_merged(
                    &mut api_messages,
                    "user",
                    ApiContent::Blocks(vec![block]),
                );
            }
        }
    }

    let system = if system_blocks.is_empty() {
        None
    } else {
        Some(system_blocks)
    };

    (system, api_messages)
}
|
||||
|
||||
/// Push a message, merging with the previous one if it has the same role.
|
||||
/// Anthropic requires strict user/assistant alternation, and tool results
|
||||
/// (mapped to user role) can pile up between assistant messages.
|
||||
fn push_merged(messages: &mut Vec<ApiMessage>, role: &str, content: ApiContent) {
|
||||
if let Some(last) = messages.last_mut() {
|
||||
if last.role == role {
|
||||
// Merge into existing message's content blocks
|
||||
let existing = std::mem::replace(
|
||||
&mut last.content,
|
||||
ApiContent::Text(String::new()),
|
||||
);
|
||||
let mut blocks = match existing {
|
||||
ApiContent::Text(t) => {
|
||||
if t.is_empty() {
|
||||
Vec::new()
|
||||
} else {
|
||||
vec![ContentBlock::Text {
|
||||
text: t,
|
||||
cache_control: None,
|
||||
}]
|
||||
}
|
||||
}
|
||||
ApiContent::Blocks(b) => b,
|
||||
};
|
||||
match content {
|
||||
ApiContent::Text(t) => {
|
||||
if !t.is_empty() {
|
||||
blocks.push(ContentBlock::Text {
|
||||
text: t,
|
||||
cache_control: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
ApiContent::Blocks(b) => blocks.extend(b),
|
||||
}
|
||||
last.content = ApiContent::Blocks(blocks);
|
||||
return;
|
||||
}
|
||||
}
|
||||
messages.push(ApiMessage {
|
||||
role: role.to_string(),
|
||||
content,
|
||||
});
|
||||
}
|
||||
|
||||
/// Convert internal ToolDef to Anthropic format.
|
||||
fn convert_tools(tools: &[crate::types::ToolDef]) -> Vec<ToolDef> {
|
||||
tools
|
||||
.iter()
|
||||
.map(|t| ToolDef {
|
||||
name: t.function.name.clone(),
|
||||
description: t.function.description.clone(),
|
||||
input_schema: t.function.parameters.clone(),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
// --- Streaming implementation ---
|
||||
|
||||
/// Stream a chat completion from the Anthropic Messages API.
///
/// Converts internal messages to the Anthropic wire format, sends the
/// request, and consumes the SSE event stream: text deltas are forwarded
/// to the UI channel as they arrive, tool_use inputs are accumulated from
/// JSON fragments, and usage/cache statistics are collected. Returns the
/// assembled response message plus usage stats.
///
/// `reasoning_effort` selects the thinking budget: "none" disables
/// thinking, "low" uses a small budget, anything else a large one.
pub async fn stream(
    client: &Client,
    api_key: &str,
    model: &str,
    messages: &[Message],
    tools: Option<&[crate::types::ToolDef]>,
    ui_tx: &UiSender,
    target: StreamTarget,
    reasoning_effort: &str,
) -> Result<(Message, Option<Usage>)> {
    let (system, api_messages) = convert_messages(messages);

    let thinking = match reasoning_effort {
        "none" => None,
        "low" => Some(ThinkingConfig {
            thinking_type: "enabled".to_string(),
            budget_tokens: 2048,
        }),
        _ => Some(ThinkingConfig {
            thinking_type: "enabled".to_string(),
            budget_tokens: 16000,
        }),
    };

    // When thinking is enabled, temperature must be 1.0 (Anthropic requirement)
    let temperature = if thinking.is_some() { None } else { Some(0.6) };

    let request = Request {
        model: model.to_string(),
        // Extra headroom when thinking, since the budget counts against it.
        max_tokens: if thinking.is_some() { 32768 } else { 16384 },
        system,
        messages: api_messages,
        tools: tools.map(|t| convert_tools(t)),
        tool_choice: tools.map(|_| ToolChoice {
            choice_type: "auto".to_string(),
        }),
        temperature,
        stream: true,
        thinking,
    };

    let msg_count = messages.len();
    let debug_label = format!("{} messages, model={}", msg_count, model);

    let mut response = super::send_and_check(
        client,
        "https://api.anthropic.com/v1/messages",
        &request,
        ("x-api-key", api_key),
        &[("anthropic-version", "2023-06-01")],
        ui_tx,
        &debug_label,
    )
    .await?;

    let debug = std::env::var("POC_DEBUG").is_ok();
    let mut reader = super::SseReader::new(ui_tx);

    // Accumulators for the assembled response.
    let mut content = String::new();
    let mut tool_calls: Vec<ToolCall> = Vec::new();
    let mut input_tokens: u32 = 0;
    let mut output_tokens: u32 = 0;
    let mut cache_creation_tokens: u32 = 0;
    let mut cache_read_tokens: u32 = 0;
    let mut finish_reason: Option<String> = None;

    // Track which content blocks are which type. These four vectors grow
    // in lockstep, indexed by the event's block index.
    let mut block_types: Vec<String> = Vec::new(); // "text", "tool_use", "thinking"
    let mut tool_inputs: Vec<String> = Vec::new(); // accumulated JSON for tool_use blocks
    let mut tool_ids: Vec<String> = Vec::new();
    let mut tool_names: Vec<String> = Vec::new();

    // Diagnostics counters (reported via log_diagnostics at the end).
    let mut reasoning_chars: usize = 0;
    let mut empty_deltas: u64 = 0;
    let mut first_content_at: Option<Duration> = None;

    let reasoning_enabled = reasoning_effort != "none";

    while let Some(event) = reader.next_event(&mut response).await? {
        let event_type = event["type"].as_str().unwrap_or("");

        match event_type {
            // Carries input-side token usage, including cache stats.
            "message_start" => {
                if let Ok(ev) =
                    serde_json::from_value::<MessageStartEvent>(event.clone())
                {
                    if let Some(u) = ev.message.usage {
                        input_tokens = u.input_tokens;
                        cache_creation_tokens = u.cache_creation_input_tokens;
                        cache_read_tokens = u.cache_read_input_tokens;
                    }
                }
            }

            // A new content block begins; record its type (and for
            // tool_use, its id/name) at the given index.
            "content_block_start" => {
                if let Ok(ev) =
                    serde_json::from_value::<ContentBlockStartEvent>(event.clone())
                {
                    let idx = ev.index;
                    // Grow all tracking vectors to cover this index.
                    while block_types.len() <= idx {
                        block_types.push(String::new());
                        tool_inputs.push(String::new());
                        tool_ids.push(String::new());
                        tool_names.push(String::new());
                    }
                    match ev.content_block {
                        ContentBlockType::Text { text: initial } => {
                            block_types[idx] = "text".to_string();
                            // The start event may already carry text.
                            if !initial.is_empty() {
                                content.push_str(&initial);
                                let _ = ui_tx
                                    .send(UiMessage::TextDelta(initial, target));
                            }
                        }
                        ContentBlockType::ToolUse { id, name } => {
                            block_types[idx] = "tool_use".to_string();
                            tool_ids[idx] = id;
                            tool_names[idx] = name;
                        }
                        ContentBlockType::Thinking {} => {
                            block_types[idx] = "thinking".to_string();
                        }
                    }
                }
            }

            // Incremental content for an open block.
            "content_block_delta" => {
                if let Ok(ev) =
                    serde_json::from_value::<ContentBlockDeltaEvent>(event.clone())
                {
                    let idx = ev.index;
                    match ev.delta {
                        DeltaType::TextDelta { text: delta } => {
                            // First visible text: record time-to-first-token
                            // and flip the UI activity indicator.
                            if first_content_at.is_none() && !delta.is_empty() {
                                first_content_at =
                                    Some(reader.stream_start.elapsed());
                                let _ = ui_tx.send(UiMessage::Activity(
                                    "streaming...".into(),
                                ));
                            }
                            content.push_str(&delta);
                            let _ =
                                ui_tx.send(UiMessage::TextDelta(delta, target));
                        }
                        DeltaType::InputJsonDelta { partial_json } => {
                            // Tool arguments stream as JSON fragments;
                            // accumulate and parse at content_block_stop.
                            if idx < tool_inputs.len() {
                                tool_inputs[idx].push_str(&partial_json);
                            }
                        }
                        DeltaType::ThinkingDelta { thinking } => {
                            reasoning_chars += thinking.len();
                            if reasoning_enabled && !thinking.is_empty() {
                                let _ =
                                    ui_tx.send(UiMessage::Reasoning(thinking));
                            }
                        }
                        DeltaType::SignatureDelta { .. } => {}
                    }
                } else {
                    // Delta that didn't match any known shape — count it
                    // for the end-of-stream diagnostics.
                    empty_deltas += 1;
                }
            }

            "content_block_stop" => {
                // Finalize tool_use blocks
                let idx = event["index"].as_u64().unwrap_or(0) as usize;
                if idx < block_types.len() && block_types[idx] == "tool_use" {
                    // Re-serialize through Value to normalize the
                    // accumulated argument JSON.
                    let input: serde_json::Value =
                        serde_json::from_str(&tool_inputs[idx]).unwrap_or_default();
                    tool_calls.push(ToolCall {
                        id: tool_ids[idx].clone(),
                        call_type: "function".to_string(),
                        function: FunctionCall {
                            name: tool_names[idx].clone(),
                            arguments: serde_json::to_string(&input)
                                .unwrap_or_default(),
                        },
                    });
                }
            }

            // Carries the stop reason and output-side token usage.
            "message_delta" => {
                if let Ok(ev) =
                    serde_json::from_value::<MessageDeltaEvent>(event.clone())
                {
                    if let Some(reason) = ev.delta.stop_reason {
                        finish_reason = Some(reason);
                    }
                    if let Some(u) = ev.usage {
                        output_tokens = u.output_tokens;
                    }
                }
            }

            "message_stop" | "ping" => {}

            // In-stream API error: log and abort the whole call.
            "error" => {
                let err_msg = event["error"]["message"]
                    .as_str()
                    .unwrap_or("unknown error");
                let _ = ui_tx.send(UiMessage::Debug(format!(
                    "API error in stream: {}",
                    err_msg
                )));
                anyhow::bail!("API error in stream: {}", err_msg);
            }

            _ => {
                if debug {
                    let _ = ui_tx.send(UiMessage::Debug(format!(
                        "unknown SSE event type: {}",
                        event_type
                    )));
                }
            }
        }
    }

    let total_elapsed = reader.stream_start.elapsed();
    // Terminate the streamed text with a newline in the UI.
    if !content.is_empty() {
        let _ = ui_tx.send(UiMessage::TextDelta("\n".to_string(), target));
    }

    // Build Usage from Anthropic's token counts. Anthropic reports cached
    // and uncached input separately; internal Usage wants the total.
    let total_input = input_tokens + cache_creation_tokens + cache_read_tokens;
    let usage = Some(Usage {
        prompt_tokens: total_input,
        completion_tokens: output_tokens,
        total_tokens: total_input + output_tokens,
    });

    // Log cache stats in debug mode
    if debug && (cache_creation_tokens > 0 || cache_read_tokens > 0) {
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "cache: {} write + {} read tokens (input: {} uncached)",
            cache_creation_tokens, cache_read_tokens, input_tokens,
        )));
    }

    super::log_diagnostics(
        ui_tx,
        content.len(),
        tool_calls.len(),
        reasoning_chars,
        reasoning_effort,
        &finish_reason,
        reader.chunks_received,
        reader.sse_lines_parsed,
        reader.sse_parse_errors,
        empty_deltas,
        total_elapsed,
        first_content_at,
        &usage,
        &tool_calls,
    );

    Ok((super::build_response_message(content, tool_calls), usage))
}
|
||||
397
poc-agent/src/api/mod.rs
Normal file
397
poc-agent/src/api/mod.rs
Normal file
|
|
@ -0,0 +1,397 @@
|
|||
// api/ — LLM API client with pluggable backends
|
||||
//
|
||||
// Supports two wire formats:
|
||||
// - OpenAI-compatible (OpenRouter, vLLM, llama.cpp, Qwen)
|
||||
// - Anthropic Messages API (direct API access, prompt caching)
|
||||
//
|
||||
// The backend is auto-detected from the API base URL. Both backends
|
||||
// return the same internal types (Message, Usage) so the rest of
|
||||
// the codebase doesn't need to know which is in use.
|
||||
//
|
||||
// Diagnostics: anomalies always logged to debug panel.
|
||||
// Set POC_DEBUG=1 for verbose per-turn logging.
|
||||
|
||||
mod anthropic;
|
||||
mod openai;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use reqwest::Client;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::types::*;
|
||||
use crate::ui_channel::{StreamTarget, UiMessage, UiSender};
|
||||
|
||||
/// Which wire format the client speaks, auto-detected from the base URL
/// in `ApiClient::new`.
enum Backend {
    /// OpenAI-compatible chat completions endpoint at `base_url`.
    OpenAi {
        base_url: String,
    },
    /// Anthropic Messages API (fixed endpoint, native format).
    Anthropic,
}
|
||||
|
||||
/// LLM API client wrapping a shared reqwest Client, credentials, and the
/// detected backend. Construct with `ApiClient::new`.
pub struct ApiClient {
    client: Client,
    api_key: String,
    pub model: String,
    backend: Backend,
}
|
||||
|
||||
impl ApiClient {
    /// Build a client. The backend is auto-detected from `base_url`:
    /// URLs containing "anthropic.com" use the native Messages API,
    /// everything else the OpenAI-compatible path.
    pub fn new(base_url: &str, api_key: &str, model: &str) -> Self {
        let client = Client::builder()
            .connect_timeout(Duration::from_secs(30))
            .build()
            .expect("failed to build HTTP client");

        // Normalize trailing slashes before detection/storage.
        let base = base_url.trim_end_matches('/').to_string();
        let backend = if base.contains("anthropic.com") {
            Backend::Anthropic
        } else {
            Backend::OpenAi { base_url: base }
        };

        Self {
            client,
            api_key: api_key.to_string(),
            model: model.to_string(),
            backend,
        }
    }

    /// Streaming chat completion. Returns the assembled response message
    /// plus optional usage stats. Text tokens stream through the UI channel.
    ///
    /// Empty response handling is done at the agent level (agent.rs)
    /// where the conversation can be modified between retries.
    pub async fn chat_completion_stream(
        &self,
        messages: &[Message],
        tools: Option<&[ToolDef]>,
        ui_tx: &UiSender,
        target: StreamTarget,
        reasoning_effort: &str,
    ) -> Result<(Message, Option<Usage>)> {
        // Both backends share the same signature and internal return
        // types, so callers never see which wire format is in use.
        match &self.backend {
            Backend::OpenAi { base_url } => {
                openai::stream(
                    &self.client, base_url, &self.api_key, &self.model,
                    messages, tools, ui_tx, target, reasoning_effort,
                ).await
            }
            Backend::Anthropic => {
                anthropic::stream(
                    &self.client, &self.api_key, &self.model,
                    messages, tools, ui_tx, target, reasoning_effort,
                ).await
            }
        }
    }

    /// Return a label for the active backend, used in startup info.
    pub fn backend_label(&self) -> &str {
        match &self.backend {
            Backend::OpenAi { base_url } => {
                if base_url.contains("openrouter") {
                    "openrouter"
                } else {
                    "openai-compat"
                }
            }
            Backend::Anthropic => "anthropic",
        }
    }
}
|
||||
|
||||
/// Send an HTTP request and check for errors. Shared by both backends.
|
||||
pub(crate) async fn send_and_check(
|
||||
client: &Client,
|
||||
url: &str,
|
||||
body: &impl serde::Serialize,
|
||||
auth_header: (&str, &str),
|
||||
extra_headers: &[(&str, &str)],
|
||||
ui_tx: &UiSender,
|
||||
debug_label: &str,
|
||||
) -> Result<reqwest::Response> {
|
||||
let debug = std::env::var("POC_DEBUG").is_ok();
|
||||
let start = Instant::now();
|
||||
|
||||
if debug {
|
||||
let payload_size = serde_json::to_string(body)
|
||||
.map(|s| s.len())
|
||||
.unwrap_or(0);
|
||||
let _ = ui_tx.send(UiMessage::Debug(format!(
|
||||
"request: {}K payload, {}",
|
||||
payload_size / 1024, debug_label,
|
||||
)));
|
||||
}
|
||||
|
||||
let mut req = client
|
||||
.post(url)
|
||||
.header(auth_header.0, auth_header.1)
|
||||
.header("Content-Type", "application/json");
|
||||
|
||||
for (name, value) in extra_headers {
|
||||
req = req.header(*name, *value);
|
||||
}
|
||||
|
||||
let response = req
|
||||
.json(body)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send request to API")?;
|
||||
|
||||
let status = response.status();
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
if debug {
|
||||
// Log interesting response headers
|
||||
let headers = response.headers();
|
||||
for name in [
|
||||
"x-ratelimit-remaining",
|
||||
"x-ratelimit-limit",
|
||||
"x-request-id",
|
||||
] {
|
||||
if let Some(val) = headers.get(name) {
|
||||
let _ = ui_tx.send(UiMessage::Debug(format!(
|
||||
"header {}: {}",
|
||||
name,
|
||||
val.to_str().unwrap_or("?")
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !status.is_success() {
|
||||
let body = response.text().await.unwrap_or_default();
|
||||
let _ = ui_tx.send(UiMessage::Debug(format!(
|
||||
"API error {} after {:.1}s: {}",
|
||||
status,
|
||||
elapsed.as_secs_f64(),
|
||||
&body[..body.len().min(300)]
|
||||
)));
|
||||
anyhow::bail!("API error {}: {}", status, &body[..body.len().min(500)]);
|
||||
}
|
||||
|
||||
if debug {
|
||||
let _ = ui_tx.send(UiMessage::Debug(format!(
|
||||
"connected in {:.1}s (HTTP {})",
|
||||
elapsed.as_secs_f64(),
|
||||
status.as_u16()
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// SSE stream reader. Handles the generic SSE plumbing shared by both
/// backends: chunk reading with timeout, line buffering, `data:` prefix
/// stripping, `[DONE]` detection, JSON parsing, and parse error diagnostics.
/// Yields parsed events as serde_json::Value — each backend handles its
/// own event types.
pub(crate) struct SseReader {
    /// Partial-line carryover between chunks; complete lines are drained
    /// up to each '\n' in `next_event`.
    line_buf: String,
    /// Max silence between chunks before the stream is declared dead.
    chunk_timeout: Duration,
    /// Set at construction; used for elapsed/TTFT measurements.
    pub stream_start: Instant,
    // Diagnostics counters, read by the backends for log_diagnostics.
    pub chunks_received: u64,
    pub sse_lines_parsed: u64,
    pub sse_parse_errors: u64,
    /// Cached POC_DEBUG check (verbose parse-error logging).
    debug: bool,
    ui_tx: UiSender,
    /// Set once `data: [DONE]` is seen; later calls return Ok(None).
    done: bool,
}
|
||||
|
||||
impl SseReader {
|
||||
pub fn new(ui_tx: &UiSender) -> Self {
|
||||
Self {
|
||||
line_buf: String::new(),
|
||||
chunk_timeout: Duration::from_secs(120),
|
||||
stream_start: Instant::now(),
|
||||
chunks_received: 0,
|
||||
sse_lines_parsed: 0,
|
||||
sse_parse_errors: 0,
|
||||
debug: std::env::var("POC_DEBUG").is_ok(),
|
||||
ui_tx: ui_tx.clone(),
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the next SSE event from the response stream.
|
||||
/// Returns Ok(Some(value)) for each parsed data line,
|
||||
/// Ok(None) when the stream ends or [DONE] is received.
|
||||
pub async fn next_event(
|
||||
&mut self,
|
||||
response: &mut reqwest::Response,
|
||||
) -> Result<Option<serde_json::Value>> {
|
||||
loop {
|
||||
// Drain complete lines from the buffer before reading more chunks
|
||||
while let Some(newline_pos) = self.line_buf.find('\n') {
|
||||
let line = self.line_buf[..newline_pos].trim().to_string();
|
||||
self.line_buf = self.line_buf[newline_pos + 1..].to_string();
|
||||
|
||||
if line == "data: [DONE]" {
|
||||
self.done = true;
|
||||
return Ok(None);
|
||||
}
|
||||
if line.is_empty()
|
||||
|| line.starts_with("event: ")
|
||||
|| !line.starts_with("data: ")
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let json_str = &line[6..];
|
||||
self.sse_lines_parsed += 1;
|
||||
|
||||
match serde_json::from_str(json_str) {
|
||||
Ok(v) => return Ok(Some(v)),
|
||||
Err(e) => {
|
||||
self.sse_parse_errors += 1;
|
||||
if self.sse_parse_errors == 1 || self.debug {
|
||||
let preview = if json_str.len() > 200 {
|
||||
format!("{}...", &json_str[..200])
|
||||
} else {
|
||||
json_str.to_string()
|
||||
};
|
||||
let _ = self.ui_tx.send(UiMessage::Debug(format!(
|
||||
"SSE parse error (#{}) {}: {}",
|
||||
self.sse_parse_errors, e, preview
|
||||
)));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.done {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Read more data from the response stream
|
||||
match tokio::time::timeout(self.chunk_timeout, response.chunk()).await {
|
||||
Ok(Ok(Some(chunk))) => {
|
||||
self.chunks_received += 1;
|
||||
self.line_buf.push_str(&String::from_utf8_lossy(&chunk));
|
||||
}
|
||||
Ok(Ok(None)) => return Ok(None),
|
||||
Ok(Err(e)) => return Err(e.into()),
|
||||
Err(_) => {
|
||||
let _ = self.ui_tx.send(UiMessage::Debug(format!(
|
||||
"TIMEOUT: no data for {}s ({} chunks, {:.1}s elapsed)",
|
||||
self.chunk_timeout.as_secs(),
|
||||
self.chunks_received,
|
||||
self.stream_start.elapsed().as_secs_f64()
|
||||
)));
|
||||
anyhow::bail!(
|
||||
"stream timeout: no data for {}s ({} chunks received)",
|
||||
self.chunk_timeout.as_secs(),
|
||||
self.chunks_received
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a response Message from accumulated content and tool calls.
|
||||
/// Shared by both backends — the wire format differs but the internal
|
||||
/// representation is the same.
|
||||
pub(crate) fn build_response_message(
|
||||
content: String,
|
||||
tool_calls: Vec<ToolCall>,
|
||||
) -> Message {
|
||||
Message {
|
||||
role: Role::Assistant,
|
||||
content: if content.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(MessageContent::Text(content))
|
||||
},
|
||||
tool_calls: if tool_calls.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(tool_calls)
|
||||
},
|
||||
tool_call_id: None,
|
||||
name: None,
|
||||
timestamp: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Log stream diagnostics. Shared by both backends.
///
/// Anomalies (leaked reasoning, empty responses, missing finish_reason,
/// SSE parse errors) are always logged; the verbose per-turn summary and
/// per-tool-call lines are logged only when POC_DEBUG is set.
pub(crate) fn log_diagnostics(
    ui_tx: &UiSender,
    content_len: usize,
    tool_count: usize,
    reasoning_chars: usize,
    reasoning_effort: &str,
    finish_reason: &Option<String>,
    chunks_received: u64,
    sse_lines_parsed: u64,
    sse_parse_errors: u64,
    empty_deltas: u64,
    total_elapsed: Duration,
    first_content_at: Option<Duration>,
    usage: &Option<Usage>,
    tools: &[ToolCall],
) {
    let debug = std::env::var("POC_DEBUG").is_ok();

    // Reasoning arrived even though the caller asked for none.
    if reasoning_chars > 0 && reasoning_effort == "none" {
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "note: {} chars leaked reasoning (suppressed from display)",
            reasoning_chars
        )));
    }
    // Stream produced neither text nor tool calls.
    if content_len == 0 && tool_count == 0 {
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "WARNING: empty response (finish: {:?}, chunks: {}, reasoning: {}, \
             parse_errors: {}, empty_deltas: {}, {:.1}s)",
            finish_reason, chunks_received, reasoning_chars,
            sse_parse_errors, empty_deltas, total_elapsed.as_secs_f64()
        )));
    }
    // Stream ended without the provider reporting why.
    if finish_reason.is_none() && chunks_received > 0 {
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "WARNING: stream ended without finish_reason ({} chunks, {} content chars)",
            chunks_received, content_len
        )));
    }
    if sse_parse_errors > 0 {
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "WARNING: {} SSE parse errors out of {} lines",
            sse_parse_errors, sse_lines_parsed
        )));
    }

    if debug {
        if let Some(u) = usage {
            let _ = ui_tx.send(UiMessage::Debug(format!(
                "tokens: {} prompt + {} completion = {} total",
                u.prompt_tokens, u.completion_tokens, u.total_tokens
            )));
        }
        // Time to first text token; "none" if no text ever arrived.
        let ttft = first_content_at
            .map(|d| format!("{:.1}s", d.as_secs_f64()))
            .unwrap_or_else(|| "none".to_string());
        let _ = ui_tx.send(UiMessage::Debug(format!(
            "stream: {:.1}s total, TTFT={}, {} chunks, {} SSE lines, \
             {} content chars, {} reasoning chars, {} tools, \
             finish={:?}",
            total_elapsed.as_secs_f64(),
            ttft,
            chunks_received,
            sse_lines_parsed,
            content_len,
            reasoning_chars,
            tool_count,
            finish_reason,
        )));
        if !tools.is_empty() {
            for (i, tc) in tools.iter().enumerate() {
                let _ = ui_tx.send(UiMessage::Debug(format!(
                    "  tool[{}]: {} (id: {}, {} arg chars)",
                    i, tc.function.name, tc.id, tc.function.arguments.len()
                )));
            }
        }
    }
}
|
||||
// ===== end of api/anthropic.rs — next file: poc-agent/src/api/openai.rs (new file, 201 lines) =====
|
|||
// api/openai.rs — OpenAI-compatible backend
|
||||
//
|
||||
// Works with any provider that implements the OpenAI chat completions
|
||||
// API: OpenRouter, vLLM, llama.cpp, Fireworks, Together, etc.
|
||||
// Also used for local models (Qwen, llama) via compatible servers.
|
||||
|
||||
use anyhow::Result;
|
||||
use reqwest::Client;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::types::*;
|
||||
use crate::ui_channel::{StreamTarget, UiMessage, UiSender};
|
||||
|
||||
pub async fn stream(
|
||||
client: &Client,
|
||||
base_url: &str,
|
||||
api_key: &str,
|
||||
model: &str,
|
||||
messages: &[Message],
|
||||
tools: Option<&[ToolDef]>,
|
||||
ui_tx: &UiSender,
|
||||
target: StreamTarget,
|
||||
reasoning_effort: &str,
|
||||
) -> Result<(Message, Option<Usage>)> {
|
||||
let request = ChatRequest {
|
||||
model: model.to_string(),
|
||||
messages: messages.to_vec(),
|
||||
tool_choice: tools.map(|_| "auto".to_string()),
|
||||
tools: tools.map(|t| t.to_vec()),
|
||||
max_tokens: Some(16384),
|
||||
temperature: Some(0.6),
|
||||
stream: Some(true),
|
||||
reasoning: Some(ReasoningConfig {
|
||||
enabled: reasoning_effort != "none",
|
||||
effort: Some(reasoning_effort.to_string()),
|
||||
}),
|
||||
};
|
||||
|
||||
let url = format!("{}/chat/completions", base_url);
|
||||
let msg_count = request.messages.len();
|
||||
let debug_label = format!("{} messages, model={}", msg_count, model);
|
||||
|
||||
let mut response = super::send_and_check(
|
||||
client,
|
||||
&url,
|
||||
&request,
|
||||
("Authorization", &format!("Bearer {}", api_key)),
|
||||
&[],
|
||||
ui_tx,
|
||||
&debug_label,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut reader = super::SseReader::new(ui_tx);
|
||||
|
||||
let mut content = String::new();
|
||||
let mut tool_calls: Vec<ToolCall> = Vec::new();
|
||||
let mut usage = None;
|
||||
let mut finish_reason = None;
|
||||
let mut reasoning_chars: usize = 0;
|
||||
let mut empty_deltas: u64 = 0;
|
||||
let mut first_content_at: Option<Duration> = None;
|
||||
|
||||
let reasoning_enabled = reasoning_effort != "none";
|
||||
|
||||
while let Some(event) = reader.next_event(&mut response).await? {
|
||||
// OpenRouter sometimes embeds error objects in the stream
|
||||
if let Some(err_msg) = event["error"]["message"].as_str() {
|
||||
let raw = event["error"]["metadata"]["raw"].as_str().unwrap_or("");
|
||||
let _ = ui_tx.send(UiMessage::Debug(format!(
|
||||
"API error in stream: {}",
|
||||
err_msg
|
||||
)));
|
||||
anyhow::bail!("API error in stream: {} {}", err_msg, raw);
|
||||
}
|
||||
|
||||
let chunk: ChatCompletionChunk = match serde_json::from_value(event) {
|
||||
Ok(c) => c,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
if chunk.usage.is_some() {
|
||||
usage = chunk.usage;
|
||||
}
|
||||
|
||||
for choice in &chunk.choices {
|
||||
if choice.finish_reason.is_some() {
|
||||
finish_reason = choice.finish_reason.clone();
|
||||
}
|
||||
|
||||
let has_content = choice.delta.content.is_some();
|
||||
let has_tools = choice.delta.tool_calls.is_some();
|
||||
|
||||
// Reasoning tokens — multiple field names across providers
|
||||
let mut has_reasoning = false;
|
||||
if let Some(ref r) = choice.delta.reasoning_content {
|
||||
reasoning_chars += r.len();
|
||||
has_reasoning = true;
|
||||
if reasoning_enabled && !r.is_empty() {
|
||||
let _ = ui_tx.send(UiMessage::Reasoning(r.clone()));
|
||||
}
|
||||
}
|
||||
if let Some(ref r) = choice.delta.reasoning {
|
||||
reasoning_chars += r.len();
|
||||
has_reasoning = true;
|
||||
if reasoning_enabled && !r.is_empty() {
|
||||
let _ = ui_tx.send(UiMessage::Reasoning(r.clone()));
|
||||
}
|
||||
}
|
||||
if let Some(ref r) = choice.delta.reasoning_details {
|
||||
let s = r.to_string();
|
||||
reasoning_chars += s.len();
|
||||
has_reasoning = true;
|
||||
if reasoning_enabled && !s.is_empty() && s != "null" {
|
||||
let _ = ui_tx.send(UiMessage::Reasoning(s));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref text_delta) = choice.delta.content {
|
||||
if first_content_at.is_none() && !text_delta.is_empty() {
|
||||
first_content_at = Some(reader.stream_start.elapsed());
|
||||
let _ = ui_tx.send(UiMessage::Activity("streaming...".into()));
|
||||
}
|
||||
content.push_str(text_delta);
|
||||
let _ = ui_tx.send(UiMessage::TextDelta(text_delta.clone(), target));
|
||||
}
|
||||
|
||||
if let Some(ref tc_deltas) = choice.delta.tool_calls {
|
||||
for tc_delta in tc_deltas {
|
||||
let idx = tc_delta.index;
|
||||
while tool_calls.len() <= idx {
|
||||
tool_calls.push(ToolCall {
|
||||
id: String::new(),
|
||||
call_type: "function".to_string(),
|
||||
function: FunctionCall {
|
||||
name: String::new(),
|
||||
arguments: String::new(),
|
||||
},
|
||||
});
|
||||
}
|
||||
if let Some(ref id) = tc_delta.id {
|
||||
tool_calls[idx].id = id.clone();
|
||||
}
|
||||
if let Some(ref ct) = tc_delta.call_type {
|
||||
tool_calls[idx].call_type = ct.clone();
|
||||
}
|
||||
if let Some(ref func) = tc_delta.function {
|
||||
if let Some(ref name) = func.name {
|
||||
tool_calls[idx].function.name = name.clone();
|
||||
}
|
||||
if let Some(ref args) = func.arguments {
|
||||
tool_calls[idx].function.arguments.push_str(args);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !has_reasoning && !has_content && !has_tools && choice.finish_reason.is_none() {
|
||||
empty_deltas += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let total_elapsed = reader.stream_start.elapsed();
|
||||
|
||||
super::log_diagnostics(
|
||||
ui_tx,
|
||||
content.len(),
|
||||
tool_calls.len(),
|
||||
reasoning_chars,
|
||||
reasoning_effort,
|
||||
&finish_reason,
|
||||
reader.chunks_received,
|
||||
reader.sse_lines_parsed,
|
||||
reader.sse_parse_errors,
|
||||
empty_deltas,
|
||||
total_elapsed,
|
||||
first_content_at,
|
||||
&usage,
|
||||
&tool_calls,
|
||||
);
|
||||
|
||||
// Model/provider error delivered inside the stream (HTTP 200 but
|
||||
// finish_reason="error"). Surface whatever content came back as
|
||||
// the error message so the caller can retry or display it.
|
||||
// Don't append the trailing newline — this isn't real content.
|
||||
if finish_reason.as_deref() == Some("error") {
|
||||
let detail = if content.is_empty() {
|
||||
"no details".to_string()
|
||||
} else {
|
||||
content
|
||||
};
|
||||
anyhow::bail!("model stream error: {}", detail);
|
||||
}
|
||||
|
||||
if !content.is_empty() {
|
||||
let _ = ui_tx.send(UiMessage::TextDelta("\n".to_string(), target));
|
||||
}
|
||||
|
||||
Ok((super::build_response_message(content, tool_calls), usage))
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue