// api/ — LLM API client (OpenAI-compatible)
//
// Works with any provider that implements the OpenAI chat completions
// API: OpenRouter, vLLM, llama.cpp, Fireworks, Together, etc.
//
// Diagnostics: anomalies are always logged to the debug panel.
// Set POC_DEBUG=1 for verbose per-turn logging.

pub mod http;
pub(crate) mod parsing;
mod types;
mod openai;

// Public API types — used outside agent::api
pub use types::{
    Message, MessageContent, ContentPart, ImageUrl, Role, ToolCall, FunctionCall, Usage,
};

use anyhow::Result;
use std::time::{Duration, Instant};

use self::http::{HttpClient, HttpResponse};
use tokio::sync::mpsc;

use crate::agent::tools::{self as agent_tools, summarize_args, ActiveToolCall};

/// A JoinHandle that aborts its task when dropped.
pub(crate) struct AbortOnDrop(tokio::task::JoinHandle<()>);

impl Drop for AbortOnDrop {
    fn drop(&mut self) {
        self.0.abort();
    }
}

/// Sampling parameters for model generation.
#[derive(Clone, Copy)]
pub(crate) struct SamplingParams {
    pub temperature: f32,
    pub top_p: f32,
    pub top_k: u32,
}

/// Build the tools JSON string from a slice of Tools.
fn tools_to_json_str(tools: &[agent_tools::Tool]) -> String {
    let inner: Vec<String> = tools.iter().map(|t| t.to_json()).collect();
    format!("[{}]", inner.join(","))
}

// ─────────────────────────────────────────────────────────────
// Stream events — yielded by backends, consumed by the runner
// ─────────────────────────────────────────────────────────────

/// One token from the streaming completions API.
pub(crate) enum StreamToken {
    Token { text: String, id: u32 },
    Done { usage: Option<Usage> },
    Error(String),
}

#[derive(Clone)]
pub struct ApiClient {
    client: HttpClient,
    api_key: String,
    pub model: String,
    base_url: String,
}

impl ApiClient {
    pub fn new(base_url: &str, api_key: &str, model: &str) -> Self {
        let client = HttpClient::builder()
            .connect_timeout(Duration::from_secs(30))
            .timeout(Duration::from_secs(600))
            .build();
        Self {
            client,
            api_key: api_key.to_string(),
            model: model.to_string(),
            base_url: base_url.trim_end_matches('/').to_string(),
        }
    }

    /// Stream a completion with raw token IDs.
    /// Returns (text, token_id) per token via channel.
    pub(crate) fn stream_completion(
        &self,
        prompt_tokens: &[u32],
        sampling: SamplingParams,
        priority: Option<i32>, // backend scheduling priority (integer type assumed)
    ) -> (mpsc::UnboundedReceiver<StreamToken>, AbortOnDrop) {
        let (tx, rx) = mpsc::unbounded_channel();
        let client = self.client.clone();
        let api_key = self.api_key.clone();
        let model = self.model.clone();
        let prompt_tokens = prompt_tokens.to_vec();
        let base_url = self.base_url.clone();
        let handle = tokio::spawn(async move {
            let result = openai::stream_completions(
                &client,
                &base_url,
                &api_key,
                &model,
                &prompt_tokens,
                &tx,
                sampling,
                priority,
            )
            .await;
            if let Err(e) = result {
                let _ = tx.send(StreamToken::Error(e.to_string()));
            }
        });
        (rx, AbortOnDrop(handle))
    }

    pub fn base_url(&self) -> &str {
        &self.base_url
    }

    pub fn api_key(&self) -> &str {
        &self.api_key
    }
}
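// Illustrative sketch (not called anywhere in this module): how a caller
// drives a streaming completion end to end. The endpoint, key, model, and
// sampling values below are placeholders, not real configuration.
#[allow(dead_code)]
async fn example_stream_usage(prompt_tokens: &[u32]) -> Result<String> {
    let api = ApiClient::new("http://localhost:8000/v1", "sk-placeholder", "example-model");
    let sampling = SamplingParams { temperature: 0.7, top_p: 0.95, top_k: 40 };
    // Keep the AbortOnDrop guard alive for the whole stream: dropping it
    // aborts the background task that feeds the channel.
    let (mut rx, _guard) = api.stream_completion(prompt_tokens, sampling, None);
    let mut out = String::new();
    while let Some(token) = rx.recv().await {
        match token {
            StreamToken::Token { text, .. } => out.push_str(&text),
            StreamToken::Done { .. } => break,
            StreamToken::Error(e) => anyhow::bail!("stream failed: {}", e),
        }
    }
    Ok(out)
}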
/// Send an HTTP request and check for errors. Shared by both backends.
pub(crate) async fn send_and_check(
    client: &HttpClient,
    url: &str,
    body: &impl serde::Serialize,
    auth_header: (&str, &str),
    extra_headers: &[(&str, &str)],
    debug_label: &str,
    request_json: Option<&str>,
) -> Result<HttpResponse> {
    let debug = std::env::var("POC_DEBUG").is_ok();
    let start = Instant::now();
    if debug {
        let payload_size = serde_json::to_string(body).map(|s| s.len()).unwrap_or(0);
        dbglog!("request: {}K payload, {}", payload_size / 1024, debug_label);
    }
    let mut headers: Vec<(&str, &str)> = Vec::with_capacity(extra_headers.len() + 1);
    headers.push(auth_header);
    headers.extend_from_slice(extra_headers);
    let response = client
        .send_json("POST", url, &headers, body)
        .await
        .map_err(|e| {
            let msg = e.to_string();
            let cause = if msg.contains("connect timeout") || msg.contains("TCP connect") {
                "connection refused"
            } else if msg.contains("request timeout") {
                "request timed out"
            } else {
                "request error"
            };
            anyhow::anyhow!("{} ({}): {}", cause, url, msg)
        })?;
    let status = response.status();
    let elapsed = start.elapsed();
    if debug {
        for name in ["x-ratelimit-remaining", "x-ratelimit-limit", "x-request-id"] {
            if let Some(val) = response.header(name) {
                dbglog!("header {}: {}", name, val);
            }
        }
    }
    if !status.is_success() {
        let body = response.text().await.unwrap_or_default();
        dbglog!(
            "HTTP {} after {:.1}s ({}): {}",
            status,
            elapsed.as_secs_f64(),
            url,
            &body[..body.len().min(500)]
        );
        if let Some(json) = request_json {
            let log_dir = dirs::home_dir()
                .unwrap_or_default()
                .join(".consciousness/logs/failed-requests");
            let _ = std::fs::create_dir_all(&log_dir);
            let ts = chrono::Local::now().format("%Y%m%dT%H%M%S");
            let path = log_dir.join(format!("{}.json", ts));
            if std::fs::write(&path, json).is_ok() {
                dbglog!(
                    "saved failed request to {} (HTTP {})",
                    path.display(),
                    status
                );
            }
        }
        anyhow::bail!("HTTP {} ({}): {}", status, url, &body[..body.len().min(1000)]);
    }
    if debug {
        dbglog!(
            "connected in {:.1}s (HTTP {})",
            elapsed.as_secs_f64(),
            status.as_u16()
        );
    }
    Ok(response)
}
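// For reference, the wire format SseReader (below) consumes is the standard
// OpenAI-style event stream: one `data:` line per JSON chunk, terminated by
// a `data: [DONE]` sentinel. A typical (abridged, hypothetical) exchange:
//
//   data: {"choices":[{"delta":{"content":"Hel"}}]}
//   data: {"choices":[{"delta":{"content":"lo"}}]}
//   data: [DONE]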
/// SSE stream reader. Handles the generic SSE plumbing shared by both
/// backends: chunk reading with timeout, line buffering, `data:` prefix
/// stripping, `[DONE]` detection, JSON parsing, and parse error diagnostics.
/// Yields parsed events as serde_json::Value — each backend handles its
/// own event types.
pub(crate) struct SseReader {
    line_buf: String,
    chunk_timeout: Duration,
    pub stream_start: Instant,
    pub chunks_received: u64,
    pub sse_lines_parsed: u64,
    pub sse_parse_errors: u64,
    debug: bool,
    done: bool,
    /// Serialized request payload — saved to disk on errors for replay debugging.
    pub(crate) request_json: Option<String>,
}

impl SseReader {
    pub(crate) fn new() -> Self {
        Self {
            line_buf: String::new(),
            chunk_timeout: Duration::from_secs(crate::config::get().api_stream_timeout_secs),
            stream_start: Instant::now(),
            chunks_received: 0,
            sse_lines_parsed: 0,
            sse_parse_errors: 0,
            debug: std::env::var("POC_DEBUG").is_ok(),
            done: false,
            request_json: None,
        }
    }

    /// Attach the serialized request payload for error diagnostics.
    pub(crate) fn set_request_json(&mut self, json: String) {
        self.request_json = Some(json);
    }

    /// Save the request payload to disk for replay debugging.
    fn save_failed_request(&self, reason: &str) {
        let Some(ref json) = self.request_json else { return };
        let log_dir = dirs::home_dir()
            .unwrap_or_default()
            .join(".consciousness/logs/failed-requests");
        let _ = std::fs::create_dir_all(&log_dir);
        let ts = chrono::Local::now().format("%Y%m%dT%H%M%S");
        let path = log_dir.join(format!("{}.json", ts));
        if std::fs::write(&path, json).is_ok() {
            dbglog!("saved failed request to {} ({})", path.display(), reason);
        }
    }

    /// Read the next SSE event from the response stream.
    /// Returns Ok(Some(value)) for each parsed data line,
    /// Ok(None) when the stream ends or [DONE] is received.
    pub(crate) async fn next_event(
        &mut self,
        response: &mut HttpResponse,
    ) -> Result<Option<serde_json::Value>> {
        loop {
            // Drain complete lines from the buffer before reading more chunks
            while let Some(newline_pos) = self.line_buf.find('\n') {
                let line = self.line_buf[..newline_pos].trim().to_string();
                self.line_buf = self.line_buf[newline_pos + 1..].to_string();
                if line == "data: [DONE]" {
                    self.done = true;
                    return Ok(None);
                }
                if line.is_empty() || line.starts_with("event: ") || !line.starts_with("data: ") {
                    continue;
                }
                let json_str = &line[6..];
                self.sse_lines_parsed += 1;
                match serde_json::from_str(json_str) {
                    Ok(v) => return Ok(Some(v)),
                    Err(e) => {
                        self.sse_parse_errors += 1;
                        if self.sse_parse_errors == 1 || self.debug {
                            let preview = if json_str.len() > 200 {
                                format!("{}...", &json_str[..200])
                            } else {
                                json_str.to_string()
                            };
                            dbglog!(
                                "SSE parse error (#{}) {}: {}",
                                self.sse_parse_errors,
                                e,
                                preview
                            );
                        }
                        continue;
                    }
                }
            }
            if self.done {
                return Ok(None);
            }
            // Read more data from the response stream
            match tokio::time::timeout(self.chunk_timeout, response.chunk()).await {
                Ok(Ok(Some(chunk))) => {
                    self.chunks_received += 1;
                    self.line_buf.push_str(&String::from_utf8_lossy(&chunk));
                }
                Ok(Ok(None)) => return Ok(None),
                Ok(Err(e)) => {
                    let buf_preview = if self.line_buf.is_empty() {
                        "(empty)".to_string()
                    } else {
                        let n = self.line_buf.len().min(500);
                        format!("{}B: {}", self.line_buf.len(), &self.line_buf[..n])
                    };
                    let msg = format!(
                        "stream error after {} chunks, {:.1}s, {} sse lines: {} | buf: {}",
                        self.chunks_received,
                        self.stream_start.elapsed().as_secs_f64(),
                        self.sse_lines_parsed,
                        e,
                        buf_preview,
                    );
                    dbglog!("{}", msg);
                    self.save_failed_request(&msg);
                    return Err(e.into());
                }
                Err(_) => {
                    let buf_preview = if self.line_buf.is_empty() {
                        "(empty)".to_string()
                    } else {
                        let n = self.line_buf.len().min(500);
                        format!("{}B: {}", self.line_buf.len(), &self.line_buf[..n])
                    };
                    let msg = format!(
                        "stream timeout: {}s, {} chunks, {} sse lines, {:.1}s elapsed | buf: {}",
                        self.chunk_timeout.as_secs(),
                        self.chunks_received,
                        self.sse_lines_parsed,
                        self.stream_start.elapsed().as_secs_f64(),
                        buf_preview,
                    );
                    dbglog!("{}", msg);
                    self.save_failed_request(&msg);
                    anyhow::bail!(
                        "stream timeout: no data for {}s ({} chunks received)",
                        self.chunk_timeout.as_secs(),
                        self.chunks_received
                    );
                }
            }
        }
    }
}
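// Illustrative sketch (not wired in): how a backend combines send_and_check
// with SseReader to drive a stream. The header names and the JSON body here
// are placeholders for what the real backends construct.
#[allow(dead_code)]
async fn example_read_stream(
    client: &HttpClient,
    url: &str,
    api_key: &str,
    body: &serde_json::Value,
) -> Result<u64> {
    let auth = format!("Bearer {}", api_key);
    let mut response = send_and_check(
        client,
        url,
        body,
        ("Authorization", auth.as_str()),
        &[("Accept", "text/event-stream")],
        "example stream",
        None,
    )
    .await?;
    let mut reader = SseReader::new();
    // Each event is one parsed `data:` JSON payload; None means [DONE] or EOF.
    while let Some(event) = reader.next_event(&mut response).await? {
        let _ = event; // a real backend extracts deltas, tool calls, usage here
    }
    Ok(reader.sse_lines_parsed)
}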
/// Build a response Message from accumulated content and tool calls.
/// Shared by both backends — the wire format differs but the internal
/// representation is the same.
///
/// If no structured tool calls came from the API but the content
/// contains leaked tool call XML (e.g. `<tool_call>...</tool_call>`
/// from models that emit tool calls as text), parse them out and
/// promote them to structured tool_calls. This way all consumers
/// see tool calls uniformly regardless of backend.
pub(crate) fn build_response_message(
    content: String,
    tool_calls: Vec<ToolCall>,
) -> Message {
    // If the API returned structured tool calls, use them as-is.
    if !tool_calls.is_empty() {
        return Message {
            role: Role::Assistant,
            content: if content.is_empty() {
                None
            } else {
                Some(MessageContent::Text(content))
            },
            tool_calls: Some(tool_calls),
            tool_call_id: None,
            name: None,
            timestamp: None,
        };
    }
    // Check for leaked tool calls in content text.
    let leaked = parsing::parse_leaked_tool_calls(&content);
    if !leaked.is_empty() {
        let cleaned = parsing::strip_leaked_artifacts(&content);
        return Message {
            role: Role::Assistant,
            content: if cleaned.trim().is_empty() {
                None
            } else {
                Some(MessageContent::Text(cleaned))
            },
            tool_calls: Some(leaked),
            tool_call_id: None,
            name: None,
            timestamp: None,
        };
    }
    Message {
        role: Role::Assistant,
        content: if content.is_empty() {
            None
        } else {
            Some(MessageContent::Text(content))
        },
        tool_calls: None,
        tool_call_id: None,
        name: None,
        timestamp: None,
    }
}

/// Log stream diagnostics. Shared by both backends.
pub(crate) fn log_diagnostics(
    content_len: usize,
    tool_count: usize,
    reasoning_chars: usize,
    reasoning_effort: &str,
    finish_reason: &Option<String>,
    chunks_received: u64,
    sse_lines_parsed: u64,
    sse_parse_errors: u64,
    empty_deltas: u64,
    total_elapsed: Duration,
    first_content_at: Option<Duration>,
    usage: &Option<Usage>,
    tools: &[ToolCall],
) {
    let debug = std::env::var("POC_DEBUG").is_ok();
    if reasoning_chars > 0 && reasoning_effort == "none" {
        dbglog!(
            "note: {} chars leaked reasoning (suppressed from display)",
            reasoning_chars
        );
    }
    if content_len == 0 && tool_count == 0 {
        dbglog!(
            "WARNING: empty response (finish: {:?}, chunks: {}, reasoning: {}, \
             parse_errors: {}, empty_deltas: {}, {:.1}s)",
            finish_reason,
            chunks_received,
            reasoning_chars,
            sse_parse_errors,
            empty_deltas,
            total_elapsed.as_secs_f64()
        );
    }
    if finish_reason.is_none() && chunks_received > 0 {
        dbglog!(
            "WARNING: stream ended without finish_reason ({} chunks, {} content chars)",
            chunks_received,
            content_len
        );
    }
    if sse_parse_errors > 0 {
        dbglog!(
            "WARNING: {} SSE parse errors out of {} lines",
            sse_parse_errors,
            sse_lines_parsed
        );
    }
    if debug {
        if let Some(u) = usage {
            dbglog!(
                "tokens: {} prompt + {} completion = {} total",
                u.prompt_tokens,
                u.completion_tokens,
                u.total_tokens
            );
        }
        let ttft = first_content_at
            .map(|d| format!("{:.1}s", d.as_secs_f64()))
            .unwrap_or_else(|| "none".to_string());
        dbglog!(
            "stream: {:.1}s total, TTFT={}, {} chunks, {} SSE lines, \
             {} content chars, {} reasoning chars, {} tools, finish={:?}",
            total_elapsed.as_secs_f64(),
            ttft,
            chunks_received,
            sse_lines_parsed,
            content_len,
            reasoning_chars,
            tool_count,
            finish_reason,
        );
        if !tools.is_empty() {
            for (i, tc) in tools.iter().enumerate() {
                dbglog!(
                    "  tool[{}]: {} (id: {}, {} arg chars)",
                    i,
                    tc.function.name,
                    tc.id,
                    tc.function.arguments.len()
                );
            }
        }
    }
}
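// Illustrative sketch: the end-of-stream bookkeeping a backend performs once
// the SSE loop finishes. All values passed here are placeholders for what a
// backend accumulated while draining deltas; `example_finalize` itself is a
// hypothetical helper, not part of the module's API.
#[allow(dead_code)]
fn example_finalize(
    content: String,
    tool_calls: Vec<ToolCall>,
    finish_reason: Option<String>,
    usage: Option<Usage>,
    reader: &SseReader,
) -> Message {
    log_diagnostics(
        content.len(),
        tool_calls.len(),
        0,      // reasoning_chars: none observed in this sketch
        "none", // reasoning_effort
        &finish_reason,
        reader.chunks_received,
        reader.sse_lines_parsed,
        reader.sse_parse_errors,
        0, // empty_deltas
        reader.stream_start.elapsed(),
        None, // first_content_at (no TTFT tracked in this sketch)
        &usage,
        &tool_calls,
    );
    // Structured tool calls win; otherwise any leaked tool-call XML in the
    // text is promoted so every consumer sees a uniform Message shape.
    build_response_message(content, tool_calls)
}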