diff --git a/src/agent/context.rs b/src/agent/context.rs index eb5692b..08eeda7 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -87,7 +87,7 @@ pub fn trim_entries( + count_msg(&Message::user(render_journal(&context.journal))); let msg_costs: Vec<usize> = deduped.iter() - .map(|e| count_msg(e.api_message())).collect(); + .map(|e| if e.is_log() { 0 } else { count_msg(e.api_message()) }).collect(); let entry_total: usize = msg_costs.iter().sum(); let total: usize = fixed_cost + entry_total; @@ -199,6 +199,9 @@ pub enum ConversationEntry { Memory { key: String, message: Message }, /// DMN heartbeat/autonomous prompt — evicted aggressively during compaction. Dmn(Message), + /// Debug/status log line — written to conversation log for tracing, + /// skipped on read-back. + Log(String), } // Custom serde: serialize Memory with a "memory_key" field added to the message, @@ -219,6 +222,12 @@ impl Serialize for ConversationEntry { map.serialize_entry("memory_key", key)?; map.end() } + Self::Log(text) => { + use serde::ser::SerializeMap; + let mut map = s.serialize_map(Some(1))?; + map.serialize_entry("log", text)?; + map.end() + } } } } @@ -226,6 +235,11 @@ impl Serialize for ConversationEntry { impl<'de> Deserialize<'de> for ConversationEntry { fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { let mut json: serde_json::Value = serde_json::Value::deserialize(d)?; + // Log entries — skip on read-back + if json.get("log").is_some() { + let text = json["log"].as_str().unwrap_or("").to_string(); + return Ok(Self::Log(text)); + } if let Some(key) = json.as_object_mut().and_then(|o| o.remove("memory_key")) { let key = key.as_str().unwrap_or("").to_string(); let message: Message = serde_json::from_value(json).map_err(serde::de::Error::custom)?; @@ -239,10 +253,12 @@ impl<'de> Deserialize<'de> for ConversationEntry { impl ConversationEntry { /// Get the API message for sending to the model. + /// Panics on Log entries (which should be filtered before API calls). 
pub fn api_message(&self) -> &Message { match self { Self::Message(m) | Self::Dmn(m) => m, Self::Memory { message, .. } => message, + Self::Log(_) => panic!("Log entries have no API message"), } } @@ -254,19 +270,27 @@ impl ConversationEntry { matches!(self, Self::Dmn(_)) } + pub fn is_log(&self) -> bool { + matches!(self, Self::Log(_)) + } + /// Get a reference to the inner message. + /// Panics on Log entries. pub fn message(&self) -> &Message { match self { Self::Message(m) | Self::Dmn(m) => m, Self::Memory { message, .. } => message, + Self::Log(_) => panic!("Log entries have no message"), } } /// Get a mutable reference to the inner message. + /// Panics on Log entries. pub fn message_mut(&mut self) -> &mut Message { match self { Self::Message(m) | Self::Dmn(m) => m, Self::Memory { message, .. } => message, + Self::Log(_) => panic!("Log entries have no message"), } } } diff --git a/src/agent/mod.rs b/src/agent/mod.rs index a0f605e..feff433 100644 --- a/src/agent/mod.rs +++ b/src/agent/mod.rs @@ -290,7 +290,9 @@ impl Agent { if !jnl.is_empty() { msgs.push(Message::user(jnl)); } - msgs.extend(self.context.entries.iter().map(|e| e.api_message().clone())); + msgs.extend(self.context.entries.iter() + .filter(|e| !e.is_log()) + .map(|e| e.api_message().clone())); msgs } @@ -743,7 +745,7 @@ impl Agent { // Conversation — non-memory entries only (memories counted above) let conv_children: Vec = self.context.entries.iter().enumerate() - .filter(|(_, e)| !e.is_memory()) + .filter(|(_, e)| !e.is_memory() && !e.is_log()) .map(|(i, entry)| { let m = entry.message(); let text = m.content.as_ref() @@ -1032,7 +1034,7 @@ impl Agent { // Load extra — compact() will dedup, trim, reload identity + journal let all: Vec<_> = entries.into_iter() - .filter(|e| e.message().role != Role::System) + .filter(|e| !e.is_log() && e.message().role != Role::System) .collect(); let mem_count = all.iter().filter(|e| e.is_memory()).count(); let conv_count = all.len() - mem_count; diff --git 
a/src/agent/oneshot.rs b/src/agent/oneshot.rs index 36788e5..be34e5c 100644 --- a/src/agent/oneshot.rs +++ b/src/agent/oneshot.rs @@ -111,6 +111,18 @@ impl Backend { } } } + + fn log(&self, text: String) { + match self { + Backend::Forked(agent) => { + if let Some(ref log) = agent.conversation_log { + let entry = super::context::ConversationEntry::Log(text); + log.append(&entry).ok(); + } + } + _ => {} + } + } } impl AutoAgent { @@ -177,7 +189,6 @@ impl AutoAgent { pub async fn run( &mut self, bail_fn: Option<&(dyn Fn(usize) -> Result<(), String> + Sync)>, - log: &dyn Fn(&str), ) -> Result { // Inject first step prompt if self.next_step < self.steps.len() { @@ -192,15 +203,13 @@ impl AutoAgent { for _ in 0..max_turns { self.turn += 1; let messages = self.backend.messages(); - log(&format!("\n=== TURN {} ({} messages) ===\n", + self.backend.log(format!("turn {} ({} messages)", self.turn, messages.len())); - // API call with retries - let (msg, usage_opt) = self.api_call_with_retry( - &messages, &reasoning, log).await?; + let (msg, usage_opt) = self.api_call_with_retry(&messages, &reasoning).await?; if let Some(u) = &usage_opt { - log(&format!("tokens: {} prompt + {} completion", + self.backend.log(format!("tokens: {} prompt + {} completion", u.prompt_tokens, u.completion_tokens)); } @@ -208,14 +217,14 @@ impl AutoAgent { let has_tools = msg.tool_calls.as_ref().is_some_and(|tc| !tc.is_empty()); if has_tools { - self.dispatch_tools(&msg, log).await; + self.dispatch_tools(&msg).await; continue; } // Text-only response — step complete let text = msg.content_text().to_string(); if text.is_empty() && !has_content { - log("empty response, retrying"); + self.backend.log("empty response, retrying".into()); self.backend.push_message(Message::user( "[system] Your previous response was empty. \ Please respond with text or use a tool." 
@@ -223,7 +232,8 @@ impl AutoAgent { continue; } - log(&format!("\n=== RESPONSE ===\n\n{}", text)); + self.backend.log(format!("response: {}", + &text[..text.len().min(200)])); // More steps? Check bail, inject next prompt. if self.next_step < self.steps.len() { @@ -235,7 +245,7 @@ impl AutoAgent { self.backend.push_message( Message::user(&self.steps[self.next_step].prompt)); self.next_step += 1; - log(&format!("\n=== STEP {}/{} ===\n", + self.backend.log(format!("step {}/{}", self.next_step, self.steps.len())); continue; } @@ -250,7 +260,6 @@ impl AutoAgent { &self, messages: &[Message], reasoning: &str, - log: &dyn Fn(&str), ) -> Result<(Message, Option<Usage>), String> { let client = self.backend.client(); let tools = self.backend.tools(); @@ -265,7 +274,8 @@ ).await { Ok((msg, usage)) => { if let Some(ref e) = last_err { - log(&format!("succeeded after retry (previous error: {})", e)); + self.backend.log(format!( + "succeeded after retry (previous: {})", e)); } return Ok((msg, usage)); } @@ -277,7 +287,8 @@ || err_str.contains("timed out") || err_str.contains("Connection refused"); if is_transient && attempt < 4 { - log(&format!("transient error (attempt {}): {}, retrying...", + self.backend.log(format!( + "transient error (attempt {}): {}, retrying", attempt + 1, err_str)); tokio::time::sleep(std::time::Duration::from_secs(2 << attempt)).await; last_err = Some(e); @@ -296,13 +307,13 @@ unreachable!() } - async fn dispatch_tools(&mut self, msg: &Message, log: &dyn Fn(&str)) { - // Push sanitized assistant message with tool calls + async fn dispatch_tools(&mut self, msg: &Message) { let mut sanitized = msg.clone(); if let Some(ref mut calls) = sanitized.tool_calls { for call in calls { if serde_json::from_str::<serde_json::Value>(&call.function.arguments).is_err() { - log(&format!("sanitizing malformed args for {}: {}", + self.backend.log(format!( + "sanitizing malformed args for {}: {}", call.function.name, &call.function.arguments)); 
call.function.arguments = "{}".to_string(); } @@ -311,13 +322,12 @@ impl AutoAgent { self.backend.push_raw(sanitized); for call in msg.tool_calls.as_ref().unwrap() { - log(&format!("\nTOOL CALL: {}({})", + self.backend.log(format!("tool: {}({})", call.function.name, &call.function.arguments)); let args: serde_json::Value = match serde_json::from_str(&call.function.arguments) { Ok(v) => v, Err(_) => { - log(&format!("malformed tool call args: {}", &call.function.arguments)); self.backend.push_raw(Message::tool_result( &call.id, "Error: your tool call had malformed JSON arguments. \ @@ -328,14 +338,7 @@ impl AutoAgent { }; let output = agent_tools::dispatch(&call.function.name, &args).await; - - if std::env::var("POC_AGENT_VERBOSE").is_ok() { - log(&format!("TOOL RESULT ({} chars):\n{}", output.len(), output)); - } else { - let preview: String = output.lines().next() - .unwrap_or("").chars().take(100).collect(); - log(&format!("Result: {}", preview)); - } + self.backend.log(format!("result: {} chars", output.len())); self.backend.push_raw(Message::tool_result(&call.id, &output)); } @@ -361,7 +364,6 @@ pub fn run_one_agent( agent_name: &str, count: usize, keys: Option<&[String]>, - log: &(dyn Fn(&str) + Sync), ) -> Result { let def = defs::get_def(agent_name) .ok_or_else(|| format!("no .agent file for {}", agent_name))?; @@ -376,7 +378,7 @@ pub fn run_one_agent( // Build prompt batch — either from explicit keys or the agent's query let agent_batch = if let Some(keys) = keys { - log(&format!("targeting: {}", keys.join(", "))); + dbglog!("[{}] targeting: {}", agent_name, keys.join(", ")); let graph = store.build_graph(); let mut resolved_steps = Vec::new(); let mut all_keys: Vec = keys.to_vec(); @@ -396,7 +398,6 @@ pub fn run_one_agent( } batch } else { - log("building prompt"); let effective_count = def.count.unwrap_or(count); defs::run_agent(store, &def, effective_count, &Default::default())? 
};  @@ -410,13 +411,8 @@ pub fn run_one_agent( .filter(|t| def.tools.iter().any(|w| w == &t.name)) .collect() }; - let tools_desc = effective_tools.iter().map(|t| t.name).collect::<Vec<_>>().join(", ");  let n_steps = agent_batch.steps.len();  - for key in &agent_batch.node_keys { - log(&format!(" node: {}", key)); - } -  // Guard: reject oversized first prompt let max_prompt_bytes = 800_000; let first_len = agent_batch.steps[0].prompt.len(); @@ -429,7 +425,6 @@ pub fn run_one_agent( let header = format!("=== OVERSIZED PROMPT ===\nagent: {}\nsize: {}KB (max {}KB)\nnodes: {:?}\n\n", agent_name, prompt_kb, max_prompt_bytes / 1024, agent_batch.node_keys); fs::write(&oversize_path, format!("{}{}", header, &agent_batch.steps[0].prompt)).ok(); - log(&format!("oversized prompt logged to {}", oversize_path.display())); return Err(format!( "prompt too large: {}KB (max {}KB) — seed nodes may be oversized", prompt_kb, max_prompt_bytes / 1024, @@ -437,22 +432,14 @@ }  let phases: Vec<&str> = agent_batch.steps.iter().map(|s| s.phase.as_str()).collect(); - log(&format!("{} step(s) {:?}, {}KB initial, {}, {} nodes, output={}", - n_steps, phases, first_len / 1024, tools_desc, - agent_batch.node_keys.len(), state_dir.display())); + dbglog!("[{}] {} step(s) {:?}, {}KB initial, {} nodes", + agent_name, n_steps, phases, first_len / 1024, agent_batch.node_keys.len());  let prompts: Vec<String> = agent_batch.steps.iter() .map(|s| s.prompt.clone()).collect(); let step_phases: Vec<String> = agent_batch.steps.iter() .map(|s| s.phase.clone()).collect(); - if std::env::var("POC_AGENT_VERBOSE").is_ok() { - for (i, s) in agent_batch.steps.iter().enumerate() { - log(&format!("=== PROMPT {}/{} ({}) ===\n\n{}", i + 1, n_steps, s.phase, s.prompt)); - } - } - log("\n=== CALLING LLM ==="); - // Bail check: if the agent defines a bail script, run it between steps. 
let bail_script = def.bail.as_ref().map(|name| defs::agents_dir().join(name)); let state_dir_for_bail = state_dir.clone(); @@ -473,7 +460,7 @@ pub fn run_one_agent( let output = call_api_with_tools_sync( agent_name, &prompts, &step_phases, def.temperature, def.priority, - &effective_tools, Some(&bail_fn), log)?; + &effective_tools, Some(&bail_fn))?; Ok(AgentResult { output, @@ -496,7 +483,6 @@ pub async fn call_api_with_tools( priority: i32, tools: &[agent_tools::Tool], bail_fn: Option<&(dyn Fn(usize) -> Result<(), String> + Sync)>, - log: &dyn Fn(&str), ) -> Result { let steps: Vec = prompts.iter().zip( phases.iter().map(String::as_str) @@ -513,7 +499,7 @@ pub async fn call_api_with_tools( temperature.unwrap_or(0.6), priority, )?; - auto.run(bail_fn, log).await + auto.run(bail_fn).await } /// Synchronous wrapper — runs on a dedicated thread with its own @@ -526,7 +512,6 @@ pub fn call_api_with_tools_sync( priority: i32, tools: &[agent_tools::Tool], bail_fn: Option<&(dyn Fn(usize) -> Result<(), String> + Sync)>, - log: &(dyn Fn(&str) + Sync), ) -> Result { std::thread::scope(|s| { s.spawn(|| { @@ -535,7 +520,7 @@ pub fn call_api_with_tools_sync( .build() .map_err(|e| format!("tokio runtime: {}", e))?; rt.block_on( - call_api_with_tools(agent, prompts, phases, temperature, priority, tools, bail_fn, log) + call_api_with_tools(agent, prompts, phases, temperature, priority, tools, bail_fn) ) }).join().unwrap() }) diff --git a/src/cli/agent.rs b/src/cli/agent.rs index f5ed7de..78fa6e8 100644 --- a/src/cli/agent.rs +++ b/src/cli/agent.rs @@ -31,7 +31,6 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option // Slow path: need the store for local execution or target resolution let mut store = store::Store::load()?; - let log = |msg: &str| println!("{}", msg); // Resolve targets: explicit --target, --query, or agent's default query let resolved_targets: Vec = if !target.is_empty() { @@ -60,7 +59,7 @@ pub fn cmd_run_agent(agent: &str, count: 
usize, target: &[String], query: Option println!("[{}] [{}/{}] {}", agent, i + 1, resolved_targets.len(), key); if i > 0 { store = store::Store::load()?; } if let Err(e) = crate::agent::oneshot::run_one_agent( - &mut store, agent, count, Some(&[key.clone()]), &log, + &mut store, agent, count, Some(&[key.clone()]), ) { println!("[{}] ERROR on {}: {}", agent, key, e); } @@ -80,7 +79,7 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option } else { // Local execution (--local, --debug, dry-run, or daemon unavailable) crate::agent::oneshot::run_one_agent( - &mut store, agent, count, None, &log, + &mut store, agent, count, None, )?; } Ok(()) @@ -400,7 +399,7 @@ fn llm_compare( let _ = model; // model selection handled by API backend config let response = crate::agent::oneshot::call_api_with_tools_sync( - "compare", &[prompt], &[], None, 10, &[], None, &|_| {})?; + "compare", &[prompt], &[], None, 10, &[], None)?; let response = response.trim().to_uppercase(); if response.contains("BETTER: B") { diff --git a/src/subconscious/audit.rs b/src/subconscious/audit.rs index d20e6d6..7e297fa 100644 --- a/src/subconscious/audit.rs +++ b/src/subconscious/audit.rs @@ -211,7 +211,7 @@ pub fn link_audit(store: &mut Store, apply: bool) -> Result let batch_results: Vec<_> = batch_data.par_iter() .map(|(batch_idx, batch_infos, prompt)| { let response = crate::agent::oneshot::call_api_with_tools_sync( - "audit", &[prompt.clone()], &[], None, 10, &[], None, &|_| {}); + "audit", &[prompt.clone()], &[], None, 10, &[], None); let completed = done.fetch_add(1, Ordering::Relaxed) + 1; eprint!("\r Batches: {}/{} done", completed, total_batches); (*batch_idx, batch_infos, response) diff --git a/src/subconscious/consolidate.rs b/src/subconscious/consolidate.rs index e2b6f72..3c4040d 100644 --- a/src/subconscious/consolidate.rs +++ b/src/subconscious/consolidate.rs @@ -74,7 +74,7 @@ pub fn consolidate_full_with_progress( *store = Store::load()?; } - match 
oneshot::run_one_agent(store, agent_type, *count, None, &|_| {}) { + match oneshot::run_one_agent(store, agent_type, *count, None) { Ok(_) => { let msg = " Done".to_string(); log_line(&mut log_buf, &msg); diff --git a/src/subconscious/daemon.rs b/src/subconscious/daemon.rs index e793cbb..9ce1138 100644 --- a/src/subconscious/daemon.rs +++ b/src/subconscious/daemon.rs @@ -119,12 +119,8 @@ fn job_targeted_agent( run_job(ctx, &job_name, || { let mut store = crate::store::Store::load()?; ctx.log_line(format!("targeting: {}", key)); - let log = |msg: &str| { - ctx.log_line(msg); - log_event(&job_name, "progress", msg); - }; crate::agent::oneshot::run_one_agent( - &mut store, &agent, 5, Some(std::slice::from_ref(&key)), &log, + &mut store, &agent, 5, Some(std::slice::from_ref(&key)), )?; ctx.log_line("done"); Ok(()) @@ -202,14 +198,10 @@ fn job_consolidation_agent( } // in_flight lock released — run LLM without holding it - let log = |msg: &str| { - ctx.log_line(msg); - log_event(&job_name, "progress", msg); - }; // Use run_one_agent_with_keys — we already selected seeds above, // no need to re-run the query. 
let result = crate::agent::oneshot::run_one_agent( - &mut store, &agent, batch, Some(&claimed_keys), &log, + &mut store, &agent, batch, Some(&claimed_keys), ).map(|_| ()); // Release all claimed keys (seeds + neighbors) @@ -238,8 +230,7 @@ fn job_rename_agent( let batch = if batch_size == 0 { 10 } else { batch_size }; ctx.log_line(format!("running rename agent (batch={})", batch)); - let log = |msg: &str| ctx.log_line(msg); - let result = crate::agent::oneshot::run_one_agent(&mut store, "rename", batch, None, &log)?; + let result = crate::agent::oneshot::run_one_agent(&mut store, "rename", batch, None)?; // Parse RENAME actions from response (rename uses its own format, not WRITE_NODE/LINK/REFINE) let mut applied = 0; diff --git a/src/subconscious/digest.rs b/src/subconscious/digest.rs index 032e4c0..b9e36ed 100644 --- a/src/subconscious/digest.rs +++ b/src/subconscious/digest.rs @@ -286,7 +286,7 @@ fn generate_digest( }; let digest = crate::agent::oneshot::call_api_with_tools_sync( &def.agent, &prompts, &phases, def.temperature, def.priority, - &tools, None, &log)?; + &tools, None)?; let key = digest_node_key(level.name, label); store.upsert_provenance(&key, &digest, "digest:write")?;