diff --git a/poc-memory/src/agents/api.rs b/poc-memory/src/agents/api.rs index 07383f4..71db94d 100644 --- a/poc-memory/src/agents/api.rs +++ b/poc-memory/src/agents/api.rs @@ -51,7 +51,7 @@ pub async fn call_api_with_tools( let max_turns = 50; for turn in 0..max_turns { - log(&format!("API turn {} ({} messages)", turn, messages.len())); + log(&format!("\n=== TURN {} ({} messages) ===\n", turn, messages.len())); let (msg, usage) = client.chat_completion_stream( &messages, @@ -101,7 +101,7 @@ pub async fn call_api_with_tools( // Execute each tool call for call in msg.tool_calls.as_ref().unwrap() { - log(&format!("tool: {}({})", + log(&format!("\nTOOL CALL: {}({})", call.function.name, &call.function.arguments)); @@ -136,7 +136,7 @@ pub async fn call_api_with_tools( tools::dispatch(&call.function.name, &args, &tracker).await }; - log(&format!("tool result: {} chars", output.text.len())); + log(&format!("TOOL RESULT ({} chars):\n{}", output.text.len(), output.text)); messages.push(Message::tool_result(&call.id, &output.text)); } @@ -153,6 +153,7 @@ pub async fn call_api_with_tools( continue; } + log(&format!("\n=== RESPONSE ===\n\n{}", text)); return Ok(text); } diff --git a/poc-memory/src/agents/knowledge.rs b/poc-memory/src/agents/knowledge.rs index a5c253a..f4fd42d 100644 --- a/poc-memory/src/agents/knowledge.rs +++ b/poc-memory/src/agents/knowledge.rs @@ -39,7 +39,7 @@ pub fn run_and_apply_with_log( agent_name: &str, batch_size: usize, llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), ) -> Result<(), String> { run_and_apply_excluded(store, agent_name, batch_size, llm_tag, log, &Default::default()) } @@ -51,7 +51,7 @@ pub fn run_and_apply_excluded( agent_name: &str, batch_size: usize, llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), exclude: &std::collections::HashSet<String>, ) -> Result<(), String> { let result = run_one_agent_excluded(store, agent_name, batch_size, llm_tag, log, false, exclude)?; @@ -71,7 +71,7 @@ pub fn
run_one_agent_with_keys( keys: &[String], count: usize, llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), debug: bool, ) -> Result<AgentResult, String> { let def = super::defs::get_def(agent_name) @@ -99,7 +99,7 @@ pub fn run_one_agent( agent_name: &str, batch_size: usize, llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), debug: bool, ) -> Result<AgentResult, String> { run_one_agent_excluded(store, agent_name, batch_size, llm_tag, log, debug, &Default::default()) @@ -111,7 +111,7 @@ pub fn run_one_agent_excluded( agent_name: &str, batch_size: usize, llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), debug: bool, exclude: &std::collections::HashSet<String>, ) -> Result<AgentResult, String> { @@ -131,7 +131,7 @@ fn run_one_agent_inner( def: &super::defs::AgentDef, agent_batch: super::prompts::AgentBatch, _llm_tag: &str, - log: &dyn Fn(&str), + log: &(dyn Fn(&str) + Sync), debug: bool, ) -> Result<AgentResult, String> { let prompt_kb = agent_batch.prompt.len() / 1024; @@ -163,25 +163,10 @@ fn run_one_agent_inner( log(&format!(" node: {}", key)); } - // Single log file: prompt then response - let log_dir = store::memory_dir().join("llm-logs").join(agent_name); - fs::create_dir_all(&log_dir).ok(); - let log_path = log_dir.join(format!("{}.txt", store::compact_timestamp())); - let prompt_section = format!("=== PROMPT ===\n\n{}\n\n=== CALLING LLM ===\n", agent_batch.prompt); - fs::write(&log_path, &prompt_section).ok(); - if debug { print!("{}", prompt_section); } - log(&format!("log: {}", log_path.display())); + log(&format!("=== PROMPT ===\n\n{}\n\n=== CALLING LLM ===", agent_batch.prompt)); - log("calling LLM"); - let output = llm::call_for_def(def, &agent_batch.prompt)?; + let output = llm::call_for_def(def, &agent_batch.prompt, log)?; - // Append response to same log file - use std::io::Write; - let response_section = format!("\n=== RESPONSE ===\n\n{}\n", output); - if let Ok(mut f) = fs::OpenOptions::new().append(true).open(&log_path) { - write!(f, "{}", response_section).ok(); - } - if debug {
print!("{}", response_section); } log(&format!("response {}KB", output.len() / 1024)); Ok(AgentResult { diff --git a/poc-memory/src/agents/llm.rs b/poc-memory/src/agents/llm.rs index e4856ea..adf3305 100644 --- a/poc-memory/src/agents/llm.rs +++ b/poc-memory/src/agents/llm.rs @@ -186,18 +186,12 @@ pub(crate) fn call_haiku(agent: &str, prompt: &str) -> Result<String, String> { /// Call a model using an agent definition's model and tool configuration. /// Uses the direct API backend when api_base_url is configured, /// otherwise falls back to claude CLI subprocess. -pub(crate) fn call_for_def(def: &super::defs::AgentDef, prompt: &str) -> Result<String, String> { - let config = crate::config::get(); - if config.api_base_url.is_some() { - super::daemon::log_verbose(&def.agent, "llm-backend", - &format!("API: {}", config.api_base_url.as_deref().unwrap_or("?"))); - let log = |msg: &str| eprintln!("[{}] {}", def.agent, msg); - super::api::call_api_with_tools_sync(&def.agent, prompt, &log) - } else { - super::daemon::log_verbose(&def.agent, "llm-backend", - &format!("claude -p (model={}, tools={})", def.model, def.tools.len())); - call_model_with_tools(&def.agent, &def.model, prompt, &def.tools) - } +pub(crate) fn call_for_def( + def: &super::defs::AgentDef, + prompt: &str, + log: &(dyn Fn(&str) + Sync), +) -> Result<String, String> { + super::api::call_api_with_tools_sync(&def.agent, prompt, log) } /// Parse a JSON response, handling markdown fences.
diff --git a/poc-memory/src/cli/agent.rs b/poc-memory/src/cli/agent.rs index e9c2111..5c0f8d2 100644 --- a/poc-memory/src/cli/agent.rs +++ b/poc-memory/src/cli/agent.rs @@ -17,12 +17,12 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option if crate::agents::daemon::send_rpc_pub("ping").is_some() { return crate::agents::daemon::rpc_run_agent(agent, count); } - eprintln!("Daemon not running — falling back to local execution"); + println!("Daemon not running — falling back to local execution"); } // Slow path: need the store for local execution or target resolution let mut store = store::Store::load()?; - let log = |msg: &str| eprintln!("[{}] {}", agent, msg); + let log = |msg: &str| println!("{}", msg); // Resolve targets: explicit --target, --query, or agent's default query let resolved_targets: Vec<String> = if !target.is_empty() { @@ -35,7 +35,7 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option return Err(format!("query returned no results: {}", q)); } let keys: Vec<String> = results.into_iter().map(|(k, _)| k).collect(); - eprintln!("[{}] query matched {} nodes", agent, keys.len()); + println!("[{}] query matched {} nodes", agent, keys.len()); keys } else { vec![] // use agent's built-in query @@ -45,15 +45,15 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option // --local or daemon unavailable: run directly if needs_local || crate::agents::daemon::send_rpc_pub("ping").is_none() { if !needs_local { - eprintln!("Daemon not running — falling back to local execution"); + println!("Daemon not running — falling back to local execution"); } for (i, key) in resolved_targets.iter().enumerate() { - eprintln!("[{}] [{}/{}] {}", agent, i + 1, resolved_targets.len(), key); + println!("[{}] [{}/{}] {}", agent, i + 1, resolved_targets.len(), key); if i > 0 { store = store::Store::load()?; } if let Err(e) = crate::agents::knowledge::run_one_agent_with_keys( &mut store, agent, &[key.clone()], count,
"test", &log, debug, ) { - eprintln!("[{}] ERROR on {}: {}", agent, key, e); + println!("[{}] ERROR on {}: {}", agent, key, e); } } return Ok(()); @@ -67,7 +67,7 @@ pub fn cmd_run_agent(agent: &str, count: usize, target: &[String], query: Option queued += 1; } } - eprintln!("[{}] queued {} tasks to daemon", agent, queued); + println!("[{}] queued {} tasks to daemon", agent, queued); } else { // Local execution (--local, --debug, dry-run, or daemon unavailable) crate::agents::knowledge::run_one_agent(