agent logging: single log file, --debug prints to stdout

Consolidate agent logging to one file per run in llm-logs/{agent}/.
Prompt written before LLM call, response appended after. --debug
additionally prints the same content to stdout.

Remove duplicate eprintln! calls and AgentResult.prompt field.
Kill experience_mine and fact_mine job functions from daemon —
observation.agent handles all transcript mining.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Kent Overstreet 2026-03-16 20:44:09 -04:00
parent d7436b8b9c
commit 03310dafa4
3 changed files with 27 additions and 60 deletions

View file

@@ -548,7 +548,6 @@ pub fn resolve_naming(
/// Result of running a single agent through the common pipeline.
pub struct AgentResult {
pub prompt: String,
pub output: String,
pub actions: Vec<Action>,
pub no_ops: usize,
@@ -616,7 +615,7 @@ pub fn run_and_apply_with_log(
llm_tag: &str,
log: &dyn Fn(&str),
) -> Result<(usize, usize), String> {
let result = run_one_agent(store, agent_name, batch_size, llm_tag, log)?;
let result = run_one_agent(store, agent_name, batch_size, llm_tag, log, false)?;
let actions = resolve_action_names(store, result.actions);
let ts = store::compact_timestamp();
let mut applied = 0;
@@ -653,6 +652,7 @@ pub fn run_one_agent(
batch_size: usize,
llm_tag: &str,
log: &dyn Fn(&str),
debug: bool,
) -> Result<AgentResult, String> {
let def = super::defs::get_def(agent_name)
.ok_or_else(|| format!("no .agent file for {}", agent_name))?;
@@ -669,19 +669,26 @@ pub fn run_one_agent(
log(&format!(" node: {}", key));
}
// Single log file: prompt then response
let log_dir = store::memory_dir().join("llm-logs").join(agent_name);
fs::create_dir_all(&log_dir).ok();
let log_path = log_dir.join(format!("{}.txt", store::compact_timestamp()));
let prompt_section = format!("=== PROMPT ===\n\n{}\n\n=== CALLING LLM ===\n", agent_batch.prompt);
fs::write(&log_path, &prompt_section).ok();
if debug { print!("{}", prompt_section); }
log(&format!("log: {}", log_path.display()));
log("calling LLM");
let output = llm::call_for_def(&def, &agent_batch.prompt)?;
let output_kb = output.len() / 1024;
log(&format!("response {}KB", output_kb));
// Log raw output to file, not the graph
let ts = store::compact_timestamp();
let log_dir = store::memory_dir().join("llm-logs").join(agent_name);
fs::create_dir_all(&log_dir).ok();
let log_path = log_dir.join(format!("{}.txt", ts));
fs::write(&log_path, &output).ok();
log(&format!("logged to {}", log_path.display()));
// Append response to same log file
use std::io::Write;
let response_section = format!("\n=== RESPONSE ===\n\n{}\n", output);
if let Ok(mut f) = fs::OpenOptions::new().append(true).open(&log_path) {
write!(f, "{}", response_section).ok();
}
if debug { print!("{}", response_section); }
log(&format!("response {}KB", output.len() / 1024));
let actions = parse_all_actions(&output);
let no_ops = count_no_ops(&output);
@@ -694,7 +701,6 @@ pub fn run_one_agent(
}
Ok(AgentResult {
prompt: agent_batch.prompt,
output,
actions,
no_ops,
@@ -983,7 +989,7 @@ fn run_cycle(
for agent_name in &agent_names {
eprintln!("\n --- {} (n={}) ---", agent_name, config.batch_size);
let result = match run_one_agent(&mut store, agent_name, config.batch_size, "knowledge", &|msg| eprintln!(" {}", msg)) {
let result = match run_one_agent(&mut store, agent_name, config.batch_size, "knowledge", &|msg| eprintln!(" {}", msg), false) {
Ok(r) => r,
Err(e) => {
eprintln!(" ERROR: {}", e);