llm: full per-agent usage logging with prompts and responses
Log every model call to ~/.claude/memory/llm-logs/YYYY-MM-DD.md with the full prompt, response, agent type, model, duration, and status. One file per day, markdown-formatted for easy reading. Agent types: fact-mine, experience-mine, consolidate, knowledge, digest, enrich, audit. This gives visibility into what each agent is doing and makes it easy to decide whether to adjust prompts or run frequency.
This commit is contained in:
parent
e33fd4ffbc
commit
82b33c449c
7 changed files with 51 additions and 17 deletions
|
|
@ -487,7 +487,7 @@ pub fn run_observation_extractor(store: &Store, graph: &Graph, batch_size: usize
|
|||
.replace("{{TOPOLOGY}}", &topology)
|
||||
.replace("{{CONVERSATIONS}}", &format!("### Session {}\n\n{}", session_id, text));
|
||||
|
||||
let response = llm::call_sonnet(&prompt, 600)?;
|
||||
let response = llm::call_sonnet("knowledge", &prompt)?;
|
||||
results.push(format!("## Session: {}\n\n{}", session_id, response));
|
||||
}
|
||||
Ok(results.join("\n\n---\n\n"))
|
||||
|
|
@ -569,7 +569,7 @@ pub fn run_extractor(store: &Store, graph: &Graph, batch_size: usize) -> Result<
|
|||
.replace("{{TOPOLOGY}}", &topology)
|
||||
.replace("{{NODES}}", &node_texts.join("\n\n"));
|
||||
|
||||
let response = llm::call_sonnet(&prompt, 600)?;
|
||||
let response = llm::call_sonnet("knowledge", &prompt)?;
|
||||
results.push(format!("## Cluster {}: {}...\n\n{}", i + 1,
|
||||
cluster.iter().take(3).cloned().collect::<Vec<_>>().join(", "), response));
|
||||
}
|
||||
|
|
@ -643,7 +643,7 @@ pub fn run_connector(store: &Store, graph: &Graph, batch_size: usize) -> Result<
|
|||
.replace("{{NODES_A}}", &nodes_a.join("\n\n"))
|
||||
.replace("{{NODES_B}}", &nodes_b.join("\n\n"));
|
||||
|
||||
let response = llm::call_sonnet(&prompt, 600)?;
|
||||
let response = llm::call_sonnet("knowledge", &prompt)?;
|
||||
results.push(format!("## Pair {}: {} ↔ {}\n\n{}",
|
||||
i + 1, group_a.join(", "), group_b.join(", "), response));
|
||||
}
|
||||
|
|
@ -677,7 +677,7 @@ pub fn run_challenger(store: &Store, graph: &Graph, batch_size: usize) -> Result
|
|||
.replace("{{NODE_KEY}}", key)
|
||||
.replace("{{NODE_CONTENT}}", content);
|
||||
|
||||
let response = llm::call_sonnet(&prompt, 600)?;
|
||||
let response = llm::call_sonnet("knowledge", &prompt)?;
|
||||
results.push(format!("## Challenge: {}\n\n{}", key, response));
|
||||
}
|
||||
Ok(results.join("\n\n---\n\n"))
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue